author		Chuck Lever <chuck.lever@oracle.com>	2020-04-18 21:38:19 +0300
committer	Chuck Lever <chuck.lever@oracle.com>	2020-04-27 17:58:30 +0300
commit		a7e429a6fa6d612d1dacde96c885dc1bb4a9f400 (patch)
tree		6806da98b5a7441ae94f7e70da7e34d5c2d299f7
parent		31c9590ae468478fe47dc0f5f0d3562b2f69450e (diff)
SUNRPC: Fix GSS privacy computation of auth->au_ralign
When the au_ralign field was added to gss_unwrap_resp_priv, the wrong calculation was used. Setting au_rslack == au_ralign is probably correct for kerberos_v1 privacy, but kerberos_v2 privacy adds additional GSS data after the clear text RPC message. au_ralign needs to be smaller than au_rslack in that fairly common case.

When xdr_buf_trim() is restored to gss_unwrap_kerberos_v2(), it does exactly what I feared it would: it trims off part of the clear text RPC message. However, that's because rpc_prepare_reply_pages() does not set up the rq_rcv_buf's tail correctly because au_ralign is too large.

Fixing the au_ralign computation also corrects the alignment of rq_rcv_buf->pages so that the client does not have to shift reply data payloads after they are received.

Fixes: 35e77d21baa0 ("SUNRPC: Add rpc_auth::au_ralign field")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
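To see why the distinction matters, here is a stand-alone sketch of the arithmetic (illustrative only, not part of the patch; au_verfsize and the per-message GSS overhead are hypothetical example values). The old code derived au_ralign from the receive buffer's total shrinkage during unwrap, so for kerberos_v2 it also absorbed the GSS data that trails the clear-text message; the fixed code charges only the leading GSS data to the alignment.

/*
 * Illustrative sketch -- not part of the patch.  XDR_QUADLEN mirrors
 * the kernel macro; au_verfsize, "leading", and "trailing" are
 * hypothetical example values.
 */
#include <stdio.h>

#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

int main(void)
{
	unsigned int au_verfsize = 12;	/* verifier size in XDR words (hypothetical) */
	unsigned int leading = 48;	/* GSS bytes ahead of the clear-text message
					 * (v2: token header + headskip) */
	unsigned int trailing = 28;	/* GSS bytes trimmed off behind it
					 * (v2: ec + token header + checksum) */

	/* Old: au_ralign tracked the buffer's total shrinkage, leading and trailing alike */
	unsigned int old_ralign = au_verfsize + 2 + XDR_QUADLEN(leading + trailing);

	/* New: only the leading GSS data is charged to the alignment;
	 * the "+ 2" is the fixed XDR overhead of the privacy wrapper, as in the patch */
	unsigned int new_ralign = au_verfsize + 2 + XDR_QUADLEN(leading);
	unsigned int new_rslack = au_verfsize + 2 + XDR_QUADLEN(leading) + XDR_QUADLEN(trailing);

	printf("old au_ralign=%u, new au_ralign=%u, new au_rslack=%u (XDR words)\n",
	       old_ralign, new_ralign, new_rslack);
	return 0;
}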
-rw-r--r--	include/linux/sunrpc/gss_api.h		1
-rw-r--r--	net/sunrpc/auth_gss/auth_gss.c		8
-rw-r--r--	net/sunrpc/auth_gss/gss_krb5_wrap.c	19
3 files changed, 19 insertions, 9 deletions
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index e9a79518d652..bc07e51f20d1 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -21,6 +21,7 @@
struct gss_ctx {
struct gss_api_mech *mech_type;
void *internal_ctx_id;
+ unsigned int slack, align;
};
#define GSS_C_NO_BUFFER ((struct xdr_netobj) 0)
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 7885f37e3688..ac5cac0dd24b 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -2032,7 +2032,6 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
struct kvec *head = rqstp->rq_rcv_buf.head;
struct rpc_auth *auth = cred->cr_auth;
- unsigned int savedlen = rcv_buf->len;
u32 offset, opaque_len, maj_stat;
__be32 *p;
@@ -2059,10 +2058,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
*/
xdr_init_decode(xdr, rcv_buf, p, rqstp);
- auth->au_rslack = auth->au_verfsize + 2 +
- XDR_QUADLEN(savedlen - rcv_buf->len);
- auth->au_ralign = auth->au_verfsize + 2 +
- XDR_QUADLEN(savedlen - rcv_buf->len);
+ auth->au_rslack = auth->au_verfsize + 2 + ctx->gc_gss_ctx->slack;
+ auth->au_ralign = auth->au_verfsize + 2 + ctx->gc_gss_ctx->align;
+
return 0;
unwrap_failed:
trace_rpcgss_unwrap_failed(task);
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index c7589e35d5d9..4905652e7567 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -262,7 +262,8 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
- struct xdr_buf *buf)
+ struct xdr_buf *buf, unsigned int *slack,
+ unsigned int *align)
{
int signalg;
int sealalg;
@@ -280,6 +281,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
u32 conflen = kctx->gk5e->conflen;
int crypt_offset;
u8 *cksumkey;
+ unsigned int saved_len = buf->len;
dprintk("RPC: gss_unwrap_kerberos\n");
@@ -383,6 +385,10 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
if (gss_krb5_remove_padding(buf, blocksize))
return GSS_S_DEFECTIVE_TOKEN;
+ /* slack must include room for krb5 padding */
+ *slack = XDR_QUADLEN(saved_len - buf->len);
+ /* The GSS blob always precedes the RPC message payload */
+ *align = *slack;
return GSS_S_COMPLETE;
}
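For the v1 path, the two recorded values coincide: the slack is however many XDR words the buffer shrank by during unwrap, krb5 padding included, and the alignment is set to the same value. A minimal sketch of that arithmetic (illustrative only, not part of the patch; the buffer lengths are hypothetical):

/* Illustrative sketch -- not part of the patch; lengths are hypothetical. */
#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

static void krb5_v1_example(unsigned int *slack, unsigned int *align)
{
	unsigned int saved_len = 1024;	/* buf->len before unwrap */
	unsigned int new_len = 980;	/* after stripping the GSS data and krb5 padding */

	*slack = XDR_QUADLEN(saved_len - new_len);	/* 11 XDR words of shrinkage */
	*align = *slack;				/* v1: au_ralign ends up equal to au_rslack */
}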
@@ -489,7 +495,8 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
- struct xdr_buf *buf)
+ struct xdr_buf *buf, unsigned int *slack,
+ unsigned int *align)
{
time64_t now;
u8 *ptr;
@@ -583,6 +590,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
/* Trim off the trailing "extra count" and checksum blob */
buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip;
+ *align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
+ *slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
return GSS_S_COMPLETE;
}
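For the v2 path the two values now differ, which is the point of the fix: the alignment covers only the token header and headskip ahead of the clear-text message, while the slack also absorbs the ec bytes, the trailing copy of the token header, and the checksum trimmed off just above. A small sketch with hypothetical sizes:

/* Illustrative sketch -- not part of the patch; headskip, tailskip and
 * ec are hypothetical per-message sizes. */
#define XDR_QUADLEN(l)		(((l) + 3) >> 2)
#define GSS_KRB5_TOK_HDR_LEN	16

static void krb5_v2_example(unsigned int *slack, unsigned int *align)
{
	unsigned int headskip = 32;	/* decrypted bytes ahead of the payload */
	unsigned int tailskip = 12;	/* checksum bytes behind the payload */
	unsigned int ec = 0;		/* RFC 4121 "extra count" padding */

	*align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);			/* 12 words */
	*slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);	/* 12 + 7 = 19 words */
}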
@@ -617,9 +626,11 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset,
case ENCTYPE_DES_CBC_RAW:
case ENCTYPE_DES3_CBC_RAW:
case ENCTYPE_ARCFOUR_HMAC:
- return gss_unwrap_kerberos_v1(kctx, offset, len, buf);
+ return gss_unwrap_kerberos_v1(kctx, offset, len, buf,
+ &gctx->slack, &gctx->align);
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
- return gss_unwrap_kerberos_v2(kctx, offset, len, buf);
+ return gss_unwrap_kerberos_v2(kctx, offset, len, buf,
+ &gctx->slack, &gctx->align);
}
}