author:    Chuck Lever <chuck.lever@oracle.com>    2017-06-23 17:17:52 -0400
committer: J. Bruce Fields <bfields@redhat.com>    2017-06-28 14:21:44 -0400
commit:    3c22f326074d2306041b0c5c9df516464349564d
tree:      d8a0a6176ff16fe85534b2a67e9233bcef2b2bd7 /net
parent:    e77340e00300df9b6591d686f186eea60c67206f
svcrdma: Improve Write chunk sanity checking
Identify malformed transport headers and unsupported chunk
combinations as early as possible.
- Reject RPC-over-RDMA messages that contain more than one Write
chunk, since this implementation does not support more than one per
message.
- Ensure that segment lengths do not exceed the largest supported payload.
- Ensure that a chunk's segment count cannot run past the end of the transport header.
With a 1KB inline threshold, the largest number of Write segments
that can be conveyed is about 60 (for an RDMA_NOMSG Reply message).
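
For context, each segment in a Write chunk is XDR-encoded as a 4-byte handle, a 4-byte length, and an 8-byte offset, so one segment costs 16 bytes of header space. The back-of-the-envelope sketch below (user-space C, not part of the patch; the fixed-overhead figure is an assumption rather than a value taken from the kernel sources) shows where the figure of roughly 60 comes from:

#include <stdio.h>

/* Back-of-the-envelope only: an RDMA segment is XDR-encoded as
 * handle (4 bytes) + length (4 bytes) + 64-bit offset (8 bytes).
 */
#define SEGMENT_XDR_BYTES	16
#define INLINE_THRESHOLD	1024

int main(void)
{
	/* Assumed fixed overhead: transport header words plus the
	 * chunk-list discriminators and the segment count.
	 */
	unsigned int overhead = 8 * 4;
	unsigned int room = INLINE_THRESHOLD - overhead;

	printf("roughly %u Write segments fit in a %u-byte header\n",
	       room / SEGMENT_XDR_BYTES, INLINE_THRESHOLD);
	return 0;
}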
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_recvfrom.c	| 52
1 file changed, 47 insertions(+), 5 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 885ad9503ee0..cf8be18f297a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -117,6 +117,11 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
 	rqstp->rq_arg.tail[0].iov_len = 0;
 }
 
+/* This accommodates the largest possible Write chunk,
+ * in one segment.
+ */
+#define MAX_BYTES_WRITE_SEG	((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))
+
 /* This accommodates the largest possible Position-Zero
  * Read chunk or Reply chunk, in one segment.
  */
@@ -162,15 +167,52 @@ static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
 	return p;
 }
 
-static __be32 *xdr_check_write_list(__be32 *p, __be32 *end)
+/* The segment count is limited to how many segments can
+ * fit in the transport header without overflowing the
+ * buffer. That's about 60 Write segments for a 1KB inline
+ * threshold.
+ */
+static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
+				     u32 maxlen)
 {
-	__be32 *next;
+	u32 i, segcount;
+
+	segcount = be32_to_cpup(p++);
+	for (i = 0; i < segcount; i++) {
+		p++;	/* handle */
+		if (be32_to_cpup(p++) > maxlen)
+			return NULL;
+		p += 2;	/* offset */
+
+		if (p > end)
+			return NULL;
+	}
+
+	return p;
+}
+
+/* Sanity check the Write list.
+ *
+ * Implementation limits:
+ * - This implementation supports only one Write chunk.
+ *
+ * Sanity checks:
+ * - Write list does not overflow buffer.
+ * - Segment size limited by largest NFS data payload.
+ *
+ * Returns pointer to the following Reply chunk.
+ */
+static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
+{
+	u32 chcount;
 
+	chcount = 0;
 	while (*p++ != xdr_zero) {
-		next = p + 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;
-		if (next > end)
+		p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
+		if (!p)
+			return NULL;
+		if (chcount++ > 1)
 			return NULL;
-		p = next;
 	}
 	return p;
 }
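
To make the new checking logic easier to follow outside the kernel, here is a minimal user-space sketch of the same walk. The __be32 typedef, the be32_to_cpup() helper, xdr_zero, and the MAX_SEG_LEN cap are stand-ins for the kernel definitions, and the buffer built in main() is a hypothetical header fragment: one Write chunk with a single 4KB segment followed by the list terminator. It illustrates the checking technique, not the kernel implementation itself.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t __be32;

#define xdr_zero	((__be32)0)
#define MAX_SEG_LEN	(256 * 1024)	/* stand-in for MAX_BYTES_WRITE_SEG */

static uint32_t be32_to_cpup(const __be32 *p)
{
	return ntohl(*p);
}

/* Walk one Write chunk: reject oversized segments and chunks whose
 * segment array runs past the end of the header buffer.
 */
static __be32 *check_write_chunk(__be32 *p, const __be32 *end, uint32_t maxlen)
{
	uint32_t i, segcount = be32_to_cpup(p++);

	for (i = 0; i < segcount; i++) {
		p++;				/* handle */
		if (be32_to_cpup(p++) > maxlen)	/* length */
			return NULL;
		p += 2;				/* 64-bit offset */
		if (p > end)
			return NULL;
	}
	return p;
}

/* Walk the Write list: validate each chunk and cap the number of
 * chunks, as the patch does with chcount.
 */
static __be32 *check_write_list(__be32 *p, const __be32 *end)
{
	uint32_t chcount = 0;

	while (*p++ != xdr_zero) {
		p = check_write_chunk(p, end, MAX_SEG_LEN);
		if (!p || chcount++ > 1)
			return NULL;
	}
	return p;
}

int main(void)
{
	__be32 buf[7];

	buf[0] = htonl(1);	/* discriminator: a chunk follows */
	buf[1] = htonl(1);	/* segment count */
	buf[2] = htonl(0xabc);	/* handle */
	buf[3] = htonl(4096);	/* length */
	buf[4] = htonl(0);	/* offset, high 32 bits */
	buf[5] = htonl(0);	/* offset, low 32 bits */
	buf[6] = xdr_zero;	/* end of Write list */

	printf("%s\n", check_write_list(buf, buf + 7) ? "accepted" : "rejected");
	return 0;
}

Built as a standalone program, a header fragment carrying too many Write chunks, an oversized segment length, or a segment array that runs past end makes check_write_list() return NULL, which is the early rejection this patch adds.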