Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 15
-rw-r--r--  net/sunrpc/rpc_pipe.c             |  9
-rw-r--r--  net/sunrpc/sched.c                | 26
-rw-r--r--  net/sunrpc/sunrpc_syms.c          |  6
-rw-r--r--  net/sunrpc/svc.c                  |  2
-rw-r--r--  net/sunrpc/svcauth.c              |  2
-rw-r--r--  net/sunrpc/svcauth_unix.c         | 10
-rw-r--r--  net/sunrpc/svcsock.c              | 34
8 files changed, 65 insertions, 39 deletions
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index db298b501c81..099a983797da 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -924,6 +924,7 @@ static inline int
 gss_write_init_verf(struct svc_rqst *rqstp, struct rsi *rsip)
 {
 	struct rsc *rsci;
+	int rc;
 
 	if (rsip->major_status != GSS_S_COMPLETE)
 		return gss_write_null_verf(rqstp);
@@ -932,7 +933,9 @@ gss_write_init_verf(struct svc_rqst *rqstp, struct rsi *rsip)
 		rsip->major_status = GSS_S_NO_CONTEXT;
 		return gss_write_null_verf(rqstp);
 	}
-	return gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
+	rc = gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
+	cache_put(&rsci->h, &rsc_cache);
+	return rc;
 }
 
 /*
@@ -1089,6 +1092,8 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 		}
 		goto complete;
 	case RPC_GSS_PROC_DESTROY:
+		if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
+			goto auth_err;
 		set_bit(CACHE_NEGATIVE, &rsci->h.flags);
 		if (resv->iov_len + 4 > PAGE_SIZE)
 			goto drop;
@@ -1196,13 +1201,7 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
 	if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
 				integ_len))
 		BUG();
-	if (resbuf->page_len == 0
-			&& resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
-			< PAGE_SIZE) {
-		BUG_ON(resbuf->tail[0].iov_len);
-		/* Use head for everything */
-		resv = &resbuf->head[0];
-	} else if (resbuf->tail[0].iov_base == NULL) {
+	if (resbuf->tail[0].iov_base == NULL) {
 		if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
 			goto out_err;
 		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index ad39b47e05bc..a2f1893bde53 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -845,6 +845,8 @@ init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 
 int register_rpc_pipefs(void)
 {
+	int err;
+
 	rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
 				sizeof(struct rpc_inode),
 				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
@@ -852,7 +854,12 @@ int register_rpc_pipefs(void)
 				init_once, NULL);
 	if (!rpc_inode_cachep)
 		return -ENOMEM;
-	register_filesystem(&rpc_pipe_fs_type);
+	err = register_filesystem(&rpc_pipe_fs_type);
+	if (err) {
+		kmem_cache_destroy(rpc_inode_cachep);
+		return err;
+	}
+
 	return 0;
 }
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 99014516b73c..b011eb625e49 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -736,6 +736,11 @@ static void rpc_async_schedule(struct work_struct *work)
 	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 }
 
+struct rpc_buffer {
+	size_t	len;
+	char	data[];
+};
+
 /**
  * rpc_malloc - allocate an RPC buffer
  * @task: RPC task that will use this buffer
@@ -754,18 +759,22 @@ static void rpc_async_schedule(struct work_struct *work)
  */
 void *rpc_malloc(struct rpc_task *task, size_t size)
 {
-	size_t *buf;
+	struct rpc_buffer *buf;
 	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
 
-	size += sizeof(size_t);
+	size += sizeof(struct rpc_buffer);
 	if (size <= RPC_BUFFER_MAXSIZE)
 		buf = mempool_alloc(rpc_buffer_mempool, gfp);
 	else
 		buf = kmalloc(size, gfp);
-	*buf = size;
+
+	if (!buf)
+		return NULL;
+
+	buf->len = size;
 	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
 			task->tk_pid, size, buf);
-	return ++buf;
+	return &buf->data;
 }
 
 /**
@@ -775,15 +784,18 @@ void *rpc_malloc(struct rpc_task *task, size_t size)
  */
 void rpc_free(void *buffer)
 {
-	size_t size, *buf = buffer;
+	size_t size;
+	struct rpc_buffer *buf;
 
 	if (!buffer)
 		return;
-	size = *buf;
-	buf--;
+
+	buf = container_of(buffer, struct rpc_buffer, data);
+	size = buf->len;
 
 	dprintk("RPC: freeing buffer of size %zu at %p\n",
 			size, buf);
+
 	if (size <= RPC_BUFFER_MAXSIZE)
 		mempool_free(buf, rpc_buffer_mempool);
 	else
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 43ecf62f12ef..0d35bc796d00 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -146,9 +146,11 @@ init_sunrpc(void)
 	int err = register_rpc_pipefs();
 	if (err)
 		goto out;
-	err = rpc_init_mempool() != 0;
-	if (err)
+	err = rpc_init_mempool();
+	if (err) {
+		unregister_rpc_pipefs();
 		goto out;
+	}
 #ifdef RPC_DEBUG
 	rpc_register_sysctl();
 #endif
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b7503c103ae8..e673ef993904 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -907,7 +907,7 @@ svc_process(struct svc_rqst *rqstp)
 		 * better idea of reply size
 		 */
 		if (procp->pc_xdrressize)
-			svc_reserve(rqstp, procp->pc_xdrressize<<2);
+			svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
 
 		/* Call the function that processes the request. */
 		if (!versp->vs_dispatch) {
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index f5c3808bf85a..af7c5f05c6e1 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -64,7 +64,7 @@ int svc_set_client(struct svc_rqst *rqstp)
 }
 
 /* A request, which was authenticated, has now executed.
- * Time to finalise the the credentials and verifier
+ * Time to finalise the credentials and verifier
  * and release and resources
  */
 int svc_authorise(struct svc_rqst *rqstp)
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 2bd23ea2aa8b..07dcd20cbee4 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -385,7 +385,7 @@ ip_map_cached_get(struct svc_rqst *rqstp)
 {
 	struct ip_map *ipm;
 	struct svc_sock *svsk = rqstp->rq_sock;
-	spin_lock_bh(&svsk->sk_defer_lock);
+	spin_lock(&svsk->sk_lock);
 	ipm = svsk->sk_info_authunix;
 	if (ipm != NULL) {
 		if (!cache_valid(&ipm->h)) {
@@ -395,13 +395,13 @@ ip_map_cached_get(struct svc_rqst *rqstp)
 			 * same IP address.
 			 */
 			svsk->sk_info_authunix = NULL;
-			spin_unlock_bh(&svsk->sk_defer_lock);
+			spin_unlock(&svsk->sk_lock);
 			cache_put(&ipm->h, &ip_map_cache);
 			return NULL;
 		}
 		cache_get(&ipm->h);
 	}
-	spin_unlock_bh(&svsk->sk_defer_lock);
+	spin_unlock(&svsk->sk_lock);
 	return ipm;
 }
 
@@ -410,14 +410,14 @@ ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
 {
 	struct svc_sock *svsk = rqstp->rq_sock;
 
-	spin_lock_bh(&svsk->sk_defer_lock);
+	spin_lock(&svsk->sk_lock);
 	if (svsk->sk_sock->type == SOCK_STREAM &&
 	    svsk->sk_info_authunix == NULL) {
 		/* newly cached, keep the reference */
 		svsk->sk_info_authunix = ipm;
 		ipm = NULL;
 	}
-	spin_unlock_bh(&svsk->sk_defer_lock);
+	spin_unlock(&svsk->sk_lock);
 	if (ipm)
 		cache_put(&ipm->h, &ip_map_cache);
 }
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 22f61aee4824..5baf48de2558 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -53,7 +53,8 @@
  *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
  *	when both need to be taken (rare), svc_serv->sv_lock is first.
  *	BKL protects svc_serv->sv_nrthread.
- *	svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list
+ *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
+ *	and the ->sk_info_authunix cache.
  *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
  *
  *	Some flags can be set to certain values at any time
@@ -787,15 +788,20 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 	}
 
 	clear_bit(SK_DATA, &svsk->sk_flags);
-	while ((err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
-				     0, 0, MSG_PEEK | MSG_DONTWAIT)) < 0 ||
-	       (skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
-		if (err == -EAGAIN) {
-			svc_sock_received(svsk);
-			return err;
+	skb = NULL;
+	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
+			     0, 0, MSG_PEEK | MSG_DONTWAIT);
+	if (err >= 0)
+		skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err);
+
+	if (skb == NULL) {
+		if (err != -EAGAIN) {
+			/* possibly an icmp error */
+			dprintk("svc: recvfrom returned error %d\n", -err);
+			set_bit(SK_DATA, &svsk->sk_flags);
 		}
-		/* possibly an icmp error */
-		dprintk("svc: recvfrom returned error %d\n", -err);
+		svc_sock_received(svsk);
+		return -EAGAIN;
 	}
 	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
 	if (skb->tstamp.tv64 == 0) {
@@ -1633,7 +1639,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 	svsk->sk_server = serv;
 	atomic_set(&svsk->sk_inuse, 1);
 	svsk->sk_lastrecv = get_seconds();
-	spin_lock_init(&svsk->sk_defer_lock);
+	spin_lock_init(&svsk->sk_lock);
 	INIT_LIST_HEAD(&svsk->sk_deferred);
 	INIT_LIST_HEAD(&svsk->sk_ready);
 	mutex_init(&svsk->sk_mutex);
@@ -1857,9 +1863,9 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 	dprintk("revisit queued\n");
 	svsk = dr->svsk;
 	dr->svsk = NULL;
-	spin_lock_bh(&svsk->sk_defer_lock);
+	spin_lock(&svsk->sk_lock);
 	list_add(&dr->handle.recent, &svsk->sk_deferred);
-	spin_unlock_bh(&svsk->sk_defer_lock);
+	spin_unlock(&svsk->sk_lock);
 	set_bit(SK_DEFERRED, &svsk->sk_flags);
 	svc_sock_enqueue(svsk);
 	svc_sock_put(svsk);
@@ -1925,7 +1931,7 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
 
 	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
 		return NULL;
-	spin_lock_bh(&svsk->sk_defer_lock);
+	spin_lock(&svsk->sk_lock);
 	clear_bit(SK_DEFERRED, &svsk->sk_flags);
 	if (!list_empty(&svsk->sk_deferred)) {
 		dr = list_entry(svsk->sk_deferred.next,
@@ -1934,6 +1940,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
 		list_del_init(&dr->handle.recent);
 		set_bit(SK_DEFERRED, &svsk->sk_flags);
 	}
-	spin_unlock_bh(&svsk->sk_defer_lock);
+	spin_unlock(&svsk->sk_lock);
 	return dr;
 }
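
The rpc_malloc()/rpc_free() hunks above replace raw pointer arithmetic on a hidden size_t prefix with a named struct rpc_buffer header recovered via container_of(), which is what makes the added allocation-failure check possible. Below is a minimal userspace sketch of the same header-prefixed buffer pattern; it is not part of the patch, the names buf_alloc/buf_free/struct buffer are illustrative, plain malloc()/free() stand in for the kernel mempool, and container_of() is defined locally.

/*
 * Sketch of the header-prefixed buffer pattern used by rpc_malloc()
 * and rpc_free() above (userspace, illustrative names).
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct buffer {
        size_t len;     /* total allocation size, header included */
        char data[];    /* region handed out to the caller */
};

static void *buf_alloc(size_t size)
{
        struct buffer *buf;

        size += sizeof(struct buffer);  /* room for the hidden header */
        buf = malloc(size);
        if (!buf)
                return NULL;            /* the check the patch adds to rpc_malloc() */
        buf->len = size;
        return buf->data;               /* caller never sees the header */
}

static void buf_free(void *buffer)
{
        struct buffer *buf;

        if (!buffer)
                return;
        buf = container_of(buffer, struct buffer, data);
        printf("freeing buffer of size %zu\n", buf->len);
        free(buf);
}

int main(void)
{
        char *p = buf_alloc(128);

        buf_free(p);
        return 0;
}

Keeping the length in a named struct makes both the header arithmetic and the NULL check explicit, instead of relying on "++buf"/"buf--" against an untyped size prefix.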