diff options
author | Jeff Layton <jlayton@primarydata.com> | 2014-11-21 14:19:29 -0500 |
---|---|---|
committer | J. Bruce Fields <bfields@redhat.com> | 2014-12-09 11:22:22 -0500 |
commit | 403c7b44441d60aba7f8a134c31279ffa60ea769 (patch) | |
tree | 97005025d0e50119a720f8a29672d3b695ff7886 | |
parent | 812443865c5fc255363d4a684a62c086af1addca (diff) | |
download | blackbird-op-linux-403c7b44441d60aba7f8a134c31279ffa60ea769.tar.gz blackbird-op-linux-403c7b44441d60aba7f8a134c31279ffa60ea769.zip |
sunrpc: fix potential races in pool_stats collection
In a later patch, we'll be removing some spinlocking around the socket
and thread queueing code in order to fix some contention problems. At
that point, the stats counters will no longer be protected by the
sp_lock.
Change the counters to atomic_long_t fields, except for the
"sockets_queued" counter which will still be manipulated under a
spinlock.
Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Tested-by: Chris Worley <chris.worley@primarydata.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
 include/linux/sunrpc/svc.h |  6 +++---
 net/sunrpc/svc_xprt.c      | 12 ++++++------
 2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 7f80a99c59e4..513957eba0a5 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -26,10 +26,10 @@ typedef int (*svc_thread_fn)(void *);

 /* statistics for svc_pool structures */
 struct svc_pool_stats {
-	unsigned long	packets;
+	atomic_long_t	packets;
 	unsigned long	sockets_queued;
-	unsigned long	threads_woken;
-	unsigned long	threads_timedout;
+	atomic_long_t	threads_woken;
+	atomic_long_t	threads_timedout;
 };

 /*
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b2676e597fc4..579ff2249562 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -362,7 +362,7 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

 	spin_lock_bh(&pool->sp_lock);
-	pool->sp_stats.packets++;
+	atomic_long_inc(&pool->sp_stats.packets);

 	if (!list_empty(&pool->sp_threads)) {
 		rqstp = list_entry(pool->sp_threads.next,
@@ -383,7 +383,7 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 		svc_xprt_get(xprt);
 		wake_up_process(rqstp->rq_task);
 		rqstp->rq_xprt = xprt;
-		pool->sp_stats.threads_woken++;
+		atomic_long_inc(&pool->sp_stats.threads_woken);
 	} else {
 		dprintk("svc: transport %p put into queue\n", xprt);
 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
@@ -669,7 +669,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)

 		spin_lock_bh(&pool->sp_lock);
 		if (!time_left)
-			pool->sp_stats.threads_timedout++;
+			atomic_long_inc(&pool->sp_stats.threads_timedout);

 		xprt = rqstp->rq_xprt;
 		if (!xprt) {
@@ -1306,10 +1306,10 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)

 	seq_printf(m, "%u %lu %lu %lu %lu\n",
 		pool->sp_id,
-		pool->sp_stats.packets,
+		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
 		pool->sp_stats.sockets_queued,
-		pool->sp_stats.threads_woken,
-		pool->sp_stats.threads_timedout);
+		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
+		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));

 	return 0;
 }