author		Jeff Layton <jlayton@redhat.com>	2008-08-13 22:03:27 -0400
committer	J. Bruce Fields <bfields@citi.umich.edu>	2008-09-29 17:56:59 -0400
commit		54a66e548079f12a6f54c3cae96812a9ed9b54ae
tree		94d3bdeb77da374a15d01ba42fd8adcc677fe4d6 /fs/nfsd
parent		e31a1b662f40fd460e982ef87582c66c51596cd0
knfsd: allocate readahead cache in individual chunks
I had a report from someone building a large NFS server that they were unable to start more than 585 nfsd threads. It was reported against an older kernel using the slab allocator, and I tracked it down to the large allocation in nfsd_racache_init failing.

It appears that the slub allocator handles large allocations better, but large contiguous allocations can often be problematic. There doesn't seem to be any reason that the racache has to be allocated as a single large chunk. This patch breaks this up so that the racache is built up from separate allocations.

(Thanks also to Takashi Iwai for a bugfix.)

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Cc: Takashi Iwai <tiwai@suse.de>
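The shape of the fix is easy to see outside the kernel. Below is a minimal, self-contained userspace C sketch of the same pattern (the names struct entry, NBUCKETS, cache_init, and cache_shutdown are illustrative, not from nfsd): each element is allocated on its own and threaded onto a per-bucket singly linked list, so the largest contiguous request is one element rather than cache_size elements, and a mid-build allocation failure unwinds through the same shutdown path.

	/*
	 * Userspace sketch of per-chunk allocation into hash-bucket
	 * lists; mirrors the structure of the patch, not its code.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define NBUCKETS 16

	struct entry {
		struct entry *next;
		/* payload fields would go here */
	};

	static struct entry *buckets[NBUCKETS];

	/* Free every individually allocated entry, bucket by bucket. */
	static void cache_shutdown(void)
	{
		for (int i = 0; i < NBUCKETS; i++) {
			struct entry *e = buckets[i];
			while (e) {
				struct entry *last = e;
				e = e->next;
				free(last);
			}
			buckets[i] = NULL;
		}
	}

	/* Build the cache from many small allocations instead of one
	 * large contiguous block. Returns 0 on success, -1 on failure. */
	static int cache_init(int cache_size)
	{
		int nperbucket = (cache_size + NBUCKETS - 1) / NBUCKETS;

		if (nperbucket < 2)
			nperbucket = 2;

		for (int i = 0; i < NBUCKETS; i++) {
			struct entry **tail = &buckets[i];
			for (int j = 0; j < nperbucket; j++) {
				*tail = calloc(1, sizeof(struct entry));
				if (!*tail) {
					/* Partial failure: unwind what was built. */
					cache_shutdown();
					return -1;
				}
				tail = &(*tail)->next;
			}
			*tail = NULL;
		}
		return 0;
	}

	int main(void)
	{
		if (cache_init(1024) == 0) {
			printf("cache built\n");
			cache_shutdown();
		}
		return 0;
	}

The trade-off is one allocation per entry instead of one big one: slightly more allocator overhead and pointer chasing, in exchange for never needing a large contiguous region — the same trade the patch makes with a kzalloc() per struct raparms.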
Diffstat (limited to 'fs/nfsd')
-rw-r--r--	fs/nfsd/vfs.c	59
1 file changed, 35 insertions(+), 24 deletions(-)
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 1319e8027d55..aa1d0d6489a1 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -83,7 +83,6 @@ struct raparm_hbucket {
 	spinlock_t		pb_lock;
 } ____cacheline_aligned_in_smp;
 
-static struct raparms *		raparml;
 #define RAPARM_HASH_BITS	4
 #define RAPARM_HASH_SIZE	(1<<RAPARM_HASH_BITS)
 #define RAPARM_HASH_MASK	(RAPARM_HASH_SIZE-1)
@@ -1966,11 +1965,20 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
 void
 nfsd_racache_shutdown(void)
 {
-	if (!raparml)
-		return;
+	struct raparms *raparm, *last_raparm;
+	unsigned int i;
+
 	dprintk("nfsd: freeing readahead buffers.\n");
-	kfree(raparml);
-	raparml = NULL;
+
+	for (i = 0; i < RAPARM_HASH_SIZE; i++) {
+		raparm = raparm_hash[i].pb_head;
+		while(raparm) {
+			last_raparm = raparm;
+			raparm = raparm->p_next;
+			kfree(last_raparm);
+		}
+		raparm_hash[i].pb_head = NULL;
+	}
 }
 /*
  * Initialize readahead param cache
@@ -1981,35 +1989,38 @@ nfsd_racache_init(int cache_size)
 	int	i;
 	int	j = 0;
 	int	nperbucket;
+	struct raparms **raparm = NULL;
 
-	if (raparml)
+	if (raparm_hash[0].pb_head)
 		return 0;
-	if (cache_size < 2*RAPARM_HASH_SIZE)
-		cache_size = 2*RAPARM_HASH_SIZE;
-	raparml = kcalloc(cache_size, sizeof(struct raparms), GFP_KERNEL);
-
-	if (!raparml) {
-		printk(KERN_WARNING
-			"nfsd: Could not allocate memory read-ahead cache.\n");
-		return -ENOMEM;
-	}
+	nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE);
+	if (nperbucket < 2)
+		nperbucket = 2;
+	cache_size = nperbucket * RAPARM_HASH_SIZE;
 
 	dprintk("nfsd: allocating %d readahead buffers.\n", cache_size);
-	for (i = 0 ; i < RAPARM_HASH_SIZE ; i++) {
-		raparm_hash[i].pb_head = NULL;
+
+	for (i = 0; i < RAPARM_HASH_SIZE; i++) {
 		spin_lock_init(&raparm_hash[i].pb_lock);
-	}
-	nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE);
-	for (i = 0; i < cache_size - 1; i++) {
-		if (i % nperbucket == 0)
-			raparm_hash[j++].pb_head = raparml + i;
-		if (i % nperbucket < nperbucket-1)
-			raparml[i].p_next = raparml + i + 1;
+
+		raparm = &raparm_hash[i].pb_head;
+		for (j = 0; j < nperbucket; j++) {
+			*raparm = kzalloc(sizeof(struct raparms), GFP_KERNEL);
+			if (!*raparm)
+				goto out_nomem;
+			raparm = &(*raparm)->p_next;
+		}
+		*raparm = NULL;
 	}
 	nfsdstats.ra_size = cache_size;
 	return 0;
+
+out_nomem:
+	dprintk("nfsd: kmalloc failed, freeing readahead buffers\n");
+	nfsd_racache_shutdown();
+	return -ENOMEM;
 }
 
 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)