author | Christoph Lameter <clameter@sgi.com> | 2007-07-17 04:03:29 -0700
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-17 10:23:02 -0700
commit | 94f6030ca792c57422f04a73e7a872d8325946d3 (patch)
tree | 0197f24d82b1706f1b0521f2cf68feeff64123df
parent | 81cda6626178cd55297831296ba8ecedbfd8b52d (diff)
download | blackbird-op-linux-94f6030ca792c57422f04a73e7a872d8325946d3.tar.gz, blackbird-op-linux-94f6030ca792c57422f04a73e7a872d8325946d3.zip
Slab allocators: Replace explicit zeroing with __GFP_ZERO
kmalloc_node() and kmem_cache_alloc_node() were not available in a zeroing
variant in the past. With __GFP_ZERO it is now possible to zero while
allocating.
Use __GFP_ZERO to remove the explicit clearing of memory via memset wherever
we can.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
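The conversion pattern, shown as a minimal kernel-C sketch (struct foo and the foo_alloc_* helpers are hypothetical and do not appear in the diff):

	#include <linux/slab.h>
	#include <linux/string.h>

	/* Hypothetical structure, used only to illustrate the conversion. */
	struct foo {
		int count;
		void *data;
	};

	/* Before: allocate on a NUMA node, then clear with an explicit memset(). */
	static struct foo *foo_alloc_old(int node)
	{
		struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL, node);

		if (!f)
			return NULL;
		memset(f, 0, sizeof(*f));	/* separate zeroing pass */
		return f;
	}

	/* After: pass __GFP_ZERO so the allocator hands back zeroed memory. */
	static struct foo *foo_alloc_new(int node)
	{
		return kmalloc_node(sizeof(struct foo),
				    GFP_KERNEL | __GFP_ZERO, node);
	}

The same flag works with kmem_cache_alloc_node(), which is why the memset() calls after those allocations can be dropped in the hunks below as well.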
-rw-r--r-- | block/as-iosched.c | 3
-rw-r--r-- | block/cfq-iosched.c | 18
-rw-r--r-- | block/deadline-iosched.c | 3
-rw-r--r-- | block/elevator.c | 3
-rw-r--r-- | block/genhd.c | 8
-rw-r--r-- | block/ll_rw_blk.c | 4
-rw-r--r-- | drivers/ide/ide-probe.c | 4
-rw-r--r-- | kernel/timer.c | 4
-rw-r--r-- | lib/genalloc.c | 3
-rw-r--r-- | mm/allocpercpu.c | 9
-rw-r--r-- | mm/mempool.c | 3
-rw-r--r-- | mm/vmalloc.c | 6
12 files changed, 30 insertions, 38 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 109e91b91ffa..3e316dd72529 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1322,10 +1322,9 @@ static void *as_init_queue(request_queue_t *q)
 {
 	struct as_data *ad;
 
-	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
+	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!ad)
 		return NULL;
-	memset(ad, 0, sizeof(*ad));
 
 	ad->q = q; /* Identify what queue the data belongs to */
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e0aa4dad6742..9755a3cfad26 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1251,9 +1251,9 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
 	struct cfq_io_context *cic;
 
-	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
+	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
+							cfqd->queue->node);
 	if (cic) {
-		memset(cic, 0, sizeof(*cic));
 		cic->last_end_request = jiffies;
 		INIT_LIST_HEAD(&cic->queue_list);
 		cic->dtor = cfq_free_io_context;
@@ -1376,17 +1376,19 @@ retry:
 			 * free memory.
 			 */
 			spin_unlock_irq(cfqd->queue->queue_lock);
-			new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
+			new_cfqq = kmem_cache_alloc_node(cfq_pool,
+					gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+					cfqd->queue->node);
 			spin_lock_irq(cfqd->queue->queue_lock);
 			goto retry;
 		} else {
-			cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
+			cfqq = kmem_cache_alloc_node(cfq_pool,
+					gfp_mask | __GFP_ZERO,
+					cfqd->queue->node);
 			if (!cfqq)
 				goto out;
 		}
 
-		memset(cfqq, 0, sizeof(*cfqq));
-
 		RB_CLEAR_NODE(&cfqq->rb_node);
 		INIT_LIST_HEAD(&cfqq->fifo);
@@ -2079,12 +2081,10 @@ static void *cfq_init_queue(request_queue_t *q)
 {
 	struct cfq_data *cfqd;
 
-	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
+	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!cfqd)
 		return NULL;
 
-	memset(cfqd, 0, sizeof(*cfqd));
-
 	cfqd->service_tree = CFQ_RB_ROOT;
 	INIT_LIST_HEAD(&cfqd->cic_list);
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 6d673e938d3e..87ca02ac84cb 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -360,10 +360,9 @@ static void *deadline_init_queue(request_queue_t *q)
 {
 	struct deadline_data *dd;
 
-	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!dd)
 		return NULL;
-	memset(dd, 0, sizeof(*dd));
 
 	INIT_LIST_HEAD(&dd->fifo_list[READ]);
 	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
diff --git a/block/elevator.c b/block/elevator.c
index 4769a25d7037..d265963d1ed3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -177,11 +177,10 @@ static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
 	elevator_t *eq;
 	int i;
 
-	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
+	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (unlikely(!eq))
 		goto err;
 
-	memset(eq, 0, sizeof(*eq));
 	eq->ops = &e->ops;
 	eq->elevator_type = e;
 	kobject_init(&eq->kobj);
diff --git a/block/genhd.c b/block/genhd.c
index 863a8c0623ed..b321cadd6e65 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -726,21 +726,21 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
 {
 	struct gendisk *disk;
 
-	disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
+	disk = kmalloc_node(sizeof(struct gendisk),
+				GFP_KERNEL | __GFP_ZERO, node_id);
 	if (disk) {
-		memset(disk, 0, sizeof(struct gendisk));
 		if (!init_disk_stats(disk)) {
 			kfree(disk);
 			return NULL;
 		}
 		if (minors > 1) {
 			int size = (minors - 1) * sizeof(struct hd_struct *);
-			disk->part = kmalloc_node(size, GFP_KERNEL, node_id);
+			disk->part = kmalloc_node(size,
+				GFP_KERNEL | __GFP_ZERO, node_id);
 			if (!disk->part) {
 				kfree(disk);
 				return NULL;
 			}
-			memset(disk->part, 0, size);
 		}
 		disk->minors = minors;
 		kobj_set_kset_s(disk,block_subsys);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 11e4235d0b0c..d7cadf304168 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1829,11 +1829,11 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	request_queue_t *q;
 
-	q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
+	q = kmem_cache_alloc_node(requestq_cachep,
+				gfp_mask | __GFP_ZERO, node_id);
 	if (!q)
 		return NULL;
 
-	memset(q, 0, sizeof(*q));
 	init_timer(&q->unplug_timer);
 
 	snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index cc5801399467..5a4c5ea12f89 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1073,14 +1073,14 @@ static int init_irq (ide_hwif_t *hwif)
 		hwgroup->hwif->next = hwif;
 		spin_unlock_irq(&ide_lock);
 	} else {
-		hwgroup = kmalloc_node(sizeof(ide_hwgroup_t), GFP_KERNEL,
+		hwgroup = kmalloc_node(sizeof(ide_hwgroup_t),
+					GFP_KERNEL | __GFP_ZERO,
 					hwif_to_node(hwif->drives[0].hwif));
 		if (!hwgroup)
 			goto out_up;
 
 		hwif->hwgroup = hwgroup;
 
-		memset(hwgroup, 0, sizeof(ide_hwgroup_t));
 		hwgroup->hwif = hwif->next = hwif;
 		hwgroup->rq = NULL;
 		hwgroup->handler = NULL;
diff --git a/kernel/timer.c b/kernel/timer.c
index 1258371e0d2b..b7792fb03387 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1221,7 +1221,8 @@ static int __devinit init_timers_cpu(int cpu)
 			/*
 			 * The APs use this path later in boot
 			 */
-			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
+			base = kmalloc_node(sizeof(*base),
+						GFP_KERNEL | __GFP_ZERO,
 						cpu_to_node(cpu));
 			if (!base)
 				return -ENOMEM;
@@ -1232,7 +1233,6 @@ static int __devinit init_timers_cpu(int cpu)
 				kfree(base);
 				return -ENOMEM;
 			}
-			memset(base, 0, sizeof(*base));
 			per_cpu(tvec_bases, cpu) = base;
 		} else {
 			/*
diff --git a/lib/genalloc.c b/lib/genalloc.c
index eb7c2bab9ebf..f6d276db2d58 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -54,11 +54,10 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
 	int nbytes = sizeof(struct gen_pool_chunk) +
 				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
 
-	chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
 	if (unlikely(chunk == NULL))
 		return -1;
 
-	memset(chunk, 0, nbytes);
 	spin_lock_init(&chunk->lock);
 	chunk->start_addr = addr;
 	chunk->end_addr = addr + size;
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index b2486cf887a0..00b02623f008 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -53,12 +53,9 @@ void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
 	int node = cpu_to_node(cpu);
 
 	BUG_ON(pdata->ptrs[cpu]);
-	if (node_online(node)) {
-		/* FIXME: kzalloc_node(size, gfp, node) */
-		pdata->ptrs[cpu] = kmalloc_node(size, gfp, node);
-		if (pdata->ptrs[cpu])
-			memset(pdata->ptrs[cpu], 0, size);
-	} else
+	if (node_online(node))
+		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
+	else
 		pdata->ptrs[cpu] = kzalloc(size, gfp);
 	return pdata->ptrs[cpu];
 }
diff --git a/mm/mempool.c b/mm/mempool.c
index 3e8f1fed0e1f..02d5ec3feabc 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -62,10 +62,9 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
 			mempool_free_t *free_fn, void *pool_data, int node_id)
 {
 	mempool_t *pool;
 
-	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
+	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
 	if (!pool)
 		return NULL;
-	memset(pool, 0, sizeof(*pool));
 	pool->elements = kmalloc_node(min_nr * sizeof(void *),
 					GFP_KERNEL, node_id);
 	if (!pool->elements) {
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ddf87145cc49..8e05a11155c9 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -432,11 +432,12 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
+		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+					PAGE_KERNEL, node);
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_LEVEL_MASK),
+				(gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
 				node);
 	}
 	area->pages = pages;
@@ -445,7 +446,6 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		kfree(area);
 		return NULL;
 	}
-	memset(area->pages, 0, array_size);
 	for (i = 0; i < area->nr_pages; i++) {
 		if (node < 0)