Diffstat (limited to 'mm')
-rw-r--r--	mm/fremap.c	3
-rw-r--r--	mm/highmem.c	2
-rw-r--r--	mm/madvise.c	11
-rw-r--r--	mm/mempolicy.c	8
-rw-r--r--	mm/mempool.c	6
-rw-r--r--	mm/nommu.c	3
-rw-r--r--	mm/oom_kill.c	2
-rw-r--r--	mm/page_alloc.c	12
-rw-r--r--	mm/page_io.c	2
-rw-r--r--	mm/shmem.c	3
-rw-r--r--	mm/slab.c	34
-rw-r--r--	mm/swap_state.c	2
-rw-r--r--	mm/vmalloc.c	4
13 files changed, 44 insertions(+), 48 deletions(-)
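
Every hunk below makes the same mechanical substitution: allocation-flag parameters typed as the open-coded "unsigned int __nocast" (plus one bare "unsigned int" in should_reclaim_zone()) become the single gfp_t typedef. As a minimal sketch of what the typedef buys, not part of this diff: in current mainline, gfp_t carries sparse's __bitwise annotation (include/linux/gfp_types.h, historically include/linux/gfp.h), which makes it a distinct type under the checker, so a plain integer passed where allocation flags belong gets flagged, while a normal compiler sees nothing. Names and the flag value below are illustrative only:

/* Hypothetical standalone sketch; compiles with gcc, checks with sparse. */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise gfp_t;

/* Illustrative value, not the real __GFP_WAIT definition. */
#define DEMO_GFP_WAIT ((__force gfp_t)0x10u)

/* After this patch, allocator prototypes take gfp_t instead of unsigned int: */
void *demo_alloc(unsigned long size, gfp_t flags);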
diff --git a/mm/fremap.c b/mm/fremap.c
index 3235fb77c133..ab23a0673c35 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -89,6 +89,9 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (!page->mapping || page->index >= size)
 		goto err_unlock;
+	err = -ENOMEM;
+	if (page_mapcount(page) > INT_MAX/2)
+		goto err_unlock;
 
 	zap_pte(mm, vma, addr, pte);
diff --git a/mm/highmem.c b/mm/highmem.c
index 400911599468..90e1861e2da0 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -30,7 +30,7 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc(unsigned int __nocast gfp_mask, void *data)
+static void *page_pool_alloc(gfp_t gfp_mask, void *data)
 {
 	unsigned int gfp = gfp_mask | (unsigned int) (long) data;
diff --git a/mm/madvise.c b/mm/madvise.c
index 4454936f87d1..20e075d1c64c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -83,6 +83,9 @@ static long madvise_willneed(struct vm_area_struct * vma,
 {
 	struct file *file = vma->vm_file;
 
+	if (!file)
+		return -EBADF;
+
 	if (file->f_mapping->a_ops->get_xip_page) {
 		/* no bad return value, but ignore advice */
 		return 0;
@@ -141,11 +144,7 @@ static long
 madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
 		unsigned long start, unsigned long end, int behavior)
 {
-	struct file *filp = vma->vm_file;
-	long error = -EBADF;
-
-	if (!filp)
-		goto out;
+	long error;
 
 	switch (behavior) {
 	case MADV_NORMAL:
@@ -166,8 +165,6 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
 		error = -EINVAL;
 		break;
 	}
-
-out:
 	return error;
 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9033f0859aa8..37af443eb094 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -687,7 +687,7 @@ get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned lo
 }
 
 /* Return a zonelist representing a mempolicy */
-static struct zonelist *zonelist_policy(unsigned int __nocast gfp, struct mempolicy *policy)
+static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
 {
 	int nd;
@@ -751,7 +751,7 @@ static unsigned offset_il_node(struct mempolicy *pol,
 
 /* Allocate a page in interleaved policy.
    Own path because it needs to do special accounting. */
-static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned order, unsigned nid)
+static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid)
 {
 	struct zonelist *zl;
 	struct page *page;
@@ -789,7 +789,7 @@ static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned or
  *	Should be called with the mm_sem of the vma hold.
  */
 struct page *
-alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr)
+alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
@@ -832,7 +832,7 @@ alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned l
  *	1) it's ok to take cpuset_sem (can WAIT), and
  *	2) allocating for current task (not interrupt).
  */
-struct page *alloc_pages_current(unsigned int __nocast gfp, unsigned order)
+struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
 	struct mempolicy *pol = current->mempolicy;
diff --git a/mm/mempool.c b/mm/mempool.c
index 65f2957b8d51..9e377ea700b2 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -112,7 +112,7 @@ EXPORT_SYMBOL(mempool_create_node);
  * while this function is running. mempool_alloc() & mempool_free()
  * might be called (eg. from IRQ contexts) while this function executes.
  */
-int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask)
+int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
 {
 	void *element;
 	void **new_elements;
@@ -200,7 +200,7 @@ EXPORT_SYMBOL(mempool_destroy);
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 */
-void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
+void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 {
 	void *element;
 	unsigned long flags;
@@ -276,7 +276,7 @@ EXPORT_SYMBOL(mempool_free);
 /*
  * A commonly used alloc and free fn.
  */
-void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data)
+void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
 {
 	kmem_cache_t *mem = (kmem_cache_t *) pool_data;
 	return kmem_cache_alloc(mem, gfp_mask);
diff --git a/mm/nommu.c b/mm/nommu.c
index 064d70442895..0ef241ae3763 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -157,8 +157,7 @@ void vfree(void *addr)
 	kfree(addr);
 }
 
-void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask,
-		pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
 	/*
 	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index ac3bf33e5370..d348b9035955 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -263,7 +263,7 @@ static struct mm_struct *oom_kill_process(struct task_struct *p)
  * OR try to be smart about which process to kill. Note that we
  * don't have to be perfect here, we just have to be good.
  */
-void out_of_memory(unsigned int __nocast gfp_mask, int order)
+void out_of_memory(gfp_t gfp_mask, int order)
 {
 	struct mm_struct *mm = NULL;
 	task_t * p;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ae2903339e71..cc1fe2672a31 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -671,7 +671,7 @@ void fastcall free_cold_page(struct page *page)
 	free_hot_cold_page(page, 1);
 }
 
-static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags)
+static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 {
 	int i;
@@ -686,7 +686,7 @@ static inline void prep_zero_page(struct page *page, int order, unsigned int __n
 * or two.
 */
 static struct page *
-buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags)
+buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
 {
 	unsigned long flags;
 	struct page *page = NULL;
@@ -761,7 +761,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 }
 
 static inline int
-should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
+should_reclaim_zone(struct zone *z, gfp_t gfp_mask)
 {
 	if (!z->reclaim_pages)
 		return 0;
@@ -774,7 +774,7 @@ should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
 struct page * fastcall
-__alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
+__alloc_pages(gfp_t gfp_mask, unsigned int order,
 		struct zonelist *zonelist)
 {
 	const int wait = gfp_mask & __GFP_WAIT;
@@ -977,7 +977,7 @@ EXPORT_SYMBOL(__alloc_pages);
 
 /*
  * Common helper functions.
 */
-fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order)
+fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
 	struct page * page;
 
 	page = alloc_pages(gfp_mask, order);
@@ -988,7 +988,7 @@ fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned
 
 EXPORT_SYMBOL(__get_free_pages);
 
-fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask)
+fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
 	struct page * page;
diff --git a/mm/page_io.c b/mm/page_io.c
index 2e605a19ce57..330e00d6db00 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -19,7 +19,7 @@
 #include <linux/writeback.h>
 #include <asm/pgtable.h>
 
-static struct bio *get_swap_bio(unsigned int __nocast gfp_flags, pgoff_t index,
+static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
 				struct page *page, bio_end_io_t end_io)
 {
 	struct bio *bio;
diff --git a/mm/shmem.c b/mm/shmem.c
index 1f7aeb210c7b..ea064d89cda9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -921,8 +921,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
 }
 
 static inline struct page *
-shmem_alloc_page(unsigned int __nocast gfp,struct shmem_inode_info *info,
-		 unsigned long idx)
+shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
 {
 	return alloc_page(gfp | __GFP_ZERO);
 }
diff --git a/mm/slab.c b/mm/slab.c
index 5cbbdfa6dd0e..d05c678bceb3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -650,8 +650,7 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep)
 	return cachep->array[smp_processor_id()];
 }
 
-static inline kmem_cache_t *__find_general_cachep(size_t size,
-					unsigned int __nocast gfpflags)
+static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	struct cache_sizes *csizep = malloc_sizes;
@@ -675,8 +674,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
 	return csizep->cs_cachep;
 }
 
-kmem_cache_t *kmem_find_general_cachep(size_t size,
-		unsigned int __nocast gfpflags)
+kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
 {
 	return __find_general_cachep(size, gfpflags);
 }
@@ -1185,7 +1183,7 @@ __initcall(cpucache_init);
 * did not request dmaable memory, we might get it, but that
 * would be relatively rare and ignorable.
 */
-static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	struct page *page;
 	void *addr;
@@ -2048,7 +2046,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
 static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
-					int colour_off, unsigned int __nocast local_flags)
+					int colour_off, gfp_t local_flags)
 {
 	struct slab *slabp;
@@ -2149,7 +2147,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
 * Grow (by 1) the number of slabs within a cache. This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
-static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	struct slab *slabp;
 	void *objp;
@@ -2356,7 +2354,7 @@ bad:
 #define check_slabp(x,y) do { } while(0)
 #endif
 
-static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags)
+static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
@@ -2456,7 +2454,7 @@ alloc_done:
 }
 
 static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
+cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
 {
 	might_sleep_if(flags & __GFP_WAIT);
 #if DEBUG
@@ -2467,7 +2465,7 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
 #if DEBUG
 static void *
 cache_alloc_debugcheck_after(kmem_cache_t *cachep,
-			unsigned int __nocast flags, void *objp, void *caller)
+			gfp_t flags, void *objp, void *caller)
 {
 	if (!objp)
 		return objp;
@@ -2510,7 +2508,7 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-static inline void *____cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
 	void* objp;
 	struct array_cache *ac;
@@ -2528,7 +2526,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, unsigned int __nocast
 	return objp;
 }
 
-static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
 	unsigned long save_flags;
 	void* objp;
@@ -2787,7 +2785,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
 * Allocate an object from this cache. The flags are only relevant
 * if the cache has no available objects.
 */
-void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
+void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
 {
 	return __cache_alloc(cachep, flags);
 }
@@ -2848,7 +2846,7 @@ out:
 * New and improved: it will now make sure that the object gets
 * put on the correct node list so that there is no false sharing.
 */
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
+void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
 {
 	unsigned long save_flags;
 	void *ptr;
@@ -2875,7 +2873,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
+void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	kmem_cache_t *cachep;
@@ -2908,7 +2906,7 @@ EXPORT_SYMBOL(kmalloc_node);
 * platforms. For example, on i386, it means that the memory must come
 * from the first 16MB.
 */
-void *__kmalloc(size_t size, unsigned int __nocast flags)
+void *__kmalloc(size_t size, gfp_t flags)
 {
 	kmem_cache_t *cachep;
@@ -2997,7 +2995,7 @@ EXPORT_SYMBOL(kmem_cache_free);
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 */
-void *kzalloc(size_t size, unsigned int __nocast flags)
+void *kzalloc(size_t size, gfp_t flags)
 {
 	void *ret = kmalloc(size, flags);
 	if (ret)
@@ -3603,7 +3601,7 @@ unsigned int ksize(const void *objp)
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
-char *kstrdup(const char *s, unsigned int __nocast gfp)
+char *kstrdup(const char *s, gfp_t gfp)
 {
 	size_t len;
 	char *buf;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index adbc2b426c2f..132164f7d0a7 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -68,7 +68,7 @@ void show_swap_cache_info(void)
 * but sets SwapCache flag and private instead of mapping and index.
 */
 static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
-			       unsigned int __nocast gfp_mask)
+			       gfp_t gfp_mask)
 {
 	int error;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 13c3d82968ae..1150229b6366 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -395,7 +395,7 @@ void *vmap(struct page **pages, unsigned int count,
 }
 EXPORT_SYMBOL(vmap);
 
-void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot)
+void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 {
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
@@ -446,7 +446,7 @@ fail:
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
-void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
 	struct vm_struct *area;
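
Beyond the type annotations, two hunks above change behavior. The mm/fremap.c one makes install_page() fail with -ENOMEM once a page's map count already exceeds INT_MAX/2 (1073741823), keeping the counter far from signed overflow; the wide margin also tolerates concurrent installs that pass the check before any increment lands. A user-space rendering of the same guard pattern, with illustrative names (mapcount and install_one_mapping are not kernel API):

#include <limits.h>
#include <stdio.h>

/* Stand-in for the page's atomic map count. */
static int mapcount;

static int install_one_mapping(void)
{
	/*
	 * Refuse far below INT_MAX: the check and the increment are
	 * separate steps, so the margin absorbs other installers that
	 * slip through between them.
	 */
	if (mapcount > INT_MAX / 2)
		return -1; /* install_page() returns -ENOMEM here */
	mapcount++;
	return 0;
}

int main(void)
{
	printf("refusal threshold: %d\n", INT_MAX / 2);
	return install_one_mapping() ? 1 : 0;
}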
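The mm/madvise.c hunks are the other behavioral fix: the missing-file check moves out of madvise_vma() and into madvise_willneed(), the only advice that actually needs a backing file to read ahead from, so advice such as MADV_NORMAL or MADV_DONTNEED on anonymous mappings stops failing with EBADF. A small runnable probe of the resulting semantics (plain POSIX calls; on kernels of this vintage MADV_WILLNEED on anonymous memory still reports EBADF, while much later kernels added anonymous swap prefetch for it):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20; /* 1 MiB anonymous mapping, no backing file */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1; /* fault a page in so DONTNEED has something to drop */

	/* Succeeds after this patch; used to fail with EBADF. */
	if (madvise(p, len, MADV_DONTNEED))
		printf("MADV_DONTNEED failed: %s\n", strerror(errno));

	/* Still EBADF on these kernels: readahead needs a file. */
	if (madvise(p, len, MADV_WILLNEED))
		printf("MADV_WILLNEED failed: %s\n", strerror(errno));

	munmap(p, len);
	return 0;
}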