author | Jeremy Fitzhardinge <jeremy@xensource.com> | 2007-07-17 18:37:04 -0700 |
---|---|---|
committer | Jeremy Fitzhardinge <jeremy@goop.org> | 2007-07-18 08:47:41 -0700 |
commit | 5f4352fbffd6c45123dbce9e195efd54df4e177e (patch) | |
tree | e2a0316e2f2d22c266e7cae3015ddc0f2f77f64f /mm | |
parent | bdef40a6af64a0140a65df49bf504124d57094a9 (diff) | |
download | blackbird-op-linux-5f4352fbffd6c45123dbce9e195efd54df4e177e.tar.gz blackbird-op-linux-5f4352fbffd6c45123dbce9e195efd54df4e177e.zip |
Allocate and free vmalloc areas
Allocate/release a chunk of vmalloc address space:
alloc_vm_area reserves a chunk of address space and makes sure all the
pagetables are constructed for that address range, but allocates no pages.
free_vm_area releases the address space range.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ian Pratt <ian.pratt@xensource.com>
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Cc: "Jan Beulich" <JBeulich@novell.com>
Cc: "Andi Kleen" <ak@muc.de>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/vmalloc.c | 53 |
1 files changed, 53 insertions, 0 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8e05a11155c9..3130c343088f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -767,3 +767,56 @@ EXPORT_SYMBOL(remap_vmalloc_range);
 void __attribute__((weak)) vmalloc_sync_all(void)
 {
 }
+
+
+static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+{
+	/* apply_to_page_range() does all the hard work. */
+	return 0;
+}
+
+/**
+ *	alloc_vm_area - allocate a range of kernel address space
+ *	@size:		size of the area
+ *	@returns:	NULL on failure, vm_struct on success
+ *
+ *	This function reserves a range of kernel address space, and
+ *	allocates pagetables to map that range.  No actual mappings
+ *	are created.  If the kernel address space is not shared
+ *	between processes, it syncs the pagetable across all
+ *	processes.
+ */
+struct vm_struct *alloc_vm_area(size_t size)
+{
+	struct vm_struct *area;
+
+	area = get_vm_area(size, VM_IOREMAP);
+	if (area == NULL)
+		return NULL;
+
+	/*
+	 * This ensures that page tables are constructed for this region
+	 * of kernel virtual address space and mapped into init_mm.
+	 */
+	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
+				area->size, f, NULL)) {
+		free_vm_area(area);
+		return NULL;
+	}
+
+	/* Make sure the pagetables are constructed in process kernel
+	   mappings */
+	vmalloc_sync_all();
+
+	return area;
+}
+EXPORT_SYMBOL_GPL(alloc_vm_area);
+
+void free_vm_area(struct vm_struct *area)
+{
+	struct vm_struct *ret;
+	ret = remove_vm_area(area->addr);
+	BUG_ON(ret != area);
+	kfree(area);
+}
+EXPORT_SYMBOL_GPL(free_vm_area);
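For context on the helper f() above: apply_to_page_range() walks (and allocates, where missing) every pagetable level covering the given range and invokes the callback once per PTE slot, so a do-nothing callback is enough to force pagetable construction. As a hedged sketch, a caller that wanted to populate the reserved range itself could instead pass a callback like the hypothetical demo_map_pte() below; the backing page carried in the data cookie is an assumption, not something this patch does:

```c
/*
 * Hypothetical sketch (not in this patch): a pte_fn_t-style callback
 * that installs a mapping at each PTE slot instead of doing nothing.
 */
#include <linux/mm.h>
#include <asm/pgtable.h>

static int demo_map_pte(pte_t *pte, struct page *pmd_page,
			unsigned long addr, void *data)
{
	struct page *backing = data;	/* assumed backing page */

	/* Write the PTE for this kernel virtual address in init_mm. */
	set_pte_at(&init_mm, addr, pte, mk_pte(backing, PAGE_KERNEL));
	return 0;
}

/*
 * Would be used the same way the patch uses f(), but with a page to
 * install, e.g.:
 *
 *	apply_to_page_range(&init_mm, (unsigned long)area->addr,
 *			    area->size, demo_map_pte, backing_page);
 */
```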