Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c     15
-rw-r--r--  mm/memory.c      17
-rw-r--r--  mm/mmap.c        63
-rw-r--r--  mm/mremap.c       7
-rw-r--r--  mm/nommu.c        3
-rw-r--r--  mm/page_alloc.c   2
-rw-r--r--  mm/rmap.c         8
-rw-r--r--  mm/swapfile.c     2
-rw-r--r--  mm/vmalloc.c     33
9 files changed, 88 insertions, 62 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index d5fdae2eb183..4a2fee2cb62b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -29,11 +29,6 @@
#include <linux/security.h>
#include <linux/syscalls.h>
/*
- * This is needed for the following functions:
- * - try_to_release_page
- * - block_invalidatepage
- * - generic_osync_inode
- *
* FIXME: remove all knowledge of the buffer layer from the core VM
*/
#include <linux/buffer_head.h> /* for generic_osync_inode */
@@ -1009,7 +1004,7 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
if (pos < size) {
retval = generic_file_direct_IO(READ, iocb,
iov, pos, nr_segs);
- if (retval >= 0 && !is_sync_kiocb(iocb))
+ if (retval > 0 && !is_sync_kiocb(iocb))
retval = -EIOCBQUEUED;
if (retval > 0)
*ppos = pos + retval;
@@ -1973,6 +1968,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
do {
unsigned long index;
unsigned long offset;
+ unsigned long maxlen;
size_t copied;
offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
@@ -1987,7 +1983,10 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
* same page as we're writing to, without it being marked
* up-to-date.
*/
- fault_in_pages_readable(buf, bytes);
+ maxlen = cur_iov->iov_len - iov_base;
+ if (maxlen > bytes)
+ maxlen = bytes;
+ fault_in_pages_readable(buf, maxlen);
page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
if (!page) {
@@ -2029,6 +2028,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
filemap_set_next_iovec(&cur_iov,
&iov_base, status);
buf = cur_iov->iov_base + iov_base;
+ } else {
+ iov_base += status;
}
}
}
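The buffered-write hunk above prefaults only as many bytes as remain in the current iovec segment, so fault_in_pages_readable() cannot read past iov_len, and a short copy that stays inside one segment now just advances iov_base instead of re-walking the iovec. A minimal standalone model of that clamping (illustrative only; struct iovec_model and clamp_prefault are made-up names, not kernel interfaces):

#include <stddef.h>

struct iovec_model {
        const char *iov_base;   /* start of this segment */
        size_t iov_len;         /* length of this segment */
};

/*
 * Prefault no more than what is left in the current segment and no
 * more than this write wants: mirrors "maxlen = cur_iov->iov_len -
 * iov_base; if (maxlen > bytes) maxlen = bytes;" in the hunk.
 */
size_t clamp_prefault(const struct iovec_model *cur_iov,
                      size_t iov_off, size_t bytes)
{
        size_t maxlen = cur_iov->iov_len - iov_off;

        return maxlen > bytes ? bytes : maxlen;
}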
diff --git a/mm/memory.c b/mm/memory.c
index 6bad4c4064e7..d209f745db7f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1701,12 +1701,13 @@ static int do_swap_page(struct mm_struct * mm,
spin_lock(&mm->page_table_lock);
page_table = pte_offset_map(pmd, address);
if (unlikely(!pte_same(*page_table, orig_pte))) {
- pte_unmap(page_table);
- spin_unlock(&mm->page_table_lock);
- unlock_page(page);
- page_cache_release(page);
ret = VM_FAULT_MINOR;
- goto out;
+ goto out_nomap;
+ }
+
+ if (unlikely(!PageUptodate(page))) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_nomap;
}
/* The page isn't present yet, go ahead with the fault. */
@@ -1741,6 +1742,12 @@ static int do_swap_page(struct mm_struct * mm,
spin_unlock(&mm->page_table_lock);
out:
return ret;
+out_nomap:
+ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+ unlock_page(page);
+ page_cache_release(page);
+ goto out;
}
/*
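The do_swap_page() hunk folds the duplicated unwinding into a single out_nomap label and adds a PageUptodate() check, so a swap-in whose read failed raises SIGBUS instead of mapping a junk page. The shape of that single-exit unwinding in a self-contained sketch (resource names are placeholders, not the kernel's):

#include <stdlib.h>

/*
 * Error paths that already hold resources jump to one cleanup label,
 * release in reverse order, and rejoin the common exit -- the same
 * structure as out: / out_nomap: in the hunk.
 */
int do_work(int fail)
{
        int ret = 0;
        char *held = calloc(1, 64);     /* stands in for the mapped pte and
                                           locked page held at this point */
        if (!held)
                return -1;

        if (fail) {                     /* like !pte_same() or !PageUptodate() */
                ret = -1;
                goto out_unwind;
        }

        free(held);                     /* success path consumes 'held' */
out:
        return ret;
out_unwind:
        free(held);                     /* like pte_unmap/unlock_page/release */
        goto out;
}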
diff --git a/mm/mmap.c b/mm/mmap.c
index 01f9793591f6..de54acd9942f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1244,7 +1244,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = mm->free_area_cache;
/* make sure it can fit in the remaining address space */
- if (addr >= len) {
+ if (addr > len) {
vma = find_vma(mm, addr-len);
if (!vma || addr <= vma->vm_start)
/* remember the address as a hint for next time */
@@ -1266,7 +1266,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
/* try just below the current vma->vm_start */
addr = vma->vm_start-len;
- } while (len <= vma->vm_start);
+ } while (len < vma->vm_start);
/*
* A failed mmap() very likely causes application failure,
@@ -1302,37 +1302,40 @@ unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
- if (flags & MAP_FIXED) {
- unsigned long ret;
+ unsigned long ret;
- if (addr > TASK_SIZE - len)
- return -ENOMEM;
- if (addr & ~PAGE_MASK)
- return -EINVAL;
- if (file && is_file_hugepages(file)) {
- /*
- * Check if the given range is hugepage aligned, and
- * can be made suitable for hugepages.
- */
- ret = prepare_hugepage_range(addr, len);
- } else {
- /*
- * Ensure that a normal request is not falling in a
- * reserved hugepage range. For some archs like IA-64,
- * there is a separate region for hugepages.
- */
- ret = is_hugepage_only_range(current->mm, addr, len);
- }
- if (ret)
- return -EINVAL;
- return addr;
- }
+ if (!(flags & MAP_FIXED)) {
+ unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
- if (file && file->f_op && file->f_op->get_unmapped_area)
- return file->f_op->get_unmapped_area(file, addr, len,
- pgoff, flags);
+ get_area = current->mm->get_unmapped_area;
+ if (file && file->f_op && file->f_op->get_unmapped_area)
+ get_area = file->f_op->get_unmapped_area;
+ addr = get_area(file, addr, len, pgoff, flags);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+ }
- return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+ if (addr > TASK_SIZE - len)
+ return -ENOMEM;
+ if (addr & ~PAGE_MASK)
+ return -EINVAL;
+ if (file && is_file_hugepages(file)) {
+ /*
+ * Check if the given range is hugepage aligned, and
+ * can be made suitable for hugepages.
+ */
+ ret = prepare_hugepage_range(addr, len);
+ } else {
+ /*
+ * Ensure that a normal request is not falling in a
+ * reserved hugepage range. For some archs like IA-64,
+ * there is a separate region for hugepages.
+ */
+ ret = is_hugepage_only_range(current->mm, addr, len);
+ }
+ if (ret)
+ return -EINVAL;
+ return addr;
}
EXPORT_SYMBOL(get_unmapped_area);
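The get_unmapped_area() rewrite above first dispatches to either the file's or the mm's hook when the mapping is not MAP_FIXED, then runs the TASK_SIZE, alignment and hugepage checks on whatever address came back, so fixed and non-fixed requests are validated the same way. A compact model of that dispatch-then-validate shape (placeholder names and constants, not the kernel's):

#include <errno.h>

typedef long (*get_area_fn)(unsigned long addr, unsigned long len);

/* Stand-in for mm->get_unmapped_area: accept the caller's hint. */
static long default_get_area(unsigned long addr, unsigned long len)
{
        (void)len;
        return (long)addr;
}

/*
 * Pick a hook, let it propose an address (unless the request is
 * "fixed"), then apply the same sanity checks to every path.
 */
long get_area_checked(get_area_fn file_hook, int fixed,
                      unsigned long addr, unsigned long len,
                      unsigned long task_size, unsigned long page_size)
{
        if (!fixed) {
                get_area_fn get_area = file_hook ? file_hook : default_get_area;
                long proposed = get_area(addr, len);

                if (proposed < 0)       /* hook already reported an error */
                        return proposed;
                addr = (unsigned long)proposed;
        }
        if (len > task_size || addr > task_size - len)
                return -ENOMEM;
        if (addr & (page_size - 1))
                return -EINVAL;
        return (long)addr;
}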
diff --git a/mm/mremap.c b/mm/mremap.c
index 0dd7ace94e51..ec7238a78f36 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -224,6 +224,12 @@ static unsigned long move_vma(struct vm_area_struct *vma,
split = 1;
}
+ /*
+ * if we failed to move page tables we still do total_vm increment
+ * since do_munmap() will decrement it by old_len == new_len
+ */
+ mm->total_vm += new_len >> PAGE_SHIFT;
+
if (do_munmap(mm, old_addr, old_len) < 0) {
/* OOM: unable to split vma, just get accounts right */
vm_unacct_memory(excess >> PAGE_SHIFT);
@@ -237,7 +243,6 @@ static unsigned long move_vma(struct vm_area_struct *vma,
vma->vm_next->vm_flags |= VM_ACCOUNT;
}
- mm->total_vm += new_len >> PAGE_SHIFT;
__vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
mm->locked_vm += new_len >> PAGE_SHIFT;
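The mremap hunk raises total_vm before calling do_munmap() because, when the page-table move fails, old_len equals new_len and do_munmap() decrements total_vm by exactly that amount; incrementing first lets the two cancel out instead of leaving the counter short. A toy check of that balance (pure arithmetic, not kernel code):

#include <assert.h>

int main(void)
{
        unsigned long total_vm = 100;
        unsigned long old_len_pages = 4, new_len_pages = 4;  /* failed move: equal */

        total_vm += new_len_pages;      /* increment moved before do_munmap() */
        total_vm -= old_len_pages;      /* do_munmap() decrements by old_len */

        assert(total_vm == 100);        /* counter stays balanced on failure */
        return 0;
}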
diff --git a/mm/nommu.c b/mm/nommu.c
index b293ec1cc4e6..c53e9c8f6b4a 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -150,7 +150,8 @@ void vfree(void *addr)
kfree(addr);
}
-void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
+void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask,
+ pgprot_t prot)
{
/*
* kmalloc doesn't like __GFP_HIGHMEM for some reason
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fc1b1064c505..b1061b1962f8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -43,7 +43,9 @@
* initializer cleaner
*/
nodemask_t node_online_map = { { [0] = 1UL } };
+EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map = NODE_MASK_ALL;
+EXPORT_SYMBOL(node_possible_map);
struct pglist_data *pgdat_list;
unsigned long totalram_pages;
unsigned long totalhigh_pages;
diff --git a/mm/rmap.c b/mm/rmap.c
index 378de234c12b..9827409eb7c7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -586,7 +586,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
dec_mm_counter(mm, anon_rss);
}
- inc_mm_counter(mm, rss);
+ dec_mm_counter(mm, rss);
page_remove_rmap(page);
page_cache_release(page);
@@ -626,7 +626,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
- pte_t *pte;
+ pte_t *pte, *original_pte;
pte_t pteval;
struct page *page;
unsigned long address;
@@ -658,7 +658,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
if (!pmd_present(*pmd))
goto out_unlock;
- for (pte = pte_offset_map(pmd, address);
+ for (original_pte = pte = pte_offset_map(pmd, address);
address < end; pte++, address += PAGE_SIZE) {
if (!pte_present(*pte))
@@ -694,7 +694,7 @@ static void try_to_unmap_cluster(unsigned long cursor,
(*mapcount)--;
}
- pte_unmap(pte);
+ pte_unmap(original_pte);
out_unlock:
spin_unlock(&mm->page_table_lock);
}
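The try_to_unmap_cluster() hunk fixes two things: the rss counter is now decremented rather than incremented when a pte is torn down, and pte_unmap() is called on the pointer originally returned by pte_offset_map() instead of the cursor the loop has advanced. The cursor-saving pattern in a self-contained sketch (free() stands in for pte_unmap(); names are illustrative):

#include <stdlib.h>

/*
 * Walk a table with an advancing cursor, but release it with the
 * pointer the mapping call originally returned -- the discipline the
 * hunk enforces with original_pte vs. pte.
 */
long sum_and_release(size_t n)
{
        long *original_p, *p, total = 0;

        original_p = p = calloc(n, sizeof(*p));  /* like pte_offset_map() */
        if (!p)
                return -1;

        for (size_t i = 0; i < n; i++, p++)      /* p advances, like pte++ */
                total += *p;

        free(original_p);                        /* like pte_unmap(original_pte) */
        return total;
}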
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a60e0075d55b..da48405cd9a3 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -79,7 +79,7 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page)
WARN_ON(page_count(page) <= 1);
bdi = bdev->bd_inode->i_mapping->backing_dev_info;
- bdi->unplug_io_fn(bdi, page);
+ blk_run_backing_dev(bdi, page);
}
up_read(&swap_unplug_sem);
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2bd83e5c2bbf..8ff16a1eee6a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -248,31 +248,20 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
-/**
- * remove_vm_area - find and remove a contingous kernel virtual area
- *
- * @addr: base address
- *
- * Search for the kernel VM area starting at @addr, and remove it.
- * This function returns the found VM area, but using it is NOT safe
- * on SMP machines.
- */
-struct vm_struct *remove_vm_area(void *addr)
+/* Caller must hold vmlist_lock */
+struct vm_struct *__remove_vm_area(void *addr)
{
struct vm_struct **p, *tmp;
- write_lock(&vmlist_lock);
for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
if (tmp->addr == addr)
goto found;
}
- write_unlock(&vmlist_lock);
return NULL;
found:
unmap_vm_area(tmp);
*p = tmp->next;
- write_unlock(&vmlist_lock);
/*
* Remove the guard page.
@@ -281,6 +270,24 @@ found:
return tmp;
}
+/**
+ * remove_vm_area - find and remove a contingous kernel virtual area
+ *
+ * @addr: base address
+ *
+ * Search for the kernel VM area starting at @addr, and remove it.
+ * This function returns the found VM area, but using it is NOT safe
+ * on SMP machines, except for its size or flags.
+ */
+struct vm_struct *remove_vm_area(void *addr)
+{
+ struct vm_struct *v;
+ write_lock(&vmlist_lock);
+ v = __remove_vm_area(addr);
+ write_unlock(&vmlist_lock);
+ return v;
+}
+
void __vunmap(void *addr, int deallocate_pages)
{
struct vm_struct *area;
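The vmalloc.c change splits remove_vm_area() into __remove_vm_area(), which does the vmlist walk and assumes the caller already holds vmlist_lock, and a thin wrapper that takes the lock itself, so code that already holds the lock can reuse the walk without taking it twice. The locked/unlocked split modelled with pthreads (stand-in names; the kernel's vmlist_lock is an rwlock_t):

#include <pthread.h>

static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
static int list_head;                 /* stand-in for the vmlist walk */

/* Caller must hold list_lock for writing (mirrors __remove_vm_area). */
static int __remove_entry(int key)
{
        if (list_head == key) {
                list_head = 0;        /* unlink it */
                return key;
        }
        return 0;
}

/* Thin wrapper that takes the lock, like remove_vm_area(). */
int remove_entry(int key)
{
        int v;

        pthread_rwlock_wrlock(&list_lock);
        v = __remove_entry(key);
        pthread_rwlock_unlock(&list_lock);
        return v;
}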