author    Steven Whitehouse <swhiteho@redhat.com>  2006-05-12 10:48:52 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-05-12 10:48:52 -0400
commit    7d63b54a65ce902f9aaa8efe8192aa3b983264d4 (patch)
tree      250a77bebe92cbd6edac70a649866044295876db /mm
parent    fd88de569b802c4a04aaa6ee74667775f4aed8c6 (diff)
parent    d8c3291c73b958243b33f8509d4507e76dafd055 (diff)
Merge branch 'master'
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c         32
-rw-r--r--  mm/memory_hotplug.c   6
-rw-r--r--  mm/migrate.c         11
-rw-r--r--  mm/slab.c             3
-rw-r--r--  mm/sparse.c           9
5 files changed, 56 insertions(+), 5 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 1120338a5d0f..82c448898d05 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -697,6 +697,38 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
return ret;
}
+/**
+ * find_get_pages_contig - gang contiguous pagecache lookup
+ * @mapping: The address_space to search
+ * @index: The starting page index
+ * @nr_pages: The maximum number of pages
+ * @pages: Where the resulting pages are placed
+ *
+ * find_get_pages_contig() works exactly like find_get_pages(), except
+ * that the pages returned are guaranteed to be contiguous.
+ *
+ * find_get_pages_contig() returns the number of pages which were found.
+ */
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
+ unsigned int nr_pages, struct page **pages)
+{
+ unsigned int i;
+ unsigned int ret;
+
+ read_lock_irq(&mapping->tree_lock);
+ ret = radix_tree_gang_lookup(&mapping->page_tree,
+ (void **)pages, index, nr_pages);
+ for (i = 0; i < ret; i++) {
+ if (pages[i]->mapping == NULL || pages[i]->index != index)
+ break;
+
+ page_cache_get(pages[i]);
+ index++;
+ }
+ read_unlock_irq(&mapping->tree_lock);
+ return i;
+}
+
/*
* Like find_get_pages, except we only return pages which are tagged with
* `tag'. We update *index to index the next page for the traversal.
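The helper above takes a reference on every page it returns, so a caller walks the array and drops each reference when done. A minimal sketch of such a caller, assuming the page cache API of this tree (page_cache_release() drops the gang-lookup reference); the function name and array size are illustrative, not part of the patch:

static void example_contig_walk(struct address_space *mapping, pgoff_t index)
{
	struct page *pages[16];
	unsigned int n, i;

	n = find_get_pages_contig(mapping, index, 16, pages);
	for (i = 0; i < n; i++) {
		/* guaranteed here: pages[i]->index == index + i */
		/* ... use pages[i] ... */
		page_cache_release(pages[i]);
	}
}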
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1fe76d963ac2..1ae2b2cc3a54 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -69,12 +69,16 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
err = __add_section(zone, phys_start_pfn + i);
- if (err)
+ /* We want to keep adding the rest of the
+ * sections if the first ones already exist
+ */
+ if (err && (err != -EEXIST))
break;
}
return err;
}
+EXPORT_SYMBOL_GPL(__add_pages);
static void grow_zone_span(struct zone *zone,
unsigned long start_pfn, unsigned long end_pfn)
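Tolerating -EEXIST plus the new EXPORT_SYMBOL_GPL means a modular (GPL) hotplug driver can hand __add_pages() a range even when some of its sections were registered earlier. A hedged sketch of such a caller; the helper name is hypothetical and error handling is reduced to the return code:

static int example_hot_add(struct zone *zone, u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	/* Sections that already exist no longer abort the loop;
	 * only real errors stop __add_pages() early. */
	return __add_pages(zone, start_pfn, nr_pages);
}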
diff --git a/mm/migrate.c b/mm/migrate.c
index d444229f2599..1c25040693d2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -439,6 +439,17 @@ redo:
goto unlock_both;
}
+ /* Make sure the dirty bit is up to date */
+ if (try_to_unmap(page, 1) == SWAP_FAIL) {
+ rc = -EPERM;
+ goto unlock_both;
+ }
+
+ if (page_mapcount(page)) {
+ rc = -EAGAIN;
+ goto unlock_both;
+ }
+
/*
* Default handling if a filesystem does not provide
* a migration function. We can only migrate clean
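The ordering of the two new checks is the point: try_to_unmap() transfers pte dirty bits into the struct page, so the clean-page test in the default path sees an up-to-date PageDirty(), and a page that is still mapped afterwards is deferred with -EAGAIN rather than migrated out from under its users. Annotated restatement of the sequence (in this tree the second try_to_unmap() argument is the migration flag):

	/* unmap all ptes, moving their dirty bits into the page */
	if (try_to_unmap(page, 1) == SWAP_FAIL) {
		rc = -EPERM;		/* unmapping refused: give up */
		goto unlock_both;
	}
	if (page_mapcount(page)) {
		rc = -EAGAIN;		/* a mapping raced in: retry later */
		goto unlock_both;
	}
	/* PageDirty() is now authoritative for the migration decision */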
diff --git a/mm/slab.c b/mm/slab.c
index af5c5237e11a..c32af7e7581e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -979,7 +979,8 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
* That way we could avoid the overhead of putting the objects
* into the free lists and getting them back later.
*/
- transfer_objects(rl3->shared, ac, ac->limit);
+ if (rl3->shared)
+ transfer_objects(rl3->shared, ac, ac->limit);
free_block(cachep, ac->entry, ac->avail, node);
ac->avail = 0;
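The guard is needed because the remote node's shared array cache is allocated separately from, and later than, the kmem_list3 it hangs off, and does not exist at all in some configurations, so rl3->shared can legitimately be NULL here. Only the keep-objects-warm transfer is optional; free_block() is always a valid fallback. Annotated form of the fixed sequence:

	if (rl3->shared)	/* shared cache may not be set up yet */
		transfer_objects(rl3->shared, ac, ac->limit);
	/* whatever was not parked in the shared cache goes back to
	 * the slab free lists */
	free_block(cachep, ac->entry, ac->avail, node);
	ac->avail = 0;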
diff --git a/mm/sparse.c b/mm/sparse.c
index 0a51f36ba3a1..d7c32de99ee8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,10 @@ static struct mem_section *sparse_index_alloc(int nid)
unsigned long array_size = SECTIONS_PER_ROOT *
sizeof(struct mem_section);
- section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+ if (system_state == SYSTEM_RUNNING)
+ section = kmalloc_node(array_size, GFP_KERNEL, nid);
+ else
+ section = alloc_bootmem_node(NODE_DATA(nid), array_size);
if (section)
memset(section, 0, array_size);
@@ -281,9 +284,9 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
ret = sparse_init_one_section(ms, section_nr, memmap);
- if (ret <= 0)
- __kfree_section_memmap(memmap, nr_pages);
out:
pgdat_resize_unlock(pgdat, &flags);
+ if (ret <= 0)
+ __kfree_section_memmap(memmap, nr_pages);
return ret;
}
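Two independent fixes land in sparse.c: sparse_index_alloc() now chooses its allocator by boot phase, and the error-path __kfree_section_memmap() call moves after pgdat_resize_unlock() so nothing frees memory while the resize spinlock is held. The phase-selection idiom, extracted as a hedged generic helper (the name is illustrative):

static void *node_array_alloc(int nid, unsigned long size)
{
	if (system_state == SYSTEM_RUNNING)	/* memory hot-add */
		return kmalloc_node(size, GFP_KERNEL, nid);
	return alloc_bootmem_node(NODE_DATA(nid), size);	/* early boot */
}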