author    Jon Tollefson <kniht@linux.vnet.ibm.com>    2008-07-23 21:27:54 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-07-24 10:47:19 -0700
commit    658013e93eb70494f7300bc90457b09a807232a4 (patch)
tree      d03a23ac4fff0b35138b5d8abb761e9fad85b09c /arch/powerpc/mm
parent    ec4b2c0c8312d1118c2acd00c89988ecf955d5cc (diff)
powerpc: scan device tree for gigantic pages
The 16G huge pages have to be reserved in the HMC prior to boot.  The locations of the pages are placed in the device tree.  This patch adds code to scan the device tree during very early boot and save these page locations until hugetlbfs is ready for them.

Acked-by: Adam Litke <agl@us.ibm.com>
Signed-off-by: Jon Tollefson <kniht@linux.vnet.ibm.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
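The mechanism described above boils down to stashing physical addresses in a fixed-size array during very early boot and handing them out once hugetlbfs can accept them.  The standalone sketch below illustrates that save-now/claim-later pattern in plain C; the names (save_gpage, claim_gpage, MAX_GPAGES) and the example addresses are purely illustrative and are not the kernel's API.

#include <stdio.h>

#define MAX_GPAGES 128                          /* illustrative cap, not a kernel constant */

static unsigned long long gpage_list[MAX_GPAGES];  /* addresses recorded at "early boot" */
static unsigned int gpage_count;

/* Record one reserved block; stands in for the device-tree scan side. */
static void save_gpage(unsigned long long addr)
{
	if (gpage_count < MAX_GPAGES)
		gpage_list[gpage_count++] = addr;
}

/* Hand one saved block back out; stands in for the hugetlbfs side. */
static int claim_gpage(unsigned long long *addr)
{
	if (gpage_count == 0)
		return 0;
	*addr = gpage_list[--gpage_count];
	return 1;
}

int main(void)
{
	unsigned long long a;

	/* "Early boot": pretend two 16G blocks were found in the device tree. */
	save_gpage(0x100000000ULL);
	save_gpage(0x500000000ULL);

	/* "Later": the consumer claims them one at a time. */
	while (claim_gpage(&a))
		printf("claimed gigantic page at 0x%llx\n", a);
	return 0;
}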
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c  44
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c    16
2 files changed, 59 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 8d3b58ebd38e..ae4c717243a5 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -68,6 +68,7 @@
#define KB (1024)
#define MB (1024*KB)
+#define GB (1024L*MB)
/*
* Note: pte --> Linux PTE
@@ -329,6 +330,44 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
return 0;
}
+/* Scan for 16G memory blocks that have been set aside for huge pages
+ * and reserve those blocks for 16G huge pages.
+ */
+static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
+ const char *uname, int depth,
+ void *data) {
+ char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ unsigned long *addr_prop;
+ u32 *page_count_prop;
+ unsigned int expected_pages;
+ long unsigned int phys_addr;
+ long unsigned int block_size;
+
+ /* We are scanning "memory" nodes only */
+ if (type == NULL || strcmp(type, "memory") != 0)
+ return 0;
+
+ /* This property is the log base 2 of the number of virtual pages that
+ * will represent this memory block. */
+ page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
+ if (page_count_prop == NULL)
+ return 0;
+ expected_pages = (1 << page_count_prop[0]);
+ addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
+ if (addr_prop == NULL)
+ return 0;
+ phys_addr = addr_prop[0];
+ block_size = addr_prop[1];
+ if (block_size != (16 * GB))
+ return 0;
+ printk(KERN_INFO "Huge page(16GB) memory: "
+ "addr = 0x%lX size = 0x%lX pages = %d\n",
+ phys_addr, block_size, expected_pages);
+ lmb_reserve(phys_addr, block_size * expected_pages);
+ add_gpage(phys_addr, block_size, expected_pages);
+ return 0;
+}
+
static void __init htab_init_page_sizes(void)
{
int rc;
@@ -418,7 +457,10 @@ static void __init htab_init_page_sizes(void)
);
#ifdef CONFIG_HUGETLB_PAGE
- /* Init large page size. Currently, we pick 16M or 1M depending
+ /* Reserve 16G huge page memory sections for huge pages */
+ of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
+
+/* Init large page size. Currently, we pick 16M or 1M depending
* on what is available
*/
if (mmu_psize_defs[MMU_PAGE_16M].shift)
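To make the arithmetic in htab_dt_scan_hugepage_blocks() above concrete: the ibm,expected#pages property holds the log base 2 of the page count, so a hypothetical value of 2 gives expected_pages = 1 << 2 = 4; with the required 16G block size from the node's reg property, the lmb_reserve() call then covers 4 * 16G = 64G starting at that node's base address, and add_gpage() records one entry for each of the four 16G pages.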
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5df82186fc93..e2a650a9e533 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -110,6 +110,22 @@ pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr)
}
#endif
+/* Build list of addresses of gigantic pages. This function is used in early
+ * boot before the buddy or bootmem allocator is setup.
+ */
+void add_gpage(unsigned long addr, unsigned long page_size,
+ unsigned long number_of_pages)
+{
+ if (!addr)
+ return;
+ while (number_of_pages > 0) {
+ gpage_freearray[nr_gpages] = addr;
+ nr_gpages++;
+ number_of_pages--;
+ addr += page_size;
+ }
+}
+
/* Moves the gigantic page addresses from the temporary list to the
* huge_boot_pages list. */
int alloc_bootmem_huge_page(struct hstate *h)
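As a worked example of add_gpage(): the entries are spaced page_size apart, so a hypothetical call add_gpage(0x100000000, 16G, 2) would append 0x100000000 and 0x500000000 to gpage_freearray.  Per the comment above, alloc_bootmem_huge_page() (its body is not shown in this hunk) later moves those saved addresses from this temporary list onto the huge_boot_pages list once the hugetlb boot code runs.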