author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-03-19 19:34:16 +0000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-03-24 13:47:34 +1100
commit     757c74d298dc8438760b8dea275c4c6e0ac8a77f (patch)
tree       77c694ad28230b3714c039fe54ab5497febfd4ac /arch
parent     a033a487f8ae79800a15774cb6565cbbca685fc6 (diff)
powerpc/mm: Introduce early_init_mmu() on 64-bit
This moves some MMU-related init code out of setup_64.c into hash_utils_64.c and calls them early_init_mmu() and early_init_mmu_secondary(). This will make it easier to plug in a new MMU type.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
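The point of the new hooks is that an alternative MMU family can supply its own early_init_mmu() / early_init_mmu_secondary() instead of the hash-table versions added to hash_utils_64.c below. The following is only an illustrative sketch of what such a backend could look like; the setup_boot_tlb() helper is hypothetical and is not part of this patch or of the kernel tree.

/*
 * Illustrative sketch only -- not from this patch. A hypothetical
 * software-loaded-TLB backend would define the two hooks declared in
 * asm/mmu.h; setup_boot_tlb() is a made-up placeholder for whatever
 * boot-time mapping that MMU type needs.
 */
#include <linux/init.h>
#include <asm/mmu.h>

void __init early_init_mmu(void)
{
        /* Boot CPU: create the initial linear mapping for this MMU type */
        setup_boot_tlb();
}

#ifdef CONFIG_SMP
void __init early_init_mmu_secondary(void)
{
        /* Secondary CPUs: repeat the per-CPU part of the setup */
        setup_boot_tlb();
}
#endif /* CONFIG_SMP */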
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h |  2
-rw-r--r--  arch/powerpc/include/asm/mmu.h        |  4
-rw-r--r--  arch/powerpc/kernel/setup_64.c        | 35
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c       | 36
4 files changed, 43 insertions(+), 34 deletions(-)
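For orientation before reading the hunks, this is roughly what the two call sites in setup_64.c reduce to once the patch is applied (unrelated code elided); the details that used to sit here now live behind the hooks in hash_utils_64.c:

void __init early_setup(unsigned long dt_ptr)
{
        /* ... device-tree and machine probing elided ... */

        /* Initialize the hash table or TLB handling */
        early_init_mmu();
}

#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
        /* Mark interrupts enabled in PACA */
        get_paca()->soft_enabled = 0;

        /* Initialize the hash table or TLB handling */
        early_init_mmu_secondary();
}
#endif /* CONFIG_SMP */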
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 68b752626808..98c104a09961 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -284,8 +284,6 @@ extern void add_gpage(unsigned long addr, unsigned long page_size,
unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
-extern void htab_initialize(void);
-extern void htab_initialize_secondary(void);
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index c073de4af849..cbf154387091 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -62,6 +62,10 @@ static inline int mmu_has_feature(unsigned long feature)
extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
+/* MMU initialization (64-bit only for now) */
+extern void early_init_mmu(void);
+extern void early_init_mmu_secondary(void);
+
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 73e16e298e28..c410c606955d 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -202,8 +202,6 @@ void __init early_setup(unsigned long dt_ptr)
/* Fix up paca fields required for the boot cpu */
get_paca()->cpu_start = 1;
- get_paca()->stab_real = __pa((u64)&initial_stab);
- get_paca()->stab_addr = (u64)&initial_stab;
/* Probe the machine type */
probe_machine();
@@ -212,20 +210,8 @@ void __init early_setup(unsigned long dt_ptr)
DBG("Found, Initializing memory management...\n");
- /*
- * Initialize the MMU Hash table and create the linear mapping
- * of memory. Has to be done before stab/slb initialization as
- * this is currently where the page size encoding is obtained
- */
- htab_initialize();
-
- /*
- * Initialize stab / SLB management except on iSeries
- */
- if (cpu_has_feature(CPU_FTR_SLB))
- slb_initialize();
- else if (!firmware_has_feature(FW_FEATURE_ISERIES))
- stab_initialize(get_paca()->stab_real);
+ /* Initialize the hash table or TLB handling */
+ early_init_mmu();
DBG(" <- early_setup()\n");
}
@@ -233,22 +219,11 @@ void __init early_setup(unsigned long dt_ptr)
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
- struct paca_struct *lpaca = get_paca();
-
/* Mark interrupts enabled in PACA */
- lpaca->soft_enabled = 0;
+ get_paca()->soft_enabled = 0;
- /* Initialize hash table for that CPU */
- htab_initialize_secondary();
-
- /* Initialize STAB/SLB. We use a virtual address as it works
- * in real mode on pSeries and we want a virutal address on
- * iSeries anyway
- */
- if (cpu_has_feature(CPU_FTR_SLB))
- slb_initialize();
- else
- stab_initialize(lpaca->stab_addr);
+ /* Initialize the hash table or TLB handling */
+ early_init_mmu_secondary();
}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 86c00c885e68..db556d25c3a7 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -590,7 +590,7 @@ static void __init htab_finish_init(void)
make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}
-void __init htab_initialize(void)
+static void __init htab_initialize(void)
{
unsigned long table;
unsigned long pteg_count;
@@ -732,11 +732,43 @@ void __init htab_initialize(void)
#undef KB
#undef MB
-void htab_initialize_secondary(void)
+void __init early_init_mmu(void)
{
+ /* Setup initial STAB address in the PACA */
+ get_paca()->stab_real = __pa((u64)&initial_stab);
+ get_paca()->stab_addr = (u64)&initial_stab;
+
+ /* Initialize the MMU Hash table and create the linear mapping
+ * of memory. Has to be done before stab/slb initialization as
+ * this is currently where the page size encoding is obtained
+ */
+ htab_initialize();
+
+ /* Initialize stab / SLB management except on iSeries
+ */
+ if (cpu_has_feature(CPU_FTR_SLB))
+ slb_initialize();
+ else if (!firmware_has_feature(FW_FEATURE_ISERIES))
+ stab_initialize(get_paca()->stab_real);
+}
+
+#ifdef CONFIG_SMP
+void __init early_init_mmu_secondary(void)
+{
+ /* Initialize hash table for that CPU */
if (!firmware_has_feature(FW_FEATURE_LPAR))
mtspr(SPRN_SDR1, _SDR1);
+
+ /* Initialize STAB/SLB. We use a virtual address as it works
+ * in real mode on pSeries and we want a virtual address on
+ * iSeries anyway
+ */
+ if (cpu_has_feature(CPU_FTR_SLB))
+ slb_initialize();
+ else
+ stab_initialize(get_paca()->stab_addr);
}
+#endif /* CONFIG_SMP */
/*
* Called by asm hashtable.S for doing lazy icache flush