author		David S. Miller <davem@davemloft.net>	2006-01-31 18:31:20 -0800
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 01:11:17 -0800
commit		98c5584cfc47932c4f3ccf5eee2e0bae1447b85e (patch)
tree		c067ac8bfc081bbe0b3073374cb15708458e04ab /include/asm-sparc64
parent		09f94287f7260e03bbeab497e743691fafcc22c3 (diff)
[SPARC64]: Add infrastructure for dynamic TSB sizing.
This also cleans up tsb_context_switch(). The assembler routine is now __tsb_context_switch() and the former is an inline function that picks out the bits from the mm_struct and passes them into the assembler code as arguments.

setup_tsb_parms() computes the locked TLB entry to map the TSB. Later, when we support the physical address quad load instructions of Cheetah+ and later chips, we'll simply use the physical address for the TSB register value and set the map virtual address and PTE both to zero.

Signed-off-by: David S. Miller <davem@davemloft.net>
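To make that split concrete, the following is a minimal, userspace-only sketch of how the per-mm values that the new inline tsb_context_switch() hands to __tsb_context_switch() might be computed. It is not the kernel's setup_tsb_parms(): struct example_tsb_ctx, example_setup_tsb() and the dummy register/PTE encodings are hypothetical, made up purely to illustrate the idea in the message above.

#include <stdio.h>

/* Hypothetical stand-ins for the per-mm TSB fields added to
 * mm_context_t in the mmu.h hunk below; illustration only. */
struct example_tsb_ctx {
	unsigned long tsb_nentries;
	unsigned long tsb_reg_val;
	unsigned long tsb_map_vaddr;
	unsigned long tsb_map_pte;
};

/* Each TSB slot is an 8-byte tag plus an 8-byte PTE (16 bytes). */
#define EXAMPLE_TSB_ENTRY_BYTES	16UL

/* Fill in the values that the inline tsb_context_switch() later hands
 * to the low-level assembler routine.  The "locked PTE" and register
 * encodings here are dummies, not real SPARC64 encodings. */
static void example_setup_tsb(struct example_tsb_ctx *ctx,
			      unsigned long tsb_vaddr,
			      unsigned long tsb_paddr,
			      unsigned long tsb_bytes,
			      int have_phys_quad_loads)
{
	ctx->tsb_nentries = tsb_bytes / EXAMPLE_TSB_ENTRY_BYTES;

	if (have_phys_quad_loads) {
		/* Cheetah+ and later (the future plan in the message):
		 * program the TSB register with the physical address and
		 * skip the locked virtual mapping entirely. */
		ctx->tsb_reg_val   = tsb_paddr;
		ctx->tsb_map_vaddr = 0;
		ctx->tsb_map_pte   = 0;
	} else {
		/* Otherwise the TSB is reached through a virtual mapping,
		 * so remember where it is mapped and a PTE for the locked
		 * TLB entry that covers it. */
		ctx->tsb_reg_val   = tsb_vaddr;
		ctx->tsb_map_vaddr = tsb_vaddr;
		ctx->tsb_map_pte   = tsb_paddr | 1UL;	/* fake "valid" bit */
	}
}

int main(void)
{
	struct example_tsb_ctx ctx;

	example_setup_tsb(&ctx, 0xf0000000UL, 0x40000000UL, 8192, 0);
	printf("entries=%lu reg=%#lx map_vaddr=%#lx map_pte=%#lx\n",
	       ctx.tsb_nentries, ctx.tsb_reg_val, ctx.tsb_map_vaddr,
	       ctx.tsb_map_pte);
	return 0;
}

In the kernel these four values live in mm_context_t (first hunk below), which is what lets the wrapper take only the mm_struct pointer.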
Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--	include/asm-sparc64/mmu.h		13
-rw-r--r--	include/asm-sparc64/mmu_context.h	15
-rw-r--r--	include/asm-sparc64/tsb.h		 2
3 files changed, 24 insertions, 6 deletions
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 36384cf7faa6..2effeba2476c 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -90,9 +90,20 @@
#ifndef __ASSEMBLY__
+#define TSB_ENTRY_ALIGNMENT 16
+
+struct tsb {
+ unsigned long tag;
+ unsigned long pte;
+} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
+
typedef struct {
unsigned long sparc64_ctx_val;
- unsigned long *sparc64_tsb;
+ struct tsb *tsb;
+ unsigned long tsb_nentries;
+ unsigned long tsb_reg_val;
+ unsigned long tsb_map_vaddr;
+ unsigned long tsb_map_pte;
} mm_context_t;
#endif /* !__ASSEMBLY__ */
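The new struct tsb above matches the slot layout documented in the tsb.h comment at the end of this diff: each 16-byte aligned slot is an 8-byte tag followed by an 8-byte PTE. As a rough illustration of how such a table is probed, here is a self-contained userspace sketch; the direct-mapped hash (masking the virtual page number by nentries - 1), the 8K page-size assumption and the toy tag/PTE values are illustrative guesses, not the real SPARC64 TSB hash or TAG TARGET format.

#include <stdio.h>
#include <stdlib.h>

#define TSB_ENTRY_ALIGNMENT	16
#define EXAMPLE_PAGE_SHIFT	13	/* assume 8K pages for the demo */

/* Mirrors the entry layout added to mmu.h above: an 8-byte tag
 * followed by an 8-byte PTE, each slot 16-byte aligned. */
struct tsb {
	unsigned long tag;
	unsigned long pte;
} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));

/* Toy direct-mapped probe: mask the virtual page number into the table
 * and compare tags.  The real tag uses the TLB TAG TARGET layout; here
 * the tag is simply the page number, purely for illustration. */
static unsigned long example_tsb_lookup(struct tsb *tsb,
					unsigned long nentries,
					unsigned long vaddr)
{
	unsigned long vpn = vaddr >> EXAMPLE_PAGE_SHIFT;
	struct tsb *ent = &tsb[vpn & (nentries - 1)];

	if (ent->tag == vpn)
		return ent->pte;	/* hit */
	return 0;			/* miss */
}

int main(void)
{
	unsigned long nentries = 512;	/* must stay a power of two */
	struct tsb *tsb = calloc(nentries, sizeof(*tsb));
	unsigned long vaddr = 0x12345000UL;
	unsigned long vpn = vaddr >> EXAMPLE_PAGE_SHIFT;

	if (!tsb)
		return 1;

	/* Install a fake translation, then probe it back out. */
	tsb[vpn & (nentries - 1)].tag = vpn;
	tsb[vpn & (nentries - 1)].pte = 0xdeadbeefUL;

	printf("sizeof(struct tsb)=%zu, lookup -> %#lx\n",
	       sizeof(struct tsb), example_tsb_lookup(tsb, nentries, vaddr));
	free(tsb);
	return 0;
}

Because the probe masks by nentries - 1, table sizes must stay powers of two; recording tsb_nentries per mm is what lets that size change at runtime, which is the point of the dynamic-sizing infrastructure this patch introduces.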
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 0dffb4ce8a1d..0a950f151d2b 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -22,7 +22,15 @@ extern void get_new_mmu_context(struct mm_struct *mm);
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
-extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb);
+extern void __tsb_context_switch(unsigned long pgd_pa, unsigned long tsb_reg,
+ unsigned long tsb_vaddr, unsigned long tsb_pte);
+
+static inline void tsb_context_switch(struct mm_struct *mm)
+{
+ __tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
+ mm->context.tsb_map_vaddr,
+ mm->context.tsb_map_pte);
+}
/* Set MMU context in the actual hardware. */
#define load_secondary_context(__mm) \
@@ -52,8 +60,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
if (!ctx_valid || (old_mm != mm)) {
load_secondary_context(mm);
- tsb_context_switch(__pa(mm->pgd),
- mm->context.sparc64_tsb);
+ tsb_context_switch(mm);
}
/* Even if (mm == old_mm) we _must_ check
@@ -91,7 +98,7 @@ static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm
load_secondary_context(mm);
__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
- tsb_context_switch(__pa(mm->pgd), mm->context.sparc64_tsb);
+ tsb_context_switch(mm);
}
#endif /* !(__ASSEMBLY__) */
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index 03d272e0e477..1f93b7d8cdbc 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -19,7 +19,7 @@
* stxa %g5, [%g0] ASI_{D,I}TLB_DATA_IN
* retry
*
-
+ *
* Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte
* PTE. The TAG is of the same layout as the TLB TAG TARGET mmu
* register which is: