path: root/arch/sparc64/mm
author    Linus Torvalds <torvalds@g5.osdl.org>  2006-03-22 10:56:57 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-03-22 10:56:57 -0800
commit    d04ef3a795b3b7b376a02713ed5e211e9ae1f917 (patch)
tree      837da034751a2fc1be0fc5a105c218d41a498eb6 /arch/sparc64/mm
parent    36177ba655c238e33400cc2837a28720b62784bd (diff)
parent    dcc1e8dd88d4bc55e32a26dad7633d20ffe606d2 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
  [SPARC64]: Add a secondary TSB for hugepage mappings.
  [SPARC]: Respect vm_page_prot in io_remap_page_range().
Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--  arch/sparc64/mm/fault.c        |  15
-rw-r--r--  arch/sparc64/mm/generic.c      |   1
-rw-r--r--  arch/sparc64/mm/hugetlbpage.c  |  28
-rw-r--r--  arch/sparc64/mm/init.c         |  21
-rw-r--r--  arch/sparc64/mm/tsb.c          | 234
5 files changed, 197 insertions, 102 deletions
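
For orientation before the patch itself: the fault.c hunk below sizes each of the two TSBs against its own working-set count. The following user-space sketch (hypothetical names throughout, not kernel code) illustrates that accounting, assuming the common sparc64 configuration of 8K base pages and 4MB hugepages:

/* User-space sketch of the dual-TSB accounting in do_sparc64_fault();
 * all names here are illustrative, not the kernel's. */
#include <stdio.h>

#define PAGE_SIZE   8192UL                /* 8K base pages (assumed)  */
#define HPAGE_SIZE  (4UL * 1024 * 1024)   /* 4MB hugepages (assumed)  */

enum { MM_TSB_BASE, MM_TSB_HUGE, MM_NUM_TSBS };

struct mm_ctx {
	unsigned long rss;                    /* total pages mapped       */
	unsigned long huge_pte_count;         /* number of huge PTEs      */
	unsigned long rss_limit[MM_NUM_TSBS]; /* per-TSB grow thresholds  */
};

static void check_tsb_growth(struct mm_ctx *mm)
{
	/* Subtract pages backed by hugepages, so the base TSB is sized
	 * only against small-page mappings... */
	unsigned long base_rss = mm->rss -
		mm->huge_pte_count * (HPAGE_SIZE / PAGE_SIZE);

	if (base_rss >= mm->rss_limit[MM_TSB_BASE])
		printf("grow base TSB (rss=%lu)\n", base_rss);

	/* ...and size the huge TSB against the huge-PTE count alone. */
	if (mm->huge_pte_count >= mm->rss_limit[MM_TSB_HUGE])
		printf("grow huge TSB (huge_ptes=%lu)\n", mm->huge_pte_count);
}

int main(void)
{
	struct mm_ctx mm = { .rss = 2048, .huge_pte_count = 3,
			     .rss_limit = { 384, 2 } };

	check_tsb_growth(&mm);	/* base rss 2048 - 3*512 = 512 -> both grow */
	return 0;
}
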
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 63b6cc0cd5d5..d21ff3230c02 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -410,9 +410,18 @@ good_area:
up_read(&mm->mmap_sem);
mm_rss = get_mm_rss(mm);
- if (unlikely(mm_rss >= mm->context.tsb_rss_limit))
- tsb_grow(mm, mm_rss);
-
+#ifdef CONFIG_HUGETLB_PAGE
+ mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#endif
+ if (unlikely(mm_rss >=
+ mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
+ tsb_grow(mm, MM_TSB_BASE, mm_rss);
+#ifdef CONFIG_HUGETLB_PAGE
+ mm_rss = mm->context.huge_pte_count;
+ if (unlikely(mm_rss >=
+ mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
+ tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+#endif
return;
/*
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 5fc5c579e35e..8cb06205d265 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -140,7 +140,6 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
vma->vm_pgoff = phys_base >> PAGE_SHIFT;
- prot = __pgprot(pg_iobits);
offset -= from;
dir = pgd_offset(mm, from);
flush_cache_range(vma, beg, end);
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 280dc7958a13..074620d413d4 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -199,13 +199,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
- if (pgd) {
- pud = pud_offset(pgd, addr);
- if (pud) {
- pmd = pmd_alloc(mm, pud, addr);
- if (pmd)
- pte = pte_alloc_map(mm, pmd, addr);
- }
+ pud = pud_alloc(mm, pgd, addr);
+ if (pud) {
+ pmd = pmd_alloc(mm, pud, addr);
+ if (pmd)
+ pte = pte_alloc_map(mm, pmd, addr);
}
return pte;
}
@@ -231,13 +229,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
return pte;
}
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
-
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
int i;
+ if (!pte_present(*ptep) && pte_present(entry))
+ mm->context.huge_pte_count++;
+
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
set_pte_at(mm, addr, ptep, entry);
ptep++;
@@ -253,6 +252,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
int i;
entry = *ptep;
+ if (pte_present(entry))
+ mm->context.huge_pte_count--;
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
pte_clear(mm, addr, ptep);
@@ -290,6 +291,15 @@ static void context_reload(void *__data)
void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
+ struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+
+ if (likely(tp->tsb != NULL))
+ return;
+
+ tsb_grow(mm, MM_TSB_HUGE, 0);
+ tsb_context_switch(mm);
+ smp_tsb_sync(mm);
+
/* On UltraSPARC-III+ and later, configure the second half of
* the Data-TLB for huge pages.
*/
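
The hugetlb_prefault_arch_hook() change above allocates the huge-page TSB lazily, on a process's first hugepage use, rather than unconditionally for every address space. A minimal stand-alone sketch of that pattern (illustrative names, not kernel code; the 16-byte entry size matches hp->num_ttes = tsb_bytes / 16 in tsb.c below):

/* Lazy one-time init: allocate the secondary TSB on the first hugepage
 * touch only, leaving a single NULL check on the fast path. */
#include <stdio.h>
#include <stdlib.h>

struct tsb_config { void *tsb; unsigned long nentries; };

static void hugepage_first_touch(struct tsb_config *tp)
{
	if (tp->tsb != NULL)	/* common case: already set up, do nothing */
		return;

	tp->nentries = 512;	/* smallest TSB: 8KB of 16-byte entries */
	tp->tsb = calloc(tp->nentries, 16);
	/* the kernel then reloads the TSB registers and syncs other CPUs:
	 * tsb_context_switch(mm); smp_tsb_sync(mm); */
	printf("huge TSB allocated: %lu entries\n", tp->nentries);
}

int main(void)
{
	struct tsb_config huge = { 0 };

	hugepage_first_touch(&huge);	/* allocates */
	hugepage_first_touch(&huge);	/* no-op     */
	free(huge.tsb);
	return 0;
}
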
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 2ae143ba50d8..ded63ee9c4fd 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -283,6 +283,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
struct mm_struct *mm;
struct tsb *tsb;
unsigned long tag, flags;
+ unsigned long tsb_index, tsb_hash_shift;
if (tlb_type != hypervisor) {
unsigned long pfn = pte_pfn(pte);
@@ -312,10 +313,26 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
mm = vma->vm_mm;
+ tsb_index = MM_TSB_BASE;
+ tsb_hash_shift = PAGE_SHIFT;
+
spin_lock_irqsave(&mm->context.lock, flags);
- tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
- (mm->context.tsb_nentries - 1UL)];
+#ifdef CONFIG_HUGETLB_PAGE
+ if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
+ if ((tlb_type == hypervisor &&
+ (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+ (tlb_type != hypervisor &&
+ (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
+ tsb_index = MM_TSB_HUGE;
+ tsb_hash_shift = HPAGE_SHIFT;
+ }
+ }
+#endif
+
+ tsb = mm->context.tsb_block[tsb_index].tsb;
+ tsb += ((address >> tsb_hash_shift) &
+ (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
tag = (address >> 22UL);
tsb_insert(tsb, tag, pte_val(pte));
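
The update_mmu_cache() hunk above selects a TSB and a hash shift per page size; the bucket is then (address >> shift) & (nentries - 1). A small sketch of that indexing, assuming the usual 8K base page (PAGE_SHIFT 13) and 4MB hugepage (HPAGE_SHIFT 22) configuration:

/* Sketch of the per-page-size TSB indexing done in update_mmu_cache();
 * shift values assume the common 8K-base / 4MB-huge configuration. */
#include <stdio.h>

#define BASE_SHIFT  13UL	/* 8K pages  (PAGE_SHIFT, assumed)  */
#define HUGE_SHIFT  22UL	/* 4MB pages (HPAGE_SHIFT, assumed) */

static unsigned long tsb_hash(unsigned long vaddr, unsigned long shift,
			      unsigned long nentries)
{
	/* nentries is a power of two, so the mask picks the bucket */
	return (vaddr >> shift) & (nentries - 1UL);
}

int main(void)
{
	unsigned long va = 0x70000400000UL;

	printf("base TSB bucket: %lu\n", tsb_hash(va, BASE_SHIFT, 512));
	printf("huge TSB bucket: %lu\n", tsb_hash(va, HUGE_SHIFT, 512));
	return 0;
}
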
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index b2064e2a44d6..beaa02810f0e 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -15,9 +15,9 @@
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
-static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
+static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
- vaddr >>= PAGE_SHIFT;
+ vaddr >>= hash_shift;
return vaddr & (nentries - 1);
}
@@ -36,7 +36,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
unsigned long v;
for (v = start; v < end; v += PAGE_SIZE) {
- unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
+ unsigned long hash = tsb_hash(v, PAGE_SHIFT,
+ KERNEL_TSB_NENTRIES);
struct tsb *ent = &swapper_tsb[hash];
if (tag_compare(ent->tag, v)) {
@@ -46,49 +47,91 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
}
}
-void flush_tsb_user(struct mmu_gather *mp)
+static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
{
- struct mm_struct *mm = mp->mm;
- unsigned long nentries, base, flags;
- struct tsb *tsb;
- int i;
-
- spin_lock_irqsave(&mm->context.lock, flags);
-
- tsb = mm->context.tsb;
- nentries = mm->context.tsb_nentries;
+ unsigned long i;
- if (tlb_type == cheetah_plus || tlb_type == hypervisor)
- base = __pa(tsb);
- else
- base = (unsigned long) tsb;
-
for (i = 0; i < mp->tlb_nr; i++) {
unsigned long v = mp->vaddrs[i];
unsigned long tag, ent, hash;
v &= ~0x1UL;
- hash = tsb_hash(v, nentries);
- ent = base + (hash * sizeof(struct tsb));
+ hash = tsb_hash(v, hash_shift, nentries);
+ ent = tsb + (hash * sizeof(struct tsb));
tag = (v >> 22UL);
tsb_flush(ent, tag);
}
+}
+
+void flush_tsb_user(struct mmu_gather *mp)
+{
+ struct mm_struct *mm = mp->mm;
+ unsigned long nentries, base, flags;
+
+ spin_lock_irqsave(&mm->context.lock, flags);
+ base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+ nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ base = __pa(base);
+ __flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+
+#ifdef CONFIG_HUGETLB_PAGE
+ if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+ base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+ nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+ base = __pa(base);
+ __flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+ }
+#endif
spin_unlock_irqrestore(&mm->context.lock, flags);
}
-static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
+#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
+#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
+#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
+#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_64K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
+#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_512K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_4MB
+#else
+#error Broken base page size setting...
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_64K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_512K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_4MB
+#else
+#error Broken huge page size setting...
+#endif
+#endif
+
+static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
unsigned long tsb_reg, base, tsb_paddr;
unsigned long page_sz, tte;
- mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
+ mm->context.tsb_block[tsb_idx].tsb_nentries =
+ tsb_bytes / sizeof(struct tsb);
base = TSBMAP_BASE;
tte = pgprot_val(PAGE_KERNEL_LOCKED);
- tsb_paddr = __pa(mm->context.tsb);
+ tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
/* Use the smallest page size that can map the whole TSB
@@ -147,61 +190,49 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
/* Physical mapping, no locked TLB entry for TSB. */
tsb_reg |= tsb_paddr;
- mm->context.tsb_reg_val = tsb_reg;
- mm->context.tsb_map_vaddr = 0;
- mm->context.tsb_map_pte = 0;
+ mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+ mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
+ mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
} else {
tsb_reg |= base;
tsb_reg |= (tsb_paddr & (page_sz - 1UL));
tte |= (tsb_paddr & ~(page_sz - 1UL));
- mm->context.tsb_reg_val = tsb_reg;
- mm->context.tsb_map_vaddr = base;
- mm->context.tsb_map_pte = tte;
+ mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+ mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
+ mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
}
/* Setup the Hypervisor TSB descriptor. */
if (tlb_type == hypervisor) {
- struct hv_tsb_descr *hp = &mm->context.tsb_descr;
+ struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];
- switch (PAGE_SIZE) {
- case 8192:
- default:
- hp->pgsz_idx = HV_PGSZ_IDX_8K;
+ switch (tsb_idx) {
+ case MM_TSB_BASE:
+ hp->pgsz_idx = HV_PGSZ_IDX_BASE;
break;
-
- case 64 * 1024:
- hp->pgsz_idx = HV_PGSZ_IDX_64K;
- break;
-
- case 512 * 1024:
- hp->pgsz_idx = HV_PGSZ_IDX_512K;
- break;
-
- case 4 * 1024 * 1024:
- hp->pgsz_idx = HV_PGSZ_IDX_4MB;
+#ifdef CONFIG_HUGETLB_PAGE
+ case MM_TSB_HUGE:
+ hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
break;
+#endif
+ default:
+ BUG();
};
hp->assoc = 1;
hp->num_ttes = tsb_bytes / 16;
hp->ctx_idx = 0;
- switch (PAGE_SIZE) {
- case 8192:
- default:
- hp->pgsz_mask = HV_PGSZ_MASK_8K;
- break;
-
- case 64 * 1024:
- hp->pgsz_mask = HV_PGSZ_MASK_64K;
- break;
-
- case 512 * 1024:
- hp->pgsz_mask = HV_PGSZ_MASK_512K;
+ switch (tsb_idx) {
+ case MM_TSB_BASE:
+ hp->pgsz_mask = HV_PGSZ_MASK_BASE;
break;
-
- case 4 * 1024 * 1024:
- hp->pgsz_mask = HV_PGSZ_MASK_4MB;
+#ifdef CONFIG_HUGETLB_PAGE
+ case MM_TSB_HUGE:
+ hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
break;
+#endif
+ default:
+ BUG();
};
hp->tsb_base = tsb_paddr;
hp->resv = 0;
@@ -241,11 +272,11 @@ void __init tsb_cache_init(void)
}
}
-/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
- * do_sparc64_fault() invokes this routine to try and grow the TSB.
+/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
+ * do_sparc64_fault() invokes this routine to try and grow it.
*
* When we reach the maximum TSB size supported, we stick ~0UL into
- * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
+ * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
* will not trigger any longer.
*
* The TSB can be anywhere from 8K to 1MB in size, in increasing powers
@@ -257,7 +288,7 @@ void __init tsb_cache_init(void)
* the number of entries that the current TSB can hold at once. Currently,
* we trigger when the RSS hits 3/4 of the TSB capacity.
*/
-void tsb_grow(struct mm_struct *mm, unsigned long rss)
+void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
unsigned long max_tsb_size = 1 * 1024 * 1024;
unsigned long new_size, old_size, flags;
@@ -297,7 +328,8 @@ retry_tsb_alloc:
* down to a 0-order allocation and force no TSB
* growing for this address space.
*/
- if (mm->context.tsb == NULL && new_cache_index > 0) {
+ if (mm->context.tsb_block[tsb_index].tsb == NULL &&
+ new_cache_index > 0) {
new_cache_index = 0;
new_size = 8192;
new_rss_limit = ~0UL;
@@ -307,8 +339,8 @@ retry_tsb_alloc:
/* If we failed on a TSB grow, we are under serious
* memory pressure so don't try to grow any more.
*/
- if (mm->context.tsb != NULL)
- mm->context.tsb_rss_limit = ~0UL;
+ if (mm->context.tsb_block[tsb_index].tsb != NULL)
+ mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
return;
}
@@ -339,23 +371,26 @@ retry_tsb_alloc:
*/
spin_lock_irqsave(&mm->context.lock, flags);
- old_tsb = mm->context.tsb;
- old_cache_index = (mm->context.tsb_reg_val & 0x7UL);
- old_size = mm->context.tsb_nentries * sizeof(struct tsb);
+ old_tsb = mm->context.tsb_block[tsb_index].tsb;
+ old_cache_index =
+ (mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
+ old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
+ sizeof(struct tsb));
/* Handle multiple threads trying to grow the TSB at the same time.
* One will get in here first, and bump the size and the RSS limit.
* The others will get in here next and hit this check.
*/
- if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
+ if (unlikely(old_tsb &&
+ (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
spin_unlock_irqrestore(&mm->context.lock, flags);
kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
return;
}
- mm->context.tsb_rss_limit = new_rss_limit;
+ mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;
if (old_tsb) {
extern void copy_tsb(unsigned long old_tsb_base,
@@ -372,8 +407,8 @@ retry_tsb_alloc:
copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
}
- mm->context.tsb = new_tsb;
- setup_tsb_params(mm, new_size);
+ mm->context.tsb_block[tsb_index].tsb = new_tsb;
+ setup_tsb_params(mm, tsb_index, new_size);
spin_unlock_irqrestore(&mm->context.lock, flags);
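
To make the grow threshold from the comment above tsb_grow() concrete: a struct tsb entry is 16 bytes (cf. hp->num_ttes = tsb_bytes / 16 in setup_tsb_params()), and growth triggers at 3/4 of capacity. A hypothetical helper working those numbers through the 8K-to-1MB size range:

/* Worked numbers for the tsb_grow() trigger; helper name is illustrative. */
#include <stdio.h>

static unsigned long tsb_rss_limit(unsigned long tsb_bytes)
{
	unsigned long nentries = tsb_bytes / 16;	/* sizeof(struct tsb) */

	return nentries * 3 / 4;	/* grow at 3/4 of TSB capacity */
}

int main(void)
{
	unsigned long sz;

	/* TSB sizes run from 8K to 1MB in increasing powers of two */
	for (sz = 8192; sz <= 1024 * 1024; sz <<= 1)
		printf("TSB %7lu bytes -> %5lu entries, grow at rss %5lu\n",
		       sz, sz / 16, tsb_rss_limit(sz));
	return 0;
}
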
@@ -394,40 +429,65 @@ retry_tsb_alloc:
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
+#ifdef CONFIG_HUGETLB_PAGE
+ unsigned long huge_pte_count;
+#endif
+ unsigned int i;
+
spin_lock_init(&mm->context.lock);
mm->context.sparc64_ctx_val = 0UL;
+#ifdef CONFIG_HUGETLB_PAGE
+ /* We reset it to zero because the fork() page copying
+ * will re-increment the counters as the parent PTEs are
+ * copied into the child address space.
+ */
+ huge_pte_count = mm->context.huge_pte_count;
+ mm->context.huge_pte_count = 0;
+#endif
+
/* copy_mm() copies over the parent's mm_struct before calling
* us, so we need to zero out the TSB pointer or else tsb_grow()
* will be confused and think there is an older TSB to free up.
*/
- mm->context.tsb = NULL;
+ for (i = 0; i < MM_NUM_TSBS; i++)
+ mm->context.tsb_block[i].tsb = NULL;
/* If this is fork, inherit the parent's TSB size. We would
* grow it to that size on the first page fault anyways.
*/
- tsb_grow(mm, get_mm_rss(mm));
+ tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
- if (unlikely(!mm->context.tsb))
+#ifdef CONFIG_HUGETLB_PAGE
+ if (unlikely(huge_pte_count))
+ tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+#endif
+
+ if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
return -ENOMEM;
return 0;
}
-void destroy_context(struct mm_struct *mm)
+static void tsb_destroy_one(struct tsb_config *tp)
{
- unsigned long flags, cache_index;
+ unsigned long cache_index;
- cache_index = (mm->context.tsb_reg_val & 0x7UL);
- kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);
+ if (!tp->tsb)
+ return;
+ cache_index = tp->tsb_reg_val & 0x7UL;
+ kmem_cache_free(tsb_caches[cache_index], tp->tsb);
+ tp->tsb = NULL;
+ tp->tsb_reg_val = 0UL;
+}
- /* We can remove these later, but for now it's useful
- * to catch any bogus post-destroy_context() references
- * to the TSB.
- */
- mm->context.tsb = NULL;
- mm->context.tsb_reg_val = 0UL;
+void destroy_context(struct mm_struct *mm)
+{
+ unsigned long flags, i;
+
+ for (i = 0; i < MM_NUM_TSBS; i++)
+ tsb_destroy_one(&mm->context.tsb_block[i]);
spin_lock_irqsave(&ctx_alloc_lock, flags);