Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/fault_32.c     | 11
-rw-r--r--  arch/sparc/mm/fault_64.c     |  6
-rw-r--r--  arch/sparc/mm/highmem.c      |  6
-rw-r--r--  arch/sparc/mm/hugetlbpage.c  | 28
-rw-r--r--  arch/sparc/mm/init_32.c      |  1
-rw-r--r--  arch/sparc/mm/init_64.c      | 37
-rw-r--r--  arch/sparc/mm/io-unit.c      |  6
-rw-r--r--  arch/sparc/mm/iommu.c        |  6
-rw-r--r--  arch/sparc/mm/srmmu.c        | 55
9 files changed, 120 insertions(+), 36 deletions(-)
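Most of these hunks apply the same mechanical conversion: page-table walks gain the intermediate p4d_t (and, where previously skipped, pud_t) steps of the generic five-level API; init_64.c and srmmu.c additionally switch to the pgtable_pte_page_ctor()/pgtable_pte_page_dtor() names, and init_32.c drops a stale pgalloc.h include. As a hedged illustration of the walk pattern these hunks converge on (not part of the patch; the helper name walk_kernel_pte is hypothetical, and the accessors assumed are the standard pgd_offset_k/p4d_offset/pud_offset/pmd_offset/pte_offset_kernel helpers of this kernel era):

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Illustrative only: resolve a kernel virtual address to its pte by
 * walking all five levels, bailing out on any missing entry.  This is
 * the shape the converted walks in fault_32.c, fault_64.c, highmem.c,
 * io-unit.c, iommu.c and srmmu.c share.
 */
static pte_t *walk_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}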
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 8d69de111470..89976c9b936c 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -351,6 +351,8 @@ vmalloc_fault:
*/
int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
+ pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pgd = tsk->active_mm->pgd + offset;
@@ -363,8 +365,13 @@ vmalloc_fault:
return;
}
- pmd = pmd_offset(pgd, address);
- pmd_k = pmd_offset(pgd_k, address);
+ p4d = p4d_offset(pgd, address);
+ pud = pud_offset(p4d, address);
+ pmd = pmd_offset(pud, address);
+
+ p4d_k = p4d_offset(pgd_k, address);
+ pud_k = pud_offset(p4d_k, address);
+ pmd_k = pmd_offset(pud_k, address);
if (pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_area_nosemaphore;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 2371fb6b97e4..8b7ddbd14b65 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -80,6 +80,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
static unsigned int get_user_insn(unsigned long tpc)
{
pgd_t *pgdp = pgd_offset(current->mm, tpc);
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep, pte;
@@ -88,7 +89,10 @@ static unsigned int get_user_insn(unsigned long tpc)
if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
goto out;
- pudp = pud_offset(pgdp, tpc);
+ p4dp = p4d_offset(pgdp, tpc);
+ if (p4d_none(*p4dp) || unlikely(p4d_bad(*p4dp)))
+ goto out;
+ pudp = pud_offset(p4dp, tpc);
if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
goto out;
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 86bc2a58d26c..d4a80adea7e5 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -39,10 +39,14 @@ static pte_t *kmap_pte;
void __init kmap_init(void)
{
unsigned long address;
+ p4d_t *p4d;
+ pud_t *pud;
pmd_t *dir;
address = __fix_to_virt(FIX_KMAP_BEGIN);
- dir = pmd_offset(pgd_offset_k(address), address);
+ p4d = p4d_offset(pgd_offset_k(address), address);
+ pud = pud_offset(p4d, address);
+ dir = pmd_offset(pud, address);
/* cache the first kmap pte */
kmap_pte = pte_offset_kernel(dir, address);
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index f78793a06bbd..7b9fa861b67c 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -277,11 +277,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ pud = pud_alloc(mm, p4d, addr);
if (!pud)
return NULL;
if (sz >= PUD_SIZE)
@@ -298,13 +300,17 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd))
return NULL;
- pud = pud_offset(pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(*p4d))
+ return NULL;
+ pud = pud_offset(p4d, addr);
if (pud_none(*pud))
return NULL;
if (is_hugetlb_pud(*pud))
@@ -449,7 +455,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
mm_dec_nr_pmds(tlb->mm);
}
-static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
@@ -458,7 +464,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
unsigned long start;
start = addr;
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
@@ -481,8 +487,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
if (end - 1 > ceiling - 1)
return;
- pud = pud_offset(pgd, start);
- pgd_clear(pgd);
+ pud = pud_offset(p4d, start);
+ p4d_clear(p4d);
pud_free_tlb(tlb, pud, start);
mm_dec_nr_puds(tlb->mm);
}
@@ -492,6 +498,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long floor, unsigned long ceiling)
{
pgd_t *pgd;
+ p4d_t *p4d;
unsigned long next;
addr &= PMD_MASK;
@@ -511,10 +518,11 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
return;
pgd = pgd_offset(tlb->mm, addr);
+ p4d = p4d_offset(pgd, addr);
do {
- next = pgd_addr_end(addr, end);
- if (pgd_none_or_clear_bad(pgd))
+ next = p4d_addr_end(addr, end);
+ if (p4d_none_or_clear_bad(p4d))
continue;
- hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
- } while (pgd++, addr = next, addr != end);
+ hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
+ } while (p4d++, addr = next, addr != end);
}
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 046ab116cc8c..906eda1158b4 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -31,7 +31,6 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
-#include <asm/pgalloc.h> /* bug in asm-generic/tlb.h: check_pgt_cache */
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/prom.h>
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 4b099dd7a767..1cf0d666dea3 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -530,7 +530,8 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
paddr = kaddr & mask;
else {
pgd_t *pgdp = pgd_offset_k(kaddr);
- pud_t *pudp = pud_offset(pgdp, kaddr);
+ p4d_t *p4dp = p4d_offset(pgdp, kaddr);
+ pud_t *pudp = pud_offset(p4dp, kaddr);
pmd_t *pmdp = pmd_offset(pudp, kaddr);
pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
@@ -1653,6 +1654,7 @@ static unsigned long max_phys_bits = 40;
bool kern_addr_valid(unsigned long addr)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -1674,7 +1676,11 @@ bool kern_addr_valid(unsigned long addr)
if (pgd_none(*pgd))
return 0;
- pud = pud_offset(pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(*p4d))
+ return 0;
+
+ pud = pud_offset(p4d, addr);
if (pud_none(*pud))
return 0;
@@ -1800,6 +1806,7 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
while (vstart < vend) {
unsigned long this_end, paddr = __pa(vstart);
pgd_t *pgd = pgd_offset_k(vstart);
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -1814,7 +1821,20 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
alloc_bytes += PAGE_SIZE;
pgd_populate(&init_mm, pgd, new);
}
- pud = pud_offset(pgd, vstart);
+
+ p4d = p4d_offset(pgd, vstart);
+ if (p4d_none(*p4d)) {
+ pud_t *new;
+
+ new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
+ PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
+ alloc_bytes += PAGE_SIZE;
+ p4d_populate(&init_mm, p4d, new);
+ }
+
+ pud = pud_offset(p4d, vstart);
if (pud_none(*pud)) {
pmd_t *new;
@@ -2612,13 +2632,18 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
for (; vstart < vend; vstart += PMD_SIZE) {
pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
unsigned long pte;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
if (!pgd)
return -ENOMEM;
- pud = vmemmap_pud_populate(pgd, vstart, node);
+ p4d = vmemmap_p4d_populate(pgd, vstart, node);
+ if (!p4d)
+ return -ENOMEM;
+
+ pud = vmemmap_pud_populate(p4d, vstart, node);
if (!pud)
return -ENOMEM;
@@ -2903,7 +2928,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
free_unref_page(page);
return NULL;
}
@@ -2919,7 +2944,7 @@ static void __pte_free(pgtable_t pte)
{
struct page *page = virt_to_page(pte);
- pgtable_page_dtor(page);
+ pgtable_pte_page_dtor(page);
__free_page(page);
}
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index f770ee7229d8..33a0facd9eb5 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -239,12 +239,16 @@ static void *iounit_alloc(struct device *dev, size_t len,
page = va;
{
pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
long i;
pgdp = pgd_offset(&init_mm, addr);
- pmdp = pmd_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ pudp = pud_offset(p4dp, addr);
+ pmdp = pmd_offset(pudp, addr);
ptep = pte_offset_map(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 71ac353032b6..4d3c6991f0ae 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -343,6 +343,8 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
page = va;
{
pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -354,7 +356,9 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
__flush_page_to_ram(page);
pgdp = pgd_offset(&init_mm, addr);
- pmdp = pmd_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ pudp = pud_offset(p4dp, addr);
+ pmdp = pmd_offset(pudp, addr);
ptep = pte_offset_map(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index aaebbc00d262..f56c3c9a9793 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -296,6 +296,8 @@ static void __init srmmu_nocache_init(void)
void *srmmu_nocache_bitmap;
unsigned int bitmap_bits;
pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long paddr, vaddr;
@@ -329,6 +331,8 @@ static void __init srmmu_nocache_init(void)
while (vaddr < srmmu_nocache_end) {
pgd = pgd_offset_k(vaddr);
- pmd = pmd_offset(__nocache_fix(pgd), vaddr);
+ p4d = p4d_offset(__nocache_fix(pgd), vaddr);
+ pud = pud_offset(__nocache_fix(p4d), vaddr);
+ pmd = pmd_offset(__nocache_fix(pud), vaddr);
pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
@@ -378,7 +382,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0)
return NULL;
page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
- if (!pgtable_page_ctor(page)) {
+ if (!pgtable_pte_page_ctor(page)) {
__free_page(page);
return NULL;
}
@@ -389,7 +393,7 @@ void pte_free(struct mm_struct *mm, pgtable_t pte)
{
unsigned long p;
- pgtable_page_dtor(pte);
+ pgtable_pte_page_dtor(pte);
p = (unsigned long)page_address(pte); /* Cached address (for test) */
if (p == 0)
BUG();
@@ -516,13 +520,17 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
unsigned long virt_addr, int bus_type)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
unsigned long tmp;
physaddr &= PAGE_MASK;
pgdp = pgd_offset_k(virt_addr);
- pmdp = pmd_offset(pgdp, virt_addr);
+ p4dp = p4d_offset(pgdp, virt_addr);
+ pudp = pud_offset(p4dp, virt_addr);
+ pmdp = pmd_offset(pudp, virt_addr);
ptep = pte_offset_kernel(pmdp, virt_addr);
tmp = (physaddr >> 4) | SRMMU_ET_PTE;
@@ -551,11 +559,16 @@ void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
+
pgdp = pgd_offset_k(virt_addr);
- pmdp = pmd_offset(pgdp, virt_addr);
+ p4dp = p4d_offset(pgdp, virt_addr);
+ pudp = pud_offset(p4dp, virt_addr);
+ pmdp = pmd_offset(pudp, virt_addr);
ptep = pte_offset_kernel(pmdp, virt_addr);
/* No need to flush uncacheable page. */
@@ -693,20 +706,24 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
unsigned long end)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
while (start < end) {
pgdp = pgd_offset_k(start);
- if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+ p4dp = p4d_offset(pgdp, start);
+ pudp = pud_offset(p4dp, start);
+ if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
pmdp = __srmmu_get_nocache(
SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
early_pgtable_allocfail("pmd");
memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
- pgd_set(__nocache_fix(pgdp), pmdp);
+ pud_set(__nocache_fix(pudp), pmdp);
}
- pmdp = pmd_offset(__nocache_fix(pgdp), start);
+ pmdp = pmd_offset(__nocache_fix(pudp), start);
if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
@@ -724,19 +741,23 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
unsigned long end)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
while (start < end) {
pgdp = pgd_offset_k(start);
- if (pgd_none(*pgdp)) {
+ p4dp = p4d_offset(pgdp, start);
+ pudp = pud_offset(p4dp, start);
+ if (pud_none(*pudp)) {
pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
early_pgtable_allocfail("pmd");
memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
- pgd_set(pgdp, pmdp);
+ pud_set((pud_t *)pgdp, pmdp);
}
- pmdp = pmd_offset(pgdp, start);
+ pmdp = pmd_offset(pudp, start);
if (srmmu_pmd_none(*pmdp)) {
ptep = __srmmu_get_nocache(PTE_SIZE,
PTE_SIZE);
@@ -779,6 +800,8 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
unsigned long probed;
unsigned long addr;
pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
@@ -810,18 +833,20 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
}
pgdp = pgd_offset_k(start);
+ p4dp = p4d_offset(pgdp, start);
+ pudp = pud_offset(p4dp, start);
if (what == 2) {
*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
start += SRMMU_PGDIR_SIZE;
continue;
}
- if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+ if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
early_pgtable_allocfail("pmd");
memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
- pgd_set(__nocache_fix(pgdp), pmdp);
+ pud_set(__nocache_fix(pudp), pmdp);
}
pmdp = pmd_offset(__nocache_fix(pgdp), start);
if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
@@ -906,6 +931,8 @@ void __init srmmu_paging_init(void)
phandle cpunode;
char node_str[128];
pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long pages_avail;
@@ -967,7 +994,9 @@ void __init srmmu_paging_init(void)
srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
pgd = pgd_offset_k(PKMAP_BASE);
- pmd = pmd_offset(pgd, PKMAP_BASE);
+ p4d = p4d_offset(pgd, PKMAP_BASE);
+ pud = pud_offset(p4d, PKMAP_BASE);
+ pmd = pmd_offset(pud, PKMAP_BASE);
pte = pte_offset_kernel(pmd, PKMAP_BASE);
pkmap_page_table = pte;
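For comparison outside the patch context, a hedged sketch of the allocate/free pairing that the renamed pgtable_pte_page_ctor()/pgtable_pte_page_dtor() helpers expect, mirroring the pte_alloc_one()/__pte_free() hunks in init_64.c and the pte_alloc_one()/pte_free() hunks in srmmu.c above (the example function names are illustrative only, not taken from the patch):

#include <linux/mm.h>
#include <linux/gfp.h>

/* Illustrative only: allocate a pte page and register it with the
 * split-ptlock/page-table accounting machinery, undoing both steps on
 * the free path.
 */
static struct page *pte_page_alloc_example(void)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {	/* was pgtable_page_ctor() */
		__free_page(page);
		return NULL;
	}
	return page;
}

static void pte_page_free_example(struct page *page)
{
	pgtable_pte_page_dtor(page);		/* was pgtable_page_dtor() */
	__free_page(page);
}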