Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--  arch/riscv/mm/Makefile             15
-rw-r--r--  arch/riscv/mm/cacheflush.c         28
-rw-r--r--  arch/riscv/mm/context.c            10
-rw-r--r--  arch/riscv/mm/extable.c             4
-rw-r--r--  arch/riscv/mm/fault.c               8
-rw-r--r--  arch/riscv/mm/init.c               99
-rw-r--r--  arch/riscv/mm/ioremap.c            84
-rw-r--r--  arch/riscv/mm/kasan_init.c        104
-rw-r--r--  arch/riscv/mm/physaddr.c           37
-rw-r--r--  arch/riscv/mm/sifive_l2_cache.c   178
-rw-r--r--  arch/riscv/mm/tlbflush.c           56
11 files changed, 320 insertions, 303 deletions
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 74055e1d6f21..50b7af58c566 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -6,11 +6,20 @@ CFLAGS_REMOVE_init.o = -pg
endif
obj-y += init.o
-obj-y += fault.o
obj-y += extable.o
-obj-y += ioremap.o
+obj-$(CONFIG_MMU) += fault.o
obj-y += cacheflush.o
obj-y += context.o
-obj-y += sifive_l2_cache.o
+ifeq ($(CONFIG_MMU),y)
+obj-$(CONFIG_SMP) += tlbflush.o
+endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_KASAN) += kasan_init.o
+
+ifdef CONFIG_KASAN
+KASAN_SANITIZE_kasan_init.o := n
+KASAN_SANITIZE_init.o := n
+endif
+
+obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 9ebcff8ba263..8930ab7278e6 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -10,10 +10,19 @@
#include <asm/sbi.h>
+static void ipi_remote_fence_i(void *info)
+{
+ return local_flush_icache_all();
+}
+
void flush_icache_all(void)
{
- sbi_remote_fence_i(NULL);
+ if (IS_ENABLED(CONFIG_RISCV_SBI))
+ sbi_remote_fence_i(NULL);
+ else
+ on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
+EXPORT_SYMBOL(flush_icache_all);
/*
* Performs an icache flush for the given MM context. RISC-V has no direct
@@ -28,7 +37,7 @@ void flush_icache_all(void)
void flush_icache_mm(struct mm_struct *mm, bool local)
{
unsigned int cpu;
- cpumask_t others, hmask, *mask;
+ cpumask_t others, *mask;
preempt_disable();
@@ -46,11 +55,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
*/
cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
local |= cpumask_empty(&others);
- if (mm != current->active_mm || !local) {
- cpumask_clear(&hmask);
- riscv_cpuid_to_hartid_mask(&others, &hmask);
- sbi_remote_fence_i(hmask.bits);
- } else {
+ if (mm == current->active_mm && local) {
/*
* It's assumed that at least one strongly ordered operation is
* performed on this hart between setting a hart's cpumask bit
@@ -60,6 +65,13 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
* with flush_icache_deferred().
*/
smp_mb();
+ } else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+ cpumask_t hartid_mask;
+
+ riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
+ sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+ } else {
+ on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
}
preempt_enable();
@@ -67,6 +79,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
#endif /* CONFIG_SMP */
+#ifdef CONFIG_MMU
void flush_icache_pte(pte_t pte)
{
struct page *page = pte_page(pte);
@@ -74,3 +87,4 @@ void flush_icache_pte(pte_t pte)
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
flush_icache_all();
}
+#endif /* CONFIG_MMU */
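
Note on the cacheflush.c change: when the kernel is built without SBI firmware support (CONFIG_RISCV_SBI=n), remote FENCE.I can no longer be delegated to firmware, so the new code falls back to IPIs that run the local flush on every hart. For reference, a minimal sketch of the per-hart primitive both paths end up using, matching the asm/cacheflush.h helper:

/* Sketch of the per-hart primitive behind both the SBI and IPI paths:
 * fence.i makes instruction fetches on this hart observe prior stores. */
static inline void local_flush_icache_all(void)
{
	asm volatile ("fence.i" ::: "memory");
}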
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 89ceb3cbe218..613ec81a8979 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
/*
* When necessary, performs a deferred icache flush for the given MM context,
@@ -57,13 +58,10 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
- /*
- * Use the old spbtr name instead of using the current satp
- * name to support binutils 2.29 which doesn't know about the
- * privileged ISA 1.10 yet.
- */
- csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+#ifdef CONFIG_MMU
+ csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
local_flush_tlb_all();
+#endif
flush_icache_deferred(next);
}
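
In the context.c hunk, the CSR is now written through the numeric CSR_SATP constant rather than the legacy "sptbr" assembler name, so the binutils 2.29 workaround described in the removed comment is no longer needed. A minimal sketch of how csr_write() can target a CSR by number (close to the kernel's asm/csr.h, shown for illustration only):

#define CSR_SATP	0x180	/* numeric address: no assembler CSR name needed */
#define __ASM_STR(x)	#x	/* csr argument is macro-expanded to 0x180 first */

#define csr_write(csr, val)						\
({									\
	unsigned long __v = (unsigned long)(val);			\
	__asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0"		\
			      : : "rK" (__v) : "memory");		\
})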
diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
index 7aed9178d365..2fc729422151 100644
--- a/arch/riscv/mm/extable.c
+++ b/arch/riscv/mm/extable.c
@@ -15,9 +15,9 @@ int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
- fixup = search_exception_tables(regs->sepc);
+ fixup = search_exception_tables(regs->epc);
if (fixup) {
- regs->sepc = fixup->fixup;
+ regs->epc = fixup->fixup;
return 1;
}
return 0;
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 96add1427a75..cf7248e07f43 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -18,6 +18,8 @@
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
+#include "../kernel/head.h"
+
/*
* This routine handles page faults. It determines the address and the
* problem, and then passes it off to one of the appropriate routines.
@@ -32,8 +34,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
int code = SEGV_MAPERR;
vm_fault_t fault;
- cause = regs->scause;
- addr = regs->sbadaddr;
+ cause = regs->cause;
+ addr = regs->badaddr;
tsk = current;
mm = tsk->mm;
@@ -51,7 +53,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
goto vmalloc_fault;
/* Enable interrupts if they were enabled in the parent context. */
- if (likely(regs->sstatus & SR_SPIE))
+ if (likely(regs->status & SR_PIE))
local_irq_enable();
/*
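
The extable.c and fault.c hunks rename pt_regs fields from supervisor-specific names (sepc, scause, sbadaddr, sstatus) to mode-neutral ones (epc, cause, badaddr, status), so the same C code can serve both S-mode and M-mode kernels; SR_PIE is aliased to SR_SPIE or SR_MPIE the same way. A sketch of the aliasing this relies on, along the lines of asm/csr.h:

#ifdef CONFIG_RISCV_M_MODE
# define CSR_STATUS	CSR_MSTATUS	/* machine-mode kernel */
# define CSR_EPC	CSR_MEPC
# define CSR_CAUSE	CSR_MCAUSE
# define CSR_TVAL	CSR_MTVAL
#else
# define CSR_STATUS	CSR_SSTATUS	/* conventional supervisor kernel */
# define CSR_EPC	CSR_SEPC
# define CSR_CAUSE	CSR_SCAUSE
# define CSR_TVAL	CSR_STVAL
#endif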
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 42bf939693d3..965a8cf4829c 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -11,6 +11,7 @@
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
@@ -18,11 +19,14 @@
#include <asm/pgtable.h>
#include <asm/io.h>
+#include "../kernel/head.h"
+
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
extern char _start[];
+void *dtb_early_va;
static void __init zone_sizes_init(void)
{
@@ -37,11 +41,42 @@ static void __init zone_sizes_init(void)
free_area_init_nodes(max_zone_pfns);
}
-void setup_zero_page(void)
+static void setup_zero_page(void)
{
memset((void *)empty_zero_page, 0, PAGE_SIZE);
}
+#ifdef CONFIG_DEBUG_VM
+static inline void print_mlk(char *name, unsigned long b, unsigned long t)
+{
+ pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t,
+ (((t) - (b)) >> 10));
+}
+
+static inline void print_mlm(char *name, unsigned long b, unsigned long t)
+{
+ pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld MB)\n", name, b, t,
+ (((t) - (b)) >> 20));
+}
+
+static void print_vm_layout(void)
+{
+ pr_notice("Virtual kernel memory layout:\n");
+ print_mlk("fixmap", (unsigned long)FIXADDR_START,
+ (unsigned long)FIXADDR_TOP);
+ print_mlm("pci io", (unsigned long)PCI_IO_START,
+ (unsigned long)PCI_IO_END);
+ print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
+ (unsigned long)VMEMMAP_END);
+ print_mlm("vmalloc", (unsigned long)VMALLOC_START,
+ (unsigned long)VMALLOC_END);
+ print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
+ (unsigned long)high_memory);
+}
+#else
+static void print_vm_layout(void) { }
+#endif /* CONFIG_DEBUG_VM */
+
void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
@@ -52,6 +87,7 @@ void __init mem_init(void)
memblock_free_all();
mem_init_print_info(NULL);
+ print_vm_layout();
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -63,13 +99,13 @@ static void __init setup_initrd(void)
pr_info("initrd not found or empty");
goto disable;
}
- if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+ if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) {
pr_err("initrd extends beyond end of memory");
goto disable;
}
size = initrd_end - initrd_start;
- memblock_reserve(__pa(initrd_start), size);
+ memblock_reserve(__pa_symbol(initrd_start), size);
initrd_below_start_ok = 1;
pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
@@ -82,12 +118,14 @@ disable:
}
#endif /* CONFIG_BLK_DEV_INITRD */
+static phys_addr_t dtb_early_pa __initdata;
+
void __init setup_bootmem(void)
{
struct memblock_region *reg;
phys_addr_t mem_size = 0;
- phys_addr_t vmlinux_end = __pa(&_end);
- phys_addr_t vmlinux_start = __pa(&_start);
+ phys_addr_t vmlinux_end = __pa_symbol(&_end);
+ phys_addr_t vmlinux_start = __pa_symbol(&_start);
/* Find the memory region containing the kernel */
for_each_memblock(memory, reg) {
@@ -117,7 +155,12 @@ void __init setup_bootmem(void)
setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */
- early_init_fdt_reserve_self();
+ /*
+ * Avoid using early_init_fdt_reserve_self() since __pa() does
+ * not work for DTB pointers that are fixmap addresses
+ */
+ memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+
early_init_fdt_scan_reserved_mem();
memblock_allow_resize();
memblock_dump_all();
@@ -132,12 +175,12 @@ void __init setup_bootmem(void)
}
}
+#ifdef CONFIG_MMU
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);
-void *dtb_early_va;
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
@@ -263,7 +306,6 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
#define get_pgd_next_virt(__pa) get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
-#define PTE_PARENT_SIZE PMD_SIZE
#define fixmap_pgd_next fixmap_pmd
#else
#define pgd_next_t pte_t
@@ -271,7 +313,6 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
#define get_pgd_next_virt(__pa) get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
-#define PTE_PARENT_SIZE PGDIR_SIZE
#define fixmap_pgd_next fixmap_pte
#endif
@@ -304,14 +345,11 @@ static void __init create_pgd_mapping(pgd_t *pgdp,
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
- uintptr_t map_size = PAGE_SIZE;
+ /* Upgrade to PMD_SIZE mappings whenever possible */
+ if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
+ return PAGE_SIZE;
- /* Upgrade to PMD/PGDIR mappings whenever possible */
- if (!(base & (PTE_PARENT_SIZE - 1)) &&
- !(size & (PTE_PARENT_SIZE - 1)))
- map_size = PTE_PARENT_SIZE;
-
- return map_size;
+ return PMD_SIZE;
}
/*
@@ -329,8 +367,7 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
*/
#ifndef __riscv_cmodel_medany
-#error "setup_vm() is called from head.S before relocate so it should "
- "not use absolute addressing."
+#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
@@ -393,6 +430,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
/* Save pointer to DTB for early FDT parsing */
dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
+ /* Save physical address for memblock reservation */
+ dtb_early_pa = dtb_pa;
}
static void __init setup_vm_final(void)
@@ -406,7 +445,7 @@ static void __init setup_vm_final(void)
/* Setup swapper PGD for fixmap */
create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
- __pa(fixmap_pgd_next),
+ __pa_symbol(fixmap_pgd_next),
PGDIR_SIZE, PAGE_TABLE);
/* Map all memory banks */
@@ -435,13 +474,33 @@ static void __init setup_vm_final(void)
clear_fixmap(FIX_PMD);
/* Move to swapper page table */
- csr_write(sptbr, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
+ csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
local_flush_tlb_all();
}
+#else
+asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+{
+ dtb_early_va = (void *)dtb_pa;
+}
+
+static inline void setup_vm_final(void)
+{
+}
+#endif /* CONFIG_MMU */
void __init paging_init(void)
{
setup_vm_final();
+ memblocks_present();
+ sparse_init();
setup_zero_page();
zone_sizes_init();
}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+ struct vmem_altmap *altmap)
+{
+ return vmemmap_populate_basepages(start, end, node);
+}
+#endif
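
Two notes on the init.c changes: the DTB is now reserved through its saved physical address because dtb_early_va is a fixmap address that __pa() cannot translate (which is also why the __pa_symbol() conversions and the DEBUG_VIRTUAL checks below exist), and best_map_size() collapses to a single PAGE_SIZE/PMD_SIZE decision. Illustrative calls, with made-up addresses, showing the alignment rule:

/* Hypothetical values, purely to illustrate the check: a bank gets
 * 2 MiB PMD leaf mappings only when base and size are both 2 MiB
 * aligned; anything else falls back to 4 KiB pages. */
best_map_size(0x80000000UL, 0x40000000UL);	/* both aligned   -> PMD_SIZE  */
best_map_size(0x80200000UL, 0x001ff000UL);	/* size unaligned -> PAGE_SIZE */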
diff --git a/arch/riscv/mm/ioremap.c b/arch/riscv/mm/ioremap.c
deleted file mode 100644
index ac621ddb45c0..000000000000
--- a/arch/riscv/mm/ioremap.c
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2012 Regents of the University of California
- */
-
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/io.h>
-
-#include <asm/pgtable.h>
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
- pgprot_t prot, void *caller)
-{
- phys_addr_t last_addr;
- unsigned long offset, vaddr;
- struct vm_struct *area;
-
- /* Disallow wrap-around or zero size */
- last_addr = addr + size - 1;
- if (!size || last_addr < addr)
- return NULL;
-
- /* Page-align mappings */
- offset = addr & (~PAGE_MASK);
- addr -= offset;
- size = PAGE_ALIGN(size + offset);
-
- area = get_vm_area_caller(size, VM_IOREMAP, caller);
- if (!area)
- return NULL;
- vaddr = (unsigned long)area->addr;
-
- if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
- free_vm_area(area);
- return NULL;
- }
-
- return (void __iomem *)(vaddr + offset);
-}
-
-/*
- * ioremap - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * Must be freed with iounmap.
- */
-void __iomem *ioremap(phys_addr_t offset, unsigned long size)
-{
- return __ioremap_caller(offset, size, PAGE_KERNEL,
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(ioremap);
-
-
-/**
- * iounmap - Free a IO remapping
- * @addr: virtual address from ioremap_*
- *
- * Caller must ensure there is only one unmapping for the same pointer.
- */
-void iounmap(volatile void __iomem *addr)
-{
- vunmap((void *)((unsigned long)addr & PAGE_MASK));
-}
-EXPORT_SYMBOL(iounmap);
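
The arch-private ioremap.c is removed outright; riscv presumably relies on the generic CONFIG_GENERIC_IOREMAP implementation from the same release, in which the architecture supplies only the page protection and the common code provides the entry points. Signatures shown for orientation, not part of this diff:

void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
void iounmap(volatile void __iomem *addr);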
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
new file mode 100644
index 000000000000..f0cc86040587
--- /dev/null
+++ b/arch/riscv/mm/kasan_init.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Andes Technology Corporation
+
+#include <linux/pfn.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+
+extern pgd_t early_pg_dir[PTRS_PER_PGD];
+asmlinkage void __init kasan_early_init(void)
+{
+ uintptr_t i;
+ pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);
+
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_early_shadow_pte + i,
+ mk_pte(virt_to_page(kasan_early_shadow_page),
+ PAGE_KERNEL));
+
+ for (i = 0; i < PTRS_PER_PMD; ++i)
+ set_pmd(kasan_early_shadow_pmd + i,
+ pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
+ __pgprot(_PAGE_TABLE)));
+
+ for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
+ i += PGDIR_SIZE, ++pgd)
+ set_pgd(pgd,
+ pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
+ __pgprot(_PAGE_TABLE)));
+
+ /* init for swapper_pg_dir */
+ pgd = pgd_offset_k(KASAN_SHADOW_START);
+
+ for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
+ i += PGDIR_SIZE, ++pgd)
+ set_pgd(pgd,
+ pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
+ __pgprot(_PAGE_TABLE)));
+
+ flush_tlb_all();
+}
+
+static void __init populate(void *start, void *end)
+{
+ unsigned long i;
+ unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+ unsigned long vend = PAGE_ALIGN((unsigned long)end);
+ unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
+ unsigned long n_pmds =
+ (n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 :
+ n_pages / PTRS_PER_PTE;
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE);
+ pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+ for (i = 0; i < n_pages; i++) {
+ phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+
+ set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+ }
+
+ for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD)
+ set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))),
+ __pgprot(_PAGE_TABLE)));
+
+ for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE)
+ set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))),
+ __pgprot(_PAGE_TABLE)));
+
+ flush_tlb_all();
+ memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+ struct memblock_region *reg;
+ unsigned long i;
+
+ kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
+ (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+
+ for_each_memblock(memory, reg) {
+ void *start = (void *)__va(reg->base);
+ void *end = (void *)__va(reg->base + reg->size);
+
+ if (start >= end)
+ break;
+
+ populate(kasan_mem_to_shadow(start),
+ kasan_mem_to_shadow(end));
+ };
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte(&kasan_early_shadow_pte[i],
+ mk_pte(virt_to_page(kasan_early_shadow_page),
+ __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)));
+
+ memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+ init_task.kasan_depth = 0;
+}
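
kasan_init.c builds the shadow region that tracks kernel memory at an 8-to-1 ratio: kasan_early_init() points the whole shadow range at a single early page so boot code can run instrumented, and kasan_init() then backs the shadow of each real memory bank with freshly allocated pages. A sketch of the generic address translation the code relies on (per include/linux/kasan.h, where KASAN_SHADOW_SCALE_SHIFT is 3):

static inline void *kasan_mem_to_shadow(const void *addr)
{
	/* one shadow byte describes 8 bytes of kernel address space */
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}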
diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c
new file mode 100644
index 000000000000..e8e4dcd39fed
--- /dev/null
+++ b/arch/riscv/mm/physaddr.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/types.h>
+#include <linux/mmdebug.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <asm/sections.h>
+
+phys_addr_t __virt_to_phys(unsigned long x)
+{
+ phys_addr_t y = x - PAGE_OFFSET;
+
+ /*
+ * Boundary checking against the kernel linear mapping space.
+ */
+ WARN(y >= KERN_VIRT_SIZE,
+ "virt_to_phys used for non-linear address: %pK (%pS)\n",
+ (void *)x, (void *)x);
+
+ return __va_to_pa_nodebug(x);
+}
+EXPORT_SYMBOL(__virt_to_phys);
+
+phys_addr_t __phys_addr_symbol(unsigned long x)
+{
+ unsigned long kernel_start = (unsigned long)PAGE_OFFSET;
+ unsigned long kernel_end = (unsigned long)_end;
+
+ /*
+ * Boundary checking against the kernel image mapping.
+ * __pa_symbol should only be used on kernel symbol addresses.
+ */
+ VIRTUAL_BUG_ON(x < kernel_start || x > kernel_end);
+
+ return __va_to_pa_nodebug(x);
+}
+EXPORT_SYMBOL(__phys_addr_symbol);
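
These two helpers back __pa() and __pa_symbol() when CONFIG_DEBUG_VIRTUAL=y (hence the new physaddr.o rule in the Makefile above). Hypothetical call sites illustrating what the checks catch:

phys_addr_t ok  = __pa(empty_zero_page);	/* linear-map address: fine     */
phys_addr_t bad = __pa(dtb_early_va);		/* fixmap address: trips the
						 * WARN in __virt_to_phys()
						 * instead of silently
						 * returning garbage          */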
diff --git a/arch/riscv/mm/sifive_l2_cache.c b/arch/riscv/mm/sifive_l2_cache.c
deleted file mode 100644
index 2e637ad71c05..000000000000
--- a/arch/riscv/mm/sifive_l2_cache.c
+++ /dev/null
@@ -1,178 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SiFive L2 cache controller Driver
- *
- * Copyright (C) 2018-2019 SiFive, Inc.
- *
- */
-#include <linux/debugfs.h>
-#include <linux/interrupt.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <asm/sifive_l2_cache.h>
-
-#define SIFIVE_L2_DIRECCFIX_LOW 0x100
-#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
-#define SIFIVE_L2_DIRECCFIX_COUNT 0x108
-
-#define SIFIVE_L2_DATECCFIX_LOW 0x140
-#define SIFIVE_L2_DATECCFIX_HIGH 0x144
-#define SIFIVE_L2_DATECCFIX_COUNT 0x148
-
-#define SIFIVE_L2_DATECCFAIL_LOW 0x160
-#define SIFIVE_L2_DATECCFAIL_HIGH 0x164
-#define SIFIVE_L2_DATECCFAIL_COUNT 0x168
-
-#define SIFIVE_L2_CONFIG 0x00
-#define SIFIVE_L2_WAYENABLE 0x08
-#define SIFIVE_L2_ECCINJECTERR 0x40
-
-#define SIFIVE_L2_MAX_ECCINTR 3
-
-static void __iomem *l2_base;
-static int g_irq[SIFIVE_L2_MAX_ECCINTR];
-
-enum {
- DIR_CORR = 0,
- DATA_CORR,
- DATA_UNCORR,
-};
-
-#ifdef CONFIG_DEBUG_FS
-static struct dentry *sifive_test;
-
-static ssize_t l2_write(struct file *file, const char __user *data,
- size_t count, loff_t *ppos)
-{
- unsigned int val;
-
- if (kstrtouint_from_user(data, count, 0, &val))
- return -EINVAL;
- if ((val >= 0 && val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
- writel(val, l2_base + SIFIVE_L2_ECCINJECTERR);
- else
- return -EINVAL;
- return count;
-}
-
-static const struct file_operations l2_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = l2_write
-};
-
-static void setup_sifive_debug(void)
-{
- sifive_test = debugfs_create_dir("sifive_l2_cache", NULL);
-
- debugfs_create_file("sifive_debug_inject_error", 0200,
- sifive_test, NULL, &l2_fops);
-}
-#endif
-
-static void l2_config_read(void)
-{
- u32 regval, val;
-
- regval = readl(l2_base + SIFIVE_L2_CONFIG);
- val = regval & 0xFF;
- pr_info("L2CACHE: No. of Banks in the cache: %d\n", val);
- val = (regval & 0xFF00) >> 8;
- pr_info("L2CACHE: No. of ways per bank: %d\n", val);
- val = (regval & 0xFF0000) >> 16;
- pr_info("L2CACHE: Sets per bank: %llu\n", (uint64_t)1 << val);
- val = (regval & 0xFF000000) >> 24;
- pr_info("L2CACHE: Bytes per cache block: %llu\n", (uint64_t)1 << val);
-
- regval = readl(l2_base + SIFIVE_L2_WAYENABLE);
- pr_info("L2CACHE: Index of the largest way enabled: %d\n", regval);
-}
-
-static const struct of_device_id sifive_l2_ids[] = {
- { .compatible = "sifive,fu540-c000-ccache" },
- { /* end of table */ },
-};
-
-static ATOMIC_NOTIFIER_HEAD(l2_err_chain);
-
-int register_sifive_l2_error_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&l2_err_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_sifive_l2_error_notifier);
-
-int unregister_sifive_l2_error_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_unregister(&l2_err_chain, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier);
-
-static irqreturn_t l2_int_handler(int irq, void *device)
-{
- unsigned int add_h, add_l;
-
- if (irq == g_irq[DIR_CORR]) {
- add_h = readl(l2_base + SIFIVE_L2_DIRECCFIX_HIGH);
- add_l = readl(l2_base + SIFIVE_L2_DIRECCFIX_LOW);
- pr_err("L2CACHE: DirError @ 0x%08X.%08X\n", add_h, add_l);
- /* Reading this register clears the DirError interrupt sig */
- readl(l2_base + SIFIVE_L2_DIRECCFIX_COUNT);
- atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
- "DirECCFix");
- }
- if (irq == g_irq[DATA_CORR]) {
- add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH);
- add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW);
- pr_err("L2CACHE: DataError @ 0x%08X.%08X\n", add_h, add_l);
- /* Reading this register clears the DataError interrupt sig */
- readl(l2_base + SIFIVE_L2_DATECCFIX_COUNT);
- atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
- "DatECCFix");
- }
- if (irq == g_irq[DATA_UNCORR]) {
- add_h = readl(l2_base + SIFIVE_L2_DATECCFAIL_HIGH);
- add_l = readl(l2_base + SIFIVE_L2_DATECCFAIL_LOW);
- pr_err("L2CACHE: DataFail @ 0x%08X.%08X\n", add_h, add_l);
- /* Reading this register clears the DataFail interrupt sig */
- readl(l2_base + SIFIVE_L2_DATECCFAIL_COUNT);
- atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
- "DatECCFail");
- }
-
- return IRQ_HANDLED;
-}
-
-int __init sifive_l2_init(void)
-{
- struct device_node *np;
- struct resource res;
- int i, rc;
-
- np = of_find_matching_node(NULL, sifive_l2_ids);
- if (!np)
- return -ENODEV;
-
- if (of_address_to_resource(np, 0, &res))
- return -ENODEV;
-
- l2_base = ioremap(res.start, resource_size(&res));
- if (!l2_base)
- return -ENOMEM;
-
- for (i = 0; i < SIFIVE_L2_MAX_ECCINTR; i++) {
- g_irq[i] = irq_of_parse_and_map(np, i);
- rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL);
- if (rc) {
- pr_err("L2CACHE: Could not request IRQ %d\n", g_irq[i]);
- return rc;
- }
- }
-
- l2_config_read();
-
-#ifdef CONFIG_DEBUG_FS
- setup_sifive_debug();
-#endif
- return 0;
-}
-device_initcall(sifive_l2_init);
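
The SiFive L2 cache controller driver is dropped from arch/riscv/mm since it is not memory-management code; it appears to have been relocated under drivers/soc/ rather than deleted, and its notifier API survives the move. A hypothetical consumer, such as an error-reporting driver subscribing to the uncorrectable-error events raised in l2_int_handler() above, might look like:

static int l2_err_event(struct notifier_block *nb, unsigned long event,
			void *msg)
{
	if (event == SIFIVE_L2_ERR_TYPE_UE)
		pr_err("uncorrectable L2 error: %s\n", (char *)msg);
	return NOTIFY_OK;
}

static struct notifier_block l2_err_nb = { .notifier_call = l2_err_event };

/* ... at probe time: */
register_sifive_l2_error_notifier(&l2_err_nb);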
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
new file mode 100644
index 000000000000..720b443c4528
--- /dev/null
+++ b/arch/riscv/mm/tlbflush.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <asm/sbi.h>
+
+void flush_tlb_all(void)
+{
+ sbi_remote_sfence_vma(NULL, 0, -1);
+}
+
+/*
+ * This function must not be called with a NULL cmask; the kernel
+ * may panic if it is.
+ */
+static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
+ unsigned long size)
+{
+ struct cpumask hmask;
+ unsigned int cpuid;
+
+ if (cpumask_empty(cmask))
+ return;
+
+ cpuid = get_cpu();
+
+ if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+ /* local cpu is the only cpu present in cpumask */
+ if (size <= PAGE_SIZE)
+ local_flush_tlb_page(start);
+ else
+ local_flush_tlb_all();
+ } else {
+ riscv_cpuid_to_hartid_mask(cmask, &hmask);
+ sbi_remote_sfence_vma(cpumask_bits(&hmask), start, size);
+ }
+
+ put_cpu();
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
+}
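
The new tlbflush.c routes every remote shootdown through a single SBI_REMOTE_SFENCE_VMA call over the computed hart mask, with a local-only fast path when no other CPU has the mm loaded. A sketch of the per-hart primitives behind that fast path, matching the asm/tlbflush.h helpers:

static inline void local_flush_tlb_all(void)
{
	__asm__ __volatile__ ("sfence.vma" : : : "memory");
}

static inline void local_flush_tlb_page(unsigned long addr)
{
	/* flush only the translation(s) for this virtual address */
	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
}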