From f84d02755f5a9f3b88e8d15d6384da25ad6dcf5e Mon Sep 17 00:00:00 2001
From: Mark Salter
Date: Tue, 15 Apr 2014 21:59:30 -0400
Subject: arm64: add EFI runtime services

This patch adds EFI runtime support for arm64. This runtime support allows
the kernel to access various EFI runtime services provided by EFI firmware,
such as reboot, the real-time clock, and EFI boot variables.

This functionality is supported for little-endian kernels only, as the UEFI
standard specifies that the firmware be little endian. A future patch is
expected to add support for big-endian kernels running with little-endian
firmware.

Signed-off-by: Mark Salter
[ Remove unnecessary cache/tlb maintenance. ]
Signed-off-by: Mark Rutland
Signed-off-by: Leif Lindholm
Signed-off-by: Matt Fleming
---
 arch/arm64/kernel/setup.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'arch/arm64/kernel/setup.c')

diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 720853f70b6b..0a14aaffc834 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -55,6 +56,7 @@
 #include
 #include
 #include
+#include

 unsigned int processor_id;
 EXPORT_SYMBOL(processor_id);
@@ -366,11 +368,14 @@ void __init setup_arch(char **cmdline_p)

 	parse_early_param();

+	efi_init();
 	arm64_memblock_init();

 	paging_init();
 	request_standard_resources();

+	efi_idmap_init();
+
 	unflatten_device_tree();

 	psci_init();
--
cgit v1.2.3
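Editor's note: once efi_init() has parsed the firmware tables and
efi_idmap_init() has prepared the runtime mappings, kernel code reaches the
runtime services through the global "efi" structure from <linux/efi.h>. The
sketch below is illustrative only, a hypothetical helper that is not part of
this patch; it assumes the 3.15-era efi_enabled()/efi.get_time() interfaces.

#include <linux/efi.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* Hypothetical example, not part of the patch: read the firmware RTC
 * through the EFI runtime services set up by efi_init(). */
static int example_read_efi_rtc(void)
{
	efi_time_t tm;
	efi_status_t status;

	/* Runtime services are only callable once the firmware tables
	 * have been parsed and the runtime regions mapped. */
	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		return -ENODEV;

	/* efi.get_time is a function pointer into firmware code; the
	 * second argument (capabilities) may be NULL per the UEFI spec. */
	status = efi.get_time(&tm, NULL);
	if (status != EFI_SUCCESS)
		return -EIO;

	pr_info("EFI RTC: %04u-%02u-%02u %02u:%02u:%02u\n",
		tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
	return 0;
}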
From a41dc0e841523efe1df7fa5ad48b5e9027a921df Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Thu, 3 Apr 2014 17:48:54 +0100
Subject: arm64: Implement cache_line_size() based on CTR_EL0.CWG

The hardware provides the maximum cache line size in the system via the
CTR_EL0.CWG bits. This patch implements the cache_line_size() function to
read this information, together with a sanity check that warns if the
statically defined L1_CACHE_BYTES is smaller than the hardware value.

Signed-off-by: Catalin Marinas
Acked-by: Will Deacon
---
 arch/arm64/Kconfig                 |  3 +++
 arch/arm64/include/asm/cache.h     | 13 ++++++++++++-
 arch/arm64/include/asm/cachetype.h | 11 +++++++++++
 arch/arm64/kernel/setup.c          | 15 +++++++++++++++
 4 files changed, 41 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/kernel/setup.c')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e759af5d7098..9a5b5fea86ba 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -242,6 +242,9 @@ config ARCH_WANT_HUGE_PMD_SHARE
 config HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	def_bool y

+config ARCH_HAS_CACHE_LINE_SIZE
+	def_bool y
+
 source "mm/Kconfig"

 config XEN_DOM0
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 390308a67f0d..88cc05b5f3ac 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_CACHE_H
 #define __ASM_CACHE_H

+#include
+
 #define L1_CACHE_SHIFT		6
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

@@ -27,6 +29,15 @@
  * the CPU.
  */
 #define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
-#define ARCH_SLAB_MINALIGN	8
+
+#ifndef __ASSEMBLY__
+
+static inline int cache_line_size(void)
+{
+	u32 cwg = cache_type_cwg();
+	return cwg ? 4 << cwg : L1_CACHE_BYTES;
+}
+
+#endif	/* __ASSEMBLY__ */

 #endif
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
index 85f5f511352a..4b23e758d5e0 100644
--- a/arch/arm64/include/asm/cachetype.h
+++ b/arch/arm64/include/asm/cachetype.h
@@ -20,12 +20,16 @@
 #define CTR_L1IP_SHIFT		14
 #define CTR_L1IP_MASK		3
+#define CTR_CWG_SHIFT		24
+#define CTR_CWG_MASK		15

 #define ICACHE_POLICY_RESERVED	0
 #define ICACHE_POLICY_AIVIVT	1
 #define ICACHE_POLICY_VIPT	2
 #define ICACHE_POLICY_PIPT	3

+#ifndef __ASSEMBLY__
+
 static inline u32 icache_policy(void)
 {
 	return (read_cpuid_cachetype() >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK;
@@ -45,4 +49,11 @@ static inline int icache_is_aivivt(void)
 	return icache_policy() == ICACHE_POLICY_AIVIVT;
 }

+static inline u32 cache_type_cwg(void)
+{
+	return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
+}
+
+#endif	/* __ASSEMBLY__ */
+
 #endif	/* __ASM_CACHETYPE_H */
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 7ec784653b29..5b9e046d580e 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -198,6 +199,8 @@ static void __init setup_processor(void)
 {
 	struct cpu_info *cpu_info;
 	u64 features, block;
+	u32 cwg;
+	int cls;

 	cpu_info = lookup_processor_type(read_cpuid_id());
 	if (!cpu_info) {
@@ -214,6 +217,18 @@
 	sprintf(init_utsname()->machine, ELF_PLATFORM);
 	elf_hwcap = 0;

+	/*
+	 * Check for sane CTR_EL0.CWG value.
+	 */
+	cwg = cache_type_cwg();
+	cls = cache_line_size();
+	if (!cwg)
+		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
+			cls);
+	if (L1_CACHE_BYTES < cls)
+		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
+			L1_CACHE_BYTES, cls);
+
 	/*
 	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
 	 * The blocks we test below represent incremental functionality
--
cgit v1.2.3
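Editor's note: the arithmetic in cache_line_size() deserves a gloss.
CTR_EL0.CWG (bits [27:24]) encodes the Cache Writeback Granule as log2 of a
number of 4-byte words, so the granule is 4 << CWG bytes; CWG == 0 means the
value is not provided, hence the L1_CACHE_BYTES fallback. Below is a
freestanding user-space sketch of the same decoding (aarch64 only; it assumes
EL0 reads of CTR_EL0 are not trapped, which Linux normally permits via
SCTLR_EL1.UCT).

#include <stdint.h>
#include <stdio.h>

#define CTR_CWG_SHIFT		24
#define CTR_CWG_MASK		15
#define FALLBACK_LINE_SIZE	64	/* mirrors L1_CACHE_BYTES (1 << 6) */

int main(void)
{
	uint64_t ctr;
	uint32_t cwg;

	/* Read the Cache Type Register directly from EL0. */
	asm volatile("mrs %0, ctr_el0" : "=r" (ctr));

	cwg = (ctr >> CTR_CWG_SHIFT) & CTR_CWG_MASK;

	/* CWG is log2(words); a word is 4 bytes, hence 4 << cwg.
	 * CWG == 0 means "not provided", so fall back. */
	printf("cache writeback granule: %u bytes\n",
	       cwg ? 4u << cwg : FALLBACK_LINE_SIZE);
	return 0;
}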
From a501e32430d4232012ab708b8f0ce841f29e0f02 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Thu, 3 Apr 2014 15:57:15 +0100
Subject: arm64: Clean up the default pgprot setting

The primary aim of this patchset is to remove the pgprot_default and
prot_sect_kernel global variables and rely strictly on predefined values.
The original goal was to be able to run SMP kernels on UP hardware by not
setting the Shareability bit. However, we are unlikely to see UP ARMv8
hardware and even if we do, the Shareability bit is no longer assumed to
disable cacheable accesses.

A side effect is that the device mappings now have the Shareability
attribute set. The hardware, however, should ignore it since Device
accesses are always Outer Shareable.

Following the removal of the two global variables, there is some PROT_*
macro reshuffling and cleanup, including the __PAGE_* macros (replaced by
PAGE_*).

Signed-off-by: Catalin Marinas
Acked-by: Will Deacon
---
 arch/arm64/include/asm/io.h      |  8 ----
 arch/arm64/include/asm/pgtable.h | 98 +++++++++++++++++++---------------------
 arch/arm64/kernel/setup.c        |  1 -
 arch/arm64/mm/dma-mapping.c      |  2 +-
 arch/arm64/mm/mmu.c              | 36 +--------------
 5 files changed, 50 insertions(+), 95 deletions(-)

(limited to 'arch/arm64/kernel/setup.c')

diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index a1bef78f0303..e0ecdcf6632d 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -230,19 +230,11 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
 extern void __iounmap(volatile void __iomem *addr);
 extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);

-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
-
 #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
 #define iounmap				__iounmap

-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
-#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
-
 #define ARCH_HAS_IOREMAP_WC
 #include
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e50bb3cbd8f2..752348dbb4f3 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -52,67 +52,60 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #endif
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

-/*
- * The pgprot_* and protection_map entries will be fixed up at runtime to
- * include the cachable and bufferable bits based on memory policy, as well as
- * any architecture dependent bits like global/ASID and SMP shared mapping
- * bits.
- */
-#define _PAGE_DEFAULT		PTE_TYPE_PAGE | PTE_AF
+#ifdef CONFIG_SMP
+#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#else
+#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)
+#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
+#endif

-extern pgprot_t pgprot_default;
+#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))

-#define __pgprot_modify(prot,mask,bits) \
-	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

-#define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)
+#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

-#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
-#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)

-#define PAGE_HYP		_MOD_PROT(pgprot_default, PTE_HYP)
+#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

-#define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)

-#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
-#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
-
-#endif /* __ASSEMBLY__ */
-
-#define __P000	__PAGE_NONE
-#define __P001	__PAGE_READONLY
-#define __P010	__PAGE_COPY
-#define __P011	__PAGE_COPY
-#define __P100	__PAGE_EXECONLY
-#define __P101	__PAGE_READONLY_EXEC
-#define __P110	__PAGE_COPY_EXEC
-#define __P111	__PAGE_COPY_EXEC
-
-#define __S000	__PAGE_NONE
-#define __S001	__PAGE_READONLY
-#define __S010	__PAGE_SHARED
-#define __S011	__PAGE_SHARED
-#define __S100	__PAGE_EXECONLY
-#define __S101	__PAGE_READONLY_EXEC
-#define __S110	__PAGE_SHARED_EXEC
-#define __S111	__PAGE_SHARED_EXEC
+#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
+
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY
+#define __P100	PAGE_EXECONLY
+#define __P101	PAGE_READONLY_EXEC
+#define __P110	PAGE_COPY_EXEC
+#define __P111	PAGE_COPY_EXEC
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED
+#define __S100	PAGE_EXECONLY
+#define __S101	PAGE_READONLY_EXEC
+#define __S110	PAGE_SHARED_EXEC
+#define __S111	PAGE_SHARED_EXEC

-#ifndef __ASSEMBLY__
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -274,6 +267,9 @@ static inline int has_transparent_hugepage(void)
 	return 1;
 }

+#define __pgprot_modify(prot,mask,bits) \
+	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 5b9e046d580e..7450c5802c3f 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -376,7 +376,6 @@ void __init setup_arch(char **cmdline_p)

 	*cmdline_p = boot_command_line;

-	init_mem_pgprot();
 	early_ioremap_init();

 	parse_early_param();
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index c851eb44dc50..4164c5ace9f8 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -115,7 +115,7 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
 		map[i] = page + i;
 	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
-			    __get_dma_pgprot(attrs, pgprot_default, false));
+			    __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
 	kfree(map);
 	if (!coherent_ptr)
 		goto no_map;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0a472c41a67f..2c0e1dda8163 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,11 +43,6 @@
 struct page *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);

-pgprot_t pgprot_default;
-EXPORT_SYMBOL(pgprot_default);
-
-static pmdval_t prot_sect_kernel;
-
 struct cachepolicy {
 	const char	policy[16];
 	u64		mair;
@@ -122,33 +117,6 @@ static int __init early_cachepolicy(char *p)
 }
 early_param("cachepolicy", early_cachepolicy);

-/*
- * Adjust the PMD section entries according to the CPU in use.
- */
-void __init init_mem_pgprot(void)
-{
-	pteval_t default_pgprot;
-	int i;
-
-	default_pgprot = PTE_ATTRINDX(MT_NORMAL);
-	prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
-
-#ifdef CONFIG_SMP
-	/*
-	 * Mark memory with the "shared" attribute for SMP systems
-	 */
-	default_pgprot |= PTE_SHARED;
-	prot_sect_kernel |= PMD_SECT_S;
-#endif
-
-	for (i = 0; i < 16; i++) {
-		unsigned long v = pgprot_val(protection_map[i]);
-		protection_map[i] = __pgprot(v | default_pgprot);
-	}
-
-	pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
-}
-
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
@@ -205,7 +173,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 	/* try section mapping first */
 	if (((addr | next | phys) & ~SECTION_MASK) == 0) {
 		pmd_t old_pmd =*pmd;
-		set_pmd(pmd, __pmd(phys | prot_sect_kernel));
+		set_pmd(pmd, __pmd(phys | PROT_SECT_NORMAL_EXEC));
 		/*
 		 * Check for previous table entries created during
 		 * boot (__create_page_tables) and flush them.
@@ -417,7 +385,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 		if (!p)
 			return -ENOMEM;

-		set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
+		set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
 	} else
 		vmemmap_verify((pte_t *)pmd, node, addr, next);
 } while (addr = next, addr != end);
--
cgit v1.2.3
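Editor's note: one closing illustration of why the __P*/__S* tables above can
now be compile-time constants. The generic mm code (vm_get_page_prot() in
mm/mmap.c) selects an entry using the low vm_flags bits, VM_READ, VM_WRITE
and VM_EXEC, with VM_SHARED selecting the __S half of the table; with
PROT_DEFAULT fixed at build time (PTE_SHARED folded in under CONFIG_SMP),
there is nothing left for a runtime fixup pass to do. The stand-alone sketch
below models that lookup only; the string table merely names the pgprot
values and is not kernel code.

#include <stdio.h>

/* Same encodings as the generic kernel VM flags. */
#define VM_READ   0x1
#define VM_WRITE  0x2
#define VM_EXEC   0x4
#define VM_SHARED 0x8

/* Indices 0..7 model __P000..__P111, 8..15 model __S000..__S111,
 * mirroring the tables in the pgtable.h hunk above. */
static const char *protection_map_model[16] = {
	"PAGE_NONE",     "PAGE_READONLY",      "PAGE_COPY",        "PAGE_COPY",
	"PAGE_EXECONLY", "PAGE_READONLY_EXEC", "PAGE_COPY_EXEC",   "PAGE_COPY_EXEC",
	"PAGE_NONE",     "PAGE_READONLY",      "PAGE_SHARED",      "PAGE_SHARED",
	"PAGE_EXECONLY", "PAGE_READONLY_EXEC", "PAGE_SHARED_EXEC", "PAGE_SHARED_EXEC",
};

int main(void)
{
	/* e.g. a writable shared mapping: mmap(PROT_READ|PROT_WRITE, MAP_SHARED) */
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_SHARED;

	/* Prints "PAGE_SHARED": index 11 selects __S011. */
	printf("%s\n", protection_map_model[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]);
	return 0;
}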