From 7423cc0caee7a42735ee2908f24ec69957c9bc85 Mon Sep 17 00:00:00 2001
From: Adam Buchbinder
Date: Tue, 23 Feb 2016 15:24:55 -0800
Subject: ARC: Fix misspellings in comments.

Signed-off-by: Adam Buchbinder
Signed-off-by: Vineet Gupta
---
 arch/arc/include/asm/cmpxchg.h       | 2 +-
 arch/arc/include/asm/entry-compact.h | 2 +-
 arch/arc/include/asm/pgtable.h       | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arc/include')

diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index af7a2db139c9..a444be67cd53 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -149,7 +149,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
  * Since xchg() doesn't always do that, it would seem that following defintion
  * is incorrect. But here's the rationale:
  *  SMP : Even xchg() takes the atomic_ops_lock, so OK.
- *  LLSC: atomic_ops_lock are not relevent at all (even if SMP, since LLSC
+ *  LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
  *        is natively "SMP safe", no serialization required).
  *  UP  : other atomics disable IRQ, so no way a difft ctxt atomic_xchg()
  *        could clobber them. atomic_xchg() itself would be 1 insn, so it

diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 1aff3be91075..1d8f57cd6057 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -231,7 +231,7 @@
 	/* free up r9 as scratchpad */
 	PROLOG_FREEUP_REG r9, @int\LVL\()_saved_reg
 
-	/* Which mode (user/kernel) was the system in when intr occured */
+	/* Which mode (user/kernel) was the system in when intr occurred */
 	lr  r9, [status32_l\LVL\()]
 
 	SWITCH_TO_KERNEL_STK

diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index d426d4215513..ec2af62f5348 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -12,7 +12,7 @@
  *  - Utilise some unused free bits to confine PTE flags to 12 bits
  *     This is a must for 4k pg-sz
  *
- * vineetg: Mar 2011 - changes to accomodate MMU TLB Page Descriptor mods
+ * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
  *  -TLB Locking never really existed, except for initial specs
  *  -SILENT_xxx not needed for our port
  *  -Per my request, MMU V3 changes the layout of some of the bits
-- 
cgit v1.2.1


From 2a41b6dc28dc71c1a3f1622612a26edc58f7561e Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Tue, 8 Mar 2016 19:31:24 +0530
Subject: ARC: bitops: Remove no longer relevant comments

commit 80f420842ff42 removed the ARC bitops micro-optimization but failed
to prune the comments to the same effect

Fixes: 80f420842ff42 ("ARC: Make ARC bitops "safer" (add anti-optimization)")
Signed-off-by: Vineet Gupta
---
 arch/arc/include/asm/bitops.h | 15 ---------------
 1 file changed, 15 deletions(-)

(limited to 'arch/arc/include')

diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 57c1f33844d4..0352fb8d21b9 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -35,21 +35,6 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
 									\
 	m += nr >> 5;							\
 									\
-	/*								\
-	 * ARC ISA micro-optimization:					\
-	 *								\
-	 * Instructions dealing with bitpos only consider lower 5 bits	\
-	 * e.g (x << 33) is handled like (x << 1) by ASL instruction	\
-	 *  (mem pointer still needs adjustment to point to next word)	\
-	 *								\
-	 * Hence the masking to clamp @nr arg can be elided in general.	\
-	 *								\
-	 * However if @nr is a constant (above assumed in a register),	\
-	 * and greater than 31, gcc can optimize away (x << 33) to 0,	\
-	 *  as overflow, given the 32-bit ISA. Thus masking needs to be	\
-	 *  done for const @nr, but no code is generated due to gcc	\
-	 *  const prop.							\
-	 */								\
 	nr &= 0x1f;							\
 									\
 	__asm__ __volatile__(						\
-- 
cgit v1.2.1
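For context, a minimal stand-alone C sketch (not kernel code; names are illustrative) of the clamping behaviour the removed comment used to describe: shifts by more than 31 are undefined in C and may be constant-folded to 0 by gcc, so the bit position is masked after the word pointer has been advanced.

#include <stdio.h>

/* illustrative user-space model of the kernel's set_bit() indexing */
static void set_bit32(unsigned int nr, unsigned int *word)
{
	word += nr >> 5;	/* step to the 32-bit word holding the bit */
	nr &= 0x1f;		/* clamp: 1U << 33 would be undefined in C */
	*word |= 1U << nr;
}

int main(void)
{
	unsigned int bitmap[2] = { 0, 0 };

	set_bit32(33, bitmap);			     /* bit 1 of word 1 */
	printf("%08x %08x\n", bitmap[1], bitmap[0]); /* 00000002 00000000 */
	return 0;
}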
From f778cc65717687a3d3f26dd21bef62cd059f1b8b Mon Sep 17 00:00:00 2001
From: Lada Trimasova
Date: Wed, 9 Mar 2016 20:21:04 +0300
Subject: ARC: [BE] readl()/writel() to work in Big Endian CPU configuration

read{l,w}() and write{l,w}() primitives should use le{16,32}_to_cpu() and
cpu_to_le{16,32}() respectively to ensure device registers are read
correctly in Big Endian CPU configuration.

Per Arnd Bergmann
| Most drivers using readl() or readl_relaxed() expect those to perform
| byte swaps on big-endian architectures, as the registers tend to be
| fixed endian

This was needed for getting UART to work correctly on a Big Endian ARC.

The ARC accessors were originally fine; the bug was introduced
inadvertently by commit b8a033023994 ("ARCv2: barriers")

Fixes: b8a033023994 ("ARCv2: barriers")
Link: http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
Cc: Alexey Brodkin
Cc: stable@vger.kernel.org [4.2+]
Cc: Arnd Bergmann
Signed-off-by: Lada Trimasova
[vgupta: beefed up changelog, added Fixes/stable tags]
Signed-off-by: Vineet Gupta
---
 arch/arc/include/asm/io.h | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

(limited to 'arch/arc/include')

diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 694ece8a0243..27b17adea50d 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -129,15 +129,23 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 #define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
 
 /*
- * Relaxed API for drivers which can handle any ordering themselves
+ * Relaxed API for drivers which can handle barrier ordering themselves
+ *
+ * Also these are defined to perform little endian accesses.
+ * To provide the typical device register semantics of fixed endian,
+ * swap the byte order for Big Endian
+ *
+ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
  */
 #define readb_relaxed(c)	__raw_readb(c)
-#define readw_relaxed(c)	__raw_readw(c)
-#define readl_relaxed(c)	__raw_readl(c)
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+					__raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+					__raw_readl(c)); __r; })
 
 #define writeb_relaxed(v,c)	__raw_writeb(v,c)
-#define writew_relaxed(v,c)	__raw_writew(v,c)
-#define writel_relaxed(v,c)	__raw_writel(v,c)
+#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
 
 #include <asm-generic/io.h>
-- 
cgit v1.2.1
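The underlying issue, as a stand-alone C sketch (illustrative, not the kernel implementation): a device register is fixed little-endian on the bus, so a raw host-endian load -- which is all __raw_readl() does -- returns byte-swapped data on a big-endian CPU, while an le32-decoding accessor is correct on either.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Portable equivalent of le32_to_cpu(): decode a little-endian register
 * image into a host-order value, regardless of CPU endianness. */
static uint32_t le32_to_host(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* register value 0x11223344 as it sits on a little-endian bus */
	uint8_t reg[4] = { 0x44, 0x33, 0x22, 0x11 };
	uint32_t raw;

	printf("0x%08x\n", le32_to_host(reg)); /* 0x11223344 on LE and BE */

	/* a raw host-endian load -- all __raw_readl() amounts to -- yields
	 * 0x44332211 on a big-endian CPU */
	memcpy(&raw, reg, sizeof(raw));
	printf("0x%08x\n", raw);
	return 0;
}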
From 20d780374c81cf237834af2202c26df2100ddd69 Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Thu, 25 Feb 2016 22:04:38 +0530
Subject: ARC: build: Better way to detect ISA compatible toolchain

The ARC architecture has 2 instruction sets: ARCompact and ARCv2. While
the same gcc supports compiling for either (using appropriate toggles),
we can't use the same toolchain to build the kernel, because libgcc needs
to be unique and the toolchain (uClibc based) is not multilib'ed.

A uClibc toolchain is convenient since it allows all of userspace and the
kernel to be built with a single install for an ISA.

This however means 2 gnu installs (with the same triplet prefix) are
needed for building for the 2 ISAs, and both need to be in PATH. As
developers we keep switching the builds, but would occasionally fail to
update the PATH, leading to usage of the wrong tools. And this would only
show up at the end of the kernel build, when linking an incompatible
libgcc.

So the initial solution was to have gcc define a special preprocessor
macro DEFAULT_CPU_xxx which is unique for the default toolchain
configuration. Claudiu proposed grepping instead for an existing
preprocessor macro which is again uniquely defined per ISA.

Cc: Michal Marek
Suggested-by: Claudiu Zissulescu
Signed-off-by: Vineet Gupta
---
 arch/arc/include/asm/arcregs.h | 6 ------
 1 file changed, 6 deletions(-)

(limited to 'arch/arc/include')

diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index f9f4c6f59fdb..7fbaea00a336 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -381,12 +381,6 @@ static inline int is_isa_arcompact(void)
 	return IS_ENABLED(CONFIG_ISA_ARCOMPACT);
 }
 
-#if defined(CONFIG_ISA_ARCOMPACT) && !defined(_CPU_DEFAULT_A7)
-#error "Toolchain not configured for ARCompact builds"
-#elif defined(CONFIG_ISA_ARCV2) && !defined(_CPU_DEFAULT_HS)
-#error "Toolchain not configured for ARCv2 builds"
-#endif
-
 #endif /* __ASEMBLY__ */
 
 #endif /* _ASM_ARC_ARCREGS_H */
-- 
cgit v1.2.1
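The removed hunk above is the DEFAULT_CPU_xxx approach; a sketch of the same idea against standard per-ISA macros follows. The macro names here are assumptions for illustration -- the identifying macros depend on the gcc port and version, so check the output of `gcc -E -dM - < /dev/null` for a given toolchain.

/*
 * Illustrative compile-time ISA check. __ARC700__/__ARCHS__ are assumed
 * names; verify against your toolchain's predefined macros.
 */
#if defined(CONFIG_ISA_ARCOMPACT) && !defined(__ARC700__)
#error "Toolchain not configured for ARCompact builds"
#elif defined(CONFIG_ISA_ARCV2) && !defined(__ARCHS__)
#error "Toolchain not configured for ARCv2 builds"
#endif

Moving the equivalent check into the Makefile (preprocess an empty file and grep for the macro) fails the build up front, rather than after a full compile at the final link.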
From c2ff5cf2735c57542b8e630e63d6983013a2e1c3 Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Fri, 18 Dec 2015 13:57:41 +0530
Subject: ARC: mm: Use virt_to_pfn() for addr >> PAGE_SHIFT pattern

Signed-off-by: Vineet Gupta
---
 arch/arc/include/asm/page.h    | 19 +++++++------------
 arch/arc/include/asm/pgtable.h |  9 ++++-----
 2 files changed, 11 insertions(+), 17 deletions(-)

(limited to 'arch/arc/include')

diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 429957f1c236..9a7cf521b95c 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -10,7 +10,6 @@
 
 #include <uapi/asm/page.h>
 
-
 #ifndef __ASSEMBLY__
 
 #define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
@@ -76,30 +75,26 @@ typedef unsigned long pgprot_t;
 
 typedef pte_t * pgtable_t;
 
-#define ARCH_PFN_OFFSET		(CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
+#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
+
+#define ARCH_PFN_OFFSET		virt_to_pfn(CONFIG_LINUX_LINK_BASE)
 
-#define pfn_valid(pfn)	(((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#define pfn_valid(pfn)		(((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
 
 /*
  * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
  *
  * These macros have historically been misnamed
  * virt here means link-address/program-address as embedded in object code.
- * So if kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
- * 128th page, and virt_to_page( ) will return the struct page corresp to it.
- * mem_map[ ] is an array of struct page for each page frame in the system
- *
- * Independent of where linux is linked at, link-addr = physical address
- * So the old macro  __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
- * would have been wrong in case kernel is not at 0x8zs
+ * And for ARC, link-addr = physical address
  */
 #define __pa(vaddr)  ((unsigned long)vaddr)
 #define __va(paddr)  ((void *)((unsigned long)(paddr)))
 
 #define virt_to_page(kaddr)	\
-	(mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
+	(mem_map + virt_to_pfn((kaddr) - CONFIG_LINUX_LINK_BASE))
 
-#define virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
 #define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)

diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index ec2af62f5348..7d6c93e63adf 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -278,15 +278,14 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 #define pmd_present(x)			(pmd_val(x))
 #define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)
 
-#define pte_page(x) (mem_map + \
-		(unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
-				PAGE_SHIFT)))
+#define pte_page(pte)	\
+	(mem_map + virt_to_pfn(pte_val(pte) - CONFIG_LINUX_LINK_BASE))
 
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
-#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define pte_pfn(pte)		virt_to_pfn(pte_val(pte))
 #define pfn_pte(pfn, prot)	(__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
 				 pgprot_val(prot)))
-#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define __pte_index(addr)	(virt_to_pfn(addr) & (PTRS_PER_PTE - 1))
 
 /*
  * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
-- 
cgit v1.2.1
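The pattern being factored out, as a stand-alone sketch (PAGE_SHIFT and the link base are illustrative values; on ARC __pa() is an identity because link address equals physical address):

#include <stdio.h>

#define PAGE_SHIFT	13		/* illustrative: 8K pages */
#define LINK_BASE	0x80000000UL	/* illustrative CONFIG_LINUX_LINK_BASE */

#define __pa(vaddr)		((unsigned long)(vaddr))
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)

int main(void)
{
	unsigned long kaddr = LINK_BASE + (3UL << PAGE_SHIFT);

	/* the open-coded shift and the helper agree */
	printf("%lu == %lu\n", kaddr >> PAGE_SHIFT, virt_to_pfn(kaddr));
	return 0;
}

Naming the shift once makes a later change to __pa() (e.g. under PAE) a one-line fix instead of a tree-wide hunt.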
From c511eaaa7851cda0657b2d5a401d33cf4dba52ad Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Thu, 17 Mar 2016 15:00:59 +0530
Subject: ARC: thp: unbork !CONFIG_TRANSPARENT_HUGEPAGE build

linux-next for the 4.6-rc1 timeline reported ARC build failures with !THP

| arch/arc/include/asm/tlbflush.h:29:0: warning: "flush_pmd_tlb_range" redefined [enabled by default]
| arch/arc/include/asm/tlbflush.h:29:0: warning: "flush_pmd_tlb_range" redefined [enabled by default]
| arch/arc/include/asm/tlbflush.h:29:0: warning: "flush_pmd_tlb_range" redefined [enabled by default]

Turns out that commit ("mm/thp/migration: switch from flush_tlb_range to
flush_pmd_tlb_range") triggered the issue, while the real problem was in
the ARC code, where the THP-specific helpers were not guarded with #ifdef.

Signed-off-by: Vineet Gupta
---
 arch/arc/include/asm/tlbflush.h | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'arch/arc/include')

diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
index 1fe9c8c80280..f0d42f1e83f5 100644
--- a/arch/arc/include/asm/tlbflush.h
+++ b/arch/arc/include/asm/tlbflush.h
@@ -17,8 +17,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 void local_flush_tlb_range(struct vm_area_struct *vma,
 			   unsigned long start, unsigned long end);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			       unsigned long end);
+#endif
 
 #ifndef CONFIG_SMP
 #define flush_tlb_range(vma, s, e)	local_flush_tlb_range(vma, s, e)
@@ -26,7 +28,9 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 #define flush_tlb_kernel_range(s, e)	local_flush_tlb_kernel_range(s, e)
 #define flush_tlb_all()			local_flush_tlb_all()
 #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define flush_pmd_tlb_range(vma, s, e)	local_flush_pmd_tlb_range(vma, s, e)
+#endif
 #else
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
@@ -34,7 +38,8 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 				unsigned long end);
-
+#endif
 #endif /* CONFIG_SMP */
 #endif
-- 
cgit v1.2.1


From f5db19e93f680160a0fb3e2b05ceb4832b24d486 Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Wed, 16 Mar 2016 15:04:39 +0530
Subject: ARC: dma: ioremap: use phys_addr_t consistently in code paths

To support DMA to physical memory beyond 4GB with PAE40

Signed-off-by: Vineet Gupta
---
 arch/arc/include/asm/cacheflush.h | 6 +++---
 arch/arc/include/asm/io.h         | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/arc/include')

diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index fbe3587c4f36..a093adbdb017 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -40,9 +40,9 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
 
 void flush_dcache_page(struct page *page);
 
-void dma_cache_wback_inv(unsigned long start, unsigned long sz);
-void dma_cache_inv(unsigned long start, unsigned long sz);
-void dma_cache_wback(unsigned long start, unsigned long sz);
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_wback(phys_addr_t start, unsigned long sz);
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)

diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 27b17adea50d..1a20a62d192a 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -13,8 +13,8 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
-extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
-extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 				  unsigned long flags);
 extern void iounmap(const void __iomem *addr);
-- 
cgit v1.2.1
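Why the type matters, as a stand-alone sketch: with PAE40 a physical address can have bits above 31, and a 32-bit `unsigned long` silently drops them (values illustrative):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;	/* 64-bit phys_addr_t, as under PAE40 */

int main(void)
{
	phys_addr_t pa = 0x100000000ULL; /* 4GB: needs bit 32 */
	uint32_t ulong32 = (uint32_t)pa; /* what a 32-bit unsigned long keeps */

	/* prints "full 0x100000000, truncated 0x0" */
	printf("full 0x%llx, truncated 0x%x\n",
	       (unsigned long long)pa, ulong32);
	return 0;
}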
From f2e3d55397ff7ad62e159e14281b346760857935 Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Wed, 16 Mar 2016 16:38:57 +0530
Subject: ARC: dma: reintroduce platform specific dma<->phys

Signed-off-by: Vineet Gupta
---
 arch/arc/include/asm/dma-mapping.h | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'arch/arc/include')

diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 660205414f1d..266f11c9bd59 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -11,6 +11,13 @@
 #ifndef ASM_ARC_DMA_MAPPING_H
 #define ASM_ARC_DMA_MAPPING_H
 
+#ifndef CONFIG_ARC_PLAT_NEEDS_PHYS_TO_DMA
+#define plat_dma_to_phys(dev, dma_handle) ((phys_addr_t)(dma_handle))
+#define plat_phys_to_dma(dev, paddr) ((dma_addr_t)(paddr))
+#else
+#include <plat/dma.h>
+#endif
+
 extern struct dma_map_ops arc_dma_ops;
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
-- 
cgit v1.2.1


From deaf7565eb618a80534844300aeacffa14125182 Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Sat, 24 Oct 2015 19:31:16 +0530
Subject: ARCv2: ioremap: Support dynamic peripheral address space

The peripheral address space is an architectural address window which is
uncached and typically used to wire up peripherals.

For ARC700 cores (ARCompact ISA based) this was fixed to the 1GB region
0xC000_0000 - 0xFFFF_FFFF.

For ARCv2 based HS38 cores the start address is flexible and can be
0xC000_0000, 0xD000_0000, 0xE000_0000 or 0xF000_0000, by programming the
AUX_NON_VOLATILE_LIMIT reg (typically done in the bootloader).

Further, in case of PAE, the physical address can extend beyond 4GB, so
the check needs to be confined; otherwise all pages beyond 4GB would be
treated as uncached.

Signed-off-by: Vineet Gupta
---
 arch/arc/include/asm/cache.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arc/include')

diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 210ef3e72332..23706c635c30 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -54,6 +54,7 @@ extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
 extern void read_decode_cache_bcr(void);
 
 extern int ioc_exists;
+extern unsigned long perip_base;
 
 #endif	/* !__ASSEMBLY__ */
-- 
cgit v1.2.1
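A sketch of how such a runtime-probed base is typically consulted when deciding whether a physical address needs an uncached mapping (illustrative logic under the constraints described above, not the actual ARC ioremap implementation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;	/* wide enough for 40-bit PAE40 addresses */

/* set during boot from AUX_NON_VOLATILE_LIMIT; default is illustrative */
static unsigned long perip_base = 0xc0000000UL;

/*
 * The peripheral window lives below 4GB, so the check is confined to
 * 32-bit addresses: pages beyond 4GB are regular memory, not uncached.
 */
static bool paddr_is_uncached(phys_addr_t paddr)
{
	return paddr >= perip_base && paddr <= 0xffffffffULL;
}

int main(void)
{
	printf("%d\n", paddr_is_uncached(0xf0000000ULL));  /* 1: peripheral */
	printf("%d\n", paddr_is_uncached(0x100000000ULL)); /* 0: >4GB RAM */
	return 0;
}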