Diffstat (limited to 'arch/arc/include/asm')
-rw-r--r--  arch/arc/include/asm/arcregs.h        |  6
-rw-r--r--  arch/arc/include/asm/bitops.h         | 15
-rw-r--r--  arch/arc/include/asm/cache.h          |  1
-rw-r--r--  arch/arc/include/asm/cacheflush.h     |  6
-rw-r--r--  arch/arc/include/asm/cmpxchg.h        |  2
-rw-r--r--  arch/arc/include/asm/dma-mapping.h    |  7
-rw-r--r--  arch/arc/include/asm/entry-compact.h  |  2
-rw-r--r--  arch/arc/include/asm/io.h             | 22
-rw-r--r--  arch/arc/include/asm/page.h           | 19
-rw-r--r--  arch/arc/include/asm/pgtable.h        | 11
-rw-r--r--  arch/arc/include/asm/tlbflush.h       |  7
11 files changed, 46 insertions(+), 52 deletions(-)
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index f9f4c6f59fdb..7fbaea00a336 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -381,12 +381,6 @@ static inline int is_isa_arcompact(void)
return IS_ENABLED(CONFIG_ISA_ARCOMPACT);
}
-#if defined(CONFIG_ISA_ARCOMPACT) && !defined(_CPU_DEFAULT_A7)
-#error "Toolchain not configured for ARCompact builds"
-#elif defined(CONFIG_ISA_ARCV2) && !defined(_CPU_DEFAULT_HS)
-#error "Toolchain not configured for ARCv2 builds"
-#endif
-
#endif /* __ASEMBLY__ */
#endif /* _ASM_ARC_ARCREGS_H */
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 57c1f33844d4..0352fb8d21b9 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -35,21 +35,6 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
\
m += nr >> 5; \
\
- /* \
- * ARC ISA micro-optimization: \
- * \
- * Instructions dealing with bitpos only consider lower 5 bits \
- * e.g (x << 33) is handled like (x << 1) by ASL instruction \
- * (mem pointer still needs adjustment to point to next word) \
- * \
- * Hence the masking to clamp @nr arg can be elided in general. \
- * \
- * However if @nr is a constant (above assumed in a register), \
- * and greater than 31, gcc can optimize away (x << 33) to 0, \
- * as overflow, given the 32-bit ISA. Thus masking needs to be \
- * done for const @nr, but no code is generated due to gcc \
- * const prop. \
- */ \
nr &= 0x1f; \
\
__asm__ __volatile__( \
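The comment deleted above explained why the nr &= 0x1f masking is kept even though ARC shift instructions only look at the low 5 bits of a runtime shift amount: for a constant nr above 31, gcc would otherwise fold the 32-bit shift to 0 as an overflow. A minimal, non-atomic sketch of that decomposition (illustrative only, not part of the patch):

static inline void example_set_bit(unsigned long nr, volatile unsigned long *m)
{
	m += nr >> 5;		/* advance to the 32-bit word holding bit @nr */
	nr &= 0x1f;		/* bit position within that word; also keeps a
				 * constant nr > 31 well-defined for gcc */
	*m |= 1UL << nr;	/* non-atomic set, for illustration only */
}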
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 210ef3e72332..23706c635c30 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -54,6 +54,7 @@ extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
extern int ioc_exists;
+extern unsigned long perip_base;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index fbe3587c4f36..a093adbdb017 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -40,9 +40,9 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
void flush_dcache_page(struct page *page);
-void dma_cache_wback_inv(unsigned long start, unsigned long sz);
-void dma_cache_inv(unsigned long start, unsigned long sz);
-void dma_cache_wback(unsigned long start, unsigned long sz);
+void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_inv(phys_addr_t start, unsigned long sz);
+void dma_cache_wback(phys_addr_t start, unsigned long sz);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index af7a2db139c9..a444be67cd53 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -149,7 +149,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
* Since xchg() doesn't always do that, it would seem that following defintion
* is incorrect. But here's the rationale:
* SMP : Even xchg() takes the atomic_ops_lock, so OK.
- * LLSC: atomic_ops_lock are not relevent at all (even if SMP, since LLSC
+ * LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
* is natively "SMP safe", no serialization required).
* UP : other atomics disable IRQ, so no way a difft ctxt atomic_xchg()
* could clobber them. atomic_xchg() itself would be 1 insn, so it
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 660205414f1d..266f11c9bd59 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -11,6 +11,13 @@
#ifndef ASM_ARC_DMA_MAPPING_H
#define ASM_ARC_DMA_MAPPING_H
+#ifndef CONFIG_ARC_PLAT_NEEDS_PHYS_TO_DMA
+#define plat_dma_to_phys(dev, dma_handle) ((phys_addr_t)(dma_handle))
+#define plat_phys_to_dma(dev, paddr) ((dma_addr_t)(paddr))
+#else
+#include <plat/dma.h>
+#endif
+
extern struct dma_map_ops arc_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
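The plat_dma_to_phys()/plat_phys_to_dma() hooks added above default to identity conversions; a platform that selects CONFIG_ARC_PLAT_NEEDS_PHYS_TO_DMA supplies its own in <plat/dma.h>. A hypothetical sketch of such a header, assuming the bus sees memory at a fixed offset (the offset value is invented for illustration):

/* Hypothetical <plat/dma.h>: DMA bus addresses differ from CPU physical
 * addresses by a constant offset on this imagined platform. */
#define PLAT_DMA_OFFSET			0x20000000UL

#define plat_phys_to_dma(dev, paddr)	((dma_addr_t)((paddr) - PLAT_DMA_OFFSET))
#define plat_dma_to_phys(dev, dma_h)	((phys_addr_t)((dma_h) + PLAT_DMA_OFFSET))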
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 1aff3be91075..1d8f57cd6057 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -231,7 +231,7 @@
/* free up r9 as scratchpad */
PROLOG_FREEUP_REG r9, @int\LVL\()_saved_reg
- /* Which mode (user/kernel) was the system in when intr occured */
+ /* Which mode (user/kernel) was the system in when intr occurred */
lr r9, [status32_l\LVL\()]
SWITCH_TO_KERNEL_STK
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 947bf0cfdec0..17f85c9c73cf 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -13,8 +13,8 @@
#include <asm/byteorder.h>
#include <asm/page.h>
-extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
-extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
unsigned long flags);
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
@@ -138,15 +138,23 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
/*
- * Relaxed API for drivers which can handle any ordering themselves
+ * Relaxed API for drivers which can handle barrier ordering themselves
+ *
+ * Also these are defined to perform little endian accesses.
+ * To provide the typical device register semantics of fixed endian,
+ * swap the byte order for Big Endian
+ *
+ * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
*/
#define readb_relaxed(c) __raw_readb(c)
-#define readw_relaxed(c) __raw_readw(c)
-#define readl_relaxed(c) __raw_readl(c)
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+ __raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+ __raw_readl(c)); __r; })
#define writeb_relaxed(v,c) __raw_writeb(v,c)
-#define writew_relaxed(v,c) __raw_writew(v,c)
-#define writel_relaxed(v,c) __raw_writel(v,c)
+#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
#include <asm-generic/io.h>
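The added comment spells out that the relaxed MMIO accessors are now fixed little-endian: le16_to_cpu()/le32_to_cpu() are no-ops on a little-endian kernel and byte-swaps on a big-endian one, so a little-endian device register reads back the same value either way. A minimal usage sketch, not part of the patch (the function and register offset are illustrative):

/* Illustrative only: reads an ID register of a little-endian device;
 * the result is now independent of the kernel's endianness. */
static u32 example_read_id(void __iomem *regs)
{
	return readl_relaxed(regs + 0x00);	/* hypothetical ID register */
}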
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 429957f1c236..9a7cf521b95c 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -10,7 +10,6 @@
#include <uapi/asm/page.h>
-
#ifndef __ASSEMBLY__
#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
@@ -76,30 +75,26 @@ typedef unsigned long pgprot_t;
typedef pte_t * pgtable_t;
-#define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+
+#define ARCH_PFN_OFFSET virt_to_pfn(CONFIG_LINUX_LINK_BASE)
-#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
/*
* __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
*
* These macros have historically been misnamed
* virt here means link-address/program-address as embedded in object code.
- * So if kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
- * 128th page, and virt_to_page( ) will return the struct page corresp to it.
- * mem_map[ ] is an array of struct page for each page frame in the system
- *
- * Independent of where linux is linked at, link-addr = physical address
- * So the old macro __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
- * would have been wrong in case kernel is not at 0x8zs
+ * And for ARC, link-addr = physical address
*/
#define __pa(vaddr) ((unsigned long)vaddr)
#define __va(paddr) ((void *)((unsigned long)(paddr)))
#define virt_to_page(kaddr) \
- (mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
+ (mem_map + virt_to_pfn((kaddr) - CONFIG_LINUX_LINK_BASE))
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
/* Default Permissions for stack/heaps pages (Non Executable) */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
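Worked example of the new page.h helpers, assuming PAGE_SHIFT == 12 and a 0x8000_0000 link base (values illustrative, not taken from the patch): because __pa() is an identity on ARC, the new ARCH_PFN_OFFSET evaluates to the same number as the old CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT form.

/* Illustrative arithmetic only: virt_to_pfn(link base) with 4 KB pages. */
#define EX_LINK_BASE	0x80000000UL
#define EX_PAGE_SHIFT	12
#define EX_PFN_OFFSET	(EX_LINK_BASE >> EX_PAGE_SHIFT)		/* == 0x80000 */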
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index d426d4215513..7d6c93e63adf 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -12,7 +12,7 @@
* - Utilise some unused free bits to confine PTE flags to 12 bits
* This is a must for 4k pg-sz
*
- * vineetg: Mar 2011 - changes to accomodate MMU TLB Page Descriptor mods
+ * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
* -TLB Locking never really existed, except for initial specs
* -SILENT_xxx not needed for our port
* -Per my request, MMU V3 changes the layout of some of the bits
@@ -278,15 +278,14 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
#define pmd_present(x) (pmd_val(x))
#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
-#define pte_page(x) (mem_map + \
- (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
- PAGE_SHIFT)))
+#define pte_page(pte) \
+ (mem_map + virt_to_pfn(pte_val(pte) - CONFIG_LINUX_LINK_BASE))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pte_pfn(pte) virt_to_pfn(pte_val(pte))
#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
pgprot_val(prot)))
-#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define __pte_index(addr) (virt_to_pfn(addr) & (PTRS_PER_PTE - 1))
/*
* pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
index 1fe9c8c80280..f0d42f1e83f5 100644
--- a/arch/arc/include/asm/tlbflush.h
+++ b/arch/arc/include/asm/tlbflush.h
@@ -17,8 +17,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
+#endif
#ifndef CONFIG_SMP
#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
@@ -26,7 +28,9 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define flush_pmd_tlb_range(vma, s, e) local_flush_pmd_tlb_range(vma, s, e)
+#endif
#else
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
@@ -34,7 +38,8 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
-
+#endif
#endif /* CONFIG_SMP */
#endif