Diffstat (limited to 'arch/arc/include')
-rw-r--r-- | arch/arc/include/asm/Kbuild           |  1
-rw-r--r-- | arch/arc/include/asm/arcregs.h        |  2
-rw-r--r-- | arch/arc/include/asm/cache.h          |  2
-rw-r--r-- | arch/arc/include/asm/entry-arcv2.h    | 10
-rw-r--r-- | arch/arc/include/asm/entry-compact.h  |  4
-rw-r--r-- | arch/arc/include/asm/fpu.h            | 55
-rw-r--r-- | arch/arc/include/asm/hugepage.h       |  1
-rw-r--r-- | arch/arc/include/asm/io.h             |  4
-rw-r--r-- | arch/arc/include/asm/jump_label.h     | 72
-rw-r--r-- | arch/arc/include/asm/linkage.h        |  8
-rw-r--r-- | arch/arc/include/asm/mach_desc.h      |  3
-rw-r--r-- | arch/arc/include/asm/mmu.h            |  6
-rw-r--r-- | arch/arc/include/asm/mmu_context.h    |  2
-rw-r--r-- | arch/arc/include/asm/pgalloc.h        |  5
-rw-r--r-- | arch/arc/include/asm/pgtable.h        |  9
-rw-r--r-- | arch/arc/include/asm/processor.h      | 10
-rw-r--r-- | arch/arc/include/asm/switch_to.h      | 17
-rw-r--r-- | arch/arc/include/asm/syscalls.h       |  1
-rw-r--r-- | arch/arc/include/asm/vmalloc.h        |  4
-rw-r--r-- | arch/arc/include/uapi/asm/unistd.h    |  1
20 files changed, 163 insertions, 54 deletions
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 393d4f5e1450..1b505694691e 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -17,7 +17,6 @@ generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += mmiowb.h
-generic-y += msi.h
 generic-y += parport.h
 generic-y += percpu.h
 generic-y += preempt.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 5134f0baf33c..f7e432448e4b 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -39,6 +39,8 @@
 #define ARC_REG_CLUSTER_BCR	0xcf
 #define ARC_REG_AUX_ICCM	0x208	/* ICCM Base Addr (ARCv2) */
 #define ARC_REG_LPB_CTRL	0x488	/* ARCv2 Loop Buffer control */
+#define ARC_REG_FPU_CTRL	0x300
+#define ARC_REG_FPU_STATUS	0x301
 
 /* Common for ARCompact and ARCv2 status register */
 #define ARC_REG_STATUS32	0x0A
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 918804c7c1a4..d8ece4292388 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -25,6 +25,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/build_bug.h>
+
 /* Uncached access macros */
 #define arc_read_uncached_32(ptr)	\
 ({					\
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index f5ae394ebe06..0b8b63d0bec1 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -162,7 +162,7 @@
 #endif
 
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
-	ST2	r58, r59, PT_sp + 12
+	ST2	r58, r59, PT_r58
 #endif
 
 .endm
@@ -172,8 +172,8 @@
 
 	LD2	gp, fp, PT_r26		; gp (r26), fp (r27)
 
-	ld	r12, [sp, PT_sp + 4]
-	ld	r30, [sp, PT_sp + 8]
+	ld	r12, [sp, PT_r12]
+	ld	r30, [sp, PT_r30]
 
 	; Restore SP (into AUX_USER_SP) only if returning to U mode
 	;  - for K mode, it will be implicitly restored as stack is unwound
@@ -190,7 +190,7 @@
 #endif
 
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
-	LD2	r58, r59, PT_sp + 12
+	LD2	r58, r59, PT_r58
 #endif
 
 .endm
@@ -256,7 +256,7 @@
 
 .macro FAKE_RET_FROM_EXCPN
 	lr	r9, [status32]
-	bic	r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK)
+	bic	r9, r9, STATUS_AE_MASK
 	or	r9, r9, STATUS_IE_MASK
 	kflag	r9
 .endm
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 66a292335ee6..c3aa775878dc 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -130,7 +130,7 @@
  *	to be saved again on kernel mode stack, as part of pt_regs.
  *-------------------------------------------------------------*/
 .macro PROLOG_FREEUP_REG	reg, mem
-#ifdef CONFIG_SMP
+#ifndef ARC_USE_SCRATCH_REG
 	sr  \reg, [ARC_REG_SCRATCH_DATA0]
 #else
 	st  \reg, [\mem]
@@ -138,7 +138,7 @@
 .endm
 
 .macro PROLOG_RESTORE_REG	reg, mem
-#ifdef CONFIG_SMP
+#ifndef ARC_USE_SCRATCH_REG
 	lr  \reg, [ARC_REG_SCRATCH_DATA0]
 #else
 	ld  \reg, [\mem]
diff --git a/arch/arc/include/asm/fpu.h b/arch/arc/include/asm/fpu.h
new file mode 100644
index 000000000000..64347250fdf5
--- /dev/null
+++ b/arch/arc/include/asm/fpu.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com)
+ *
+ */
+
+#ifndef _ASM_ARC_FPU_H
+#define _ASM_ARC_FPU_H
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ISA_ARCOMPACT
+
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+	struct {
+		unsigned int l, h;
+	} aux_dpfp[2];
+};
+
+#define fpu_init_task(regs)
+
+#else
+
+/*
+ * ARCv2 FPU Control aux register
+ *   - bits to enable Traps on Exceptions
+ *   - Rounding mode
+ *
+ * ARCv2 FPU Status aux register
+ *   - FPU exceptions flags (Inv, Div-by-Zero, overflow, underflow, inexact)
+ *   - Flag Write Enable to clear flags explicitly (vs. by fpu instructions
+ *     only
+ */
+
+struct arc_fpu {
+	unsigned int ctrl, status;
+};
+
+extern void fpu_init_task(struct pt_regs *regs);
+
+#endif	/* !CONFIG_ISA_ARCOMPACT */
+
+extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
+
+#else	/* !CONFIG_ARC_FPU_SAVE_RESTORE */
+
+#define fpu_save_restore(p, n)
+#define fpu_init_task(regs)
+
+#endif	/* CONFIG_ARC_FPU_SAVE_RESTORE */
+
+#endif	/* _ASM_ARC_FPU_H */
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index 9a74ce71a767..30ac40fed2c5 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -8,7 +8,6 @@
 #define _ASM_ARC_HUGEPAGE_H
 
 #include <linux/types.h>
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 static inline pte_t pmd_pte(pmd_t pmd)
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 72f7929736f8..8f777d6441a5 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -34,10 +34,6 @@ static inline void ioport_unmap(void __iomem *addr)
 
 extern void iounmap(const void __iomem *addr);
 
-#define ioremap_nocache(phy, sz)	ioremap(phy, sz)
-#define ioremap_wc(phy, sz)		ioremap(phy, sz)
-#define ioremap_wt(phy, sz)		ioremap(phy, sz)
-
 /*
  * io{read,write}{16,32}be() macros
  */
diff --git a/arch/arc/include/asm/jump_label.h b/arch/arc/include/asm/jump_label.h
new file mode 100644
index 000000000000..9d9618079739
--- /dev/null
+++ b/arch/arc/include/asm/jump_label.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARC_JUMP_LABEL_H
+#define _ASM_ARC_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+/*
+ * NOTE about '.balign 4':
+ *
+ * To make atomic update of patched instruction available we need to guarantee
+ * that this instruction doesn't cross L1 cache line boundary.
+ *
+ * As of today we simply align instruction which can be patched by 4 byte using
+ * ".balign 4" directive. In that case patched instruction is aligned with one
+ * 16-bit NOP_S if this is required.
+ * However 'align by 4' directive is much stricter than it actually required.
+ * It's enough that our 32-bit instruction don't cross L1 cache line boundary /
+ * L1 I$ fetch block boundary which can be achieved by using
+ * ".bundle_align_mode" assembler directive. That will save us from adding
+ * useless NOP_S padding in most of the cases.
+ *
+ * TODO: switch to ".bundle_align_mode" directive using whin it will be
+ * supported by ARC toolchain.
+ */
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+					       bool branch)
+{
+	asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
+		 "1: \n"
+		 "nop \n"
+		 ".pushsection __jump_table, \"aw\" \n"
+		 ".word 1b, %l[l_yes], %c0 \n"
+		 ".popsection \n"
+		 : : "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+						    bool branch)
+{
+	asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n"
+		 "1: \n"
+		 "b %l[l_yes] \n"
+		 ".pushsection __jump_table, \"aw\" \n"
+		 ".word 1b, %l[l_yes], %c0 \n"
+		 ".popsection \n"
+		 : : "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+	jump_label_t code;
+	jump_label_t target;
+	jump_label_t key;
+};
+
+#endif  /* __ASSEMBLY__ */
+#endif
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index a0eeb9f8f0a9..d9ee43c6b7db 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -62,15 +62,15 @@
 #else	/* !__ASSEMBLY__ */
 
 #ifdef CONFIG_ARC_HAS_ICCM
-#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#define __arcfp_code __section(.text.arcfp)
 #else
-#define __arcfp_code __attribute__((__section__(".text")))
+#define __arcfp_code __section(.text)
 #endif
 
 #ifdef CONFIG_ARC_HAS_DCCM
-#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#define __arcfp_data __section(.data.arcfp)
 #else
-#define __arcfp_data __attribute__((__section__(".data")))
+#define __arcfp_data __section(.data)
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index 8ac0e2ac3e70..73746ed5b834 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -53,8 +53,7 @@ extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
  */
 #define MACHINE_START(_type, _name)			\
 static const struct machine_desc __mach_desc_##_type	\
-__used							\
-__attribute__((__section__(".arch.info.init"))) = {	\
+__used __section(.arch.info.init) = {			\
 	.name		= _name,
 
 #define MACHINE_END				\
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index 98cadf1a09ac..26b731d32a2b 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -40,6 +40,10 @@
 #define ARC_REG_SCRATCH_DATA0	0x46c
 #endif
 
+#if defined(CONFIG_ISA_ARCV2) || !defined(CONFIG_SMP)
+#define	ARC_USE_SCRATCH_REG
+#endif
+
 /* Bits in MMU PID register */
 #define __TLB_ENABLE		(1 << 31)
 #define __PROG_ENABLE		(1 << 30)
@@ -63,6 +67,8 @@
 #if (CONFIG_ARC_MMU_VER >= 2)
 #define TLBWriteNI		0x5	/* write JTLB without inv uTLBs */
 #define TLBIVUTLB		0x6	/* explicitly inv uTLBs */
+#else
+#define TLBWriteNI		TLBWrite	/* Not present in hardware, fallback */
 #endif
 
 #if (CONFIG_ARC_MMU_VER >= 4)
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index 035470816be5..3a5e6a5b9ed6 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -144,7 +144,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 */
 	cpumask_set_cpu(cpu, mm_cpumask(next));
 
-#ifndef CONFIG_SMP
+#ifdef ARC_USE_SCRATCH_REG
 	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
 #endif
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 9bdb8ed5b0db..b747f2ec2928 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -108,7 +108,7 @@ pte_alloc_one(struct mm_struct *mm)
 		return 0;
 	memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
 	page = virt_to_page(pte_pg);
-	if (!pgtable_page_ctor(page)) {
+	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
 		return 0;
 	}
@@ -123,13 +123,12 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
 {
-	pgtable_page_dtor(virt_to_page(ptep));
+	pgtable_pte_page_dtor(virt_to_page(ptep));
 	free_pages((unsigned long)ptep, __get_order_pte());
 }
 
 #define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
 
-#define check_pgt_cache()   do { } while (0)
 #define pmd_pgtable(pmd)	((pgtable_t) pmd_page_vaddr(pmd))
 
 #endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 1d87c18a2976..12be7e1b7cc0 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -33,7 +33,6 @@
 #define _ASM_ARC_PGTABLE_H
 
 #include <linux/bits.h>
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 #include <asm/page.h>
 #include <asm/mmu.h>	/* to propagate CONFIG_ARC_MMU_VER <n> */
@@ -274,6 +273,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 #define pmd_none(x)			(!pmd_val(x))
 #define pmd_bad(x)			((pmd_val(x) & ~PAGE_MASK))
 #define pmd_present(x)			(pmd_val(x))
+#define pmd_leaf(x)			(pmd_val(x) & _PAGE_HW_SZ)
 #define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)
 
 #define pte_page(pte)		pfn_to_page(pte_pfn(pte))
@@ -351,7 +351,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 * Thus use this macro only when you are certain that "current" is current
 * e.g. when dealing with signal frame setup code etc
 */
-#ifndef CONFIG_SMP
+#ifdef ARC_USE_SCRATCH_REG
 #define pgd_offset_fast(mm, addr)	\
 ({					\
 	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);	\
@@ -395,11 +395,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 /* to cope with aliasing VIPT cache */
 #define HAVE_ARCH_UNMAPPED_AREA
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 706edeaa5583..ec532d1e0725 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -14,15 +14,7 @@
 #ifndef __ASSEMBLY__
 
 #include <asm/ptrace.h>
-
-#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
-/* These DPFP regs need to be saved/restored across ctx-sw */
-struct arc_fpu {
-	struct {
-		unsigned int l, h;
-	} aux_dpfp[2];
-};
-#endif
+#include <asm/fpu.h>
 
 #ifdef CONFIG_ARC_PLAT_EZNPS
 struct eznps_dp {
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
index 77f123385e96..aadf65b2b56c 100644
--- a/arch/arc/include/asm/switch_to.h
+++ b/arch/arc/include/asm/switch_to.h
@@ -9,19 +9,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/sched.h>
-
-#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
-
-extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
-#define ARC_FPU_PREV(p, n)	fpu_save_restore(p, n)
-#define ARC_FPU_NEXT(t)
-
-#else
-
-#define ARC_FPU_PREV(p, n)
-#define ARC_FPU_NEXT(n)
-
-#endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */
+#include <asm/fpu.h>
 
 #ifdef CONFIG_ARC_PLAT_EZNPS
 extern void dp_save_restore(struct task_struct *p, struct task_struct *n);
@@ -36,9 +24,8 @@ struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
 
 #define switch_to(prev, next, last)	\
 do {					\
 	ARC_EZNPS_DP_PREV(prev, next);	\
-	ARC_FPU_PREV(prev, next);	\
+	fpu_save_restore(prev, next);	\
 	last = __switch_to(prev, next);\
-	ARC_FPU_NEXT(next);		\
 	mb();				\
 } while (0)
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index 7ddba13e9b59..c3f4714a4f5c 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -11,6 +11,7 @@
 #include <linux/types.h>
 
 int sys_clone_wrapper(int, int, int, int, int);
+int sys_clone3_wrapper(void *, size_t);
 int sys_cacheflush(uint32_t, uint32_t uint32_t);
 int sys_arc_settls(void *);
 int sys_arc_gettls(void);
diff --git a/arch/arc/include/asm/vmalloc.h b/arch/arc/include/asm/vmalloc.h
new file mode 100644
index 000000000000..973095aad665
--- /dev/null
+++ b/arch/arc/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_ARC_VMALLOC_H
+#define _ASM_ARC_VMALLOC_H
+
+#endif /* _ASM_ARC_VMALLOC_H */
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 5eafa1115162..fa2713ae6bea 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -21,6 +21,7 @@
 #define __ARCH_WANT_SET_GET_RLIMIT
 #define __ARCH_WANT_SYS_EXECVE
 #define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
 #define __ARCH_WANT_SYS_VFORK
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_TIME32_SYSCALLS
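Note on the new asm/fpu.h: the body of fpu_save_restore() lives in arch/arc/kernel/fpu.c and is not part of this diff. The sketch below is a hypothetical illustration only, showing how the ctrl/status shadow added to struct arc_fpu would pair with the new ARC_REG_FPU_CTRL / ARC_REG_FPU_STATUS aux registers at context switch; the Flag Write Enable detail mentioned in the fpu.h comment is deliberately omitted.

/*
 * Hypothetical sketch, not the in-tree implementation: spill the outgoing
 * task's FPU ctrl/status into its struct arc_fpu and reload the incoming
 * task's copy from/to the ARCv2 FPU aux registers.
 */
#include <linux/sched.h>
#include <asm/arcregs.h>
#include <asm/fpu.h>

void example_fpu_save_restore(struct task_struct *prev, struct task_struct *next)
{
	struct arc_fpu *save = &prev->thread.fpu;
	struct arc_fpu *restore = &next->thread.fpu;

	/* Save outgoing task's FPU control/status state */
	save->ctrl = read_aux_reg(ARC_REG_FPU_CTRL);
	save->status = read_aux_reg(ARC_REG_FPU_STATUS);

	/* Restore incoming task's state (FWE handling omitted for brevity) */
	write_aux_reg(ARC_REG_FPU_CTRL, restore->ctrl);
	write_aux_reg(ARC_REG_FPU_STATUS, restore->status);
}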
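Note on the new asm/jump_label.h: it only supplies the architecture hooks (arch_static_branch()/arch_static_branch_jump() and the jump_entry layout). A minimal, hypothetical consumer of the generic static-key API is sketched below; the key name and functions here are illustrative and not part of this commit.

/* Illustrative user of the static-key API; not part of this diff. */
#include <linux/jump_label.h>
#include <linux/printk.h>

/* Hypothetical key, statically "false" (nop path) by default */
static DEFINE_STATIC_KEY_FALSE(arc_example_key);

static void hot_path(void)
{
	/*
	 * With asm/jump_label.h this test compiles down to the single
	 * patchable instruction emitted by arch_static_branch(): a nop
	 * until the key is enabled, then a branch to the unlikely block.
	 */
	if (static_branch_unlikely(&arc_example_key))
		pr_info("slow path taken\n");
}

static void make_it_slow(void)
{
	/* Run-time patching: flips the nop to a branch at every call site */
	static_branch_enable(&arc_example_key);
}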