Diffstat (limited to 'arch/powerpc/include')
-rw-r--r-- arch/powerpc/include/asm/Kbuild | 4
-rw-r--r-- arch/powerpc/include/asm/archrandom.h | 29
-rw-r--r-- arch/powerpc/include/asm/asm-prototypes.h | 20
-rw-r--r-- arch/powerpc/include/asm/barrier.h | 2
-rw-r--r-- arch/powerpc/include/asm/bitops.h | 51
-rw-r--r-- arch/powerpc/include/asm/book3s/32/kup.h | 69
-rw-r--r-- arch/powerpc/include/asm/book3s/32/pgalloc.h | 8
-rw-r--r-- arch/powerpc/include/asm/book3s/32/pgtable.h | 23
-rw-r--r-- arch/powerpc/include/asm/book3s/64/kup-radix.h | 40
-rw-r--r-- arch/powerpc/include/asm/book3s/64/mmu-hash.h | 5
-rw-r--r-- arch/powerpc/include/asm/book3s/64/mmu.h | 4
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgalloc.h | 17
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable-4k.h | 3
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable-64k.h | 3
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable.h | 11
-rw-r--r-- arch/powerpc/include/asm/book3s/64/radix.h | 11
-rw-r--r-- arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | 10
-rw-r--r-- arch/powerpc/include/asm/book3s/64/tlbflush.h | 19
-rw-r--r-- arch/powerpc/include/asm/book3s/pgtable.h | 11
-rw-r--r-- arch/powerpc/include/asm/bug.h | 49
-rw-r--r-- arch/powerpc/include/asm/cache.h | 55
-rw-r--r-- arch/powerpc/include/asm/cacheflush.h | 36
-rw-r--r-- arch/powerpc/include/asm/compat.h | 17
-rw-r--r-- arch/powerpc/include/asm/cpm.h | 172
-rw-r--r-- arch/powerpc/include/asm/cputable.h | 32
-rw-r--r-- arch/powerpc/include/asm/current.h | 3
-rw-r--r-- arch/powerpc/include/asm/dma-direct.h | 13
-rw-r--r-- arch/powerpc/include/asm/dma-mapping.h | 18
-rw-r--r-- arch/powerpc/include/asm/eeh.h | 40
-rw-r--r-- arch/powerpc/include/asm/elf.h | 3
-rw-r--r-- arch/powerpc/include/asm/elfnote.h | 24
-rw-r--r-- arch/powerpc/include/asm/error-injection.h | 13
-rw-r--r-- arch/powerpc/include/asm/fadump-internal.h | 169
-rw-r--r-- arch/powerpc/include/asm/fadump.h | 194
-rw-r--r-- arch/powerpc/include/asm/firmware.h | 11
-rw-r--r-- arch/powerpc/include/asm/fixmap.h | 26
-rw-r--r-- arch/powerpc/include/asm/ftrace.h | 2
-rw-r--r-- arch/powerpc/include/asm/futex.h | 13
-rw-r--r-- arch/powerpc/include/asm/head-64.h | 41
-rw-r--r-- arch/powerpc/include/asm/hugetlb.h | 3
-rw-r--r-- arch/powerpc/include/asm/hvcall.h | 10
-rw-r--r-- arch/powerpc/include/asm/hw_breakpoint.h | 13
-rw-r--r-- arch/powerpc/include/asm/hw_irq.h | 57
-rw-r--r-- arch/powerpc/include/asm/io-workarounds.h | 20
-rw-r--r-- arch/powerpc/include/asm/io.h | 19
-rw-r--r-- arch/powerpc/include/asm/iommu.h | 28
-rw-r--r-- arch/powerpc/include/asm/kasan.h | 2
-rw-r--r-- arch/powerpc/include/asm/kup.h | 49
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_64.h | 2
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_uvmem.h | 80
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 31
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 108
-rw-r--r-- arch/powerpc/include/asm/local.h | 2
-rw-r--r-- arch/powerpc/include/asm/machdep.h | 7
-rw-r--r-- arch/powerpc/include/asm/mce.h | 10
-rw-r--r-- arch/powerpc/include/asm/mem_encrypt.h | 26
-rw-r--r-- arch/powerpc/include/asm/mmu.h | 2
-rw-r--r-- arch/powerpc/include/asm/mmu_context.h | 5
-rw-r--r-- arch/powerpc/include/asm/nohash/32/kup-8xx.h | 22
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pgtable.h | 23
-rw-r--r-- arch/powerpc/include/asm/nohash/64/pgtable.h | 1
-rw-r--r-- arch/powerpc/include/asm/nohash/mmu-book3e.h | 11
-rw-r--r-- arch/powerpc/include/asm/nohash/pgalloc.h | 8
-rw-r--r-- arch/powerpc/include/asm/nohash/pgtable.h | 13
-rw-r--r-- arch/powerpc/include/asm/opal-api.h | 48
-rw-r--r-- arch/powerpc/include/asm/opal.h | 14
-rw-r--r-- arch/powerpc/include/asm/page.h | 32
-rw-r--r-- arch/powerpc/include/asm/page_32.h | 4
-rw-r--r-- arch/powerpc/include/asm/pci-bridge.h | 8
-rw-r--r-- arch/powerpc/include/asm/pci.h | 3
-rw-r--r-- arch/powerpc/include/asm/pgalloc.h | 2
-rw-r--r-- arch/powerpc/include/asm/pgtable.h | 25
-rw-r--r-- arch/powerpc/include/asm/plpar_wrappers.h | 6
-rw-r--r-- arch/powerpc/include/asm/pnv-pci.h | 1
-rw-r--r-- arch/powerpc/include/asm/ppc-pci.h | 7
-rw-r--r-- arch/powerpc/include/asm/ppc4xx_ocm.h | 31
-rw-r--r-- arch/powerpc/include/asm/ppc_asm.h | 80
-rw-r--r-- arch/powerpc/include/asm/processor.h | 9
-rw-r--r-- arch/powerpc/include/asm/ptrace.h | 6
-rw-r--r-- arch/powerpc/include/asm/reg.h | 50
-rw-r--r-- arch/powerpc/include/asm/reg_8xx.h | 18
-rw-r--r-- arch/powerpc/include/asm/scom.h | 154
-rw-r--r-- arch/powerpc/include/asm/sections.h | 25
-rw-r--r-- arch/powerpc/include/asm/secure_boot.h | 29
-rw-r--r-- arch/powerpc/include/asm/security_features.h | 11
-rw-r--r-- arch/powerpc/include/asm/secvar.h | 35
-rw-r--r-- arch/powerpc/include/asm/setjmp.h | 4
-rw-r--r-- arch/powerpc/include/asm/spinlock.h | 62
-rw-r--r-- arch/powerpc/include/asm/string.h | 2
-rw-r--r-- arch/powerpc/include/asm/svm.h | 31
-rw-r--r-- arch/powerpc/include/asm/thread_info.h | 18
-rw-r--r-- arch/powerpc/include/asm/time.h | 6
-rw-r--r-- arch/powerpc/include/asm/timex.h | 34
-rw-r--r-- arch/powerpc/include/asm/tlb.h | 11
-rw-r--r-- arch/powerpc/include/asm/uaccess.h | 111
-rw-r--r-- arch/powerpc/include/asm/ultravisor-api.h | 39
-rw-r--r-- arch/powerpc/include/asm/ultravisor.h | 85
-rw-r--r-- arch/powerpc/include/asm/vdso_datapage.h | 22
-rw-r--r-- arch/powerpc/include/asm/vmalloc.h | 4
-rw-r--r-- arch/powerpc/include/asm/xive-regs.h | 1
-rw-r--r-- arch/powerpc/include/asm/xive.h | 98
-rw-r--r-- arch/powerpc/include/uapi/asm/kvm.h | 3
-rw-r--r-- arch/powerpc/include/uapi/asm/msgbuf.h | 8
-rw-r--r-- arch/powerpc/include/uapi/asm/sembuf.h | 6
-rw-r--r-- arch/powerpc/include/uapi/asm/shmbuf.h | 6
-rw-r--r-- arch/powerpc/include/uapi/asm/spu_info.h | 14
-rw-r--r-- arch/powerpc/include/uapi/asm/stat.h | 2
107 files changed, 1718 insertions(+), 1270 deletions(-)
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 9a1d2fc6ceb7..d0a23d0db863 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -4,11 +4,11 @@ generated-y += syscall_table_64.h
generated-y += syscall_table_c32.h
generated-y += syscall_table_spu.h
generic-y += div64.h
+generic-y += dma-mapping.h
generic-y += export.h
generic-y += irq_regs.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += vtime.h
-generic-y += msi.h
-generic-y += simd.h
+generic-y += early_ioremap.h
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
index 9c63b596e6ce..9a53e29680f4 100644
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -6,44 +6,35 @@
#include <asm/machdep.h>
-static inline int arch_get_random_long(unsigned long *v)
+static inline bool __must_check arch_get_random_long(unsigned long *v)
{
- return 0;
+ return false;
}
-static inline int arch_get_random_int(unsigned int *v)
+static inline bool __must_check arch_get_random_int(unsigned int *v)
{
- return 0;
+ return false;
}
-static inline int arch_get_random_seed_long(unsigned long *v)
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
if (ppc_md.get_random_seed)
return ppc_md.get_random_seed(v);
- return 0;
+ return false;
}
-static inline int arch_get_random_seed_int(unsigned int *v)
+
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
unsigned long val;
- int rc;
+ bool rc;
- rc = arch_get_random_long(&val);
+ rc = arch_get_random_seed_long(&val);
if (rc)
*v = val;
return rc;
}
-
-static inline int arch_has_random(void)
-{
- return 0;
-}
-
-static inline int arch_has_random_seed(void)
-{
- return !!ppc_md.get_random_seed;
-}
#endif /* CONFIG_ARCH_RANDOM */
#ifdef CONFIG_PPC_POWERNV
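Note the bug fix buried in the conversion above: arch_get_random_seed_int() used to call arch_get_random_long(), which always fails on powerpc, so it could never return a seed; it now calls arch_get_random_seed_long(). A minimal caller sketch, assuming only the interfaces shown above (the helper name is illustrative, not part of this patch):

#include <asm/archrandom.h>
#include <linux/random.h>

static unsigned long seed_or_fallback(void)
{
	unsigned long seed;

	/* Returns false when the platform provides no get_random_seed hook. */
	if (arch_get_random_seed_long(&seed))
		return seed;

	return get_random_long();	/* fall back to the kernel PRNG */
}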
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index ec1c97a8e8cb..983c0084fb3f 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -15,6 +15,7 @@
#include <asm/epapr_hcalls.h>
#include <asm/dcr.h>
#include <asm/mmu_context.h>
+#include <asm/ultravisor-api.h>
#include <uapi/asm/ucontext.h>
@@ -34,6 +35,16 @@ extern struct static_key hcall_tracepoint_key;
void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
+/* Ultravisor */
+#if defined(CONFIG_PPC_POWERNV) || defined(CONFIG_PPC_SVM)
+long ucall_norets(unsigned long opcode, ...);
+#else
+static inline long ucall_norets(unsigned long opcode, ...)
+{
+ return U_NOT_AVAILABLE;
+}
+#endif
+
/* OPAL */
int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
int64_t a4, int64_t a5, int64_t a6, int64_t a7,
@@ -81,7 +92,8 @@ long sys_swapcontext(struct ucontext __user *old_ctx,
long sys_debug_setcontext(struct ucontext __user *ctx,
int ndbg, struct sig_dbg_op __user *dbg);
int
-ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp);
+ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
+ struct __kernel_old_timeval __user *tvp);
unsigned long __init early_init(unsigned long dt_ptr);
void __init machine_init(u64 dt_ptr);
#endif
@@ -123,7 +135,8 @@ extern int __ucmpdi2(u64, u64);
/* tracing */
void _mcount(void);
-unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
+ unsigned long sp);
void pnv_power9_force_smt4_catch(void);
void pnv_power9_force_smt4_release(void);
@@ -140,9 +153,12 @@ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
/* Patch sites */
extern s32 patch__call_flush_count_cache;
extern s32 patch__flush_count_cache_return;
+extern s32 patch__flush_link_stack_return;
+extern s32 patch__call_kvm_flush_link_stack;
extern s32 patch__memset_nocache, patch__memcpy_nocache;
extern long flush_count_cache;
+extern long kvm_flush_link_stack;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
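The new ucall_norets() prototype mirrors the OPAL/hcall entry points and compiles down to U_NOT_AVAILABLE when neither PowerNV nor SVM support is configured. A sketch of how an ultravisor wrapper is expected to sit on top of it (the opcode name comes from ultravisor-api.h; treat the exact call shape as an assumption of the example, not part of this patch):

#include <asm/ultravisor-api.h>
#include <asm/asm-prototypes.h>

static long uv_register_pate_sketch(u64 lpid, u64 dw0, u64 dw1)
{
	/* Illustrative: returns U_NOT_AVAILABLE without UV support. */
	return ucall_norets(UV_WRITE_PATE, lpid, dw0, dw1);
}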
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index fbe8df433019..123adcefd40f 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -18,8 +18,6 @@
* mb() prevents loads and stores being reordered across this point.
* rmb() prevents loads being reordered across this point.
* wmb() prevents stores being reordered across this point.
- * read_barrier_depends() prevents data-dependent loads being reordered
- * across this point (nop on PPC).
*
* *mb() variants without smp_ prefix must order all types of memory
* operations with one another. sync is the only instruction sufficient
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 603aed229af7..28dcf8222943 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -64,7 +64,7 @@
/* Macro for generating the ***_bits() functions */
#define DEFINE_BITOP(fn, op, prefix) \
-static __inline__ void fn(unsigned long mask, \
+static inline void fn(unsigned long mask, \
volatile unsigned long *_p) \
{ \
unsigned long old; \
@@ -86,22 +86,22 @@ DEFINE_BITOP(clear_bits, andc, "")
DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
DEFINE_BITOP(change_bits, xor, "")
-static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+static inline void arch_set_bit(int nr, volatile unsigned long *addr)
{
set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}
-static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
+static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
{
clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}
-static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
+static inline void arch_clear_bit_unlock(int nr, volatile unsigned long *addr)
{
clear_bits_unlock(BIT_MASK(nr), addr + BIT_WORD(nr));
}
-static __inline__ void change_bit(int nr, volatile unsigned long *addr)
+static inline void arch_change_bit(int nr, volatile unsigned long *addr)
{
change_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
}
@@ -109,7 +109,7 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr)
/* Like DEFINE_BITOP(), with changes to the arguments to 'op' and the output
* operands. */
#define DEFINE_TESTOP(fn, op, prefix, postfix, eh) \
-static __inline__ unsigned long fn( \
+static inline unsigned long fn( \
unsigned long mask, \
volatile unsigned long *_p) \
{ \
@@ -138,34 +138,34 @@ DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
PPC_ATOMIC_EXIT_BARRIER, 0)
-static __inline__ int test_and_set_bit(unsigned long nr,
- volatile unsigned long *addr)
+static inline int arch_test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr)
{
return test_and_set_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}
-static __inline__ int test_and_set_bit_lock(unsigned long nr,
- volatile unsigned long *addr)
+static inline int arch_test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr)
{
return test_and_set_bits_lock(BIT_MASK(nr),
addr + BIT_WORD(nr)) != 0;
}
-static __inline__ int test_and_clear_bit(unsigned long nr,
- volatile unsigned long *addr)
+static inline int arch_test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr)
{
return test_and_clear_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}
-static __inline__ int test_and_change_bit(unsigned long nr,
- volatile unsigned long *addr)
+static inline int arch_test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr)
{
return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
}
#ifdef CONFIG_PPC64
-static __inline__ unsigned long clear_bit_unlock_return_word(int nr,
- volatile unsigned long *addr)
+static inline unsigned long
+clear_bit_unlock_return_word(int nr, volatile unsigned long *addr)
{
unsigned long old, t;
unsigned long *p = (unsigned long *)addr + BIT_WORD(nr);
@@ -185,15 +185,18 @@ static __inline__ unsigned long clear_bit_unlock_return_word(int nr,
return old;
}
-/* This is a special function for mm/filemap.c */
-#define clear_bit_unlock_is_negative_byte(nr, addr) \
- (clear_bit_unlock_return_word(nr, addr) & BIT_MASK(PG_waiters))
+/*
+ * This is a special function for mm/filemap.c
+ * Bit 7 corresponds to PG_waiters.
+ */
+#define arch_clear_bit_unlock_is_negative_byte(nr, addr) \
+ (clear_bit_unlock_return_word(nr, addr) & BIT_MASK(7))
#endif /* CONFIG_PPC64 */
#include <asm-generic/bitops/non-atomic.h>
-static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
+static inline void arch___clear_bit_unlock(int nr, volatile unsigned long *addr)
{
__asm__ __volatile__(PPC_RELEASE_BARRIER "" ::: "memory");
__clear_bit(nr, addr);
@@ -215,14 +218,14 @@ static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
* fls: find last (most-significant) bit set.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __inline__ int fls(unsigned int x)
+static inline int fls(unsigned int x)
{
return 32 - __builtin_clz(x);
}
#include <asm-generic/bitops/builtin-__fls.h>
-static __inline__ int fls64(__u64 x)
+static inline int fls64(__u64 x)
{
return 64 - __builtin_clzll(x);
}
@@ -239,6 +242,10 @@ unsigned long __arch_hweight64(__u64 w);
#include <asm-generic/bitops/find.h>
+/* wrappers that deal with KASAN instrumentation */
+#include <asm-generic/bitops/instrumented-atomic.h>
+#include <asm-generic/bitops/instrumented-lock.h>
+
/* Little-endian versions */
#include <asm-generic/bitops/le.h>
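The rename to arch_*() plus the two instrumented-* includes is what lets KASAN see bit operations: the generic wrappers check the access, then call through to the arch op. Roughly what the generic header provides (a sketch, not the verbatim asm-generic code):

static inline void set_bit(long nr, volatile unsigned long *addr)
{
	/* Report the word-sized write to KASAN, then do the real atomic op. */
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	arch_set_bit(nr, addr);
}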
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 677e9babef80..3c0ba22dc360 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -91,6 +91,7 @@
static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
{
+ addr &= 0xf0000000; /* align addr to start of segment */
barrier(); /* make sure thread.kuap is updated before playing with SRs */
while (addr < end) {
mtsrin(sr, addr);
@@ -101,41 +102,91 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
isync(); /* Context sync required after mtsrin() */
}
-static inline void allow_user_access(void __user *to, const void __user *from, u32 size)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ u32 size, unsigned long dir)
{
u32 addr, end;
- if (__builtin_constant_p(to) && to == NULL)
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
+ BUILD_BUG_ON(dir == KUAP_CURRENT);
+
+ if (!(dir & KUAP_WRITE))
return;
addr = (__force u32)to;
- if (!addr || addr >= TASK_SIZE || !size)
+ if (unlikely(addr >= TASK_SIZE || !size))
return;
end = min(addr + size, TASK_SIZE);
+
current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end); /* Clear Ks */
}
-static inline void prevent_user_access(void __user *to, const void __user *from, u32 size)
+static __always_inline void prevent_user_access(void __user *to, const void __user *from,
+ u32 size, unsigned long dir)
{
- u32 addr = (__force u32)to;
- u32 end = min(addr + size, TASK_SIZE);
+ u32 addr, end;
+
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
+
+ if (dir == KUAP_CURRENT) {
+ u32 kuap = current->thread.kuap;
- if (!addr || addr >= TASK_SIZE || !size)
+ if (unlikely(!kuap))
+ return;
+
+ addr = kuap & 0xf0000000;
+ end = kuap << 28;
+ } else if (dir & KUAP_WRITE) {
+ addr = (__force u32)to;
+ end = min(addr + size, TASK_SIZE);
+
+ if (unlikely(addr >= TASK_SIZE || !size))
+ return;
+ } else {
return;
+ }
current->thread.kuap = 0;
kuap_update_sr(mfsrin(addr) | SR_KS, addr, end); /* set Ks */
}
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline unsigned long prevent_user_access_return(void)
+{
+ unsigned long flags = current->thread.kuap;
+ unsigned long addr = flags & 0xf0000000;
+ unsigned long end = flags << 28;
+ void __user *to = (__force void __user *)addr;
+
+ if (flags)
+ prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);
+
+ return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
{
+ unsigned long addr = flags & 0xf0000000;
+ unsigned long end = flags << 28;
+ void __user *to = (__force void __user *)addr;
+
+ if (flags)
+ allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
+}
+
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ unsigned long begin = regs->kuap & 0xf0000000;
+ unsigned long end = regs->kuap << 28;
+
if (!is_write)
return false;
- return WARN(!regs->kuap, "Bug: write fault blocked by segment registers !");
+ return WARN(address < begin || address >= end,
+ "Bug: write fault blocked by segment registers !");
}
#endif /* CONFIG_PPC_KUAP */
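The new 'dir' argument means callers open only the access direction they need, and KUAP_CURRENT lets prevent_user_access() close whatever window thread.kuap says is open. A sketch of the intended calling pattern (the copy routine is illustrative; the KUAP_* constants come from asm/kup.h):

static inline unsigned long
copy_to_user_sketch(void __user *to, const void *from, unsigned long n)
{
	allow_user_access(to, NULL, n, KUAP_WRITE);	/* open write window */
	n = __copy_tofrom_user(to, (__force const void __user *)from, n);
	prevent_user_access(to, NULL, n, KUAP_WRITE);	/* close it again */
	return n;
}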
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 998317702630..dc5c039eb28e 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -49,7 +49,6 @@ static inline void pgtable_free(void *table, unsigned index_size)
#define get_hugepd_cache_index(x) (x)
-#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
void *table, int shift)
{
@@ -66,13 +65,6 @@ static inline void __tlb_remove_table(void *_table)
pgtable_free(table, shift);
}
-#else
-static inline void pgtable_free_tlb(struct mmu_gather *tlb,
- void *table, int shift)
-{
- pgtable_free(table, shift);
-}
-#endif
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 838de59f6754..5b39c11e884a 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -148,23 +148,21 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
*/
#include <asm/fixmap.h>
-#ifdef CONFIG_HIGHMEM
-#define KVIRT_TOP PKMAP_BASE
-#else
-#define KVIRT_TOP FIXADDR_START
-#endif
-
/*
* ioremap_bot starts at that address. Early ioremaps move down from there,
* until mem_init() at which point this becomes the top of the vmalloc
* and ioremap space
*/
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define IOREMAP_TOP ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#ifdef CONFIG_HIGHMEM
+#define IOREMAP_TOP PKMAP_BASE
#else
-#define IOREMAP_TOP KVIRT_TOP
+#define IOREMAP_TOP FIXADDR_START
#endif
+/* PPC32 shares vmalloc area with ioremap */
+#define IOREMAP_START VMALLOC_START
+#define IOREMAP_END VMALLOC_END
+
/*
* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 16MB value just means that there will be a 64MB "hole" after the
@@ -195,14 +193,17 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
#define VMALLOC_END ioremap_bot
+#endif
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
-extern unsigned long ioremap_bot;
-
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS 0
diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
index f254de956d6a..90dd3a3fc8c7 100644
--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -63,6 +63,14 @@
* because that would require an expensive read/modify write of the AMR.
*/
+static inline unsigned long get_kuap(void)
+{
+ if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
+ return 0;
+
+ return mfspr(SPRN_AMR);
+}
+
static inline void set_kuap(unsigned long value)
{
if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
@@ -77,25 +85,43 @@ static inline void set_kuap(unsigned long value)
isync();
}
-static inline void allow_user_access(void __user *to, const void __user *from,
- unsigned long size)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
{
// This is written so we can resolve to a single case at build time
- if (__builtin_constant_p(to) && to == NULL)
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
+ if (dir == KUAP_READ)
set_kuap(AMR_KUAP_BLOCK_WRITE);
- else if (__builtin_constant_p(from) && from == NULL)
+ else if (dir == KUAP_WRITE)
set_kuap(AMR_KUAP_BLOCK_READ);
- else
+ else if (dir == KUAP_READ_WRITE)
set_kuap(0);
+ else
+ BUILD_BUG();
}
static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size)
+ unsigned long size, unsigned long dir)
+{
+ set_kuap(AMR_KUAP_BLOCKED);
+}
+
+static inline unsigned long prevent_user_access_return(void)
{
+ unsigned long flags = get_kuap();
+
set_kuap(AMR_KUAP_BLOCKED);
+
+ return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
+{
+ set_kuap(flags);
}
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
(regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
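prevent_user_access_return() and restore_user_access() exist so code that may run with a user window open (interrupt entry, for instance) can block access and later reinstate exactly the previous AMR state. A sketch using nothing beyond the helpers above:

static void run_with_kuap_blocked(void (*fn)(void))
{
	/* Block user access and remember the previous AMR value. */
	unsigned long amr = prevent_user_access_return();

	fn();				/* runs fully protected */

	restore_user_access(amr);	/* put the saved state back */
}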
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 15b75005bc34..3fa1b962dc27 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -600,8 +600,11 @@ extern void slb_set_size(u16 size);
*
*/
#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2)
+
+// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
#define MIN_USER_CONTEXT (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
- MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT)
+ MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
+
/*
* For platforms that support on 65bit VA we limit the context bits
*/
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 23b83d3593e2..bb3deb76c951 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -206,7 +206,6 @@ extern int mmu_io_psize;
void mmu_early_init_devtree(void);
void hash__early_init_devtree(void);
void radix__early_init_devtree(void);
-extern void radix_init_native(void);
extern void hash__early_init_mmu(void);
extern void radix__early_init_mmu(void);
static inline void early_init_mmu(void)
@@ -238,9 +237,6 @@ static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
first_memblock_size);
}
-extern int (*register_process_table)(unsigned long base, unsigned long page_size,
- unsigned long tbl_size);
-
#ifdef CONFIG_PPC_PSERIES
extern void radix_init_pseries(void);
#else
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index d5a44912902f..a41e91bd0580 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -19,9 +19,7 @@ extern struct vmemmap_backing *vmemmap_list;
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
-#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
-#endif
void pte_frag_destroy(void *pte_frag);
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
@@ -122,11 +120,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
- /*
- * By now all the pud entries should be none entries. So go
- * ahead and flush the page walk cache
- */
- flush_tlb_pgtable(tlb, address);
pgtable_free_tlb(tlb, pud, PUD_INDEX);
}
@@ -143,11 +136,6 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
- /*
- * By now all the pud entries should be none entries. So go
- * ahead and flush the page walk cache
- */
- flush_tlb_pgtable(tlb, address);
return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}
@@ -166,11 +154,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- /*
- * By now all the pud entries should be none entries. So go
- * ahead and flush the page walk cache
- */
- flush_tlb_pgtable(tlb, address);
pgtable_free_tlb(tlb, table, PTE_INDEX);
}
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index a069dfcac9a9..4e697bc2f4cd 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -70,9 +70,6 @@ static inline int get_hugepd_cache_index(int index)
/* should not reach */
}
-#else /* !CONFIG_HUGETLB_PAGE */
-static inline int pmd_huge(pmd_t pmd) { return 0; }
-static inline int pud_huge(pud_t pud) { return 0; }
#endif /* CONFIG_HUGETLB_PAGE */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index e3d4dd4ae2fa..34d1018896b3 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -59,9 +59,6 @@ static inline int get_hugepd_cache_index(int index)
BUG();
}
-#else /* !CONFIG_HUGETLB_PAGE */
-static inline int pmd_huge(pmd_t pmd) { return 0; }
-static inline int pud_huge(pud_t pud) { return 0; }
#endif /* CONFIG_HUGETLB_PAGE */
static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 8308f32e9782..201a69e6a355 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -289,7 +289,6 @@ extern unsigned long __kernel_io_end;
#define KERN_IO_END __kernel_io_end
extern struct page *vmemmap;
-extern unsigned long ioremap_bot;
extern unsigned long pci_io_base;
#endif /* __ASSEMBLY__ */
@@ -317,6 +316,7 @@ extern unsigned long pci_io_base;
#define PHB_IO_BASE (ISA_IO_END)
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
+#define IOREMAP_START (ioremap_bot)
#define IOREMAP_END (KERN_IO_END)
/* Advertise special mapping type for AGP */
@@ -608,8 +608,10 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
*/
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
- return __pte((((pte_basic_t)(pfn) << PAGE_SHIFT) & PTE_RPN_MASK) |
- pgprot_val(pgprot));
+ VM_BUG_ON(pfn >> (64 - PAGE_SHIFT));
+ VM_BUG_ON((pfn << PAGE_SHIFT) & ~PTE_RPN_MASK);
+
+ return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot));
}
static inline unsigned long pte_pfn(pte_t pte)
@@ -1353,18 +1355,21 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
* Like pmd_huge() and pmd_large(), but works regardless of config options
*/
#define pmd_is_leaf pmd_is_leaf
+#define pmd_leaf pmd_is_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}
#define pud_is_leaf pud_is_leaf
+#define pud_leaf pud_is_leaf
static inline bool pud_is_leaf(pud_t pud)
{
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}
#define pgd_is_leaf pgd_is_leaf
+#define pgd_leaf pgd_is_leaf
static inline bool pgd_is_leaf(pgd_t pgd)
{
return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
}
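pmd_leaf/pud_leaf/pgd_leaf are the spellings the generic page-table walkers use; aliasing them onto the existing *_is_leaf helpers keeps both names valid. An illustrative walker fragment (not from this patch):

static int count_leaf_pmds(pmd_t *pmds, int n)
{
	int i, leaves = 0;

	for (i = 0; i < n; i++)
		if (pmd_leaf(pmds[i]))	/* huge mapping at PMD level */
			leaves++;

	return leaves;
}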
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index e04a839cb5b9..d97db3ad9aae 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -254,7 +254,13 @@ extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp);
-extern int radix__has_transparent_hugepage(void);
+static inline int radix__has_transparent_hugepage(void)
+{
+ /* For radix 2M at PMD level means thp */
+ if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
+ return 1;
+ return 0;
+}
#endif
extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
@@ -266,9 +272,6 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
pgprot_t flags, unsigned int psz);
-extern int radix__ioremap_range(unsigned long ea, phys_addr_t pa,
- unsigned long size, pgprot_t prot, int nid);
-
static inline unsigned long radix__get_tree_size(void)
{
unsigned long rts_field;
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 05147cecb8df..ca8db193ae38 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -17,8 +17,8 @@ extern void radix__flush_tlb_lpid_page(unsigned int lpid,
unsigned long addr,
unsigned long page_size);
extern void radix__flush_pwc_lpid(unsigned int lpid);
-extern void radix__flush_tlb_lpid(unsigned int lpid);
-extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid);
+extern void radix__flush_all_lpid(unsigned int lpid);
+extern void radix__flush_all_lpid_guest(unsigned int lpid);
#else
static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); };
static inline void radix__flush_tlb_lpid_page(unsigned int lpid,
@@ -31,11 +31,11 @@ static inline void radix__flush_pwc_lpid(unsigned int lpid)
{
WARN_ON(1);
}
-static inline void radix__flush_tlb_lpid(unsigned int lpid)
+static inline void radix__flush_all_lpid(unsigned int lpid)
{
WARN_ON(1);
}
-static inline void radix__local_flush_tlb_lpid_guest(unsigned int lpid)
+static inline void radix__flush_all_lpid_guest(unsigned int lpid)
{
WARN_ON(1);
}
@@ -73,6 +73,4 @@ extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
extern void radix__flush_tlb_all(void);
-extern void radix__local_flush_tlb_lpid(unsigned int lpid);
-
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index ebf572ea621e..dcb5c3839d2f 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -147,19 +147,12 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
flush_tlb_page(vma, address);
}
-/*
- * flush the page walk cache for the address
- */
-static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
-{
- /*
- * Flush the page table walk cache on freeing a page table. We already
- * have marked the upper/higher level page table entry none by now.
- * So it is safe to flush PWC here.
- */
- if (!radix_enabled())
- return;
+extern bool tlbie_capable;
+extern bool tlbie_enabled;
- radix__flush_tlb_pwc(tlb, address);
+static inline bool cputlb_use_tlbie(void)
+{
+ return tlbie_enabled;
}
+
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
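cputlb_use_tlbie() centralises the choice between a broadcast tlbie and per-CPU tlbiel: tlbie_capable reflects what the hardware/hypervisor allows, while tlbie_enabled is the runtime knob. A sketch of the branch a flush routine takes (both invalidate helpers below are placeholder names, not functions added by this patch):

static void flush_one_page_sketch(unsigned long va)
{
	if (cputlb_use_tlbie())
		do_tlbie(va);		/* one global invalidate */
	else
		do_tlbiel_all_cpus(va);	/* local tlbiel on each CPU */
}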
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index 6436b65ac7bc..0e1263455d73 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -26,5 +26,16 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index fed7e6241349..338f36cd9934 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -5,14 +5,6 @@
#include <asm/asm-compat.h>
-/*
- * Define an illegal instr to trap on the bug.
- * We don't use 0 because that marks the end of a function
- * in the ELF ABI. That's "Boo Boo" in case you wonder...
- */
-#define BUG_OPCODE .long 0x00b00b00 /* For asm */
-#define BUG_ILLEGAL_INSTR "0x00b00b00" /* For BUG macro */
-
#ifdef CONFIG_BUG
#ifdef __ASSEMBLY__
@@ -57,6 +49,15 @@
".previous\n"
#endif
+#define BUG_ENTRY(insn, flags, ...) \
+ __asm__ __volatile__( \
+ "1: " insn "\n" \
+ _EMIT_BUG_ENTRY \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (flags), \
+ "i" (sizeof(struct bug_entry)), \
+ ##__VA_ARGS__)
+
/*
* BUG_ON() and WARN_ON() do their best to cooperate with compile-time
* optimisations. However depending on the complexity of the condition
@@ -64,11 +65,7 @@
*/
#define BUG() do { \
- __asm__ __volatile__( \
- "1: twi 31,0,0\n" \
- _EMIT_BUG_ENTRY \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (0), "i" (sizeof(struct bug_entry))); \
+ BUG_ENTRY("twi 31, 0, 0", 0); \
unreachable(); \
} while (0)
@@ -77,23 +74,11 @@
if (x) \
BUG(); \
} else { \
- __asm__ __volatile__( \
- "1: "PPC_TLNEI" %4,0\n" \
- _EMIT_BUG_ENTRY \
- : : "i" (__FILE__), "i" (__LINE__), "i" (0), \
- "i" (sizeof(struct bug_entry)), \
- "r" ((__force long)(x))); \
+ BUG_ENTRY(PPC_TLNEI " %4, 0", 0, "r" ((__force long)(x))); \
} \
} while (0)
-#define __WARN_FLAGS(flags) do { \
- __asm__ __volatile__( \
- "1: twi 31,0,0\n" \
- _EMIT_BUG_ENTRY \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (BUGFLAG_WARNING|(flags)), \
- "i" (sizeof(struct bug_entry))); \
-} while (0)
+#define __WARN_FLAGS(flags) BUG_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags))
#define WARN_ON(x) ({ \
int __ret_warn_on = !!(x); \
@@ -101,13 +86,9 @@
if (__ret_warn_on) \
__WARN(); \
} else { \
- __asm__ __volatile__( \
- "1: "PPC_TLNEI" %4,0\n" \
- _EMIT_BUG_ENTRY \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
- "i" (sizeof(struct bug_entry)), \
- "r" (__ret_warn_on)); \
+ BUG_ENTRY(PPC_TLNEI " %4, 0", \
+ BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN), \
+ "r" (__ret_warn_on)); \
} \
unlikely(__ret_warn_on); \
})
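BUG_ENTRY() folds four nearly identical inline-asm blocks into one; the trap instruction, flags and any extra operands vary per call site. For illustration, the register-operand WARN_ON() case now expands to roughly the following (whitespace simplified; PPC_TLNEI is tdnei on 64-bit, tlnei on 32-bit):

__asm__ __volatile__(
	"1:	tdnei	%4, 0\n"	/* trap if __ret_warn_on != 0 */
	_EMIT_BUG_ENTRY			/* record file/line/flags */
	: : "i" (__FILE__), "i" (__LINE__),
	    "i" (BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN)),
	    "i" (sizeof(struct bug_entry)),
	    "r" (__ret_warn_on));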
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 45e3137ccd71..72b81015cebe 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -55,42 +55,48 @@ struct ppc64_caches {
extern struct ppc64_caches ppc64_caches;
-static inline u32 l1_cache_shift(void)
+static inline u32 l1_dcache_shift(void)
{
return ppc64_caches.l1d.log_block_size;
}
-static inline u32 l1_cache_bytes(void)
+static inline u32 l1_dcache_bytes(void)
{
return ppc64_caches.l1d.block_size;
}
+
+static inline u32 l1_icache_shift(void)
+{
+ return ppc64_caches.l1i.log_block_size;
+}
+
+static inline u32 l1_icache_bytes(void)
+{
+ return ppc64_caches.l1i.block_size;
+}
#else
-static inline u32 l1_cache_shift(void)
+static inline u32 l1_dcache_shift(void)
{
return L1_CACHE_SHIFT;
}
-static inline u32 l1_cache_bytes(void)
+static inline u32 l1_dcache_bytes(void)
{
return L1_CACHE_BYTES;
}
+
+static inline u32 l1_icache_shift(void)
+{
+ return L1_CACHE_SHIFT;
+}
+
+static inline u32 l1_icache_bytes(void)
+{
+ return L1_CACHE_BYTES;
+}
+
#endif
-#endif /* ! __ASSEMBLY__ */
-
-#if defined(__ASSEMBLY__)
-/*
- * For a snooping icache, we still need a dummy icbi to purge all the
- * prefetched instructions from the ifetch buffers. We also need a sync
- * before the icbi to order the the actual stores to memory that might
- * have modified instructions with the icbi.
- */
-#define PURGE_PREFETCHED_INS \
- sync; \
- icbi 0,r3; \
- sync; \
- isync
-#else
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#ifdef CONFIG_PPC_BOOK3S_32
@@ -124,6 +130,17 @@ static inline void dcbst(void *addr)
{
__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
}
+
+static inline void icbi(void *addr)
+{
+ asm volatile ("icbi 0, %0" : : "r"(addr) : "memory");
+}
+
+static inline void iccci(void *addr)
+{
+ asm volatile ("iccci 0, %0" : : "r"(addr) : "memory");
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */
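With separate i-cache geometry accessors and the new icbi() helper, an instruction-cache invalidation loop can mirror the d-cache loops in cacheflush.h. A sketch assuming only the helpers above (this function is illustrative, not added by the patch):

static void invalidate_icache_range_sketch(unsigned long start,
					   unsigned long stop)
{
	unsigned long shift = l1_icache_shift();
	unsigned long bytes = l1_icache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		icbi(addr);	/* invalidate one i-cache block */

	/* Order the icbi against subsequent instruction fetch. */
	asm volatile("sync; isync" ::: "memory");
}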
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index eef388f2659f..4a1c9f0200e1 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -42,29 +42,25 @@ extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-extern void flush_icache_range(unsigned long, unsigned long);
+void flush_icache_range(unsigned long start, unsigned long stop);
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr,
int len);
-extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
-#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
-extern void __flush_dcache_icache_phys(unsigned long physaddr);
-#else
-static inline void __flush_dcache_icache_phys(unsigned long physaddr)
-{
- BUG();
-}
-#endif
-
-/*
- * Write any modified data cache blocks out to memory and invalidate them.
- * Does not invalidate the corresponding instruction cache blocks.
+void __flush_dcache_icache(void *page);
+
+/**
+ * flush_dcache_range(): Write any modified data cache blocks out to memory and
+ * invalidate them. Does not invalidate the corresponding instruction cache
+ * blocks.
+ *
+ * @start: the start address
+ * @stop: the stop address (exclusive)
*/
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
- unsigned long shift = l1_cache_shift();
- unsigned long bytes = l1_cache_bytes();
+ unsigned long shift = l1_dcache_shift();
+ unsigned long bytes = l1_dcache_bytes();
void *addr = (void *)(start & ~(bytes - 1));
unsigned long size = stop - (unsigned long)addr + (bytes - 1);
unsigned long i;
@@ -89,8 +85,8 @@ static inline void flush_dcache_range(unsigned long start, unsigned long stop)
*/
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
- unsigned long shift = l1_cache_shift();
- unsigned long bytes = l1_cache_bytes();
+ unsigned long shift = l1_dcache_shift();
+ unsigned long bytes = l1_dcache_bytes();
void *addr = (void *)(start & ~(bytes - 1));
unsigned long size = stop - (unsigned long)addr + (bytes - 1);
unsigned long i;
@@ -108,8 +104,8 @@ static inline void clean_dcache_range(unsigned long start, unsigned long stop)
static inline void invalidate_dcache_range(unsigned long start,
unsigned long stop)
{
- unsigned long shift = l1_cache_shift();
- unsigned long bytes = l1_cache_bytes();
+ unsigned long shift = l1_dcache_shift();
+ unsigned long bytes = l1_dcache_bytes();
void *addr = (void *)(start & ~(bytes - 1));
unsigned long size = stop - (unsigned long)addr + (bytes - 1);
unsigned long i;
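A typical flush_dcache_range() caller, for context: pushing a freshly written buffer out to memory before a device reads it on a non-cache-coherent platform (buffer and length are assumptions of the example):

static void publish_buffer(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;

	/* dcbf every d-cache block covering buf, then sync. */
	flush_dcache_range(start, start + len);
}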
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 74d0db511099..3e3cdfaa76c6 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -96,23 +96,6 @@ typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
-/*
- * A pointer passed in from user mode. This should not
- * be used for syscall parameters, just declare them
- * as pointers because the syscall entry code will have
- * appropriately converted them already.
- */
-
-static inline void __user *compat_ptr(compat_uptr_t uptr)
-{
- return (void __user *)(unsigned long)uptr;
-}
-
-static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-{
- return (u32)(unsigned long)uptr;
-}
-
static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = current->thread.regs;
diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h
index 4c24ea8209bb..ce483b0f8a4d 100644
--- a/arch/powerpc/include/asm/cpm.h
+++ b/arch/powerpc/include/asm/cpm.h
@@ -1,171 +1 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __CPM_H
-#define __CPM_H
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/of.h>
-#include <soc/fsl/qe/qe.h>
-
-/*
- * SPI Parameter RAM common to QE and CPM.
- */
-struct spi_pram {
- __be16 rbase; /* Rx Buffer descriptor base address */
- __be16 tbase; /* Tx Buffer descriptor base address */
- u8 rfcr; /* Rx function code */
- u8 tfcr; /* Tx function code */
- __be16 mrblr; /* Max receive buffer length */
- __be32 rstate; /* Internal */
- __be32 rdp; /* Internal */
- __be16 rbptr; /* Internal */
- __be16 rbc; /* Internal */
- __be32 rxtmp; /* Internal */
- __be32 tstate; /* Internal */
- __be32 tdp; /* Internal */
- __be16 tbptr; /* Internal */
- __be16 tbc; /* Internal */
- __be32 txtmp; /* Internal */
- __be32 res; /* Tx temp. */
- __be16 rpbase; /* Relocation pointer (CPM1 only) */
- __be16 res1; /* Reserved */
-};
-
-/*
- * USB Controller pram common to QE and CPM.
- */
-struct usb_ctlr {
- u8 usb_usmod;
- u8 usb_usadr;
- u8 usb_uscom;
- u8 res1[1];
- __be16 usb_usep[4];
- u8 res2[4];
- __be16 usb_usber;
- u8 res3[2];
- __be16 usb_usbmr;
- u8 res4[1];
- u8 usb_usbs;
- /* Fields down below are QE-only */
- __be16 usb_ussft;
- u8 res5[2];
- __be16 usb_usfrn;
- u8 res6[0x22];
-} __attribute__ ((packed));
-
-/*
- * Function code bits, usually generic to devices.
- */
-#ifdef CONFIG_CPM1
-#define CPMFCR_GBL ((u_char)0x00) /* Flag doesn't exist in CPM1 */
-#define CPMFCR_TC2 ((u_char)0x00) /* Flag doesn't exist in CPM1 */
-#define CPMFCR_DTB ((u_char)0x00) /* Flag doesn't exist in CPM1 */
-#define CPMFCR_BDB ((u_char)0x00) /* Flag doesn't exist in CPM1 */
-#else
-#define CPMFCR_GBL ((u_char)0x20) /* Set memory snooping */
-#define CPMFCR_TC2 ((u_char)0x04) /* Transfer code 2 value */
-#define CPMFCR_DTB ((u_char)0x02) /* Use local bus for data when set */
-#define CPMFCR_BDB ((u_char)0x01) /* Use local bus for BD when set */
-#endif
-#define CPMFCR_EB ((u_char)0x10) /* Set big endian byte order */
-
-/* Opcodes common to CPM1 and CPM2
-*/
-#define CPM_CR_INIT_TRX ((ushort)0x0000)
-#define CPM_CR_INIT_RX ((ushort)0x0001)
-#define CPM_CR_INIT_TX ((ushort)0x0002)
-#define CPM_CR_HUNT_MODE ((ushort)0x0003)
-#define CPM_CR_STOP_TX ((ushort)0x0004)
-#define CPM_CR_GRA_STOP_TX ((ushort)0x0005)
-#define CPM_CR_RESTART_TX ((ushort)0x0006)
-#define CPM_CR_CLOSE_RX_BD ((ushort)0x0007)
-#define CPM_CR_SET_GADDR ((ushort)0x0008)
-#define CPM_CR_SET_TIMER ((ushort)0x0008)
-#define CPM_CR_STOP_IDMA ((ushort)0x000b)
-
-/* Buffer descriptors used by many of the CPM protocols. */
-typedef struct cpm_buf_desc {
- ushort cbd_sc; /* Status and Control */
- ushort cbd_datlen; /* Data length in buffer */
- uint cbd_bufaddr; /* Buffer address in host memory */
-} cbd_t;
-
-/* Buffer descriptor control/status used by serial
- */
-
-#define BD_SC_EMPTY (0x8000) /* Receive is empty */
-#define BD_SC_READY (0x8000) /* Transmit is ready */
-#define BD_SC_WRAP (0x2000) /* Last buffer descriptor */
-#define BD_SC_INTRPT (0x1000) /* Interrupt on change */
-#define BD_SC_LAST (0x0800) /* Last buffer in frame */
-#define BD_SC_TC (0x0400) /* Transmit CRC */
-#define BD_SC_CM (0x0200) /* Continuous mode */
-#define BD_SC_ID (0x0100) /* Rec'd too many idles */
-#define BD_SC_P (0x0100) /* xmt preamble */
-#define BD_SC_BR (0x0020) /* Break received */
-#define BD_SC_FR (0x0010) /* Framing error */
-#define BD_SC_PR (0x0008) /* Parity error */
-#define BD_SC_NAK (0x0004) /* NAK - did not respond */
-#define BD_SC_OV (0x0002) /* Overrun */
-#define BD_SC_UN (0x0002) /* Underrun */
-#define BD_SC_CD (0x0001) /* */
-#define BD_SC_CL (0x0001) /* Collision */
-
-/* Buffer descriptor control/status used by Ethernet receive.
- * Common to SCC and FCC.
- */
-#define BD_ENET_RX_EMPTY (0x8000)
-#define BD_ENET_RX_WRAP (0x2000)
-#define BD_ENET_RX_INTR (0x1000)
-#define BD_ENET_RX_LAST (0x0800)
-#define BD_ENET_RX_FIRST (0x0400)
-#define BD_ENET_RX_MISS (0x0100)
-#define BD_ENET_RX_BC (0x0080) /* FCC Only */
-#define BD_ENET_RX_MC (0x0040) /* FCC Only */
-#define BD_ENET_RX_LG (0x0020)
-#define BD_ENET_RX_NO (0x0010)
-#define BD_ENET_RX_SH (0x0008)
-#define BD_ENET_RX_CR (0x0004)
-#define BD_ENET_RX_OV (0x0002)
-#define BD_ENET_RX_CL (0x0001)
-#define BD_ENET_RX_STATS (0x01ff) /* All status bits */
-
-/* Buffer descriptor control/status used by Ethernet transmit.
- * Common to SCC and FCC.
- */
-#define BD_ENET_TX_READY (0x8000)
-#define BD_ENET_TX_PAD (0x4000)
-#define BD_ENET_TX_WRAP (0x2000)
-#define BD_ENET_TX_INTR (0x1000)
-#define BD_ENET_TX_LAST (0x0800)
-#define BD_ENET_TX_TC (0x0400)
-#define BD_ENET_TX_DEF (0x0200)
-#define BD_ENET_TX_HB (0x0100)
-#define BD_ENET_TX_LC (0x0080)
-#define BD_ENET_TX_RL (0x0040)
-#define BD_ENET_TX_RCMASK (0x003c)
-#define BD_ENET_TX_UN (0x0002)
-#define BD_ENET_TX_CSL (0x0001)
-#define BD_ENET_TX_STATS (0x03ff) /* All status bits */
-
-/* Buffer descriptor control/status used by Transparent mode SCC.
- */
-#define BD_SCC_TX_LAST (0x0800)
-
-/* Buffer descriptor control/status used by I2C.
- */
-#define BD_I2C_START (0x0400)
-
-#ifdef CONFIG_CPM
-int cpm_command(u32 command, u8 opcode);
-#else
-static inline int cpm_command(u32 command, u8 opcode)
-{
- return -ENOSYS;
-}
-#endif /* CONFIG_CPM */
-
-int cpm2_gpiochip_add32(struct device *dev);
-
-#endif
+#include <soc/fsl/cpm.h>
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index d05f0c28e515..40a4d3c6fd99 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -145,12 +145,10 @@ static inline void cpu_feature_keys_init(void) { }
/* Definitions for features that only exist on 32-bit chips */
#ifdef CONFIG_PPC32
-#define CPU_FTR_601 ASM_CONST(0x00001000)
#define CPU_FTR_L2CR ASM_CONST(0x00002000)
#define CPU_FTR_SPEC7450 ASM_CONST(0x00004000)
#define CPU_FTR_TAU ASM_CONST(0x00008000)
#define CPU_FTR_CAN_DOZE ASM_CONST(0x00010000)
-#define CPU_FTR_USE_RTC ASM_CONST(0x00020000)
#define CPU_FTR_L3CR ASM_CONST(0x00040000)
#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x00080000)
#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x00100000)
@@ -160,14 +158,12 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_NEED_COHERENT ASM_CONST(0x01000000)
#define CPU_FTR_NO_BTIC ASM_CONST(0x02000000)
#define CPU_FTR_PPC_LE ASM_CONST(0x04000000)
-#define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x08000000)
#define CPU_FTR_SPE ASM_CONST(0x10000000)
#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x20000000)
#define CPU_FTR_INDEXED_DCR ASM_CONST(0x40000000)
#else /* CONFIG_PPC32 */
/* Define these to 0 for the sake of tests in common code */
-#define CPU_FTR_601 (0)
#define CPU_FTR_PPC_LE (0)
#endif
@@ -213,8 +209,10 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000)
#define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000)
#define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000)
-#define CPU_FTR_P9_TLBIE_BUG LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_P9_TLBIE_STQ_BUG LONG_ASM_CONST(0x0000400000000000)
#define CPU_FTR_P9_TIDR LONG_ASM_CONST(0x0000800000000000)
+#define CPU_FTR_P9_TLBIE_ERAT_BUG LONG_ASM_CONST(0x0001000000000000)
+#define CPU_FTR_P9_RADIX_PREFETCH_BUG LONG_ASM_CONST(0x0002000000000000)
#ifndef __ASSEMBLY__
@@ -294,8 +292,8 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_MAYBE_CAN_NAP 0
#endif
-#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | \
- CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_USE_RTC)
+#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | \
+ CPU_FTR_COHERENT_ICACHE)
#define CPU_FTRS_603 (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_604 (CPU_FTR_COMMON | CPU_FTR_PPC_LE)
@@ -386,7 +384,7 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTRS_47X (CPU_FTRS_440x6)
#define CPU_FTRS_E200 (CPU_FTR_SPE_COMP | \
CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
- CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_NOEXECUTE | \
CPU_FTR_DEBUG_LVL_EXC)
#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | \
CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
@@ -461,9 +459,11 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
- CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
-#define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
-#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
+ CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TLBIE_ERAT_BUG | CPU_FTR_P9_TIDR)
+#define CPU_FTRS_POWER9_DD2_0 (CPU_FTRS_POWER9 | CPU_FTR_P9_RADIX_PREFETCH_BUG)
+#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | \
+ CPU_FTR_P9_RADIX_PREFETCH_BUG | \
+ CPU_FTR_POWER9_DD2_1)
#define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
CPU_FTR_P9_TM_HV_ASSIST | \
CPU_FTR_P9_TM_XER_SO_BUG)
@@ -498,7 +498,9 @@ static inline void cpu_feature_keys_init(void) { }
#else
enum {
CPU_FTRS_POSSIBLE =
-#ifdef CONFIG_PPC_BOOK3S_32
+#ifdef CONFIG_PPC_BOOK3S_601
+ CPU_FTRS_PPC601 |
+#elif defined(CONFIG_PPC_BOOK3S_32)
CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 |
CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX |
@@ -574,8 +576,10 @@ enum {
#else
enum {
CPU_FTRS_ALWAYS =
-#ifdef CONFIG_PPC_BOOK3S_32
- CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
+#ifdef CONFIG_PPC_BOOK3S_601
+ CPU_FTRS_PPC601 &
+#elif defined(CONFIG_PPC_BOOK3S_32)
+ CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 &
CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX &
CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 &
diff --git a/arch/powerpc/include/asm/current.h b/arch/powerpc/include/asm/current.h
index 297827b76169..bbfb94800415 100644
--- a/arch/powerpc/include/asm/current.h
+++ b/arch/powerpc/include/asm/current.h
@@ -16,7 +16,8 @@ static inline struct task_struct *get_current(void)
{
struct task_struct *task;
- __asm__ __volatile__("ld %0,%1(13)"
+ /* get_current can be cached by the compiler, so no volatile */
+ asm ("ld %0,%1(13)"
: "=r" (task)
: "i" (offsetof(struct paca_struct, __current)));
diff --git a/arch/powerpc/include/asm/dma-direct.h b/arch/powerpc/include/asm/dma-direct.h
index a2912b47102c..abc154d784b0 100644
--- a/arch/powerpc/include/asm/dma-direct.h
+++ b/arch/powerpc/include/asm/dma-direct.h
@@ -2,26 +2,13 @@
#ifndef ASM_POWERPC_DMA_DIRECT_H
#define ASM_POWERPC_DMA_DIRECT_H 1
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <=
- min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
-}
-
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- if (!dev)
- return paddr + PCI_DRAM_OFFSET;
return paddr + dev->archdata.dma_offset;
}
static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
{
- if (!dev)
- return daddr - PCI_DRAM_OFFSET;
return daddr - dev->archdata.dma_offset;
}
#endif /* ASM_POWERPC_DMA_DIRECT_H */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
deleted file mode 100644
index 565d6f74b189..000000000000
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2004 IBM
- */
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- /* We don't handle the NULL dev case for ISA for now. We could
- * do it via an out of line call but it is not needed for now. The
- * only ISA DMA device we support is the floppy and we have a hack
- * in the floppy driver directly to get a device for us.
- */
- return NULL;
-}
-
-#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 8aa7c76c2130..6f9b2a12540a 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -88,6 +88,19 @@ struct eeh_pe {
struct list_head child_list; /* List of PEs below this PE */
struct list_head child; /* Memb. child_list/eeh_phb_pe */
struct list_head edevs; /* List of eeh_dev in this PE */
+
+#ifdef CONFIG_STACKTRACE
+ /*
+ * Saved stack trace. When we find a PE freeze in eeh_dev_check_failure
+ * the stack trace is saved here so we can print it in the recovery
+	 * thread if it turns out to be due to a real problem rather than
+ * a hot-remove.
+ *
+ * A max of 64 entries might be overkill, but it also might not be.
+ */
+ unsigned long stack_trace[64];
+ int trace_entries;
+#endif /* CONFIG_STACKTRACE */
};
#define eeh_pe_for_each_dev(pe, edev, tmp) \
@@ -121,6 +134,8 @@ static inline bool eeh_pe_passed(struct eeh_pe *pe)
struct eeh_dev {
int mode; /* EEH mode */
int class_code; /* Class code of the device */
+ int bdfn; /* bdfn of device (for cfg ops) */
+ struct pci_controller *controller;
int pe_config_addr; /* PE config address */
u32 config_space[16]; /* Saved PCI config space */
int pcix_cap; /* Saved PCIx capability */
@@ -136,6 +151,17 @@ struct eeh_dev {
struct pci_dev *physfn; /* Associated SRIOV PF */
};
+/* "fmt" must be a simple literal string */
+#define EEH_EDEV_PRINT(level, edev, fmt, ...) \
+ pr_##level("PCI %04x:%02x:%02x.%x#%04x: EEH: " fmt, \
+ (edev)->controller->global_number, PCI_BUSNO((edev)->bdfn), \
+ PCI_SLOT((edev)->bdfn), PCI_FUNC((edev)->bdfn), \
+ ((edev)->pe ? (edev)->pe_config_addr : 0xffff), ##__VA_ARGS__)
+#define eeh_edev_dbg(edev, fmt, ...) EEH_EDEV_PRINT(debug, (edev), fmt, ##__VA_ARGS__)
+#define eeh_edev_info(edev, fmt, ...) EEH_EDEV_PRINT(info, (edev), fmt, ##__VA_ARGS__)
+#define eeh_edev_warn(edev, fmt, ...) EEH_EDEV_PRINT(warn, (edev), fmt, ##__VA_ARGS__)
+#define eeh_edev_err(edev, fmt, ...) EEH_EDEV_PRINT(err, (edev), fmt, ##__VA_ARGS__)
+
static inline struct pci_dn *eeh_dev_to_pdn(struct eeh_dev *edev)
{
return edev ? edev->pdn : NULL;
@@ -247,7 +273,7 @@ static inline bool eeh_state_active(int state)
== (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
}
-typedef void *(*eeh_edev_traverse_func)(struct eeh_dev *edev, void *flag);
+typedef void (*eeh_edev_traverse_func)(struct eeh_dev *edev, void *flag);
typedef void *(*eeh_pe_traverse_func)(struct eeh_pe *pe, void *flag);
void eeh_set_pe_aux_size(int size);
int eeh_phb_pe_create(struct pci_controller *phb);
@@ -261,20 +287,20 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev);
void eeh_pe_update_time_stamp(struct eeh_pe *pe);
void *eeh_pe_traverse(struct eeh_pe *root,
eeh_pe_traverse_func fn, void *flag);
-void *eeh_pe_dev_traverse(struct eeh_pe *root,
- eeh_edev_traverse_func fn, void *flag);
+void eeh_pe_dev_traverse(struct eeh_pe *root,
+ eeh_edev_traverse_func fn, void *flag);
void eeh_pe_restore_bars(struct eeh_pe *pe);
const char *eeh_pe_loc_get(struct eeh_pe *pe);
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
struct eeh_dev *eeh_dev_init(struct pci_dn *pdn);
void eeh_dev_phb_init_dynamic(struct pci_controller *phb);
-void eeh_probe_devices(void);
+void eeh_show_enabled(void);
int __init eeh_ops_register(struct eeh_ops *ops);
int __exit eeh_ops_unregister(const char *name);
int eeh_check_failure(const volatile void __iomem *token);
int eeh_dev_check_failure(struct eeh_dev *edev);
-void eeh_addr_cache_build(void);
+void eeh_addr_cache_init(void);
void eeh_add_device_early(struct pci_dn *);
void eeh_add_device_tree_early(struct pci_dn *);
void eeh_add_device_late(struct pci_dev *);
@@ -316,7 +342,7 @@ static inline bool eeh_enabled(void)
return false;
}
-static inline void eeh_probe_devices(void) { }
+static inline void eeh_show_enabled(void) { }
static inline void *eeh_dev_init(struct pci_dn *pdn, void *data)
{
@@ -332,7 +358,7 @@ static inline int eeh_check_failure(const volatile void __iomem *token)
#define eeh_dev_check_failure(x) (0)
-static inline void eeh_addr_cache_build(void) { }
+static inline void eeh_addr_cache_init(void) { }
static inline void eeh_add_device_early(struct pci_dn *pdn) { }
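The eeh_edev_* macros added above give per-device, consistently prefixed log lines. A hypothetical caller (not part of this patch), assuming an edev obtained during EEH probing:

/* Hypothetical caller; "edev" is assumed to come from the EEH probe path. */
static void example_log_recovery(struct eeh_dev *edev, int attempts)
{
	/* Expands to pr_info("PCI xxxx:xx:xx.x#xxxx: EEH: ...") */
	eeh_edev_info(edev, "recovered after %d reset attempts", attempts);

	if (attempts > 3)
		eeh_edev_warn(edev, "device required multiple resets");
}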
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 409c9bfb43d9..57c229a86f08 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -175,4 +175,7 @@ do { \
ARCH_DLINFO_CACHE_GEOMETRY; \
} while (0)
+/* Relocate the kernel image to @final_address */
+void relocate(unsigned long final_address);
+
#endif /* _ASM_POWERPC_ELF_H */
diff --git a/arch/powerpc/include/asm/elfnote.h b/arch/powerpc/include/asm/elfnote.h
new file mode 100644
index 000000000000..a201b6e9ae44
--- /dev/null
+++ b/arch/powerpc/include/asm/elfnote.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PowerPC ELF notes.
+ *
+ * Copyright 2019, IBM Corporation
+ */
+
+#ifndef __ASM_POWERPC_ELFNOTE_H__
+#define __ASM_POWERPC_ELFNOTE_H__
+
+/*
+ * These note types should live in an SHT_NOTE section and have
+ * "PowerPC" in the name field.
+ */
+
+/*
+ * The capabilities supported/required by this kernel (bitmap).
+ *
+ * This type uses a bitmap as its "desc" field. Each bit is described
+ * in arch/powerpc/kernel/note.S
+ */
+#define PPC_ELFNOTE_CAPABILITIES 1
+
+#endif /* __ASM_POWERPC_ELFNOTE_H__ */
diff --git a/arch/powerpc/include/asm/error-injection.h b/arch/powerpc/include/asm/error-injection.h
deleted file mode 100644
index 62fd24739852..000000000000
--- a/arch/powerpc/include/asm/error-injection.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-
-#ifndef _ASM_ERROR_INJECTION_H
-#define _ASM_ERROR_INJECTION_H
-
-#include <linux/compiler.h>
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm-generic/error-injection.h>
-
-void override_function_with_return(struct pt_regs *regs);
-
-#endif /* _ASM_ERROR_INJECTION_H */
diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h
new file mode 100644
index 000000000000..c814a2b55389
--- /dev/null
+++ b/arch/powerpc/include/asm/fadump-internal.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Firmware-Assisted Dump internal code.
+ *
+ * Copyright 2011, Mahesh Salgaonkar, IBM Corporation.
+ * Copyright 2019, Hari Bathini, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_FADUMP_INTERNAL_H
+#define _ASM_POWERPC_FADUMP_INTERNAL_H
+
+/* Maximum number of memory regions kernel supports */
+#define FADUMP_MAX_MEM_REGS 128
+
+#ifndef CONFIG_PRESERVE_FA_DUMP
+
+/* The upper limit percentage for user specified boot memory size (25%) */
+#define MAX_BOOT_MEM_RATIO 4
+
+#define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt)
+
+/* Alignment per CMA requirement. */
+#define FADUMP_CMA_ALIGNMENT (PAGE_SIZE << \
+ max_t(unsigned long, MAX_ORDER - 1, \
+ pageblock_order))
+
+/* FAD commands */
+#define FADUMP_REGISTER 1
+#define FADUMP_UNREGISTER 2
+#define FADUMP_INVALIDATE 3
+
+/*
+ * Copy the ASCII values of the first 8 characters of a string into a
+ * u64 variable, one byte per character, most significant byte first.
+ * e.g.
+ * The string "FADMPINF" will be converted into 0x4641444d50494e46
+ */
+static inline u64 fadump_str_to_u64(const char *str)
+{
+ u64 val = 0;
+ int i;
+
+ for (i = 0; i < sizeof(val); i++)
+ val = (*str) ? (val << 8) | *str++ : val << 8;
+ return val;
+}
+
+#define FADUMP_CPU_UNKNOWN (~((u32)0))
+
+#define FADUMP_CRASH_INFO_MAGIC fadump_str_to_u64("FADMPINF")
+
+/* fadump crash info structure */
+struct fadump_crash_info_header {
+ u64 magic_number;
+ u64 elfcorehdr_addr;
+ u32 crashing_cpu;
+ struct pt_regs regs;
+ struct cpumask online_mask;
+};
+
+struct fadump_memory_range {
+ u64 base;
+ u64 size;
+};
+
+/* fadump memory ranges info */
+struct fadump_mrange_info {
+ char name[16];
+ struct fadump_memory_range *mem_ranges;
+ u32 mem_ranges_sz;
+ u32 mem_range_cnt;
+ u32 max_mem_ranges;
+};
+
+/* Platform specific callback functions */
+struct fadump_ops;
+
+/* Firmware-assisted dump configuration details. */
+struct fw_dump {
+ unsigned long reserve_dump_area_start;
+ unsigned long reserve_dump_area_size;
+ /* cmd line option during boot */
+ unsigned long reserve_bootvar;
+
+ unsigned long cpu_state_data_size;
+ u64 cpu_state_dest_vaddr;
+ u32 cpu_state_data_version;
+ u32 cpu_state_entry_size;
+
+ unsigned long hpte_region_size;
+
+ unsigned long boot_memory_size;
+ u64 boot_mem_dest_addr;
+ u64 boot_mem_addr[FADUMP_MAX_MEM_REGS];
+ u64 boot_mem_sz[FADUMP_MAX_MEM_REGS];
+ u64 boot_mem_top;
+ u64 boot_mem_regs_cnt;
+
+ unsigned long fadumphdr_addr;
+ unsigned long cpu_notes_buf_vaddr;
+ unsigned long cpu_notes_buf_size;
+
+ /*
+ * Maximum size supported by firmware to copy from source to
+ * destination address per entry.
+ */
+ u64 max_copy_size;
+ u64 kernel_metadata;
+
+ int ibm_configure_kernel_dump;
+
+ unsigned long fadump_enabled:1;
+ unsigned long fadump_supported:1;
+ unsigned long dump_active:1;
+ unsigned long dump_registered:1;
+ unsigned long nocma:1;
+
+ struct fadump_ops *ops;
+};
+
+struct fadump_ops {
+ u64 (*fadump_init_mem_struct)(struct fw_dump *fadump_conf);
+ u64 (*fadump_get_metadata_size)(void);
+ int (*fadump_setup_metadata)(struct fw_dump *fadump_conf);
+ u64 (*fadump_get_bootmem_min)(void);
+ int (*fadump_register)(struct fw_dump *fadump_conf);
+ int (*fadump_unregister)(struct fw_dump *fadump_conf);
+ int (*fadump_invalidate)(struct fw_dump *fadump_conf);
+ void (*fadump_cleanup)(struct fw_dump *fadump_conf);
+ int (*fadump_process)(struct fw_dump *fadump_conf);
+ void (*fadump_region_show)(struct fw_dump *fadump_conf,
+ struct seq_file *m);
+ void (*fadump_trigger)(struct fadump_crash_info_header *fdh,
+ const char *msg);
+};
+
+/* Helper functions */
+s32 fadump_setup_cpu_notes_buf(u32 num_cpus);
+void fadump_free_cpu_notes_buf(void);
+u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs);
+void fadump_update_elfcore_header(char *bufp);
+bool is_fadump_boot_mem_contiguous(void);
+bool is_fadump_reserved_mem_contiguous(void);
+
+#else /* !CONFIG_PRESERVE_FA_DUMP */
+
+/* Firmware-assisted dump configuration details. */
+struct fw_dump {
+ u64 boot_mem_top;
+ u64 dump_active;
+};
+
+#endif /* CONFIG_PRESERVE_FA_DUMP */
+
+#ifdef CONFIG_PPC_PSERIES
+extern void rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node);
+#else
+static inline void
+rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) { }
+#endif
+
+#ifdef CONFIG_PPC_POWERNV
+extern void opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node);
+#else
+static inline void
+opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) { }
+#endif
+
+#endif /* _ASM_POWERPC_FADUMP_INTERNAL_H */
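The packing performed by fadump_str_to_u64() is easy to verify in isolation; the following stand-alone userspace sketch (not kernel code) mirrors the same loop:

#include <stdio.h>
#include <stdint.h>

/* Same loop as fadump_str_to_u64(): pack up to 8 ASCII bytes, MSB first. */
static uint64_t str_to_u64(const char *str)
{
	uint64_t val = 0;
	int i;

	for (i = 0; i < (int)sizeof(val); i++)
		val = (*str) ? (val << 8) | (uint8_t)*str++ : val << 8;
	return val;
}

int main(void)
{
	/* Prints 0x4641444d50494e46, i.e. FADUMP_CRASH_INFO_MAGIC. */
	printf("%#llx\n", (unsigned long long)str_to_u64("FADMPINF"));
	return 0;
}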
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 17d9b6acaf63..526a6a647312 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -6,196 +6,14 @@
* Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
*/
-#ifndef __PPC64_FA_DUMP_H__
-#define __PPC64_FA_DUMP_H__
+#ifndef _ASM_POWERPC_FADUMP_H
+#define _ASM_POWERPC_FADUMP_H
#ifdef CONFIG_FA_DUMP
-/*
- * The RMA region will be saved for later dumping when kernel crashes.
- * RMA is Real Mode Area, the first block of logical memory address owned
- * by logical partition, containing the storage that may be accessed with
- * translate off.
- */
-#define RMA_START 0x0
-#define RMA_END (ppc64_rma_size)
-
-/*
- * On some Power systems where RMO is 128MB, it still requires minimum of
- * 256MB for kernel to boot successfully. When kdump infrastructure is
- * configured to save vmcore over network, we run into OOM issue while
- * loading modules related to network setup. Hence we need aditional 64M
- * of memory to avoid OOM issue.
- */
-#define MIN_BOOT_MEM (((RMA_END < (0x1UL << 28)) ? (0x1UL << 28) : RMA_END) \
- + (0x1UL << 26))
-
-/* The upper limit percentage for user specified boot memory size (25%) */
-#define MAX_BOOT_MEM_RATIO 4
-
-#define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt)
-
-/* Alignement per CMA requirement. */
-#define FADUMP_CMA_ALIGNMENT (PAGE_SIZE << \
- max_t(unsigned long, MAX_ORDER - 1, pageblock_order))
-
-/* Firmware provided dump sections */
-#define FADUMP_CPU_STATE_DATA 0x0001
-#define FADUMP_HPTE_REGION 0x0002
-#define FADUMP_REAL_MODE_REGION 0x0011
-
-/* Dump request flag */
-#define FADUMP_REQUEST_FLAG 0x00000001
-
-/* FAD commands */
-#define FADUMP_REGISTER 1
-#define FADUMP_UNREGISTER 2
-#define FADUMP_INVALIDATE 3
-
-/* Dump status flag */
-#define FADUMP_ERROR_FLAG 0x2000
-
-#define FADUMP_CPU_ID_MASK ((1UL << 32) - 1)
-
-#define CPU_UNKNOWN (~((u32)0))
-
-/* Utility macros */
-#define SKIP_TO_NEXT_CPU(reg_entry) \
-({ \
- while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) \
- reg_entry++; \
- reg_entry++; \
-})
-
extern int crashing_cpu;
-/* Kernel Dump section info */
-struct fadump_section {
- __be32 request_flag;
- __be16 source_data_type;
- __be16 error_flags;
- __be64 source_address;
- __be64 source_len;
- __be64 bytes_dumped;
- __be64 destination_address;
-};
-
-/* ibm,configure-kernel-dump header. */
-struct fadump_section_header {
- __be32 dump_format_version;
- __be16 dump_num_sections;
- __be16 dump_status_flag;
- __be32 offset_first_dump_section;
-
- /* Fields for disk dump option. */
- __be32 dd_block_size;
- __be64 dd_block_offset;
- __be64 dd_num_blocks;
- __be32 dd_offset_disk_path;
-
- /* Maximum time allowed to prevent an automatic dump-reboot. */
- __be32 max_time_auto;
-};
-
-/*
- * Firmware Assisted dump memory structure. This structure is required for
- * registering future kernel dump with power firmware through rtas call.
- *
- * No disk dump option. Hence disk dump path string section is not included.
- */
-struct fadump_mem_struct {
- struct fadump_section_header header;
-
- /* Kernel dump sections */
- struct fadump_section cpu_state_data;
- struct fadump_section hpte_region;
- struct fadump_section rmr_region;
-};
-
-/* Firmware-assisted dump configuration details. */
-struct fw_dump {
- unsigned long cpu_state_data_size;
- unsigned long hpte_region_size;
- unsigned long boot_memory_size;
- unsigned long reserve_dump_area_start;
- unsigned long reserve_dump_area_size;
- /* cmd line option during boot */
- unsigned long reserve_bootvar;
-
- unsigned long fadumphdr_addr;
- unsigned long cpu_notes_buf;
- unsigned long cpu_notes_buf_size;
-
- int ibm_configure_kernel_dump;
-
- unsigned long fadump_enabled:1;
- unsigned long fadump_supported:1;
- unsigned long dump_active:1;
- unsigned long dump_registered:1;
- unsigned long nocma:1;
-};
-
-/*
- * Copy the ascii values for first 8 characters from a string into u64
- * variable at their respective indexes.
- * e.g.
- * The string "FADMPINF" will be converted into 0x4641444d50494e46
- */
-static inline u64 str_to_u64(const char *str)
-{
- u64 val = 0;
- int i;
-
- for (i = 0; i < sizeof(val); i++)
- val = (*str) ? (val << 8) | *str++ : val << 8;
- return val;
-}
-#define STR_TO_HEX(x) str_to_u64(x)
-#define REG_ID(x) str_to_u64(x)
-
-#define FADUMP_CRASH_INFO_MAGIC STR_TO_HEX("FADMPINF")
-#define REGSAVE_AREA_MAGIC STR_TO_HEX("REGSAVE")
-
-/* The firmware-assisted dump format.
- *
- * The register save area is an area in the partition's memory used to preserve
- * the register contents (CPU state data) for the active CPUs during a firmware
- * assisted dump. The dump format contains register save area header followed
- * by register entries. Each list of registers for a CPU starts with
- * "CPUSTRT" and ends with "CPUEND".
- */
-
-/* Register save area header. */
-struct fadump_reg_save_area_header {
- __be64 magic_number;
- __be32 version;
- __be32 num_cpu_offset;
-};
-
-/* Register entry. */
-struct fadump_reg_entry {
- __be64 reg_id;
- __be64 reg_value;
-};
-
-/* fadump crash info structure */
-struct fadump_crash_info_header {
- u64 magic_number;
- u64 elfcorehdr_addr;
- u32 crashing_cpu;
- struct pt_regs regs;
- struct cpumask online_mask;
-};
-
-struct fad_crash_memory_ranges {
- unsigned long long base;
- unsigned long long size;
-};
-
extern int is_fadump_memory_area(u64 addr, ulong size);
-extern int early_init_dt_scan_fw_dump(unsigned long node,
- const char *uname, int depth, void *data);
-extern int fadump_reserve_mem(void);
extern int setup_fadump(void);
extern int is_fadump_active(void);
extern int should_fadump_crash(void);
@@ -207,5 +25,11 @@ static inline int is_fadump_active(void) { return 0; }
static inline int should_fadump_crash(void) { return 0; }
static inline void crash_fadump(struct pt_regs *regs, const char *str) { }
static inline void fadump_cleanup(void) { }
+#endif /* !CONFIG_FA_DUMP */
+
+#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
+extern int early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
+ int depth, void *data);
+extern int fadump_reserve_mem(void);
#endif
-#endif
+#endif /* _ASM_POWERPC_FADUMP_H */
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index faeca8b76c8c..ca33f4ef6cb4 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -33,7 +33,7 @@
#define FW_FEATURE_LLAN ASM_CONST(0x0000000000010000)
#define FW_FEATURE_BULK_REMOVE ASM_CONST(0x0000000000020000)
#define FW_FEATURE_XDABR ASM_CONST(0x0000000000040000)
-#define FW_FEATURE_MULTITCE ASM_CONST(0x0000000000080000)
+#define FW_FEATURE_PUT_TCE_IND ASM_CONST(0x0000000000080000)
#define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000)
#define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000)
#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000)
@@ -50,6 +50,8 @@
#define FW_FEATURE_DRC_INFO ASM_CONST(0x0000000800000000)
#define FW_FEATURE_BLOCK_REMOVE ASM_CONST(0x0000001000000000)
#define FW_FEATURE_PAPR_SCM ASM_CONST(0x0000002000000000)
+#define FW_FEATURE_ULTRAVISOR ASM_CONST(0x0000004000000000)
+#define FW_FEATURE_STUFF_TCE ASM_CONST(0x0000008000000000)
#ifndef __ASSEMBLY__
@@ -62,15 +64,16 @@ enum {
FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
- FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
+ FW_FEATURE_PUT_TCE_IND | FW_FEATURE_STUFF_TCE |
+ FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN |
FW_FEATURE_HPT_RESIZE | FW_FEATURE_DRMEM_V2 |
FW_FEATURE_DRC_INFO | FW_FEATURE_BLOCK_REMOVE |
- FW_FEATURE_PAPR_SCM,
+ FW_FEATURE_PAPR_SCM | FW_FEATURE_ULTRAVISOR,
FW_FEATURE_PSERIES_ALWAYS = 0,
- FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL,
+ FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_ULTRAVISOR,
FW_FEATURE_POWERNV_ALWAYS = 0,
FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 0cfc365d814b..2ef155a3c821 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -15,6 +15,7 @@
#define _ASM_FIXMAP_H
#ifndef __ASSEMBLY__
+#include <linux/sizes.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
@@ -63,7 +64,22 @@ enum fixed_addresses {
FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
FIX_IMMR_SIZE,
#endif
+#ifdef CONFIG_PPC_83xx
+ /* For IMMR we need an aligned 2M area */
+#define FIX_IMMR_SIZE (SZ_2M / PAGE_SIZE)
+ FIX_IMMR_START,
+ FIX_IMMR_BASE = __ALIGN_MASK(FIX_IMMR_START, FIX_IMMR_SIZE - 1) - 1 +
+ FIX_IMMR_SIZE,
+#endif
/* FIX_PCIE_MCFG, */
+ __end_of_permanent_fixed_addresses,
+
+#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE)
+#define FIX_BTMAPS_SLOTS 16
+#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
__end_of_fixed_addresses
};
@@ -71,14 +87,22 @@ enum fixed_addresses {
#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG
+#define FIXMAP_PAGE_IO PAGE_KERNEL_NCG
#include <asm-generic/fixmap.h>
static inline void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
- map_kernel_page(fix_to_virt(idx), phys, flags);
+ if (__builtin_constant_p(idx))
+ BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+ else if (WARN_ON(idx >= __end_of_fixed_addresses))
+ return;
+
+ map_kernel_page(__fix_to_virt(idx), phys, flags);
}
+#define __early_set_fixmap __set_fixmap
+
#endif /* !__ASSEMBLY__ */
#endif
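A quick sanity check of the early-ioremap sizing introduced above (my arithmetic, assuming 4 KiB pages):

/*
 * With PAGE_SIZE = 4 KiB:
 *   NR_FIX_BTMAPS    = SZ_256K / 4K = 64 pages (256 KiB per slot)
 *   FIX_BTMAPS_SLOTS = 16 concurrent early mappings
 *   TOTAL_FIX_BTMAPS = 64 * 16 = 1024 fixmap entries (4 MiB total),
 * reserved between FIX_BTMAP_END and FIX_BTMAP_BEGIN, just below the
 * permanent fixed addresses.
 */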
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index 3dfb80b86561..f54a08a2cd70 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -8,6 +8,8 @@
#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
#ifdef __ASSEMBLY__
/* Based off of objdump output from glibc */
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 3a6aa57b9d90..bc7d9d06a6d9 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -35,7 +35,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
{
int oldval = 0, ret;
- allow_write_to_user(uaddr, sizeof(*uaddr));
+ allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
pagefault_disable();
switch (op) {
@@ -60,10 +60,9 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
pagefault_enable();
- if (!ret)
- *oval = oldval;
+ *oval = oldval;
- prevent_write_to_user(uaddr, sizeof(*uaddr));
+ prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
return ret;
}
@@ -77,7 +76,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(uaddr, sizeof(u32)))
return -EFAULT;
- allow_write_to_user(uaddr, sizeof(*uaddr));
+ allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
+
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
@@ -98,7 +98,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "cc", "memory");
*uval = prev;
- prevent_write_to_user(uaddr, sizeof(*uaddr));
+ prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
+
return ret;
}
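The futex changes follow a fixed bracketing pattern: open a combined read/write user-access window, perform the access, close the window. A minimal hypothetical helper (not in the patch) using the same shape:

/* Hypothetical helper following the same KUAP bracketing as above. */
static inline int update_user_u32(u32 __user *uaddr, u32 val)
{
	int ret;

	if (!access_ok(uaddr, sizeof(*uaddr)))
		return -EFAULT;

	allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
	ret = __put_user(val, uaddr);
	prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));

	return ret;
}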
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index a466765709a9..2dabcf668292 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -169,47 +169,6 @@ name:
#define ABS_ADDR(label) (label - fs_label + fs_start)
-#define EXC_REAL_BEGIN(name, start, size) \
- FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)
-
-#define EXC_REAL_END(name, start, size) \
- FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)
-
-#define EXC_VIRT_BEGIN(name, start, size) \
- FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)
-
-#define EXC_VIRT_END(name, start, size) \
- FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)
-
-#define EXC_COMMON_BEGIN(name) \
- USE_TEXT_SECTION(); \
- .balign IFETCH_ALIGN_BYTES; \
- .global name; \
- _ASM_NOKPROBE_SYMBOL(name); \
- DEFINE_FIXED_SYMBOL(name); \
-name:
-
-#define TRAMP_REAL_BEGIN(name) \
- FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)
-
-#define TRAMP_VIRT_BEGIN(name) \
- FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)
-
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#define TRAMP_KVM_BEGIN(name) \
- TRAMP_VIRT_BEGIN(name)
-#else
-#define TRAMP_KVM_BEGIN(name)
-#endif
-
-#define EXC_REAL_NONE(start, size) \
- FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
- FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)
-
-#define EXC_VIRT_NONE(start, size) \
- FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
- FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_HEAD_64_H */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 20a101046cff..bd6504c28c2f 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -31,9 +31,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
return 0;
}
-void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
- pte_t pte);
-
#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 11112023e327..e90c073e437e 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -342,6 +342,16 @@
#define H_TLB_INVALIDATE 0xF808
#define H_COPY_TOFROM_GUEST 0xF80C
+/* Flags for H_SVM_PAGE_IN */
+#define H_PAGE_IN_SHARED 0x1
+
+/* Platform-specific hcalls used by the Ultravisor */
+#define H_SVM_PAGE_IN 0xEF00
+#define H_SVM_PAGE_OUT 0xEF04
+#define H_SVM_INIT_START 0xEF08
+#define H_SVM_INIT_DONE 0xEF0C
+#define H_SVM_INIT_ABORT 0xEF14
+
/* Values for 2nd argument to H_SET_MODE */
#define H_SET_MODE_RESOURCE_SET_CIABR 1
#define H_SET_MODE_RESOURCE_SET_DAWR 2
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 67e2da195eae..f2f8d8aa8e3b 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -14,6 +14,7 @@ struct arch_hw_breakpoint {
unsigned long address;
u16 type;
u16 len; /* length of the target data symbol */
+ u16 hw_len; /* length programmed in hw */
};
/* Note: Don't change the first 6 bits below as they are in the same order
@@ -33,6 +34,15 @@ struct arch_hw_breakpoint {
#define HW_BRK_TYPE_PRIV_ALL (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \
HW_BRK_TYPE_HYP)
+#ifdef CONFIG_PPC_8xx
+#define HW_BREAKPOINT_ALIGN 0x3
+#else
+#define HW_BREAKPOINT_ALIGN 0x7
+#endif
+
+#define DABR_MAX_LEN 8
+#define DAWR_MAX_LEN 512
+
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <linux/kdebug.h>
#include <asm/reg.h>
@@ -44,8 +54,6 @@ struct pmu;
struct perf_sample_data;
struct task_struct;
-#define HW_BREAKPOINT_ALIGN 0x7
-
extern int hw_breakpoint_slots(int type);
extern int arch_bp_generic_fields(int type, int *gen_bp_type);
extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
@@ -70,6 +78,7 @@ static inline void hw_breakpoint_disable(void)
brk.address = 0;
brk.type = 0;
brk.len = 0;
+ brk.hw_len = 0;
if (ppc_breakpoint_available())
__set_breakpoint(&brk);
}
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 32a18f2f49bc..e3a905e3d573 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -226,8 +226,8 @@ static inline bool arch_irqs_disabled(void)
#endif /* CONFIG_PPC_BOOK3S */
#ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
-#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
+#define __hard_irq_enable() wrtee(MSR_EE)
+#define __hard_irq_disable() wrtee(0)
#else
#define __hard_irq_enable() __mtmsrd(MSR_EE|MSR_RI, 1)
#define __hard_irq_disable() __mtmsrd(MSR_RI, 1)
@@ -280,8 +280,6 @@ extern void force_external_irq_replay(void);
#else /* CONFIG_PPC64 */
-#define SET_MSR_EE(x) mtmsr(x)
-
static inline unsigned long arch_local_save_flags(void)
{
return mfmsr();
@@ -289,47 +287,44 @@ static inline unsigned long arch_local_save_flags(void)
static inline void arch_local_irq_restore(unsigned long flags)
{
-#if defined(CONFIG_BOOKE)
- asm volatile("wrtee %0" : : "r" (flags) : "memory");
-#else
- mtmsr(flags);
-#endif
+ if (IS_ENABLED(CONFIG_BOOKE))
+ wrtee(flags);
+ else
+ mtmsr(flags);
}
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags = arch_local_save_flags();
-#ifdef CONFIG_BOOKE
- asm volatile("wrteei 0" : : : "memory");
-#elif defined(CONFIG_PPC_8xx)
- wrtspr(SPRN_EID);
-#else
- SET_MSR_EE(flags & ~MSR_EE);
-#endif
+
+ if (IS_ENABLED(CONFIG_BOOKE))
+ wrtee(0);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EID);
+ else
+ mtmsr(flags & ~MSR_EE);
+
return flags;
}
static inline void arch_local_irq_disable(void)
{
-#ifdef CONFIG_BOOKE
- asm volatile("wrteei 0" : : : "memory");
-#elif defined(CONFIG_PPC_8xx)
- wrtspr(SPRN_EID);
-#else
- arch_local_irq_save();
-#endif
+ if (IS_ENABLED(CONFIG_BOOKE))
+ wrtee(0);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EID);
+ else
+ mtmsr(mfmsr() & ~MSR_EE);
}
static inline void arch_local_irq_enable(void)
{
-#ifdef CONFIG_BOOKE
- asm volatile("wrteei 1" : : : "memory");
-#elif defined(CONFIG_PPC_8xx)
- wrtspr(SPRN_EIE);
-#else
- unsigned long msr = mfmsr();
- SET_MSR_EE(msr | MSR_EE);
-#endif
+ if (IS_ENABLED(CONFIG_BOOKE))
+ wrtee(MSR_EE);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EIE);
+ else
+ mtmsr(mfmsr() | MSR_EE);
}
static inline bool arch_irqs_disabled_flags(unsigned long flags)
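For orientation, the rewritten helpers are consumed through the usual save/restore bracket; a sketch:

/* Typical critical section built on the helpers above. */
static void example_critical_section(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* wrtee(0), wrtspr(SPRN_EID) or mtmsr() */
	/* ... code that must run with interrupts disabled ... */
	arch_local_irq_restore(flags);
}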
diff --git a/arch/powerpc/include/asm/io-workarounds.h b/arch/powerpc/include/asm/io-workarounds.h
index 01567ea4ceaf..3cce499fbe27 100644
--- a/arch/powerpc/include/asm/io-workarounds.h
+++ b/arch/powerpc/include/asm/io-workarounds.h
@@ -8,6 +8,7 @@
#ifndef _IO_WORKAROUNDS_H
#define _IO_WORKAROUNDS_H
+#ifdef CONFIG_PPC_IO_WORKAROUNDS
#include <linux/io.h>
#include <asm/pci-bridge.h>
@@ -32,4 +33,23 @@ extern int spiderpci_iowa_init(struct iowa_bus *, void *);
#define SPIDER_PCI_DUMMY_READ 0x0810
#define SPIDER_PCI_DUMMY_READ_BASE 0x0814
+#endif
+
+#if defined(CONFIG_PPC_IO_WORKAROUNDS) && defined(CONFIG_PPC_INDIRECT_MMIO)
+extern bool io_workaround_inited;
+
+static inline bool iowa_is_active(void)
+{
+ return unlikely(io_workaround_inited);
+}
+#else
+static inline bool iowa_is_active(void)
+{
+ return false;
+}
+#endif
+
+void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
+ pgprot_t prot, void *caller);
+
#endif /* _IO_WORKAROUNDS_H */
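A hedged sketch of how an ioremap path might consult the new iowa_is_active() gate; everything except the iowa_* names is illustrative:

/* Illustrative caller: take the workaround path only when it is enabled. */
static void __iomem *example_ioremap(phys_addr_t addr, unsigned long size,
				     pgprot_t prot, void *caller)
{
	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);

	return NULL;	/* normal __ioremap_caller() path elided */
}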
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 23e5d5d16c7e..635969b5b58e 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -691,8 +691,6 @@ static inline void iosync(void)
 * * ioremap_prot allows specifying the page flags as an argument and can
* also be hooked by the platform via ppc_md.
*
- * * ioremap_nocache is identical to ioremap
- *
* * ioremap_wc enables write combining
*
* * ioremap_wt enables write through
@@ -705,16 +703,9 @@ static inline void iosync(void)
* create hand-made mappings for use only by the PCI code and cannot
* currently be hooked. Must be page aligned.
*
- * * __ioremap is the low level implementation used by ioremap and
- * ioremap_prot and cannot be hooked (but can be used by a hook on one
- * of the previous ones)
- *
* * __ioremap_caller is the same as above but takes an explicit caller
* reference rather than using __builtin_return_address(0)
*
- * * __iounmap, is the low level implementation used by iounmap and cannot
- * be hooked (but can be used by a hook on iounmap)
- *
*/
extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
@@ -722,20 +713,20 @@ extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
void __iomem *ioremap_wt(phys_addr_t address, unsigned long size);
void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size);
-#define ioremap_nocache(addr, size) ioremap((addr), (size))
#define ioremap_uc(addr, size) ioremap((addr), (size))
#define ioremap_cache(addr, size) \
ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL))
extern void iounmap(volatile void __iomem *addr);
-extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
- unsigned long flags);
+int early_ioremap_range(unsigned long ea, phys_addr_t pa,
+ unsigned long size, pgprot_t prot);
+void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
+ pgprot_t prot, void *caller);
+
extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
pgprot_t prot, void *caller);
-extern void __iounmap(volatile void __iomem *addr);
-
extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
unsigned long size, pgprot_t prot);
extern void __iounmap_at(void *ea, unsigned long size);
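Basic usage of the surviving public API, for orientation; the device address and register offset below are made up:

/* Map a hypothetical 4K MMIO region, poke a register, unmap. */
void __iomem *regs = ioremap(0xffe04000, 0x1000);

if (regs) {
	u32 status = in_be32(regs + 0x10);	/* hypothetical status register */
	out_be32(regs + 0x10, status | 1);
	iounmap(regs);
}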
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 18d342b815e4..350101e11ddb 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -48,15 +48,16 @@ struct iommu_table_ops {
* returns old TCE and DMA direction mask.
* @tce is a physical address.
*/
- int (*exchange)(struct iommu_table *tbl,
+ int (*xchg_no_kill)(struct iommu_table *tbl,
long index,
unsigned long *hpa,
- enum dma_data_direction *direction);
- /* Real mode */
- int (*exchange_rm)(struct iommu_table *tbl,
- long index,
- unsigned long *hpa,
- enum dma_data_direction *direction);
+ enum dma_data_direction *direction,
+ bool realmode);
+
+ void (*tce_kill)(struct iommu_table *tbl,
+ unsigned long index,
+ unsigned long pages,
+ bool realmode);
__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
@@ -111,6 +112,8 @@ struct iommu_table {
struct iommu_table_ops *it_ops;
struct kref it_kref;
int it_nid;
+ unsigned long it_reserved_start; /* Start of not-DMA-able (MMIO) area */
+ unsigned long it_reserved_end;
};
#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
@@ -149,8 +152,9 @@ extern int iommu_tce_table_put(struct iommu_table *tbl);
/* Initializes an iommu_table based on values set in the passed-in
* structure
*/
-extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
- int nid);
+extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
+ int nid, unsigned long res_start, unsigned long res_end);
+
#define IOMMU_TABLE_GROUP_MAX_TABLES 2
struct iommu_table_group;
@@ -206,6 +210,12 @@ extern void iommu_del_device(struct device *dev);
extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
unsigned long entry, unsigned long *hpa,
enum dma_data_direction *direction);
+extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
+ struct iommu_table *tbl,
+ unsigned long entry, unsigned long *hpa,
+ enum dma_data_direction *direction);
+extern void iommu_tce_kill(struct iommu_table *tbl,
+ unsigned long entry, unsigned long pages);
#else
static inline void iommu_register_group(struct iommu_table_group *table_group,
int pci_domain_number,
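The split into xchg_no_kill plus tce_kill lets callers batch the TCE invalidation; a hedged sketch of the intended sequence (mm, tbl, entry, npages, hpa and dir assumed in scope):

/* Update a run of TCEs, then issue one invalidation for the whole range. */
long i;

for (i = 0; i < npages; i++) {
	if (iommu_tce_xchg_no_kill(mm, tbl, entry + i, &hpa, &dir))
		break;
}
iommu_tce_kill(tbl, entry, i);	/* single kill covers all updated entries */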
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index 296e51c2f066..fbff9ff9032e 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -31,9 +31,11 @@
void kasan_early_init(void);
void kasan_mmu_init(void);
void kasan_init(void);
+void kasan_late_init(void);
#else
static inline void kasan_init(void) { }
static inline void kasan_mmu_init(void) { }
+static inline void kasan_late_init(void) { }
#endif
#endif /* __ASSEMBLY */
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index 5b5e39643a27..92bcd1a26d73 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -2,6 +2,16 @@
#ifndef _ASM_POWERPC_KUP_H_
#define _ASM_POWERPC_KUP_H_
+#define KUAP_READ 1
+#define KUAP_WRITE 2
+#define KUAP_READ_WRITE (KUAP_READ | KUAP_WRITE)
+/*
+ * For prevent_user_access() only.
+ * Use the current saved situation instead of the to/from/size params.
+ * Used on book3s/32
+ */
+#define KUAP_CURRENT 4
+
#ifdef CONFIG_PPC64
#include <asm/book3s/64/kup-radix.h>
#endif
@@ -42,32 +52,55 @@ void setup_kuap(bool disabled);
#else
static inline void setup_kuap(bool disabled) { }
static inline void allow_user_access(void __user *to, const void __user *from,
- unsigned long size) { }
+ unsigned long size, unsigned long dir) { }
static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size) { }
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write) { return false; }
+ unsigned long size, unsigned long dir) { }
+static inline unsigned long prevent_user_access_return(void) { return 0UL; }
+static inline void restore_user_access(unsigned long flags) { }
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ return false;
+}
#endif /* CONFIG_PPC_KUAP */
static inline void allow_read_from_user(const void __user *from, unsigned long size)
{
- allow_user_access(NULL, from, size);
+ allow_user_access(NULL, from, size, KUAP_READ);
}
static inline void allow_write_to_user(void __user *to, unsigned long size)
{
- allow_user_access(to, NULL, size);
+ allow_user_access(to, NULL, size, KUAP_WRITE);
+}
+
+static inline void allow_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
+{
+ allow_user_access(to, from, size, KUAP_READ_WRITE);
}
static inline void prevent_read_from_user(const void __user *from, unsigned long size)
{
- prevent_user_access(NULL, from, size);
+ prevent_user_access(NULL, from, size, KUAP_READ);
}
static inline void prevent_write_to_user(void __user *to, unsigned long size)
{
- prevent_user_access(to, NULL, size);
+ prevent_user_access(to, NULL, size, KUAP_WRITE);
+}
+
+static inline void prevent_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
+{
+ prevent_user_access(to, from, size, KUAP_READ_WRITE);
+}
+
+static inline void prevent_current_access_user(void)
+{
+ prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT);
}
#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_POWERPC_KUP_H_ */
+#endif /* _ASM_POWERPC_KUAP_H_ */
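The new prevent_user_access_return()/restore_user_access() pair supports a save/restore idiom for paths that may run with a user-access window already open; a minimal sketch:

/* Close any open user-access window, then put the old state back. */
unsigned long kuap_flags;

kuap_flags = prevent_user_access_return();
/* ... code that must not touch userspace (e.g. an interrupt path) ... */
restore_user_access(kuap_flags);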
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index bb7c8cc77f1a..04b2b927bb5a 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -535,7 +535,7 @@ static inline void note_hpte_modification(struct kvm *kvm,
*/
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
- return rcu_dereference_raw_notrace(kvm->memslots[0]);
+ return rcu_dereference_raw_check(kvm->memslots[0]);
}
extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
new file mode 100644
index 000000000000..5a9834e0e2d1
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KVM_BOOK3S_UVMEM_H__
+#define __ASM_KVM_BOOK3S_UVMEM_H__
+
+#ifdef CONFIG_PPC_UV
+int kvmppc_uvmem_init(void);
+void kvmppc_uvmem_free(void);
+int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot);
+void kvmppc_uvmem_slot_free(struct kvm *kvm,
+ const struct kvm_memory_slot *slot);
+unsigned long kvmppc_h_svm_page_in(struct kvm *kvm,
+ unsigned long gra,
+ unsigned long flags,
+ unsigned long page_shift);
+unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
+ unsigned long gra,
+ unsigned long flags,
+ unsigned long page_shift);
+unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
+unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
+int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
+unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm);
+void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+ struct kvm *kvm, bool skip_page_out);
+#else
+static inline int kvmppc_uvmem_init(void)
+{
+ return 0;
+}
+
+static inline void kvmppc_uvmem_free(void) { }
+
+static inline int
+kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
+{
+ return 0;
+}
+
+static inline void
+kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) { }
+
+static inline unsigned long
+kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gra,
+ unsigned long flags, unsigned long page_shift)
+{
+ return H_UNSUPPORTED;
+}
+
+static inline unsigned long
+kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gra,
+ unsigned long flags, unsigned long page_shift)
+{
+ return H_UNSUPPORTED;
+}
+
+static inline unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
+{
+ return H_UNSUPPORTED;
+}
+
+static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
+{
+ return H_UNSUPPORTED;
+}
+
+static inline unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
+{
+ return H_UNSUPPORTED;
+}
+
+static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
+{
+ return -EFAULT;
+}
+
+static inline void
+kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+ struct kvm *kvm, bool skip_page_out) { }
+#endif /* CONFIG_PPC_UV */
+#endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index e6e5f59aaa97..6e8b8ffd06ad 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -232,11 +232,25 @@ struct revmap_entry {
};
/*
- * We use the top bit of each memslot->arch.rmap entry as a lock bit,
- * and bit 32 as a present flag. The bottom 32 bits are the
- * index in the guest HPT of a HPTE that points to the page.
+ * An rmap array with one entry per guest page is allocated for each memslot.
+ * This array is used to store usage-specific information about each guest page.
+ * Below are the encodings of the various possible usage types.
*/
-#define KVMPPC_RMAP_LOCK_BIT 63
+/* Free bits which can be used to define a new usage */
+#define KVMPPC_RMAP_TYPE_MASK 0xff00000000000000
+#define KVMPPC_RMAP_NESTED 0xc000000000000000 /* Nested rmap array */
+#define KVMPPC_RMAP_HPT 0x0100000000000000 /* HPT guest */
+
+/*
+ * rmap usage definition for a hash page table (hpt) guest:
+ * 0x0000080000000000 Lock bit
+ * 0x0000018000000000 RC bits
+ * 0x0000000100000000 Present bit
+ * 0x00000000ffffffff HPT index bits
+ * The bottom 32 bits are the index in the guest HPT of a HPTE that points to
+ * the page.
+ */
+#define KVMPPC_RMAP_LOCK_BIT 43
#define KVMPPC_RMAP_RC_SHIFT 32
#define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_PRESENT 0x100000000ul
@@ -261,6 +275,11 @@ struct kvm_hpt_info {
struct kvm_resize_hpt;
+/* Flag values for kvm_arch.secure_guest */
+#define KVMPPC_SECURE_INIT_START 0x1 /* H_SVM_INIT_START has been called */
+#define KVMPPC_SECURE_INIT_DONE 0x2 /* H_SVM_INIT_DONE completed */
+#define KVMPPC_SECURE_INIT_ABORT 0x4 /* H_SVM_INIT_ABORT issued */
+
struct kvm_arch {
unsigned int lpid;
unsigned int smt_mode; /* # vcpus per virtual core */
@@ -283,6 +302,7 @@ struct kvm_arch {
cpumask_t cpu_in_guest;
u8 radix;
u8 fwnmi_enabled;
+ u8 secure_guest;
bool threads_indep;
bool nested_enable;
pgd_t *pgtable;
@@ -315,6 +335,8 @@ struct kvm_arch {
#endif
struct kvmppc_ops *kvm_ops;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ struct mutex uvmem_lock;
+ struct list_head uvmem_pfns;
struct mutex mmu_setup_lock; /* nests inside vcpu mutexes */
u64 l1_ptcr;
int max_nested_lpid;
@@ -386,7 +408,6 @@ struct kvmppc_mmu {
u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data, bool iswrite);
- void (*reset_msr)(struct kvm_vcpu *vcpu);
void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 2484e6a8f5ca..bc2494e5710a 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -119,8 +119,7 @@ extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
enum xlate_instdata xlid, enum xlate_readwrite xlrw,
struct kvmppc_pte *pte);
-extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
- unsigned int id);
+extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
@@ -271,9 +270,10 @@ struct kvmppc_ops {
union kvmppc_one_reg *val);
void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
void (*vcpu_put)(struct kvm_vcpu *vcpu);
+ void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
- struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+ int (*vcpu_create)(struct kvm_vcpu *vcpu);
void (*vcpu_free)(struct kvm_vcpu *vcpu);
int (*check_requests)(struct kvm_vcpu *vcpu);
int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
@@ -321,6 +321,7 @@ struct kvmppc_ops {
int size);
int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
int size);
+ int (*svm_off)(struct kvm *kvm);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
@@ -452,9 +453,100 @@ static inline u32 kvmppc_get_xics_latch(void)
return xirr;
}
-static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+/*
+ * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
+ * a CPU thread that's running/napping inside of a guest is by default regarded
+ * as a request to wake the CPU (if needed) and continue execution within the
+ * guest, potentially to process new state like externally-generated
+ * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
+ *
+ * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
+ * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
+ * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
+ * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
+ * the receiving side prior to processing the IPI work.
+ *
+ * NOTE:
+ *
+ * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
+ * This is to guard against sequences such as the following:
+ *
+ * CPU
+ * X: smp_muxed_ipi_set_message():
+ * X: smp_mb()
+ * X: message[RESCHEDULE] = 1
+ * X: doorbell_global_ipi(42):
+ * X: kvmppc_set_host_ipi(42)
+ * X: ppc_msgsnd_sync()/smp_mb()
+ * X: ppc_msgsnd() -> 42
+ * 42: doorbell_exception(): // from CPU X
+ * 42: ppc_msgsync()
+ * 105: smp_muxed_ipi_set_message():
 * 105: smp_mb()
+ * // STORE DEFERRED DUE TO RE-ORDERING
+ * --105: message[CALL_FUNCTION] = 1
+ * | 105: doorbell_global_ipi(42):
+ * | 105: kvmppc_set_host_ipi(42)
+ * | 42: kvmppc_clear_host_ipi(42)
+ * | 42: smp_ipi_demux_relaxed()
+ * | 42: // returns to executing guest
+ * | // RE-ORDERED STORE COMPLETES
+ * ->105: message[CALL_FUNCTION] = 1
+ * 105: ppc_msgsnd_sync()/smp_mb()
+ * 105: ppc_msgsnd() -> 42
+ * 42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
+ * 105: // hangs waiting on 42 to process messages/call_single_queue
+ *
+ * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
+ * to guard against sequences such as the following (as well as to create
+ * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
+ *
+ * CPU
+ * X: smp_muxed_ipi_set_message():
+ * X: smp_mb()
+ * X: message[RESCHEDULE] = 1
+ * X: doorbell_global_ipi(42):
+ * X: kvmppc_set_host_ipi(42)
+ * X: ppc_msgsnd_sync()/smp_mb()
+ * X: ppc_msgsnd() -> 42
+ * 42: doorbell_exception(): // from CPU X
+ * 42: ppc_msgsync()
+ * // STORE DEFERRED DUE TO RE-ORDERING
+ * -- 42: kvmppc_clear_host_ipi(42)
+ * | 42: smp_ipi_demux_relaxed()
+ * | 105: smp_muxed_ipi_set_message():
 * | 105: smp_mb()
+ * | 105: message[CALL_FUNCTION] = 1
+ * | 105: doorbell_global_ipi(42):
+ * | 105: kvmppc_set_host_ipi(42)
+ * | // RE-ORDERED STORE COMPLETES
+ * -> 42: kvmppc_clear_host_ipi(42)
+ * 42: // returns to executing guest
+ * 105: ppc_msgsnd_sync()/smp_mb()
+ * 105: ppc_msgsnd() -> 42
+ * 42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
+ * 105: // hangs waiting on 42 to process messages/call_single_queue
+ */
+static inline void kvmppc_set_host_ipi(int cpu)
{
- paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
+ /*
+ * order stores of IPI messages vs. setting of host_ipi flag
+ *
+ * pairs with the barrier in kvmppc_clear_host_ipi()
+ */
+ smp_mb();
+ paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
+}
+
+static inline void kvmppc_clear_host_ipi(int cpu)
+{
+ paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
+ /*
+ * order clearing of host_ipi flag vs. processing of IPI messages
+ *
+ * pairs with the barrier in kvmppc_set_host_ipi()
+ */
+ smp_mb();
}
static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
@@ -486,7 +578,10 @@ static inline u32 kvmppc_get_xics_latch(void)
return 0;
}
-static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+static inline void kvmppc_set_host_ipi(int cpu)
+{}
+
+static inline void kvmppc_clear_host_ipi(int cpu)
{}
static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
@@ -598,6 +693,7 @@ extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
union kvmppc_one_reg *val);
+extern bool kvmppc_xive_native_supported(void);
#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
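Condensing the long ordering comment above into the two halves of the protocol (doorbell_global_ipi() and smp_ipi_demux_relaxed() are the functions named in that comment):

/* Sender: set the flag (smp_mb() inside), then ring the doorbell. */
kvmppc_set_host_ipi(cpu);
doorbell_global_ipi(cpu);

/*
 * Receiver, in the doorbell handler: clear the flag (smp_mb() inside)
 * before demuxing, so a racing sender's re-set is not lost.
 */
kvmppc_clear_host_ipi(smp_processor_id());
smp_ipi_demux_relaxed();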
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index fdd00939270b..bc4bd19b7fc2 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -17,7 +17,7 @@ typedef struct
#define LOCAL_INIT(i) { (i) }
-static __inline__ long local_read(local_t *l)
+static __inline__ long local_read(const local_t *l)
{
return READ_ONCE(l->v);
}
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index c43d6eca9edd..7bcb64444a39 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -3,9 +3,6 @@
#define _ASM_POWERPC_MACHDEP_H
#ifdef __KERNEL__
-/*
- */
-
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
@@ -31,10 +28,6 @@ struct pci_host_bridge;
struct machdep_calls {
char *name;
#ifdef CONFIG_PPC64
- void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
- pgprot_t prot, void *caller);
- void (*iounmap)(volatile void __iomem *token);
-
#ifdef CONFIG_PM
void (*iommu_save)(void);
void (*iommu_restore)(void);
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index a4c6a74ad2fb..6a6ddaabdb34 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -30,6 +30,10 @@ enum MCE_Disposition {
enum MCE_Initiator {
MCE_INITIATOR_UNKNOWN = 0,
MCE_INITIATOR_CPU = 1,
+ MCE_INITIATOR_PCI = 2,
+ MCE_INITIATOR_ISA = 3,
+ MCE_INITIATOR_MEMORY = 4,
+ MCE_INITIATOR_POWERMGM = 5,
};
enum MCE_ErrorType {
@@ -41,6 +45,8 @@ enum MCE_ErrorType {
MCE_ERROR_TYPE_USER = 5,
MCE_ERROR_TYPE_RA = 6,
MCE_ERROR_TYPE_LINK = 7,
+ MCE_ERROR_TYPE_DCACHE = 8,
+ MCE_ERROR_TYPE_ICACHE = 9,
};
enum MCE_ErrorClass {
@@ -122,7 +128,8 @@ struct machine_check_event {
enum MCE_UeErrorType ue_error_type:8;
u8 effective_address_provided;
u8 physical_address_provided;
- u8 reserved_1[5];
+ u8 ignore_event;
+ u8 reserved_1[4];
u64 effective_address;
u64 physical_address;
u8 reserved_2[8];
@@ -193,6 +200,7 @@ struct mce_error_info {
enum MCE_Initiator initiator:8;
enum MCE_ErrorClass error_class:8;
bool sync_error;
+ bool ignore_event;
};
#define MAX_MC_EVT 100
diff --git a/arch/powerpc/include/asm/mem_encrypt.h b/arch/powerpc/include/asm/mem_encrypt.h
new file mode 100644
index 000000000000..ba9dab07c1be
--- /dev/null
+++ b/arch/powerpc/include/asm/mem_encrypt.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SVM helper functions
+ *
+ * Copyright 2018 IBM Corporation
+ */
+
+#ifndef _ASM_POWERPC_MEM_ENCRYPT_H
+#define _ASM_POWERPC_MEM_ENCRYPT_H
+
+#include <asm/svm.h>
+
+static inline bool mem_encrypt_active(void)
+{
+ return is_secure_guest();
+}
+
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+ return is_secure_guest();
+}
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
+#endif /* _ASM_POWERPC_MEM_ENCRYPT_H */
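A hedged sketch of how a DMA allocation path might consult these helpers on a secure guest; dev, vaddr and size are assumed to exist in the surrounding (elided) code:

/* Share a freshly allocated buffer with the hypervisor when required. */
if (force_dma_unencrypted(dev))
	set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));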
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index ba94ce8c22d7..0699cfeeb8c9 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -257,7 +257,7 @@ extern void radix__mmu_cleanup_all(void);
/* Functions for creating and updating partition table on POWER9 */
extern void mmu_partition_table_init(void);
extern void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
- unsigned long dw1);
+ unsigned long dw1, bool flush);
#endif /* CONFIG_PPC64 */
struct mm_struct;
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 58efca934311..360367c579de 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -238,11 +238,6 @@ static inline void arch_unmap(struct mm_struct *mm,
mm->context.vdso_base = 0;
}
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
- struct vm_area_struct *vma)
-{
-}
-
#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
bool execute, bool foreign);
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 1c3133b5f86a..85ed2390fb99 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -3,6 +3,7 @@
#define _ASM_POWERPC_KUP_8XX_H_
#include <asm/bug.h>
+#include <asm/mmu.h>
#ifdef CONFIG_PPC_KUAP
@@ -34,18 +35,33 @@
#include <asm/reg.h>
static inline void allow_user_access(void __user *to, const void __user *from,
- unsigned long size)
+ unsigned long size, unsigned long dir)
{
mtspr(SPRN_MD_AP, MD_APG_INIT);
}
static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size)
+ unsigned long size, unsigned long dir)
{
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
-static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+static inline unsigned long prevent_user_access_return(void)
+{
+ unsigned long flags = mfspr(SPRN_MD_AP);
+
+ mtspr(SPRN_MD_AP, MD_APG_KUAP);
+
+ return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
+{
+ mtspr(SPRN_MD_AP, flags);
+}
+
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000),
"Bug: fault blocked by AP register !");
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 0284f8f5305f..60c4d829152e 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -11,8 +11,6 @@
#include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */
#include <asm/asm-405.h>
-extern unsigned long ioremap_bot;
-
#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif
@@ -78,23 +76,21 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
*/
#include <asm/fixmap.h>
-#ifdef CONFIG_HIGHMEM
-#define KVIRT_TOP PKMAP_BASE
-#else
-#define KVIRT_TOP FIXADDR_START
-#endif
-
/*
* ioremap_bot starts at that address. Early ioremaps move down from there,
* until mem_init() at which point this becomes the top of the vmalloc
* and ioremap space
*/
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define IOREMAP_TOP ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#ifdef CONFIG_HIGHMEM
+#define IOREMAP_TOP PKMAP_BASE
#else
-#define IOREMAP_TOP KVIRT_TOP
+#define IOREMAP_TOP FIXADDR_START
#endif
+/* PPC32 shares vmalloc area with ioremap */
+#define IOREMAP_START VMALLOC_START
+#define IOREMAP_END VMALLOC_END
+
/*
* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 16MB value just means that there will be a 64MB "hole" after the
@@ -118,7 +114,12 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
#define VMALLOC_END ioremap_bot
+#endif
/*
* Bits in a linux-style PTE. These match the bits in the
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index b9f66cf15c31..9a33b8bd842d 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -53,6 +53,7 @@
#define PHB_IO_BASE (ISA_IO_END)
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
+#define IOREMAP_START (ioremap_bot)
#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE)
diff --git a/arch/powerpc/include/asm/nohash/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-book3e.h
index 4c9777d256fb..b41004664312 100644
--- a/arch/powerpc/include/asm/nohash/mmu-book3e.h
+++ b/arch/powerpc/include/asm/nohash/mmu-book3e.h
@@ -75,7 +75,6 @@
#define MAS2_E 0x00000001
#define MAS2_WIMGE_MASK 0x0000001f
#define MAS2_EPN_MASK(size) (~0 << (size + 10))
-#define MAS2_VAL(addr, size, flags) ((addr) & MAS2_EPN_MASK(size) | (flags))
#define MAS3_RPN 0xFFFFF000
#define MAS3_U0 0x00000200
@@ -221,6 +220,16 @@
#define TLBILX_T_CLASS2 6
#define TLBILX_T_CLASS3 7
+/*
+ * The mapping only needs to be cache-coherent on SMP, except on
+ * Freescale e500mc derivatives where it's also needed for coherent DMA.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+#define MAS2_M_IF_NEEDED MAS2_M
+#else
+#define MAS2_M_IF_NEEDED 0
+#endif
+
#ifndef __ASSEMBLY__
#include <asm/bug.h>
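With MAS2_VAL gone, TLB setup code composes MAS2 by hand; an illustrative fragment, where tsize stands for the encoded page-size field that MAS2_EPN_MASK() expects:

/* Pick up the M (coherency) bit only where the configuration needs it. */
unsigned long mas2 = (addr & MAS2_EPN_MASK(tsize)) | MAS2_M_IF_NEEDED;

mtspr(SPRN_MAS2, mas2);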
diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h
index 332b13b4ecdb..29c43665a753 100644
--- a/arch/powerpc/include/asm/nohash/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/pgalloc.h
@@ -46,7 +46,6 @@ static inline void pgtable_free(void *table, int shift)
#define get_hugepd_cache_index(x) (x)
-#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
unsigned long pgf = (unsigned long)table;
@@ -64,13 +63,6 @@ static inline void __tlb_remove_table(void *_table)
pgtable_free(table, shift);
}
-#else
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
-{
- pgtable_free(table, shift);
-}
-#endif
-
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 1ca1c1864b32..7fed9dc0f147 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -293,5 +293,18 @@ static inline int pgd_huge(pgd_t pgd)
#define is_hugepd(hpd) (hugepd_ok(hpd))
#endif
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ */
+#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+#else
+static inline
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
+#endif
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 383242eb0dea..c1f25a760eb1 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -208,7 +208,13 @@
#define OPAL_HANDLE_HMI2 166
#define OPAL_NX_COPROC_INIT 167
#define OPAL_XIVE_GET_VP_STATE 170
-#define OPAL_LAST 170
+#define OPAL_MPIPL_UPDATE 173
+#define OPAL_MPIPL_REGISTER_TAG 174
+#define OPAL_MPIPL_QUERY_TAG 175
+#define OPAL_SECVAR_GET 176
+#define OPAL_SECVAR_GET_NEXT 177
+#define OPAL_SECVAR_ENQUEUE_UPDATE 178
+#define OPAL_LAST 178
#define QUIESCE_HOLD 1 /* Spin all calls at entry */
#define QUIESCE_REJECT 2 /* Fail all calls with OPAL_BUSY */
@@ -453,6 +459,7 @@ enum opal_msg_type {
OPAL_MSG_DPO = 5,
OPAL_MSG_PRD = 6,
OPAL_MSG_OCC = 7,
+ OPAL_MSG_PRD2 = 8,
OPAL_MSG_TYPE_MAX,
};
@@ -1059,6 +1066,7 @@ enum {
OPAL_REBOOT_NORMAL = 0,
OPAL_REBOOT_PLATFORM_ERROR = 1,
OPAL_REBOOT_FULL_IPL = 2,
+ OPAL_REBOOT_MPIPL = 3,
};
/* Argument to OPAL_PCI_TCE_KILL */
@@ -1135,6 +1143,44 @@ enum {
#define OPAL_PCI_P2P_LOAD 0x2
#define OPAL_PCI_P2P_STORE 0x4
+/* MPIPL update operations */
+enum opal_mpipl_ops {
+ OPAL_MPIPL_ADD_RANGE = 0,
+ OPAL_MPIPL_REMOVE_RANGE = 1,
+ OPAL_MPIPL_REMOVE_ALL = 2,
+ OPAL_MPIPL_FREE_PRESERVED_MEMORY = 3,
+};
+
+/* Each tag points to a metadata area; the kernel uses the
+ * tag to look up the corresponding metadata value.
+ */
+enum opal_mpipl_tags {
+ OPAL_MPIPL_TAG_CPU = 0,
+ OPAL_MPIPL_TAG_OPAL = 1,
+ OPAL_MPIPL_TAG_KERNEL = 2,
+ OPAL_MPIPL_TAG_BOOT_MEM = 3,
+};
+
+/* Preserved memory details */
+struct opal_mpipl_region {
+ __be64 src;
+ __be64 dest;
+ __be64 size;
+};
+
+/* Structure version */
+#define OPAL_MPIPL_VERSION 0x01
+
+struct opal_mpipl_fadump {
+ u8 version;
+ u8 reserved[7];
+ __be32 crashing_pir; /* OPAL crashing CPU PIR */
+ __be32 cpu_data_version;
+ __be32 cpu_data_size;
+ __be32 region_cnt;
+ struct opal_mpipl_region region[];
+} __packed;
+
#endif /* __ASSEMBLY__ */
#endif /* __OPAL_API_H */
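
Since every multi-byte field in struct opal_mpipl_fadump is big-endian, a consumer must byte-swap on load; a minimal, hypothetical walk of the preserved-memory regions:

/* Sketch only: print the regions OPAL preserved across the MPIPL boot. */
static void example_walk_regions(const struct opal_mpipl_fadump *f)
{
	u32 i, cnt = be32_to_cpu(f->region_cnt);

	for (i = 0; i < cnt; i++)
		pr_info("region %u: src=%llx dest=%llx size=%llx\n", i,
			(unsigned long long)be64_to_cpu(f->region[i].src),
			(unsigned long long)be64_to_cpu(f->region[i].dest),
			(unsigned long long)be64_to_cpu(f->region[i].size));
}
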
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 57bd029c715e..9986ac34b8e2 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -39,6 +39,7 @@ int64_t opal_npu_spa_clear_cache(uint64_t phb_id, uint32_t bdfn,
uint64_t PE_handle);
int64_t opal_npu_tl_set(uint64_t phb_id, uint32_t bdfn, long cap,
uint64_t rate_phys, uint32_t size);
+
int64_t opal_console_write(int64_t term_number, __be64 *length,
const uint8_t *buffer);
int64_t opal_console_read(int64_t term_number, __be64 *length,
@@ -272,7 +273,7 @@ int64_t opal_xive_get_vp_info(uint64_t vp,
int64_t opal_xive_set_vp_info(uint64_t vp,
uint64_t flags,
uint64_t report_cl_pair);
-int64_t opal_xive_allocate_irq(uint32_t chip_id);
+int64_t opal_xive_allocate_irq_raw(uint32_t chip_id);
int64_t opal_xive_free_irq(uint32_t girq);
int64_t opal_xive_sync(uint32_t type, uint32_t id);
int64_t opal_xive_dump(uint32_t type, uint32_t id);
@@ -297,6 +298,17 @@ int opal_sensor_group_clear(u32 group_hndl, int token);
int opal_sensor_group_enable(u32 group_hndl, int token, bool enable);
int opal_nx_coproc_init(uint32_t chip_id, uint32_t ct);
+int opal_secvar_get(const char *key, uint64_t key_len, u8 *data,
+ uint64_t *data_size);
+int opal_secvar_get_next(const char *key, uint64_t *key_len,
+ uint64_t key_buf_size);
+int opal_secvar_enqueue_update(const char *key, uint64_t key_len, u8 *data,
+ uint64_t data_size);
+
+s64 opal_mpipl_update(enum opal_mpipl_ops op, u64 src, u64 dest, u64 size);
+s64 opal_mpipl_register_tag(enum opal_mpipl_tags tag, u64 addr);
+s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, u64 *addr);
+
s64 opal_signal_system_reset(s32 cpu);
s64 opal_quiesce(u64 shutdown_type, s32 cpu);
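
A rough sketch of how the new MPIPL entry points compose; the metadata address is a placeholder, and real callers live in the fadump code:

/* Sketch only: preserve kernel metadata, then read the tag back. */
static int example_mpipl_tag(u64 metadata_addr)
{
	u64 addr;

	if (opal_mpipl_register_tag(OPAL_MPIPL_TAG_KERNEL, metadata_addr) != OPAL_SUCCESS)
		return -EIO;
	if (opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr) != OPAL_SUCCESS)
		return -EIO;
	return addr == metadata_addr ? 0 : -EINVAL;
}
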
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 0d52f57fca04..86332080399a 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -209,15 +209,25 @@ static inline bool pfn_valid(unsigned long pfn)
*/
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
-#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
+#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
/*
* gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
* with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ * This also results in better code generation.
*/
-#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
-#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+#define __va(x) \
+({ \
+ VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET); \
+ (void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET); \
+})
+
+#define __pa(x) \
+({ \
+ VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET); \
+ (unsigned long)(x) & 0x0fffffffffffffffUL; \
+})
#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
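
A worked round-trip under the 64-bit definitions above (illustrative, assuming PAGE_OFFSET is 0xc000000000000000UL as on Book3S/64): __va(0x2000) ORs in the top bits to give 0xc000000000002000, and __pa() masks them back off; the new VIRTUAL_BUG_ON() lines catch a pointer fed to the wrong macro:

/* Sketch only: the round-trip the VIRTUAL_BUG_ON() checks protect. */
static void example_va_pa_roundtrip(void)
{
	phys_addr_t pa = 0x2000;
	void *va = __va(pa);	/* 0xc000000000002000 on Book3S/64 */

	BUG_ON(__pa(va) != pa);	/* __pa(__va(x)) must be exact */
}
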
@@ -315,17 +325,15 @@ void arch_free_page(struct page *page, int order);
struct vm_area_struct;
+extern unsigned long kernstart_virt_addr;
+
+static inline unsigned long kaslr_offset(void)
+{
+ return kernstart_virt_addr - KERNELBASE;
+}
+
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>
-/*
- * Allow 30-bit DMA for very limited Broadcom wifi chips on many powerbooks.
- */
-#ifdef CONFIG_PPC32
-#define ARCH_ZONE_DMA_BITS 30
-#else
-#define ARCH_ZONE_DMA_BITS 31
-#endif
-
#endif /* _ASM_POWERPC_PAGE_H */
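
kaslr_offset() is simply the displacement of the running kernel from KERNELBASE, so it reads zero when randomization is off; a hypothetical debug use:

/* Sketch only: report the KASLR displacement in a debug dump. */
static void example_report_kaslr(void)
{
	if (kaslr_offset())
		pr_info("kernel moved by 0x%lx bytes\n", kaslr_offset());
}
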
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index 683dfbc67ca8..d64dfe3ac712 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -40,6 +40,8 @@ typedef unsigned long long pte_basic_t;
typedef unsigned long pte_basic_t;
#endif
+#include <asm/bug.h>
+
/*
* Clear page using the dcbz instruction, which doesn't cause any
* memory traffic (except to write out any cache lines which get
@@ -49,6 +51,8 @@ static inline void clear_page(void *addr)
{
unsigned int i;
+ WARN_ON((unsigned long)addr & (L1_CACHE_BYTES - 1));
+
for (i = 0; i < PAGE_SIZE / L1_CACHE_BYTES; i++, addr += L1_CACHE_BYTES)
dcbz(addr);
}
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 8dad1fdf4bd2..69f4cb3b7c56 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -183,6 +183,7 @@ struct iommu_table;
struct pci_dn {
int flags;
#define PCI_DN_FLAG_IOV_VF 0x01
+#define PCI_DN_FLAG_DEAD 0x02 /* Device has been hot-removed */
int busno; /* pci bus number */
int devfn; /* pci device and function number */
@@ -222,12 +223,15 @@ struct pci_dn {
extern struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
int devfn);
extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
-extern struct pci_dn *add_dev_pci_data(struct pci_dev *pdev);
-extern void remove_dev_pci_data(struct pci_dev *pdev);
extern struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
struct device_node *dn);
extern void pci_remove_device_node_info(struct device_node *dn);
+#ifdef CONFIG_PCI_IOV
+struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev);
+void remove_sriov_vf_pdns(struct pci_dev *pdev);
+#endif
+
static inline int pci_device_from_OF_node(struct device_node *np,
u8 *bus, u8 *devfn)
{
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 2372d35533ad..63ed7e3b0ba3 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -112,10 +112,7 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file,
unsigned long size,
pgprot_t prot);
-#define HAVE_ARCH_PCI_RESOURCE_TO_USER
-
extern resource_size_t pcibios_io_space_offset(struct pci_controller *hose);
-extern void pcibios_setup_bus_devices(struct pci_bus *bus);
extern void pcibios_setup_bus_self(struct pci_bus *bus);
extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
extern void pcibios_scan_phb(struct pci_controller *hose);
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 2b2c60a1a66d..6dd78a2dc03a 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -64,8 +64,6 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) pgtable_cache[shift]
-static inline void check_pgt_cache(void) { }
-
#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgalloc.h>
#else
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index c58ba7963688..8cc543ed114c 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -68,6 +68,8 @@ extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
+extern unsigned long ioremap_bot;
+
/*
* kern_addr_valid is intended to indicate whether an address is a valid
* kernel address. Most 32-bit archs define it as always true (like this)
@@ -77,18 +79,6 @@ extern void paging_init(void);
#include <asm-generic/pgtable.h>
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
-
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd) 0
#endif
@@ -97,7 +87,6 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
unsigned long vmalloc_to_phys(void *vmalloc_addr);
void pgtable_cache_add(unsigned int shift);
-void pgtable_cache_init(void);
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
@@ -105,12 +94,6 @@ void mark_initmem_nx(void);
static inline void mark_initmem_nx(void) { }
#endif
-#ifdef CONFIG_PPC_DEBUG_WX
-void ptdump_check_wx(void);
-#else
-static inline void ptdump_check_wx(void) { }
-#endif
-
/*
* When used, PTE_FRAG_NR is defined in subarch pgtable.h
* so we are sure it is included when arriving here.
@@ -168,13 +151,9 @@ static inline bool pgd_is_leaf(pgd_t pgd)
#define is_ioremap_addr is_ioremap_addr
static inline bool is_ioremap_addr(const void *x)
{
-#ifdef CONFIG_MMU
unsigned long addr = (unsigned long)x;
return addr >= IOREMAP_BASE && addr < IOREMAP_END;
-#else
- return false;
-#endif
}
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index cff5a411e595..4497c8afb573 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -340,6 +340,12 @@ static inline long plpar_set_ciabr(unsigned long ciabr)
{
return 0;
}
+
+static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
+ unsigned long *ptes)
+{
+ return 0;
+}
#endif /* CONFIG_PPC_PSERIES */
#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index edcb1fc50aeb..d0ee0ede5767 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -15,6 +15,7 @@
#define PCI_SLOT_ID_PREFIX (1UL << 63)
#define PCI_SLOT_ID(phb_id, bdfn) \
(PCI_SLOT_ID_PREFIX | ((uint64_t)(bdfn) << 16) | (phb_id))
+#define PCI_PHB_SLOT_ID(phb_id) (phb_id)
extern int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id);
extern int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len);
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index cec2d6409515..7f4be5a05eb3 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -62,11 +62,6 @@ void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);
void eeh_sysfs_add_device(struct pci_dev *pdev);
void eeh_sysfs_remove_device(struct pci_dev *pdev);
-static inline const char *eeh_pci_name(struct pci_dev *pdev)
-{
- return pdev ? pci_name(pdev) : "<null>";
-}
-
static inline const char *eeh_driver_name(struct pci_dev *pdev)
{
return (pdev && pdev->driver) ? pdev->driver->name : "<null>";
@@ -74,6 +69,8 @@ static inline const char *eeh_driver_name(struct pci_dev *pdev)
#endif /* CONFIG_EEH */
+#define PCI_BUSNO(bdfn) ((bdfn >> 8) & 0xff)
+
#else /* CONFIG_PCI */
static inline void init_pci_config_tokens(void) { }
#endif /* !CONFIG_PCI */
diff --git a/arch/powerpc/include/asm/ppc4xx_ocm.h b/arch/powerpc/include/asm/ppc4xx_ocm.h
deleted file mode 100644
index fc4db6dcde84..000000000000
--- a/arch/powerpc/include/asm/ppc4xx_ocm.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * PowerPC 4xx OCM memory allocation support
- *
- * (C) Copyright 2009, Applied Micro Circuits Corporation
- * Victor Gallardo (vgallardo@amcc.com)
- *
- * See file CREDITS for list of people who contributed to this
- * project.
- */
-
-#ifndef __ASM_POWERPC_PPC4XX_OCM_H__
-#define __ASM_POWERPC_PPC4XX_OCM_H__
-
-#define PPC4XX_OCM_NON_CACHED 0
-#define PPC4XX_OCM_CACHED 1
-
-#if defined(CONFIG_PPC4xx_OCM)
-
-void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align,
- int flags, const char *owner);
-void ppc4xx_ocm_free(const void *virt);
-
-#else
-
-#define ppc4xx_ocm_alloc(phys, size, align, flags, owner) NULL
-#define ppc4xx_ocm_free(addr) ((void)0)
-
-#endif /* CONFIG_PPC4xx_OCM */
-
-#endif /* __ASM_POWERPC_PPC4XX_OCM_H__ */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index e0637730a8e7..6b03dff61a05 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -311,18 +311,48 @@ n:
addis reg,reg,(name - 0b)@ha; \
addi reg,reg,(name - 0b)@l;
-#ifdef __powerpc64__
-#ifdef HAVE_AS_ATHIGH
+#if defined(__powerpc64__) && defined(HAVE_AS_ATHIGH)
#define __AS_ATHIGH high
#else
#define __AS_ATHIGH h
#endif
-#define LOAD_REG_IMMEDIATE(reg,expr) \
- lis reg,(expr)@highest; \
- ori reg,reg,(expr)@higher; \
- rldicr reg,reg,32,31; \
- oris reg,reg,(expr)@__AS_ATHIGH; \
- ori reg,reg,(expr)@l;
+
+.macro __LOAD_REG_IMMEDIATE_32 r, x
+ .if (\x) >= 0x8000 || (\x) < -0x8000
+ lis \r, (\x)@__AS_ATHIGH
+ .if (\x) & 0xffff != 0
+ ori \r, \r, (\x)@l
+ .endif
+ .else
+ li \r, (\x)@l
+ .endif
+.endm
+
+.macro __LOAD_REG_IMMEDIATE r, x
+ .if (\x) >= 0x80000000 || (\x) < -0x80000000
+ __LOAD_REG_IMMEDIATE_32 \r, (\x) >> 32
+ sldi \r, \r, 32
+ .if (\x) & 0xffff0000 != 0
+ oris \r, \r, (\x)@__AS_ATHIGH
+ .endif
+ .if (\x) & 0xffff != 0
+ ori \r, \r, (\x)@l
+ .endif
+ .else
+ __LOAD_REG_IMMEDIATE_32 \r, \x
+ .endif
+.endm
+
+#ifdef __powerpc64__
+
+#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE reg, expr
+
+#define LOAD_REG_IMMEDIATE_SYM(reg, tmp, expr) \
+ lis tmp, (expr)@highest; \
+ lis reg, (expr)@__AS_ATHIGH; \
+ ori tmp, tmp, (expr)@higher; \
+ ori reg, reg, (expr)@l; \
+ rldimi reg, tmp, 32, 0
#define LOAD_REG_ADDR(reg,name) \
ld reg,name@got(r2)
@@ -335,11 +365,13 @@ n:
#else /* 32-bit */
-#define LOAD_REG_IMMEDIATE(reg,expr) \
+#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE_32 reg, expr
+
+#define LOAD_REG_IMMEDIATE_SYM(reg,expr) \
lis reg,(expr)@ha; \
addi reg,reg,(expr)@l;
-#define LOAD_REG_ADDR(reg,name) LOAD_REG_IMMEDIATE(reg, name)
+#define LOAD_REG_ADDR(reg,name) LOAD_REG_IMMEDIATE_SYM(reg, name)
#define LOAD_REG_ADDRBASE(reg, name) lis reg,name@ha
#define ADDROFF(name) name@l
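
To see what the macro rewrite buys, illustrative expansions (the sequences follow from the .if tests above; the old 64-bit LOAD_REG_IMMEDIATE always emitted five instructions regardless of the value):

	LOAD_REG_IMMEDIATE(r5, 1)		/* now emits just: li r5, 1 */
	LOAD_REG_IMMEDIATE(r5, 0x12345678)	/* lis r5, 0x1234; ori r5, r5, 0x5678 */
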
@@ -351,19 +383,9 @@ n:
/* various errata or part fixups */
#ifdef CONFIG_PPC601_SYNC_FIX
-#define SYNC \
-BEGIN_FTR_SECTION \
- sync; \
- isync; \
-END_FTR_SECTION_IFSET(CPU_FTR_601)
-#define SYNC_601 \
-BEGIN_FTR_SECTION \
- sync; \
-END_FTR_SECTION_IFSET(CPU_FTR_601)
-#define ISYNC_601 \
-BEGIN_FTR_SECTION \
- isync; \
-END_FTR_SECTION_IFSET(CPU_FTR_601)
+#define SYNC sync; isync
+#define SYNC_601 sync
+#define ISYNC_601 isync
#else
#define SYNC
#define SYNC_601
@@ -389,15 +411,11 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#define MFTBU(dest) mfspr dest, SPRN_TBRU
#endif
-#ifndef CONFIG_SMP
-#define TLBSYNC
-#else /* CONFIG_SMP */
/* tlbsync is not implemented on 601 */
-#define TLBSYNC \
-BEGIN_FTR_SECTION \
- tlbsync; \
- sync; \
-END_FTR_SECTION_IFCLR(CPU_FTR_601)
+#if !defined(CONFIG_SMP) || defined(CONFIG_PPC_BOOK3S_601)
+#define TLBSYNC
+#else
+#define TLBSYNC tlbsync; sync
#endif
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index a9993e7a443b..8387698bd5b6 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -163,6 +163,12 @@ struct thread_struct {
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
unsigned long kuap; /* opened segments for user access */
#endif
+#ifdef CONFIG_VMAP_STACK
+ unsigned long srr0;
+ unsigned long srr1;
+ unsigned long dar;
+ unsigned long dsisr;
+#endif
/* Debug Registers */
struct debug_reg debug;
struct thread_fp_state fp_state;
@@ -412,6 +418,9 @@ static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
+#ifdef CONFIG_PPC_970_NAP
+extern void power4_idle_nap(void);
+#endif
extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index feee1b21bbd5..ee3ada66deb5 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -203,7 +203,11 @@ do { \
#endif /* __powerpc64__ */
#define arch_has_single_step() (1)
-#define arch_has_block_step() (!cpu_has_feature(CPU_FTR_601))
+#ifndef CONFIG_PPC_BOOK3S_601
+#define arch_has_block_step() (true)
+#else
+#define arch_has_block_step() (false)
+#endif
#define ARCH_HAS_USER_SINGLE_STEP_REPORT
/*
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 10caa145f98b..1aa46dff0957 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -25,9 +25,7 @@
#include <asm/reg_fsl_emb.h>
#endif
-#ifdef CONFIG_PPC_8xx
#include <asm/reg_8xx.h>
-#endif /* CONFIG_PPC_8xx */
#define MSR_SF_LG 63 /* Enable 64 bit mode */
#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
@@ -38,6 +36,7 @@
#define MSR_TM_LG 32 /* Trans Mem Available */
#define MSR_VEC_LG 25 /* Enable AltiVec */
#define MSR_VSX_LG 23 /* Enable VSX */
+#define MSR_S_LG 22 /* Secure state */
#define MSR_POW_LG 18 /* Enable Power Management */
#define MSR_WE_LG 18 /* Wait State Enable */
#define MSR_TGPR_LG 17 /* TLB Update registers in use */
@@ -71,11 +70,13 @@
#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
+#define MSR_S __MASK(MSR_S_LG) /* Secure state */
#else
/* so tests for these bits fail on 32-bit */
#define MSR_SF 0
#define MSR_ISF 0
#define MSR_HV 0
+#define MSR_S 0
#endif
/*
@@ -472,9 +473,10 @@
#define HMER_DEBUG_TRIG (1ul << (63 - 17)) /* Debug trigger */
#define SPRN_HMEER 0x151 /* Hyp maintenance exception enable reg */
#define SPRN_PCR 0x152 /* Processor compatibility register */
-#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
-#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
-#define PCR_TM_DIS (1ul << (63-2)) /* Trans. memory disable (POWER8) */
+#define PCR_VEC_DIS (__MASK(63-0)) /* Vec. disable (bit NA since POWER8) */
+#define PCR_VSX_DIS (__MASK(63-1)) /* VSX disable (bit NA since POWER8) */
+#define PCR_TM_DIS (__MASK(63-2)) /* Trans. memory disable (POWER8) */
+#define PCR_HIGH_BITS (PCR_VEC_DIS | PCR_VSX_DIS | PCR_TM_DIS)
/*
* These bits are used in the function kvmppc_set_arch_compat() to specify and
* determine both the compatibility level which we want to emulate and the
@@ -483,6 +485,8 @@
#define PCR_ARCH_207 0x8 /* Architecture 2.07 */
#define PCR_ARCH_206 0x4 /* Architecture 2.06 */
#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
+#define PCR_LOW_BITS (PCR_ARCH_207 | PCR_ARCH_206 | PCR_ARCH_205)
+#define PCR_MASK ~(PCR_HIGH_BITS | PCR_LOW_BITS) /* PCR Reserved Bits */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
#define SPRN_TLBVPNR 0x155 /* P7 TLB control register */
@@ -742,6 +746,18 @@
#define SPRN_USPRG7 0x107 /* SPRG7 userspace read */
#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
+
+#ifdef CONFIG_PPC_BOOK3S
+/*
+ * Bits loaded from MSR upon interrupt.
+ * PPC (64-bit) bits 33-36,42-47 are interrupt dependent, the others are
+ * loaded from MSR. The exception is that SRESET and MCE do not always load
+ * bit 62 (RI) from MSR. Don't use PPC_BITMASK for this because 32-bit uses
+ * it.
+ */
+#define SRR1_MSR_BITS (~0x783f0000UL)
+#endif
+
#define SRR1_ISI_NOPT 0x40000000 /* ISI: Not found in hash */
#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
@@ -1364,6 +1380,14 @@ static inline void mtmsr_isync(unsigned long val)
#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",0" : \
: : "memory")
+static inline void wrtee(unsigned long val)
+{
+ if (__builtin_constant_p(val))
+ asm volatile("wrteei %0" : : "i" ((val & MSR_EE) ? 1 : 0) : "memory");
+ else
+ asm volatile("wrtee %0" : : "r" (val) : "memory");
+}
+
extern unsigned long msr_check_and_set(unsigned long bits);
extern bool strict_msr_control;
extern void __msr_check_and_clear(unsigned long bits);
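
wrtee() picks the immediate encoding when the compiler can prove the argument constant; a sketch of the usual BookE mask/restore pairing:

/* Sketch only: disable external interrupts around a critical section. */
static void example_critical(void)
{
	unsigned long msr = mfmsr();

	wrtee(0);	/* constant: assembles to "wrteei 0" */
	/* ... critical section ... */
	wrtee(msr);	/* variable: assembles to "wrtee msr" */
}
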
@@ -1378,19 +1402,9 @@ static inline void msr_check_and_clear(unsigned long bits)
#define mftb() ({unsigned long rval; \
asm volatile( \
"90: mfspr %0, %2;\n" \
- "97: cmpwi %0,0;\n" \
- " beq- 90b;\n" \
- "99:\n" \
- ".section __ftr_fixup,\"a\"\n" \
- ".align 3\n" \
- "98:\n" \
- " .8byte %1\n" \
- " .8byte %1\n" \
- " .8byte 97b-98b\n" \
- " .8byte 99b-98b\n" \
- " .8byte 0\n" \
- " .8byte 0\n" \
- ".previous" \
+ ASM_FTR_IFSET( \
+ "97: cmpwi %0,0;\n" \
+ " beq- 90b;\n", "", %1) \
: "=r" (rval) \
: "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
rval;})
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index 7192eece6c3e..299ee7be0f67 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -5,8 +5,6 @@
#ifndef _ASM_POWERPC_REG_8xx_H
#define _ASM_POWERPC_REG_8xx_H
-#include <asm/mmu.h>
-
/* Cache control on the MPC8xx is provided through some additional
* special purpose registers.
*/
@@ -37,8 +35,24 @@
#define SPRN_CMPE 152
#define SPRN_CMPF 153
#define SPRN_LCTRL1 156
+#define LCTRL1_CTE_GT 0xc0000000
+#define LCTRL1_CTF_LT 0x14000000
+#define LCTRL1_CRWE_RW 0x00000000
+#define LCTRL1_CRWE_RO 0x00040000
+#define LCTRL1_CRWE_WO 0x000c0000
+#define LCTRL1_CRWF_RW 0x00000000
+#define LCTRL1_CRWF_RO 0x00010000
+#define LCTRL1_CRWF_WO 0x00030000
#define SPRN_LCTRL2 157
+#define LCTRL2_LW0EN 0x80000000
+#define LCTRL2_LW0LA_E 0x00000000
+#define LCTRL2_LW0LA_F 0x04000000
+#define LCTRL2_LW0LA_EandF 0x08000000
+#define LCTRL2_LW0LADC 0x02000000
+#define LCTRL2_SLW0EN 0x00000002
+#ifdef CONFIG_PPC_8xx
#define SPRN_ICTRL 158
+#endif
#define SPRN_BAR 159
/* Commands. Only the first few are available to the instruction cache.
diff --git a/arch/powerpc/include/asm/scom.h b/arch/powerpc/include/asm/scom.h
deleted file mode 100644
index 08c44396e54a..000000000000
--- a/arch/powerpc/include/asm/scom.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2010 Benjamin Herrenschmidt, IBM Corp
- * <benh@kernel.crashing.org>
- * and David Gibson, IBM Corporation.
- */
-
-#ifndef _ASM_POWERPC_SCOM_H
-#define _ASM_POWERPC_SCOM_H
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_PPC_SCOM
-
-/*
- * The SCOM bus is a sideband bus used for accessing various internal
- * registers of the processor or the chipset. The implementation details
- * differ between processors and platforms, and the access method as
- * well.
- *
- * This API allows to "map" ranges of SCOM register numbers associated
- * with a given SCOM controller. The later must be represented by a
- * device node, though some implementations might support NULL if there
- * is no possible ambiguity
- *
- * Then, scom_read/scom_write can be used to accesses registers inside
- * that range. The argument passed is a register number relative to
- * the beginning of the range mapped.
- */
-
-typedef void *scom_map_t;
-
-/* Value for an invalid SCOM map */
-#define SCOM_MAP_INVALID (NULL)
-
-/* The scom_controller data structure is what the platform passes
- * to the core code in scom_init, it provides the actual implementation
- * of all the SCOM functions
- */
-struct scom_controller {
- scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count);
- void (*unmap)(scom_map_t map);
-
- int (*read)(scom_map_t map, u64 reg, u64 *value);
- int (*write)(scom_map_t map, u64 reg, u64 value);
-};
-
-extern const struct scom_controller *scom_controller;
-
-/**
- * scom_init - Initialize the SCOM backend, called by the platform
- * @controller: The platform SCOM controller
- */
-static inline void scom_init(const struct scom_controller *controller)
-{
- scom_controller = controller;
-}
-
-/**
- * scom_map_ok - Test is a SCOM mapping is successful
- * @map: The result of scom_map to test
- */
-static inline int scom_map_ok(scom_map_t map)
-{
- return map != SCOM_MAP_INVALID;
-}
-
-/**
- * scom_map - Map a block of SCOM registers
- * @ctrl_dev: Device node of the SCOM controller
- * some implementations allow NULL here
- * @reg: first SCOM register to map
- * @count: Number of SCOM registers to map
- */
-
-static inline scom_map_t scom_map(struct device_node *ctrl_dev,
- u64 reg, u64 count)
-{
- return scom_controller->map(ctrl_dev, reg, count);
-}
-
-/**
- * scom_find_parent - Find the SCOM controller for a device
- * @dev: OF node of the device
- *
- * This is not meant for general usage, but in combination with
- * scom_map() allows to map registers not represented by the
- * device own scom-reg property. Useful for applying HW workarounds
- * on things not properly represented in the device-tree for example.
- */
-struct device_node *scom_find_parent(struct device_node *dev);
-
-
-/**
- * scom_map_device - Map a device's block of SCOM registers
- * @dev: OF node of the device
- * @index: Register bank index (index in "scom-reg" property)
- *
- * This function will use the device-tree binding for SCOM which
- * is to follow "scom-parent" properties until it finds a node with
- * a "scom-controller" property to find the controller. It will then
- * use the "scom-reg" property which is made of reg/count pairs,
- * each of them having a size defined by the controller's #scom-cells
- * property
- */
-extern scom_map_t scom_map_device(struct device_node *dev, int index);
-
-
-/**
- * scom_unmap - Unmap a block of SCOM registers
- * @map: Result of scom_map is to be unmapped
- */
-static inline void scom_unmap(scom_map_t map)
-{
- if (scom_map_ok(map))
- scom_controller->unmap(map);
-}
-
-/**
- * scom_read - Read a SCOM register
- * @map: Result of scom_map
- * @reg: Register index within that map
- * @value: Updated with the value read
- *
- * Returns 0 (success) or a negative error code
- */
-static inline int scom_read(scom_map_t map, u64 reg, u64 *value)
-{
- int rc;
-
- rc = scom_controller->read(map, reg, value);
- if (rc)
- *value = 0xfffffffffffffffful;
- return rc;
-}
-
-/**
- * scom_write - Write to a SCOM register
- * @map: Result of scom_map
- * @reg: Register index within that map
- * @value: Value to write
- *
- * Returns 0 (success) or a negative error code
- */
-static inline int scom_write(scom_map_t map, u64 reg, u64 value)
-{
- return scom_controller->write(map, reg, value);
-}
-
-
-#endif /* CONFIG_PPC_SCOM */
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_SCOM_H */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 4a1664a8658d..d19871763ed4 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -5,8 +5,22 @@
#include <linux/elf.h>
#include <linux/uaccess.h>
+
+#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
+
#include <asm-generic/sections.h>
+extern bool init_mem_is_free;
+
+static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+{
+ if (!init_mem_is_free)
+ return 0;
+
+ return addr >= (unsigned long)__init_begin &&
+ addr < (unsigned long)__init_end;
+}
+
extern char __head_end[];
#ifdef __powerpc64__
@@ -61,17 +75,6 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
(unsigned long)_stext < end;
}
-static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
-{
-#ifdef CONFIG_KVM_GUEST
- extern char kvm_tmp[];
- return start < (unsigned long)kvm_tmp &&
- (unsigned long)&kvm_tmp[1024 * 1024] < end;
-#else
- return 0;
-#endif
-}
-
#ifdef PPC64_ELF_ABI_v1
#define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1
diff --git a/arch/powerpc/include/asm/secure_boot.h b/arch/powerpc/include/asm/secure_boot.h
new file mode 100644
index 000000000000..a2ff556916c6
--- /dev/null
+++ b/arch/powerpc/include/asm/secure_boot.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Secure boot definitions
+ *
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ */
+#ifndef _ASM_POWER_SECURE_BOOT_H
+#define _ASM_POWER_SECURE_BOOT_H
+
+#ifdef CONFIG_PPC_SECURE_BOOT
+
+bool is_ppc_secureboot_enabled(void);
+bool is_ppc_trustedboot_enabled(void);
+
+#else
+
+static inline bool is_ppc_secureboot_enabled(void)
+{
+ return false;
+}
+
+static inline bool is_ppc_trustedboot_enabled(void)
+{
+ return false;
+}
+
+#endif
+#endif
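
Callers can use these predicates unconditionally, since the !CONFIG_PPC_SECURE_BOOT stubs return false; a hypothetical consumer:

/* Sketch only: gate behaviour on the firmware secure boot state. */
static void example_check_secureboot(void)
{
	if (is_ppc_secureboot_enabled())
		pr_info("secure boot enabled: enforcing signature checks\n");
}
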
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index 759597bf0fd8..7c05e95a5c44 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -9,7 +9,7 @@
#define _ASM_POWERPC_SECURITY_FEATURES_H
-extern unsigned long powerpc_security_features;
+extern u64 powerpc_security_features;
extern bool rfi_flush;
/* These are bit flags */
@@ -24,17 +24,17 @@ void setup_stf_barrier(void);
void do_stf_barrier_fixups(enum stf_barrier_type types);
void setup_count_cache_flush(void);
-static inline void security_ftr_set(unsigned long feature)
+static inline void security_ftr_set(u64 feature)
{
powerpc_security_features |= feature;
}
-static inline void security_ftr_clear(unsigned long feature)
+static inline void security_ftr_clear(u64 feature)
{
powerpc_security_features &= ~feature;
}
-static inline bool security_ftr_enabled(unsigned long feature)
+static inline bool security_ftr_enabled(u64 feature)
{
return !!(powerpc_security_features & feature);
}
@@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
// Software required to flush count cache on context switch
#define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
+// Software required to flush link stack on context switch
+#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
+
// Features enabled by default
#define SEC_FTR_DEFAULT \
diff --git a/arch/powerpc/include/asm/secvar.h b/arch/powerpc/include/asm/secvar.h
new file mode 100644
index 000000000000..4cc35b58b986
--- /dev/null
+++ b/arch/powerpc/include/asm/secvar.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ *
+ * PowerPC secure variable operations.
+ */
+#ifndef SECVAR_OPS_H
+#define SECVAR_OPS_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+extern const struct secvar_operations *secvar_ops;
+
+struct secvar_operations {
+ int (*get)(const char *key, uint64_t key_len, u8 *data,
+ uint64_t *data_size);
+ int (*get_next)(const char *key, uint64_t *key_len,
+ uint64_t keybufsize);
+ int (*set)(const char *key, uint64_t key_len, u8 *data,
+ uint64_t data_size);
+};
+
+#ifdef CONFIG_PPC_SECURE_BOOT
+
+extern void set_secvar_ops(const struct secvar_operations *ops);
+
+#else
+
+static inline void set_secvar_ops(const struct secvar_operations *ops) { }
+
+#endif
+
+#endif
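
A backend publishes itself through set_secvar_ops(); given the OPAL entry points added by this patch (whose prototypes match the ops), the wiring might look roughly like this, though the real platform code wraps the calls rather than installing them directly:

/* Sketch only: install an OPAL-backed secure variable implementation. */
static const struct secvar_operations example_secvar_ops = {
	.get = opal_secvar_get,
	.get_next = opal_secvar_get_next,
	.set = opal_secvar_enqueue_update,
};

static int __init example_secvar_init(void)
{
	set_secvar_ops(&example_secvar_ops);
	return 0;
}
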
diff --git a/arch/powerpc/include/asm/setjmp.h b/arch/powerpc/include/asm/setjmp.h
index d995061f5f86..e9f81bb3f83b 100644
--- a/arch/powerpc/include/asm/setjmp.h
+++ b/arch/powerpc/include/asm/setjmp.h
@@ -7,7 +7,7 @@
#define JMP_BUF_LEN 23
-extern long setjmp(long *);
-extern void longjmp(long *, long);
+extern long setjmp(long *) __attribute__((returns_twice));
+extern void longjmp(long *, long) __attribute__((noreturn));
#endif /* _ASM_POWERPC_SETJMP_H */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index a47f827bc5f1..860228e917dc 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -15,6 +15,7 @@
*
* (the type definitions are in asm/spinlock_types.h)
*/
+#include <linux/jump_label.h>
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -36,10 +37,12 @@
#endif
#ifdef CONFIG_PPC_PSERIES
+DECLARE_STATIC_KEY_FALSE(shared_processor);
+
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
- if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+ if (!static_branch_unlikely(&shared_processor))
return false;
return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
@@ -101,14 +104,37 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
-extern void __spin_yield(arch_spinlock_t *lock);
-extern void __rw_yield(arch_rwlock_t *lock);
+void splpar_spin_yield(arch_spinlock_t *lock);
+void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
-#define __spin_yield(x) barrier()
-#define __rw_yield(x) barrier()
-#define SHARED_PROCESSOR 0
+static inline void splpar_spin_yield(arch_spinlock_t *lock) {};
+static inline void splpar_rw_yield(arch_rwlock_t *lock) {};
+#endif
+
+static inline bool is_shared_processor(void)
+{
+#ifdef CONFIG_PPC_SPLPAR
+ return static_branch_unlikely(&shared_processor);
+#else
+ return false;
#endif
+}
+
+static inline void spin_yield(arch_spinlock_t *lock)
+{
+ if (is_shared_processor())
+ splpar_spin_yield(lock);
+ else
+ barrier();
+}
+
+static inline void rw_yield(arch_rwlock_t *lock)
+{
+ if (is_shared_processor())
+ splpar_rw_yield(lock);
+ else
+ barrier();
+}
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
@@ -117,8 +143,8 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
break;
do {
HMT_low();
- if (SHARED_PROCESSOR)
- __spin_yield(lock);
+ if (is_shared_processor())
+ splpar_spin_yield(lock);
} while (unlikely(lock->slock != 0));
HMT_medium();
}
@@ -136,8 +162,8 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
local_irq_restore(flags);
do {
HMT_low();
- if (SHARED_PROCESSOR)
- __spin_yield(lock);
+ if (is_shared_processor())
+ splpar_spin_yield(lock);
} while (unlikely(lock->slock != 0));
HMT_medium();
local_irq_restore(flags_dis);
@@ -226,8 +252,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
break;
do {
HMT_low();
- if (SHARED_PROCESSOR)
- __rw_yield(rw);
+ if (is_shared_processor())
+ splpar_rw_yield(rw);
} while (unlikely(rw->lock < 0));
HMT_medium();
}
@@ -240,8 +266,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
break;
do {
HMT_low();
- if (SHARED_PROCESSOR)
- __rw_yield(rw);
+ if (is_shared_processor())
+ splpar_rw_yield(rw);
} while (unlikely(rw->lock != 0));
HMT_medium();
}
@@ -281,9 +307,9 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
rw->lock = 0;
}
-#define arch_spin_relax(lock) __spin_yield(lock)
-#define arch_read_relax(lock) __rw_yield(lock)
-#define arch_write_relax(lock) __rw_yield(lock)
+#define arch_spin_relax(lock) spin_yield(lock)
+#define arch_read_relax(lock) rw_yield(lock)
+#define arch_write_relax(lock) rw_yield(lock)
/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
index 9bf6dffb4090..b72692702f35 100644
--- a/arch/powerpc/include/asm/string.h
+++ b/arch/powerpc/include/asm/string.h
@@ -53,7 +53,9 @@ void *__memmove(void *to, const void *from, __kernel_size_t n);
#ifndef CONFIG_KASAN
#define __HAVE_ARCH_MEMSET32
#define __HAVE_ARCH_MEMSET64
+#define __HAVE_ARCH_MEMCPY_MCSAFE
+extern int memcpy_mcsafe(void *dst, const void *src, __kernel_size_t sz);
extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t);
extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t);
diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h
new file mode 100644
index 000000000000..85580b30aba4
--- /dev/null
+++ b/arch/powerpc/include/asm/svm.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SVM helper functions
+ *
+ * Copyright 2018 Anshuman Khandual, IBM Corporation.
+ */
+
+#ifndef _ASM_POWERPC_SVM_H
+#define _ASM_POWERPC_SVM_H
+
+#ifdef CONFIG_PPC_SVM
+
+static inline bool is_secure_guest(void)
+{
+ return mfmsr() & MSR_S;
+}
+
+void dtl_cache_ctor(void *addr);
+#define get_dtl_cache_ctor() (is_secure_guest() ? dtl_cache_ctor : NULL)
+
+#else /* CONFIG_PPC_SVM */
+
+static inline bool is_secure_guest(void)
+{
+ return false;
+}
+
+#define get_dtl_cache_ctor() NULL
+
+#endif /* CONFIG_PPC_SVM */
+#endif /* _ASM_POWERPC_SVM_H */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 8e1d0195ac36..a2270749b282 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -10,13 +10,31 @@
#define _ASM_POWERPC_THREAD_INFO_H
#include <asm/asm-const.h>
+#include <asm/page.h>
#ifdef __KERNEL__
+#if defined(CONFIG_VMAP_STACK) && CONFIG_THREAD_SHIFT < PAGE_SHIFT
+#define THREAD_SHIFT PAGE_SHIFT
+#else
#define THREAD_SHIFT CONFIG_THREAD_SHIFT
+#endif
#define THREAD_SIZE (1 << THREAD_SHIFT)
+/*
+ * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
+ * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
+ * assembly.
+ */
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN_SHIFT (THREAD_SHIFT + 1)
+#else
+#define THREAD_ALIGN_SHIFT THREAD_SHIFT
+#endif
+
+#define THREAD_ALIGN (1 << THREAD_ALIGN_SHIFT)
+
#ifndef __ASSEMBLY__
#include <linux/cache.h>
#include <asm/processor.h>
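
A worked example of the overflow test the comment above describes (illustrative, taking THREAD_SHIFT = 14, i.e. 16 KiB stacks aligned to 32 KiB): every valid sp inside [base, base + 16K) has bit 14 clear, while the first byte below the stack, base - 8, has it set:

/* Sketch only: the cheap check the entry assembly performs on sp. */
static bool example_sp_overflowed(unsigned long sp)
{
	return sp & (1UL << THREAD_SHIFT);
}
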
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 54f4ec1f9fab..08dbe3e6831c 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -41,11 +41,7 @@ struct div_result {
/* Accessor functions for the timebase (RTC on 601) registers. */
/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
-#ifdef CONFIG_PPC_BOOK3S_32
-#define __USE_RTC() (cpu_has_feature(CPU_FTR_USE_RTC))
-#else
-#define __USE_RTC() 0
-#endif
+#define __USE_RTC() (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
index 926b9f91a3ef..d2d2c4bd8435 100644
--- a/arch/powerpc/include/asm/timex.h
+++ b/arch/powerpc/include/asm/timex.h
@@ -17,38 +17,10 @@ typedef unsigned long cycles_t;
static inline cycles_t get_cycles(void)
{
-#ifdef __powerpc64__
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
+ return 0;
+
return mftb();
-#else
- cycles_t ret;
-
- /*
- * For the "cycle" counter we use the timebase lower half.
- * Currently only used on SMP.
- */
-
- ret = 0;
-
- __asm__ __volatile__(
-#ifdef CONFIG_PPC_8xx
- "97: mftb %0\n"
-#else
- "97: mfspr %0, %2\n"
-#endif
- "99:\n"
- ".section __ftr_fixup,\"a\"\n"
- ".align 2\n"
- "98:\n"
- " .long %1\n"
- " .long 0\n"
- " .long 97b-98b\n"
- " .long 99b-98b\n"
- " .long 0\n"
- " .long 0\n"
- ".previous"
- : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL));
- return ret;
-#endif
}
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index b2c0be93929d..7f3a8b902325 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -26,6 +26,17 @@
#define tlb_flush tlb_flush
extern void tlb_flush(struct mmu_gather *tlb);
+/*
+ * book3s:
+ * Hash does not use the linux page-tables, so we can avoid
+ * the TLB invalidate for page-table freeing; Radix, on the other
+ * hand, does use the page-tables and needs the TLBI.
+ *
+ * nohash:
+ * We still do TLB invalidate in the __pte_free_tlb routine before we
+ * add the page table pages to mmu gather table batch.
+ */
+#define tlb_needs_table_invalidate() radix_enabled()
/* Get the generic bits... */
#include <asm-generic/tlb.h>
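
Schematically, the generic mmu_gather code keys off this predicate when tearing down page tables (simplified paraphrase of the asm-generic logic, not verbatim):

/* Sketch only: radix issues the TLBI before freeing; hash/nohash skip it. */
static inline void example_table_invalidate(struct mmu_gather *tlb)
{
	if (tlb_needs_table_invalidate())
		tlb_flush_mmu_tlbonly(tlb);
}
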
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 8b03eb44e876..2f500debae21 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -91,9 +91,14 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
- __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)
+
+#define __get_user_allowed(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
+#define __put_user_allowed(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)
#define __get_user_inatomic(x, ptr) \
__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
@@ -138,10 +143,9 @@ extern long __put_user_bad(void);
: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
-#define __put_user_size(x, ptr, size, retval) \
+#define __put_user_size_allowed(x, ptr, size, retval) \
do { \
retval = 0; \
- allow_write_to_user(ptr, size); \
switch (size) { \
case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
@@ -149,17 +153,26 @@ do { \
case 8: __put_user_asm2(x, ptr, retval); break; \
default: __put_user_bad(); \
} \
+} while (0)
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ allow_write_to_user(ptr, size); \
+ __put_user_size_allowed(x, ptr, size, retval); \
prevent_write_to_user(ptr, size); \
} while (0)
-#define __put_user_nocheck(x, ptr, size) \
+#define __put_user_nocheck(x, ptr, size, do_allow) \
({ \
long __pu_err; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
if (!is_kernel_addr((unsigned long)__pu_addr)) \
might_fault(); \
__chk_user_ptr(ptr); \
- __put_user_size((x), __pu_addr, (size), __pu_err); \
+ if (do_allow) \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ else \
+ __put_user_size_allowed((x), __pu_addr, (size), __pu_err); \
__pu_err; \
})
@@ -236,13 +249,12 @@ extern long __get_user_bad(void);
: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
-#define __get_user_size(x, ptr, size, retval) \
+#define __get_user_size_allowed(x, ptr, size, retval) \
do { \
retval = 0; \
__chk_user_ptr(ptr); \
if (size > sizeof(x)) \
(x) = __get_user_bad(); \
- allow_read_from_user(ptr, size); \
switch (size) { \
case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
@@ -250,6 +262,12 @@ do { \
case 8: __get_user_asm2(x, ptr, retval); break; \
default: (x) = __get_user_bad(); \
} \
+} while (0)
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ allow_read_from_user(ptr, size); \
+ __get_user_size_allowed(x, ptr, size, retval); \
prevent_read_from_user(ptr, size); \
} while (0)
@@ -260,7 +278,7 @@ do { \
#define __long_type(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
-#define __get_user_nocheck(x, ptr, size) \
+#define __get_user_nocheck(x, ptr, size, do_allow) \
({ \
long __gu_err; \
__long_type(*(ptr)) __gu_val; \
@@ -269,7 +287,10 @@ do { \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
might_fault(); \
barrier_nospec(); \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ if (do_allow) \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ else \
+ __get_user_size_allowed(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
@@ -313,9 +334,9 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
unsigned long ret;
barrier_nospec();
- allow_user_access(to, from, n);
+ allow_read_write_user(to, from, n);
ret = __copy_tofrom_user(to, from, n);
- prevent_user_access(to, from, n);
+ prevent_read_write_user(to, from, n);
return ret;
}
#endif /* __powerpc64__ */
@@ -356,38 +377,59 @@ static inline unsigned long raw_copy_from_user(void *to,
return ret;
}
-static inline unsigned long raw_copy_to_user(void __user *to,
- const void *from, unsigned long n)
+static inline unsigned long
+raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
- unsigned long ret;
if (__builtin_constant_p(n) && (n <= 8)) {
- ret = 1;
+ unsigned long ret = 1;
switch (n) {
case 1:
- __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
+ __put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
break;
case 2:
- __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
+ __put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
break;
case 4:
- __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
+ __put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
break;
case 8:
- __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
+ __put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
break;
}
if (ret == 0)
return 0;
}
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned long ret;
+
allow_write_to_user(to, n);
- ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
+ ret = raw_copy_to_user_allowed(to, from, n);
prevent_write_to_user(to, n);
return ret;
}
-extern unsigned long __clear_user(void __user *addr, unsigned long size);
+static __always_inline unsigned long __must_check
+copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
+{
+ if (likely(check_copy_size(from, n, true))) {
+ if (access_ok(to, n)) {
+ allow_write_to_user(to, n);
+ n = memcpy_mcsafe((void *)to, from, n);
+ prevent_write_to_user(to, n);
+ }
+ }
+
+ return n;
+}
+
+unsigned long __arch_clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
@@ -395,12 +437,17 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
might_fault();
if (likely(access_ok(addr, size))) {
allow_write_to_user(addr, size);
- ret = __clear_user(addr, size);
+ ret = __arch_clear_user(addr, size);
prevent_write_to_user(addr, size);
}
return ret;
}
+static inline unsigned long __clear_user(void __user *addr, unsigned long size)
+{
+ return clear_user(addr, size);
+}
+
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
@@ -409,4 +456,22 @@ extern long __copy_from_user_flushcache(void *dst, const void __user *src,
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
size_t len);
+static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+ if (unlikely(!access_ok(ptr, len)))
+ return false;
+ allow_read_write_user((void __user *)ptr, ptr, len);
+ return true;
+}
+#define user_access_begin user_access_begin
+#define user_access_end prevent_current_access_user
+#define user_access_save prevent_user_access_return
+#define user_access_restore restore_user_access
+
+#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
+#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
+#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
+#define unsafe_copy_to_user(d, s, l, e) \
+ unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
+
#endif /* _ARCH_POWERPC_UACCESS_H */
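
The unsafe_* accessors assume the caller already opened the user-access window, which is what makes batching them cheap under KUAP; the canonical pattern (illustrative) is:

/* Sketch only: two user writes inside a single open/close window. */
static int example_put_pair(u32 __user *p, u32 a, u32 b)
{
	if (!user_access_begin(p, 2 * sizeof(u32)))
		return -EFAULT;
	unsafe_put_user(a, p, efault);
	unsafe_put_user(b, p + 1, efault);
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;
}
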
diff --git a/arch/powerpc/include/asm/ultravisor-api.h b/arch/powerpc/include/asm/ultravisor-api.h
new file mode 100644
index 000000000000..b66f6db7be6c
--- /dev/null
+++ b/arch/powerpc/include/asm/ultravisor-api.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Ultravisor API.
+ *
+ * Copyright 2019, IBM Corporation.
+ *
+ */
+#ifndef _ASM_POWERPC_ULTRAVISOR_API_H
+#define _ASM_POWERPC_ULTRAVISOR_API_H
+
+#include <asm/hvcall.h>
+
+/* Return codes */
+#define U_BUSY H_BUSY
+#define U_FUNCTION H_FUNCTION
+#define U_NOT_AVAILABLE H_NOT_AVAILABLE
+#define U_P2 H_P2
+#define U_P3 H_P3
+#define U_P4 H_P4
+#define U_P5 H_P5
+#define U_PARAMETER H_PARAMETER
+#define U_PERMISSION H_PERMISSION
+#define U_SUCCESS H_SUCCESS
+
+/* opcodes */
+#define UV_WRITE_PATE 0xF104
+#define UV_RETURN 0xF11C
+#define UV_ESM 0xF110
+#define UV_REGISTER_MEM_SLOT 0xF120
+#define UV_UNREGISTER_MEM_SLOT 0xF124
+#define UV_PAGE_IN 0xF128
+#define UV_PAGE_OUT 0xF12C
+#define UV_SHARE_PAGE 0xF130
+#define UV_UNSHARE_PAGE 0xF134
+#define UV_UNSHARE_ALL_PAGES 0xF140
+#define UV_PAGE_INVAL 0xF138
+#define UV_SVM_TERMINATE 0xF13C
+
+#endif /* _ASM_POWERPC_ULTRAVISOR_API_H */
diff --git a/arch/powerpc/include/asm/ultravisor.h b/arch/powerpc/include/asm/ultravisor.h
new file mode 100644
index 000000000000..790b0e63681f
--- /dev/null
+++ b/arch/powerpc/include/asm/ultravisor.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Ultravisor definitions
+ *
+ * Copyright 2019, IBM Corporation.
+ *
+ */
+#ifndef _ASM_POWERPC_ULTRAVISOR_H
+#define _ASM_POWERPC_ULTRAVISOR_H
+
+#include <asm/asm-prototypes.h>
+#include <asm/ultravisor-api.h>
+#include <asm/firmware.h>
+
+int early_init_dt_scan_ultravisor(unsigned long node, const char *uname,
+ int depth, void *data);
+
+/*
+ * In ultravisor-enabled systems, PTCR becomes ultravisor-privileged only for
+ * writing, and an attempt to write to it will cause a Hypervisor Emulation
+ * Assistance interrupt.
+ */
+static inline void set_ptcr_when_no_uv(u64 val)
+{
+ if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR))
+ mtspr(SPRN_PTCR, val);
+}
+
+static inline int uv_register_pate(u64 lpid, u64 dw0, u64 dw1)
+{
+ return ucall_norets(UV_WRITE_PATE, lpid, dw0, dw1);
+}
+
+static inline int uv_share_page(u64 pfn, u64 npages)
+{
+ return ucall_norets(UV_SHARE_PAGE, pfn, npages);
+}
+
+static inline int uv_unshare_page(u64 pfn, u64 npages)
+{
+ return ucall_norets(UV_UNSHARE_PAGE, pfn, npages);
+}
+
+static inline int uv_unshare_all_pages(void)
+{
+ return ucall_norets(UV_UNSHARE_ALL_PAGES);
+}
+
+static inline int uv_page_in(u64 lpid, u64 src_ra, u64 dst_gpa, u64 flags,
+ u64 page_shift)
+{
+ return ucall_norets(UV_PAGE_IN, lpid, src_ra, dst_gpa, flags,
+ page_shift);
+}
+
+static inline int uv_page_out(u64 lpid, u64 dst_ra, u64 src_gpa, u64 flags,
+ u64 page_shift)
+{
+ return ucall_norets(UV_PAGE_OUT, lpid, dst_ra, src_gpa, flags,
+ page_shift);
+}
+
+static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size,
+ u64 flags, u64 slotid)
+{
+ return ucall_norets(UV_REGISTER_MEM_SLOT, lpid, start_gpa,
+ size, flags, slotid);
+}
+
+static inline int uv_unregister_mem_slot(u64 lpid, u64 slotid)
+{
+ return ucall_norets(UV_UNREGISTER_MEM_SLOT, lpid, slotid);
+}
+
+static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)
+{
+ return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);
+}
+
+static inline int uv_svm_terminate(u64 lpid)
+{
+ return ucall_norets(UV_SVM_TERMINATE, lpid);
+}
+
+#endif /* _ASM_POWERPC_ULTRAVISOR_H */
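
All of these wrappers funnel into ucall_norets() with a UV_* opcode and return U_* codes; a minimal sketch of sharing a page with the hypervisor and undoing it:

/* Sketch only: share one page (by PFN), use it, then unshare. */
static int example_share_page(u64 pfn)
{
	if (uv_share_page(pfn, 1) != U_SUCCESS)
		return -EIO;
	/* ... hypervisor may now access the page ... */
	return uv_unshare_page(pfn, 1) == U_SUCCESS ? 0 : -EIO;
}
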
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index c61d59ed3b45..b9ef6cf50ea5 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -81,7 +81,9 @@ struct vdso_data {
__u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
__s32 wtom_clock_nsec; /* Wall to monotonic clock nsec */
__s64 wtom_clock_sec; /* Wall to monotonic clock sec */
- struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
+ __s64 stamp_xtime_sec; /* xtime secs as at tb_orig_stamp */
+ __s64 stamp_xtime_nsec; /* xtime nsecs as at tb_orig_stamp */
+ __u32 hrtimer_res; /* hrtimer resolution */
__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
};
@@ -101,19 +103,27 @@ struct vdso_data {
__u32 tz_dsttime; /* Type of dst correction 0x5C */
__s32 wtom_clock_sec; /* Wall to monotonic clock */
__s32 wtom_clock_nsec;
- struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
+ __s32 stamp_xtime_sec; /* xtime seconds as at tb_orig_stamp */
+ __s32 stamp_xtime_nsec; /* xtime nsecs as at tb_orig_stamp */
__u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
+ __u32 hrtimer_res; /* hrtimer resolution */
__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
- __u32 dcache_block_size; /* L1 d-cache block size */
- __u32 icache_block_size; /* L1 i-cache block size */
- __u32 dcache_log_block_size; /* L1 d-cache log block size */
- __u32 icache_log_block_size; /* L1 i-cache log block size */
};
#endif /* CONFIG_PPC64 */
extern struct vdso_data *vdso_data;
+#else /* __ASSEMBLY__ */
+
+.macro get_datapage ptr, tmp
+ bcl 20, 31, .+4
+ mflr \ptr
+ addi \ptr, \ptr, (__kernel_datapage_offset - (.-4))@l
+ lwz \tmp, 0(\ptr)
+ add \ptr, \tmp, \ptr
+.endm
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
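
In the VDSO assembly the macro replaces the old open-coded datapage lookup; an illustrative use (the offset symbol is a placeholder for the real asm-offsets constant, and the register choice is arbitrary):

	/* Sketch only: locate the datapage, then read a field from it. */
	get_datapage	r3, r0
	lwz		r3, EXAMPLE_HRTIMER_RES_OFFSET(r3)
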
diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h
new file mode 100644
index 000000000000..b992dfaaa161
--- /dev/null
+++ b/arch/powerpc/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_POWERPC_VMALLOC_H
+#define _ASM_POWERPC_VMALLOC_H
+
+#endif /* _ASM_POWERPC_VMALLOC_H */
diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h
index f2dfcd50a2d3..33aee7490cbb 100644
--- a/arch/powerpc/include/asm/xive-regs.h
+++ b/arch/powerpc/include/asm/xive-regs.h
@@ -39,6 +39,7 @@
#define XIVE_ESB_VAL_P 0x2
#define XIVE_ESB_VAL_Q 0x1
+#define XIVE_ESB_INVALID 0xFF
/*
* Thread Management (aka "TM") registers
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index e4016985764e..93f982dbb3d4 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -46,7 +46,15 @@ struct xive_irq_data {
/* Setup/used by frontend */
int target;
+ /*
+ * saved_p means that there is a queue entry for this interrupt
+ * in some CPU's queue (not including guest vcpu queues), even
+ * if P is not set in the source ESB.
+ * stale_p means that there is no queue entry for this interrupt
+ * in some CPU's queue, even if P is set in the source ESB.
+ */
bool saved_p;
+ bool stale_p;
};
#define XIVE_IRQ_FLAG_STORE_EOI 0x01
#define XIVE_IRQ_FLAG_LSI 0x02
@@ -79,54 +87,56 @@ extern bool __xive_enabled;
static inline bool xive_enabled(void) { return __xive_enabled; }
-extern bool xive_spapr_init(void);
-extern bool xive_native_init(void);
-extern void xive_smp_probe(void);
-extern int xive_smp_prepare_cpu(unsigned int cpu);
-extern void xive_smp_setup_cpu(void);
-extern void xive_smp_disable_cpu(void);
-extern void xive_teardown_cpu(void);
-extern void xive_shutdown(void);
-extern void xive_flush_interrupt(void);
+bool xive_spapr_init(void);
+bool xive_native_init(void);
+void xive_smp_probe(void);
+int xive_smp_prepare_cpu(unsigned int cpu);
+void xive_smp_setup_cpu(void);
+void xive_smp_disable_cpu(void);
+void xive_teardown_cpu(void);
+void xive_shutdown(void);
+void xive_flush_interrupt(void);
/* xmon hook */
-extern void xmon_xive_do_dump(int cpu);
+void xmon_xive_do_dump(int cpu);
+int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d);
/* APIs used by KVM */
-extern u32 xive_native_default_eq_shift(void);
-extern u32 xive_native_alloc_vp_block(u32 max_vcpus);
-extern void xive_native_free_vp_block(u32 vp_base);
-extern int xive_native_populate_irq_data(u32 hw_irq,
- struct xive_irq_data *data);
-extern void xive_cleanup_irq_data(struct xive_irq_data *xd);
-extern u32 xive_native_alloc_irq(void);
-extern void xive_native_free_irq(u32 irq);
-extern int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
-
-extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
- __be32 *qpage, u32 order, bool can_escalate);
-extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
-
-extern void xive_native_sync_source(u32 hw_irq);
-extern void xive_native_sync_queue(u32 hw_irq);
-extern bool is_xive_irq(struct irq_chip *chip);
-extern int xive_native_enable_vp(u32 vp_id, bool single_escalation);
-extern int xive_native_disable_vp(u32 vp_id);
-extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
-extern bool xive_native_has_single_escalation(void);
-
-extern int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
- u64 *out_qpage,
- u64 *out_qsize,
- u64 *out_qeoi_page,
- u32 *out_escalate_irq,
- u64 *out_qflags);
-
-extern int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
- u32 *qindex);
-extern int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
- u32 qindex);
-extern int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
+u32 xive_native_default_eq_shift(void);
+u32 xive_native_alloc_vp_block(u32 max_vcpus);
+void xive_native_free_vp_block(u32 vp_base);
+int xive_native_populate_irq_data(u32 hw_irq,
+ struct xive_irq_data *data);
+void xive_cleanup_irq_data(struct xive_irq_data *xd);
+u32 xive_native_alloc_irq(void);
+void xive_native_free_irq(u32 irq);
+int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
+
+int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
+ __be32 *qpage, u32 order, bool can_escalate);
+void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
+
+void xive_native_sync_source(u32 hw_irq);
+void xive_native_sync_queue(u32 hw_irq);
+bool is_xive_irq(struct irq_chip *chip);
+int xive_native_enable_vp(u32 vp_id, bool single_escalation);
+int xive_native_disable_vp(u32 vp_id);
+int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
+bool xive_native_has_single_escalation(void);
+
+int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
+ u64 *out_qpage,
+ u64 *out_qsize,
+ u64 *out_qeoi_page,
+ u32 *out_escalate_irq,
+ u64 *out_qflags);
+
+int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
+ u32 *qindex);
+int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
+ u32 qindex);
+int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
+bool xive_native_has_queue_state_support(void);
#else
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index b0f72dea8b11..264e266a85bf 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -667,6 +667,8 @@ struct kvm_ppc_cpu_char {
/* PPC64 eXternal Interrupt Controller Specification */
#define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */
+#define KVM_DEV_XICS_GRP_CTRL 2
+#define KVM_DEV_XICS_NR_SERVERS 1
/* Layout of 64-bit source attribute values */
#define KVM_XICS_DESTINATION_SHIFT 0
@@ -683,6 +685,7 @@ struct kvm_ppc_cpu_char {
#define KVM_DEV_XIVE_GRP_CTRL 1
#define KVM_DEV_XIVE_RESET 1
#define KVM_DEV_XIVE_EQ_SYNC 2
+#define KVM_DEV_XIVE_NR_SERVERS 3
#define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */
#define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */
#define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */
diff --git a/arch/powerpc/include/uapi/asm/msgbuf.h b/arch/powerpc/include/uapi/asm/msgbuf.h
index 2b1b37797a47..7919b2ba41b5 100644
--- a/arch/powerpc/include/uapi/asm/msgbuf.h
+++ b/arch/powerpc/include/uapi/asm/msgbuf.h
@@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_MSGBUF_H
#define _ASM_POWERPC_MSGBUF_H
+#include <asm/ipcbuf.h>
+
/*
* The msqid64_ds structure for the PowerPC architecture.
* Note extra padding because this structure is passed back and forth
@@ -11,9 +13,9 @@
struct msqid64_ds {
struct ipc64_perm msg_perm;
#ifdef __powerpc64__
- __kernel_time_t msg_stime; /* last msgsnd time */
- __kernel_time_t msg_rtime; /* last msgrcv time */
- __kernel_time_t msg_ctime; /* last change time */
+ long msg_stime; /* last msgsnd time */
+ long msg_rtime; /* last msgrcv time */
+ long msg_ctime; /* last change time */
#else
unsigned long msg_stime_high;
unsigned long msg_stime; /* last msgsnd time */
diff --git a/arch/powerpc/include/uapi/asm/sembuf.h b/arch/powerpc/include/uapi/asm/sembuf.h
index 3f60946f77e3..85e96ccb5f0f 100644
--- a/arch/powerpc/include/uapi/asm/sembuf.h
+++ b/arch/powerpc/include/uapi/asm/sembuf.h
@@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_SEMBUF_H
#define _ASM_POWERPC_SEMBUF_H
+#include <asm/ipcbuf.h>
+
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -26,8 +28,8 @@ struct semid64_ds {
unsigned long sem_ctime_high;
unsigned long sem_ctime; /* last change time */
#else
- __kernel_time_t sem_otime; /* last semop time */
- __kernel_time_t sem_ctime; /* last change time */
+ long sem_otime; /* last semop time */
+ long sem_ctime; /* last change time */
#endif
unsigned long sem_nsems; /* no. of semaphores in array */
unsigned long __unused3;
diff --git a/arch/powerpc/include/uapi/asm/shmbuf.h b/arch/powerpc/include/uapi/asm/shmbuf.h
index b591c4d7e4c5..00422b2f3c63 100644
--- a/arch/powerpc/include/uapi/asm/shmbuf.h
+++ b/arch/powerpc/include/uapi/asm/shmbuf.h
@@ -22,9 +22,9 @@
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
#ifdef __powerpc64__
- __kernel_time_t shm_atime; /* last attach time */
- __kernel_time_t shm_dtime; /* last detach time */
- __kernel_time_t shm_ctime; /* last change time */
+ long shm_atime; /* last attach time */
+ long shm_dtime; /* last detach time */
+ long shm_ctime; /* last change time */
#else
unsigned long shm_atime_high;
unsigned long shm_atime; /* last attach time */
diff --git a/arch/powerpc/include/uapi/asm/spu_info.h b/arch/powerpc/include/uapi/asm/spu_info.h
index cabfcbba9eac..45f97150587b 100644
--- a/arch/powerpc/include/uapi/asm/spu_info.h
+++ b/arch/powerpc/include/uapi/asm/spu_info.h
@@ -5,20 +5,6 @@
* (C) Copyright 2006 IBM Corp.
*
* Author: Dwayne Grant McConnell <decimal@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _UAPI_SPU_INFO_H
diff --git a/arch/powerpc/include/uapi/asm/stat.h b/arch/powerpc/include/uapi/asm/stat.h
index afd25f2ff4e8..7871055e5e32 100644
--- a/arch/powerpc/include/uapi/asm/stat.h
+++ b/arch/powerpc/include/uapi/asm/stat.h
@@ -40,7 +40,7 @@ struct stat {
uid_t st_uid;
gid_t st_gid;
unsigned long st_rdev;
- off_t st_size;
+ long st_size;
unsigned long st_blksize;
unsigned long st_blocks;
unsigned long st_atime;
OpenPOWER on IntegriCloud