Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/Kbuild  5
-rw-r--r--  arch/arm/include/asm/assembler.h  8
-rw-r--r--  arch/arm/include/asm/atomic.h  44
-rw-r--r--  arch/arm/include/asm/cacheflush.h  1
-rw-r--r--  arch/arm/include/asm/cmpxchg.h  6
-rw-r--r--  arch/arm/include/asm/cputype.h  1
-rw-r--r--  arch/arm/include/asm/dma-iommu.h  12
-rw-r--r--  arch/arm/include/asm/floppy.h  2
-rw-r--r--  arch/arm/include/asm/futex.h  9
-rw-r--r--  arch/arm/include/asm/hw_breakpoint.h  1
-rw-r--r--  arch/arm/include/asm/hwcap.h  3
-rw-r--r--  arch/arm/include/asm/jump_label.h  1
-rw-r--r--  arch/arm/include/asm/kprobes.h  17
-rw-r--r--  arch/arm/include/asm/kvm_arm.h  4
-rw-r--r--  arch/arm/include/asm/kvm_asm.h  4
-rw-r--r--  arch/arm/include/asm/kvm_host.h  9
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h  30
-rw-r--r--  arch/arm/include/asm/memory.h  50
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h  1
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h  15
-rw-r--r--  arch/arm/include/asm/pgtable.h  7
-rw-r--r--  arch/arm/include/asm/pmu.h  2
-rw-r--r--  arch/arm/include/asm/probes.h  43
-rw-r--r--  arch/arm/include/asm/ptrace.h  14
-rw-r--r--  arch/arm/include/asm/smp.h  1
-rw-r--r--  arch/arm/include/asm/spinlock.h  15
-rw-r--r--  arch/arm/include/asm/sync_bitops.h  1
-rw-r--r--  arch/arm/include/asm/system.h  7
-rw-r--r--  arch/arm/include/asm/thread_info.h  5
-rw-r--r--  arch/arm/include/asm/topology.h  3
-rw-r--r--  arch/arm/include/asm/uaccess.h  2
-rw-r--r--  arch/arm/include/asm/unistd.h  1
-rw-r--r--  arch/arm/include/asm/uprobes.h  45
-rw-r--r--  arch/arm/include/asm/xen/page.h  15
-rw-r--r--  arch/arm/include/debug/tegra.S  18
-rw-r--r--  arch/arm/include/uapi/asm/hwcap.h  9
36 files changed, 286 insertions(+), 125 deletions(-)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 3278afe2c3ab..23e728ecf8ab 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -7,16 +7,19 @@ generic-y += current.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += hash.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
+generic-y += mcs_spinlock.h
generic-y += msgbuf.h
generic-y += param.h
generic-y += parport.h
generic-y += poll.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += sections.h
generic-y += segment.h
@@ -33,5 +36,3 @@ generic-y += termios.h
generic-y += timex.h
generic-y += trace_clock.h
generic-y += unaligned.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5c2285160575..380ac4f20000 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -30,8 +30,8 @@
* Endian independent macros for shifting bytes within registers.
*/
#ifndef __ARMEB__
-#define pull lsr
-#define push lsl
+#define lspull lsr
+#define lspush lsl
#define get_byte_0 lsl #0
#define get_byte_1 lsr #8
#define get_byte_2 lsr #16
@@ -41,8 +41,8 @@
#define put_byte_2 lsl #16
#define put_byte_3 lsl #24
#else
-#define pull lsl
-#define push lsr
+#define lspull lsl
+#define lspush lsr
#define get_byte_0 lsr #24
#define get_byte_1 lsr #16
#define get_byte_2 lsr #8
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 62d2cb53b069..9a92fd7864a8 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -60,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%3]\n"
@@ -99,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%3]\n"
@@ -121,6 +123,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
unsigned long res;
smp_mb();
+ prefetchw(&ptr->counter);
do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -138,6 +141,33 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
return oldval;
}
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int oldval, newval;
+ unsigned long tmp;
+
+ smp_mb();
+ prefetchw(&v->counter);
+
+ __asm__ __volatile__ ("@ atomic_add_unless\n"
+"1: ldrex %0, [%4]\n"
+" teq %0, %5\n"
+" beq 2f\n"
+" add %1, %0, %6\n"
+" strex %2, %1, [%4]\n"
+" teq %2, #0\n"
+" bne 1b\n"
+"2:"
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+
+ if (oldval != u)
+ smp_mb();
+
+ return oldval;
+}
+
#else /* ARM_ARCH_6 */
#ifdef CONFIG_SMP
@@ -186,10 +216,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-#endif /* __LINUX_ARM_ARCH__ */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
@@ -200,6 +226,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
#define atomic_inc(v) atomic_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
@@ -299,6 +329,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
unsigned long tmp;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add_return\n"
"1: ldrexd %0, %H0, [%3]\n"
@@ -340,6 +371,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
unsigned long tmp;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%3]\n"
@@ -364,6 +396,7 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
unsigned long res;
smp_mb();
+ prefetchw(&ptr->counter);
do {
__asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -388,6 +421,7 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
unsigned long tmp;
smp_mb();
+ prefetchw(&ptr->counter);
__asm__ __volatile__("@ atomic64_xchg\n"
"1: ldrexd %0, %H0, [%3]\n"
@@ -409,6 +443,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
@@ -436,6 +471,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
int ret = 1;
smp_mb();
+ prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_add_unless\n"
"1: ldrexd %0, %H0, [%4]\n"
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e9a49fe0284e..8b8b61685a34 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
static inline void __flush_icache_all(void)
{
__flush_icache_preferred();
+ dsb();
}
/*
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index df2fbba7efc8..abb2c3769b01 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -2,6 +2,7 @@
#define __ASM_ARM_CMPXCHG_H
#include <linux/irqflags.h>
+#include <linux/prefetch.h>
#include <asm/barrier.h>
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
@@ -35,6 +36,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#endif
smp_mb();
+ prefetchw((const void *)ptr);
switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
@@ -138,6 +140,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
{
unsigned long oldval, res;
+ prefetchw((const void *)ptr);
+
switch (size) {
#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
case 1:
@@ -230,6 +234,8 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
unsigned long long oldval;
unsigned long res;
+ prefetchw(ptr);
+
__asm__ __volatile__(
"1: ldrexd %1, %H1, [%3]\n"
" teq %1, %4\n"
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index acdde76b39bb..42f0889f0584 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -71,6 +71,7 @@
#define ARM_CPU_PART_CORTEX_A5 0xC050
#define ARM_CPU_PART_CORTEX_A15 0xC0F0
#define ARM_CPU_PART_CORTEX_A7 0xC070
+#define ARM_CPU_PART_CORTEX_A12 0xC0D0
#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
#define ARM_CPU_XSCALE_ARCH_V1 0x2000
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index a8c56acc8c98..eec0a12c5c1d 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -13,9 +13,12 @@ struct dma_iommu_mapping {
/* iommu specific data */
struct iommu_domain *domain;
- void *bitmap;
- size_t bits;
- unsigned int order;
+ unsigned long **bitmaps; /* array of bitmaps */
+ unsigned int nr_bitmaps; /* nr of elements in array */
+ unsigned int extensions;
+ size_t bitmap_size; /* size of a single bitmap */
+ size_t bits; /* per bitmap */
+ unsigned int size; /* per bitmap */
dma_addr_t base;
spinlock_t lock;
@@ -23,8 +26,7 @@ struct dma_iommu_mapping {
};
struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
- int order);
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
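With the mapping now growing its allocation bitmap on demand (bitmaps[] gains up to "extensions" entries of bitmap_size bytes each), callers no longer pre-size the allocator, so the order argument drops out of arm_iommu_create_mapping(). A hedged driver-side sketch of the simplified call, with hypothetical base and size values:

    struct dma_iommu_mapping *mapping;

    mapping = arm_iommu_create_mapping(&platform_bus_type, 0x80000000, SZ_128M);
    if (IS_ERR(mapping))
            return PTR_ERR(mapping);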
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
index c9f03eccc9d8..f4882553fbb0 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -25,7 +25,7 @@
#define fd_inb(port) inb((port))
#define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
- IRQF_DISABLED,"floppy",NULL)
+ 0,"floppy",NULL)
#define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL)
#define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK)
#define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK)
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf597f6e6..53e69dae796f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -3,11 +3,6 @@
#ifdef __KERNEL__
-#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
-/* ARM doesn't provide unprivileged exclusive memory accessors */
-#include <asm-generic/futex.h>
-#else
-
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
@@ -28,6 +23,7 @@
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
smp_mb(); \
+ prefetchw(uaddr); \
__asm__ __volatile__( \
"1: ldrex %1, [%3]\n" \
" " insn "\n" \
@@ -51,6 +47,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
smp_mb();
+ /* Prefetching cannot fault */
+ prefetchw(uaddr);
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrex %1, [%4]\n"
" teq %1, %2\n"
@@ -164,6 +162,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
return ret;
}
-#endif /* !(CPU_USE_DOMAINS && SMP) */
#endif /* __KERNEL__ */
#endif /* _ASM_ARM_FUTEX_H */
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index eef55ea9ef00..8e427c7b4425 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -51,6 +51,7 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DEBUG_ARCH_V7_ECP14 3
#define ARM_DEBUG_ARCH_V7_MM 4
#define ARM_DEBUG_ARCH_V7_1 5
+#define ARM_DEBUG_ARCH_V8 6
/* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index 6ff56eca3f1f..6e183fd269fb 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -9,6 +9,7 @@
* instruction set this cpu supports.
*/
#define ELF_HWCAP (elf_hwcap)
-extern unsigned int elf_hwcap;
+#define ELF_HWCAP2 (elf_hwcap2)
+extern unsigned int elf_hwcap, elf_hwcap2;
#endif
#endif
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 863c892b4aaa..70f9b9bfb1f9 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -4,7 +4,6 @@
#ifdef __KERNEL__
#include <linux/types.h>
-#include <asm/system.h>
#define JUMP_LABEL_NOP_SIZE 4
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index f82ec22eeb11..49fa0dfaad33 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -18,7 +18,7 @@
#include <linux/types.h>
#include <linux/ptrace.h>
-#include <linux/percpu.h>
+#include <linux/notifier.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 2
@@ -28,21 +28,10 @@
#define kretprobe_blacklist_size 0
typedef u32 kprobe_opcode_t;
-
struct kprobe;
-typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *);
-typedef unsigned long (kprobe_check_cc)(unsigned long);
-typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *);
-typedef void (kprobe_insn_fn_t)(void);
+#include <asm/probes.h>
-/* Architecture specific copy of original instruction. */
-struct arch_specific_insn {
- kprobe_opcode_t *insn;
- kprobe_insn_handler_t *insn_handler;
- kprobe_check_cc *insn_check_cc;
- kprobe_insn_singlestep_t *insn_singlestep;
- kprobe_insn_fn_t *insn_fn;
-};
+#define arch_specific_insn arch_probes_insn
struct prev_kprobe {
struct kprobe *kp;
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 1d3153c7eb41..816db0bf2dd8 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -55,6 +55,7 @@
* The bits we set in HCR:
* TAC: Trap ACTLR
* TSC: Trap SMC
+ * TVM: Trap VM ops (until MMU and caches are on)
* TSW: Trap cache operations by set/way
* TWI: Trap WFI
* TWE: Trap WFE
@@ -68,8 +69,7 @@
*/
#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
- HCR_TWE | HCR_SWIO | HCR_TIDCP)
-#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
+ HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
/* System Control Register (SCTLR) bits */
#define SCTLR_TE (1 << 30)
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 661da11f76f4..53b3c4a50d5c 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -48,7 +48,9 @@
#define c13_TID_URO 26 /* Thread ID, User R/O */
#define c13_TID_PRIV 27 /* Thread ID, Privileged */
#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */
-#define NR_CP15_REGS 29 /* Number of regs (incl. invalid) */
+#define c10_AMAIR0 29 /* Auxiliary Memory Attribute Indirection Reg0 */
+#define c10_AMAIR1 30 /* Auxiliary Memory Attribute Indirection Reg1 */
+#define NR_CP15_REGS 31 /* Number of regs (incl. invalid) */
#define ARM_EXCEPTION_RESET 0
#define ARM_EXCEPTION_UNDEFINED 1
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 098f7dd6d564..09af14999c9b 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -101,6 +101,12 @@ struct kvm_vcpu_arch {
/* The CPU type we expose to the VM */
u32 midr;
+ /* HYP trapping configuration */
+ u32 hcr;
+
+ /* Interrupt related fields */
+ u32 irq_lines; /* IRQ and FIQ levels */
+
/* Exception Information */
struct kvm_vcpu_fault_info fault;
@@ -128,9 +134,6 @@ struct kvm_vcpu_arch {
/* IO related fields */
struct kvm_decode mmio_decode;
- /* Interrupt related fields */
- u32 irq_lines; /* IRQ and FIQ levels */
-
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 2d122adcdb22..5c7aa3c1519f 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -114,11 +114,34 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
pmd_val(*pmd) |= L_PMD_S2_RDWR;
}
+/* Open coded p*d_addr_end that can deal with 64bit addresses */
+#define kvm_pgd_addr_end(addr, end) \
+({ u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+
+#define kvm_pud_addr_end(addr,end) (end)
+
+#define kvm_pmd_addr_end(addr, end) \
+({ u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+
struct kvm;
-static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
- unsigned long size)
+#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
+
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
+ return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
+}
+
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+ unsigned long size)
+{
+ if (!vcpu_has_cache_enabled(vcpu))
+ kvm_flush_dcache_to_poc((void *)hva, size);
+
/*
* If we are going to insert an instruction page and the icache is
* either VIPT or PIPT, there is a potential problem where the host
@@ -139,9 +162,10 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
}
}
-#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
+void stage2_flush_vm(struct kvm *kvm);
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
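The open-coded kvm_p*d_addr_end() macros exist because the generic p*d_addr_end() helpers operate on unsigned long, which is only 32 bits here, while LPAE stage-2 guest physical addresses can be up to 40 bits wide; the "- 1" on both sides of the comparison also keeps the min() semantics intact if the boundary wraps to zero at the top of the range. A minimal illustration of the truncation the u64 variants avoid, with a hypothetical address:

    u64 ipa = 0x180000000ULL;           /* 6 GiB: valid in a 40-bit IPA space */
    unsigned long truncated = ipa;      /* 0x80000000 on 32-bit ARM: wrong entry */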
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 8756e4bcdba0..02fa2558f662 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -30,14 +30,15 @@
*/
#define UL(x) _AC(x, UL)
+/* PAGE_OFFSET - the virtual address of the start of the kernel image */
+#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+
#ifdef CONFIG_MMU
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
-#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
@@ -104,10 +105,6 @@
#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET PLAT_PHYS_OFFSET
-#endif
-
/*
* The module can be at any place in ram in nommu mode.
*/
@@ -169,9 +166,17 @@
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ *
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
*/
-#ifndef __virt_to_phys
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#if defined(__virt_to_phys)
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+
+#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
/*
* Constants used to force the right instruction encodings and shifts
@@ -180,12 +185,17 @@
#define __PV_BITS_31_24 0x81000000
#define __PV_BITS_7_0 0x81
-extern u64 __pv_phys_offset;
+extern unsigned long __pv_phys_pfn_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;
-#define PHYS_OFFSET __pv_phys_offset
+#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
+#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
+
+#define virt_to_pfn(kaddr) \
+ ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+ PHYS_PFN_OFFSET)
#define __pv_stub(from,to,instr,type) \
__asm__("@ __pv_stub\n" \
@@ -246,6 +256,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#else
#define PHYS_OFFSET PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
static inline phys_addr_t __virt_to_phys(unsigned long x)
{
@@ -257,18 +268,11 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
return x - PHYS_OFFSET + PAGE_OFFSET;
}
-#endif
-#endif
+#define virt_to_pfn(kaddr) \
+ ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+ PHYS_PFN_OFFSET)
-/*
- * PFNs are used to describe any physical page; this means
- * PFN 0 == physical address 0.
- *
- * This is the PFN of the first RAM page in the kernel
- * direct-mapped view. We assume this is the first page
- * of RAM in the mem_map as well.
- */
-#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+#endif
/*
* These are *only* valid on the kernel direct mapped RAM memory.
@@ -346,9 +350,9 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
*/
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
- && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
+ && pfn_valid(virt_to_pfn(kaddr)))
#endif
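Since the runtime-patched offset is now tracked as a PFN (__pv_phys_pfn_offset) rather than a byte offset, virt_to_pfn() stays in 32-bit page-frame arithmetic, which lets PHYS_OFFSET sit above 4 GiB on LPAE systems. A worked example of virt_to_pfn() under a hypothetical layout (PAGE_OFFSET 0xC0000000, RAM at physical 0x80000000, 4 KiB pages):

    void *kaddr = (void *)0xC0001000;
    /* (0xC0001000 - 0xC0000000) >> 12 = 1; 1 + PHYS_PFN_OFFSET (0x80000) */
    unsigned long pfn = virt_to_pfn(kaddr);     /* = 0x80001 */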
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index dfff709fda3c..219ac88a9542 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -140,6 +140,7 @@
#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
+#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */
#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 03243f7eeddf..85c60adc8b60 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -120,13 +120,16 @@
/*
* 2nd stage PTE definitions for LPAE.
*/
-#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
+#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
+#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
+#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
+#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
-#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
+#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
+#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+
+#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
/*
* Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 7d59b524f2af..5478e5d6ad89 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -216,13 +216,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
+#define pte_valid(pte) (pte_val(pte) & L_PTE_VALID)
+#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte) (0)
-#define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
+#define pte_valid_user(pte) \
+ (pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
@@ -237,7 +240,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
{
unsigned long ext = 0;
- if (addr < TASK_SIZE && pte_present_user(pteval)) {
+ if (addr < TASK_SIZE && pte_valid_user(pteval)) {
__sync_icache_dcache(pteval);
ext |= PTE_EXT_NG;
}
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index f24edad26c70..ae1919be8f98 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -71,6 +71,8 @@ struct arm_pmu {
void (*disable)(struct perf_event *event);
int (*get_event_idx)(struct pmu_hw_events *hw_events,
struct perf_event *event);
+ void (*clear_event_idx)(struct pmu_hw_events *hw_events,
+ struct perf_event *event);
int (*set_event_filter)(struct hw_perf_event *evt,
struct perf_event_attr *attr);
u32 (*read_counter)(struct perf_event *event);
diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h
new file mode 100644
index 000000000000..806cfe622a9e
--- /dev/null
+++ b/arch/arm/include/asm/probes.h
@@ -0,0 +1,43 @@
+/*
+ * arch/arm/include/asm/probes.h
+ *
+ * Original contents copied from arch/arm/include/asm/kprobes.h
+ * which contains the following notice...
+ *
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ASM_PROBES_H
+#define _ASM_PROBES_H
+
+typedef u32 probes_opcode_t;
+
+struct arch_probes_insn;
+typedef void (probes_insn_handler_t)(probes_opcode_t,
+ struct arch_probes_insn *,
+ struct pt_regs *);
+typedef unsigned long (probes_check_cc)(unsigned long);
+typedef void (probes_insn_singlestep_t)(probes_opcode_t,
+ struct arch_probes_insn *,
+ struct pt_regs *);
+typedef void (probes_insn_fn_t)(void);
+
+/* Architecture specific copy of original instruction. */
+struct arch_probes_insn {
+ probes_opcode_t *insn;
+ probes_insn_handler_t *insn_handler;
+ probes_check_cc *insn_check_cc;
+ probes_insn_singlestep_t *insn_singlestep;
+ probes_insn_fn_t *insn_fn;
+};
+
+#endif
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 04c99f36ff7f..c877654fe3bf 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -27,9 +27,13 @@ struct pt_regs {
#define thumb_mode(regs) (0)
#endif
+#ifndef CONFIG_CPU_V7M
#define isa_mode(regs) \
- ((((regs)->ARM_cpsr & PSR_J_BIT) >> 23) | \
- (((regs)->ARM_cpsr & PSR_T_BIT) >> 5))
+ ((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \
+ (((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT))))
+#else
+#define isa_mode(regs) 1 /* Thumb */
+#endif
#define processor_mode(regs) \
((regs)->ARM_cpsr & MODE_MASK)
@@ -80,6 +84,12 @@ static inline long regs_return_value(struct pt_regs *regs)
#define instruction_pointer(regs) (regs)->ARM_pc
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ instruction_pointer(regs) = val;
+}
+
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
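isa_mode() packs the CPSR J and T bits into a two-bit ISA value (0 = ARM, 1 = Thumb, 2 = Jazelle, 3 = ThumbEE); deriving the shift amounts from __ffs() instead of the magic constants 23 and 5 keeps them correct by construction. A worked check of the two shifts:

    unsigned long cpsr = PSR_T_BIT;     /* Thumb state, J clear */
    unsigned long isa = ((cpsr & PSR_J_BIT) >> 23) | ((cpsr & PSR_T_BIT) >> 5);
    /* J (bit 24) lands in bit 1, T (bit 5) in bit 0: isa == 1, i.e. Thumb */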
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 22a3b9b5d4a1..4157aec4e307 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -74,6 +74,7 @@ struct secondary_data {
};
extern struct secondary_data secondary_data;
extern volatile int pen_release;
+extern void secondary_startup(void);
extern int __cpu_disable(void);
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ef3c6072aa45..ac4bfae26702 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -37,18 +37,9 @@
static inline void dsb_sev(void)
{
-#if __LINUX_ARM_ARCH__ >= 7
- __asm__ __volatile__ (
- "dsb ishst\n"
- SEV
- );
-#else
- __asm__ __volatile__ (
- "mcr p15, 0, %0, c7, c10, 4\n"
- SEV
- : : "r" (0)
- );
-#endif
+
+ dsb(ishst);
+ __asm__(SEV);
}
/*
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 63479eecbf76..9732b8e11e63 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -2,7 +2,6 @@
#define __ASM_SYNC_BITOPS_H__
#include <asm/bitops.h>
-#include <asm/system.h>
/* sync_bitops functions are equivalent to the SMP implementation of the
* original functions, independently from CONFIG_SMP being defined.
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
deleted file mode 100644
index 368165e33c1c..000000000000
--- a/arch/arm/include/asm/system.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
-#include <asm/barrier.h>
-#include <asm/compiler.h>
-#include <asm/cmpxchg.h>
-#include <asm/switch_to.h>
-#include <asm/system_info.h>
-#include <asm/system_misc.h>
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 71a06b293489..f989d7c22dc5 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -153,6 +153,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_SIGPENDING 0
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+#define TIF_UPROBE 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
#define TIF_SYSCALL_TRACEPOINT 10
@@ -165,6 +166,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
@@ -178,7 +180,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
/*
* Change these and you break ASM code in entry-common.S
*/
-#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME)
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE)
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 58b8b84adcd2..2fe85fff5cca 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -20,9 +20,6 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
-#define mc_capable() (cpu_topology[0].socket_id != -1)
-#define smt_capable() (cpu_topology[0].thread_id != -1)
-
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 72abdc541f38..12c3a5decc60 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,7 +19,7 @@
#include <asm/unified.h>
#include <asm/compiler.h>
-#if __LINUX_ARM_ARCH__ < 6
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#include <asm-generic/uaccess-unaligned.h>
#else
#define __get_user_unaligned __get_user
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index acabef1a75df..43876245fc57 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -48,6 +48,5 @@
*/
#define __IGNORE_fadvise64_64
#define __IGNORE_migrate_pages
-#define __IGNORE_kcmp
#endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/include/asm/uprobes.h b/arch/arm/include/asm/uprobes.h
new file mode 100644
index 000000000000..9472c20b7d49
--- /dev/null
+++ b/arch/arm/include/asm/uprobes.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 Rabin Vincent <rabin@rab.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+
+#include <asm/probes.h>
+#include <asm/opcodes.h>
+
+typedef u32 uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES 4
+#define UPROBE_XOL_SLOT_BYTES 64
+
+#define UPROBE_SWBP_ARM_INSN 0xe7f001f9
+#define UPROBE_SS_ARM_INSN 0xe7f001fa
+#define UPROBE_SWBP_INSN __opcode_to_mem_arm(UPROBE_SWBP_ARM_INSN)
+#define UPROBE_SWBP_INSN_SIZE 4
+
+struct arch_uprobe_task {
+ u32 backup;
+ unsigned long saved_trap_no;
+};
+
+struct arch_uprobe {
+ u8 insn[MAX_UINSN_BYTES];
+ unsigned long ixol[2];
+ uprobe_opcode_t bpinsn;
+ bool simulate;
+ u32 pcreg;
+ void (*prehandler)(struct arch_uprobe *auprobe,
+ struct arch_uprobe_task *autask,
+ struct pt_regs *regs);
+ void (*posthandler)(struct arch_uprobe *auprobe,
+ struct arch_uprobe_task *autask,
+ struct pt_regs *regs);
+ struct arch_probes_insn asi;
+};
+
+#endif
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index e0965abacb7d..cf4f3e867395 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -97,16 +97,13 @@ static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
return NULL;
}
-static inline int m2p_add_override(unsigned long mfn, struct page *page,
- struct gnttab_map_grant_ref *kmap_op)
-{
- return 0;
-}
+extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count);
-static inline int m2p_remove_override(struct page *page, bool clear_pte)
-{
- return 0;
-}
+extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count);
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
diff --git a/arch/arm/include/debug/tegra.S b/arch/arm/include/debug/tegra.S
index f98763f0bc17..3bc80599c022 100644
--- a/arch/arm/include/debug/tegra.S
+++ b/arch/arm/include/debug/tegra.S
@@ -53,8 +53,7 @@
#define checkuart(rp, rv, lhu, bit, uart) \
/* Load address of CLK_RST register */ \
- movw rp, #TEGRA_CLK_RST_DEVICES_##lhu & 0xffff ; \
- movt rp, #TEGRA_CLK_RST_DEVICES_##lhu >> 16 ; \
+ ldr rp, =TEGRA_CLK_RST_DEVICES_##lhu ; \
/* Load value from CLK_RST register */ \
ldr rp, [rp, #0] ; \
/* Test UART's reset bit */ \
@@ -62,8 +61,7 @@
/* If set, can't use UART; jump to save no UART */ \
bne 90f ; \
/* Load address of CLK_OUT_ENB register */ \
- movw rp, #TEGRA_CLK_OUT_ENB_##lhu & 0xffff ; \
- movt rp, #TEGRA_CLK_OUT_ENB_##lhu >> 16 ; \
+ ldr rp, =TEGRA_CLK_OUT_ENB_##lhu ; \
/* Load value from CLK_OUT_ENB register */ \
ldr rp, [rp, #0] ; \
/* Test UART's clock enable bit */ \
@@ -71,8 +69,7 @@
/* If clear, can't use UART; jump to save no UART */ \
beq 90f ; \
/* Passed all tests, load address of UART registers */ \
- movw rp, #TEGRA_UART##uart##_BASE & 0xffff ; \
- movt rp, #TEGRA_UART##uart##_BASE >> 16 ; \
+ ldr rp, =TEGRA_UART##uart##_BASE ; \
/* Jump to save UART address */ \
b 91f
@@ -90,15 +87,16 @@
#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA
/* Check ODMDATA */
-10: movw \rp, #TEGRA_PMC_SCRATCH20 & 0xffff
- movt \rp, #TEGRA_PMC_SCRATCH20 >> 16
+10: ldr \rp, =TEGRA_PMC_SCRATCH20
ldr \rp, [\rp, #0] @ Load PMC_SCRATCH20
- ubfx \rv, \rp, #18, #2 @ 19:18 are console type
+ lsr \rv, \rp, #18 @ 19:18 are console type
+ and \rv, \rv, #3
cmp \rv, #2 @ 2 and 3 mean DCC, UART
beq 11f @ some boards swap the meaning
cmp \rv, #3 @ so accept either
bne 90f
-11: ubfx \rv, \rp, #15, #3 @ 17:15 are UART ID
+11: lsr \rv, \rp, #15 @ 17:15 are UART ID
+ and \rv, #7
cmp \rv, #0 @ UART 0?
beq 20f
cmp \rv, #1 @ UART 1?
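movw/movt and ubfx are ARMv6T2-and-later encodings; the ldr rp, =constant form loads the value from a literal pool and the lsr/and pair extracts the same bit-field, so these macros assemble on earlier architectures as well (presumably the motivation for the change). The two extractions, written out in C with a hypothetical scratch20 value:

    /* bits 19:18 of PMC_SCRATCH20 select the console type, 17:15 the UART ID */
    unsigned int console_type = (scratch20 >> 18) & 3;
    unsigned int uart_id      = (scratch20 >> 15) & 7;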
diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h
index 7dcc10d67253..20d12f230a2f 100644
--- a/arch/arm/include/uapi/asm/hwcap.h
+++ b/arch/arm/include/uapi/asm/hwcap.h
@@ -28,4 +28,13 @@
#define HWCAP_LPAE (1 << 20)
#define HWCAP_EVTSTRM (1 << 21)
+/*
+ * HWCAP2 flags - for elf_hwcap2 (in kernel) and AT_HWCAP2
+ */
+#define HWCAP2_AES (1 << 0)
+#define HWCAP2_PMULL (1 << 1)
+#define HWCAP2_SHA1 (1 << 2)
+#define HWCAP2_SHA2 (1 << 3)
+#define HWCAP2_CRC32 (1 << 4)
+
#endif /* _UAPI__ASMARM_HWCAP_H */
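The new HWCAP2 bits reach userspace through the AT_HWCAP2 auxiliary-vector entry that accompanies elf_hwcap2. A minimal probing sketch, assuming glibc 2.16+ for getauxval() and headers recent enough to define AT_HWCAP2:

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <asm/hwcap.h>

    int main(void)
    {
            unsigned long hwcap2 = getauxval(AT_HWCAP2);

            printf("AES:   %s\n", (hwcap2 & HWCAP2_AES)   ? "yes" : "no");
            printf("CRC32: %s\n", (hwcap2 & HWCAP2_CRC32) ? "yes" : "no");
            return 0;
    }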