Diffstat (limited to 'arch/arm/include')
39 files changed, 377 insertions, 163 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index c38b58c80202..3278afe2c3ab 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -34,3 +34,4 @@ generic-y += timex.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
 generic-y += preempt.h
+generic-y += hash.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5c2285160575..380ac4f20000 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -30,8 +30,8 @@
  * Endian independent macros for shifting bytes within registers.
  */
 #ifndef __ARMEB__
-#define pull lsr
-#define push lsl
+#define lspull lsr
+#define lspush lsl
 #define get_byte_0 lsl #0
 #define get_byte_1 lsr #8
 #define get_byte_2 lsr #16
@@ -41,8 +41,8 @@
 #define put_byte_2 lsl #16
 #define put_byte_3 lsl #24
 #else
-#define pull lsl
-#define push lsr
+#define lspull lsl
+#define lspush lsr
 #define get_byte_0 lsr #24
 #define get_byte_1 lsr #16
 #define get_byte_2 lsr #8
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 62d2cb53b069..9a92fd7864a8 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -60,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
    int result;

    smp_mb();
+   prefetchw(&v->counter);

    __asm__ __volatile__("@ atomic_add_return\n"
 "1: ldrex %0, [%3]\n"
@@ -99,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
    int result;

    smp_mb();
+   prefetchw(&v->counter);

    __asm__ __volatile__("@ atomic_sub_return\n"
 "1: ldrex %0, [%3]\n"
@@ -121,6 +123,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
    unsigned long res;

    smp_mb();
+   prefetchw(&ptr->counter);

    do {
        __asm__ __volatile__("@ atomic_cmpxchg\n"
@@ -138,6 +141,33 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
    return oldval;
 }

+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+   int oldval, newval;
+   unsigned long tmp;
+
+   smp_mb();
+   prefetchw(&v->counter);
+
+   __asm__ __volatile__ ("@ atomic_add_unless\n"
+"1: ldrex %0, [%4]\n"
+"  teq %0, %5\n"
+"  beq 2f\n"
+"  add %1, %0, %6\n"
+"  strex %2, %1, [%4]\n"
+"  teq %2, #0\n"
+"  bne 1b\n"
+"2:"
+   : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+   : "r" (&v->counter), "r" (u), "r" (a)
+   : "cc");
+
+   if (oldval != u)
+       smp_mb();
+
+   return oldval;
+}
+
 #else /* ARM_ARCH_6 */

 #ifdef CONFIG_SMP
@@ -186,10 +216,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
    return ret;
 }

-#endif /* __LINUX_ARM_ARCH__ */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
    int c, old;
@@ -200,6 +226,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
    return c;
 }

+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 #define atomic_inc(v) atomic_add(1, v)
 #define atomic_dec(v) atomic_sub(1, v)

@@ -299,6 +329,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
    unsigned long tmp;

    smp_mb();
+   prefetchw(&v->counter);

    __asm__ __volatile__("@ atomic64_add_return\n"
 "1: ldrexd %0, %H0, [%3]\n"
@@ -340,6 +371,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
    unsigned long tmp;

    smp_mb();
+   prefetchw(&v->counter);

    __asm__ __volatile__("@ atomic64_sub_return\n"
 "1: ldrexd %0, %H0, [%3]\n"
@@ -364,6 +396,7 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
    unsigned long res;

    smp_mb();
+   prefetchw(&ptr->counter);

    do {
        __asm__ __volatile__("@ atomic64_cmpxchg\n"
@@ -388,6 +421,7 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
    unsigned long tmp;

    smp_mb();
+   prefetchw(&ptr->counter);

    __asm__ __volatile__("@ atomic64_xchg\n"
 "1: ldrexd %0, %H0, [%3]\n"
@@ -409,6 +443,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
    unsigned long tmp;

    smp_mb();
+   prefetchw(&v->counter);

    __asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1: ldrexd %0, %H0, [%3]\n"
@@ -436,6 +471,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
    int ret = 1;

    smp_mb();
+   prefetchw(&v->counter);

    __asm__ __volatile__("@ atomic64_add_unless\n"
 "1: ldrexd %0, %H0, [%4]\n"
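
Aside (not part of the patch): the LDREX/STREX sequences above are load-exclusive/store-exclusive retry loops, and the added prefetchw() pulls the counter's cache line into the exclusive state before the first LDREX so the STREX is less likely to fail and loop. A minimal hypothetical caller of the add-unless primitive, the usual "take a reference unless it already dropped to zero" idiom (my_tryget is an invented name):

#include <linux/atomic.h>
#include <linux/types.h>

/* Illustration only: __atomic_add_unless() returns the value it found,
 * and the add happened iff that old value differed from the limit. */
static bool my_tryget(atomic_t *refcount)
{
    return __atomic_add_unless(refcount, 1, 0) != 0;
}
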
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 60f15e274e6d..2f59f7443396 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -59,6 +59,21 @@
 #define smp_wmb() dmb(ishst)
 #endif

+#define smp_store_release(p, v)                    \
+do {                                               \
+   compiletime_assert_atomic_type(*p);             \
+   smp_mb();                                       \
+   ACCESS_ONCE(*p) = (v);                          \
+} while (0)
+
+#define smp_load_acquire(p)                        \
+({                                                 \
+   typeof(*p) ___p1 = ACCESS_ONCE(*p);             \
+   compiletime_assert_atomic_type(*p);             \
+   smp_mb();                                       \
+   ___p1;                                          \
+})
+
 #define read_barrier_depends()     do { } while(0)
 #define smp_read_barrier_depends() do { } while(0)
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index e691ec91e4d3..b2e298a90d76 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -254,25 +254,59 @@ static inline int constant_fls(int x)
 }

 /*
- * On ARMv5 and above those functions can be implemented around
- * the clz instruction for much better code efficiency.
+ * On ARMv5 and above those functions can be implemented around the
+ * clz instruction for much better code efficiency.  __clz returns
+ * the number of leading zeros, zero input will return 32, and
+ * 0x80000000 will return 0.
  */
+static inline unsigned int __clz(unsigned int x)
+{
+   unsigned int ret;
+
+   asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
+
+   return ret;
+}
+
+/*
+ * fls() returns zero if the input is zero, otherwise returns the bit
+ * position of the last set bit, where the LSB is 1 and MSB is 32.
+ */
 static inline int fls(int x)
 {
-   int ret;
-
    if (__builtin_constant_p(x))
        return constant_fls(x);

-   asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
-   ret = 32 - ret;
-   return ret;
+   return 32 - __clz(x);
+}
+
+/*
+ * __fls() returns the bit position of the last bit set, where the
+ * LSB is 0 and MSB is 31.  Zero input is undefined.
+ */
+static inline unsigned long __fls(unsigned long x)
+{
+   return fls(x) - 1;
+}
+
+/*
+ * ffs() returns zero if the input was zero, otherwise returns the bit
+ * position of the first set bit, where the LSB is 1 and MSB is 32.
+ */
+static inline int ffs(int x)
+{
+   return fls(x & -x);
+}
+
+/*
+ * __ffs() returns the bit position of the first bit set, where the
+ * LSB is 0 and MSB is 31.  Zero input is undefined.
+ */
+static inline unsigned long __ffs(unsigned long x)
+{
+   return ffs(x) - 1;
+}

-#define __fls(x) (fls(x) - 1)
-#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
-#define __ffs(x) (ffs(x) - 1)
 #define ffz(x) __ffs( ~(x) )

 #endif
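
Aside (illustration only): the new comments pin down the fls()/ffs() conventions. A small self-test that models them with the GCC builtin the clz-based code mirrors — model_fls() is a made-up name:

#include <assert.h>

/* Model of the ARM clz-based fls(): 32 - clz(x), with fls(0) == 0. */
static int model_fls(unsigned int x)
{
    return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
    assert(model_fls(0) == 0);            /* fls(0) is defined as 0 */
    assert(model_fls(1) == 1);            /* LSB is bit position 1  */
    assert(model_fls(0x80000000u) == 32); /* MSB is bit position 32 */
    /* ffs(x) == fls(x & -x): isolate the lowest set bit, then fls it. */
    assert(model_fls(0x28 & -0x28) == 4); /* lowest set bit of 0x28 */
    return 0;
}
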
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index ee753f1749cd..8b8b61685a34 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 static inline void __flush_icache_all(void)
 {
    __flush_icache_preferred();
+   dsb();
 }

 /*
@@ -481,4 +482,9 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
    : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
          "r9","r10","lr","memory" )

+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+
 #endif
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 6dcc16430868..523315115478 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -87,19 +87,33 @@ static inline __wsum
 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
           unsigned short proto, __wsum sum)
 {
-   __asm__(
-   "adds %0, %1, %2 @ csum_tcpudp_nofold \n\
-   adcs %0, %0, %3 \n"
+   u32 lenprot = len | proto << 16;
+   if (__builtin_constant_p(sum) && sum == 0) {
+       __asm__(
+       "adds %0, %1, %2 @ csum_tcpudp_nofold0 \n\t"
 #ifdef __ARMEB__
-   "adcs %0, %0, %4 \n"
+       "adcs %0, %0, %3 \n\t"
 #else
-   "adcs %0, %0, %4, lsl #8 \n"
+       "adcs %0, %0, %3, ror #8 \n\t"
 #endif
-   "adcs %0, %0, %5 \n\
-   adc %0, %0, #0"
-   : "=&r"(sum)
-   : "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto))
-   : "cc");
+       "adc %0, %0, #0"
+       : "=&r" (sum)
+       : "r" (daddr), "r" (saddr), "r" (lenprot)
+       : "cc");
+   } else {
+       __asm__(
+       "adds %0, %1, %2 @ csum_tcpudp_nofold \n\t"
+       "adcs %0, %0, %3 \n\t"
+#ifdef __ARMEB__
+       "adcs %0, %0, %4 \n\t"
+#else
+       "adcs %0, %0, %4, ror #8 \n\t"
+#endif
+       "adc %0, %0, #0"
+       : "=&r"(sum)
+       : "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot)
+       : "cc");
+   }
    return sum;
 }

 /*
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index 80751c15c300..4e8a4b27d7c7 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -14,12 +14,14 @@

 #include <linux/slab.h>

+#ifndef CONFIG_COMMON_CLK
 #ifdef CONFIG_HAVE_MACH_CLKDEV
 #include <mach/clkdev.h>
 #else
 #define __clk_get(clk) ({ 1; })
 #define __clk_put(clk) do { } while (0)
 #endif
+#endif

 static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
 {
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index df2fbba7efc8..abb2c3769b01 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -2,6 +2,7 @@
 #define __ASM_ARM_CMPXCHG_H

 #include <linux/irqflags.h>
+#include <linux/prefetch.h>
 #include <asm/barrier.h>

 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
@@ -35,6 +36,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 #endif

    smp_mb();
+   prefetchw((const void *)ptr);

    switch (size) {
 #if __LINUX_ARM_ARCH__ >= 6
@@ -138,6 +140,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 {
    unsigned long oldval, res;

+   prefetchw((const void *)ptr);
+
    switch (size) {
 #ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
    case 1:
@@ -230,6 +234,8 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
    unsigned long long oldval;
    unsigned long res;

+   prefetchw(ptr);
+
    __asm__ __volatile__(
 "1: ldrexd %1, %H1, [%3]\n"
 "  teq %1, %4\n"
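
Aside (not from the patch): prefetchw() helps cmpxchg() for the same reason it helps the atomics. The typical caller-side retry loop this header serves looks like the following hypothetical saturating increment:

#include <linux/atomic.h>
#include <linux/kernel.h>

/* Illustration of the usual cmpxchg() retry pattern: increment a
 * counter but never let it wrap past INT_MAX. */
static void sat_inc(atomic_t *v)
{
    int old, new;

    do {
        old = atomic_read(v);
        if (old == INT_MAX)
            return;     /* already saturated */
        new = old + 1;
        /* atomic_cmpxchg() returns the value it found; retry if
         * someone else changed *v between the read and the store. */
    } while (atomic_cmpxchg(v, old, new) != old);
}
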
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index acdde76b39bb..42f0889f0584 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -71,6 +71,7 @@
 #define ARM_CPU_PART_CORTEX_A5 0xC050
 #define ARM_CPU_PART_CORTEX_A15 0xC0F0
 #define ARM_CPU_PART_CORTEX_A7 0xC070
+#define ARM_CPU_PART_CORTEX_A12 0xC0D0

 #define ARM_CPU_XSCALE_ARCH_MASK 0xe000
 #define ARM_CPU_XSCALE_ARCH_V1 0x2000
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 58b8c6a0ab1f..99084431d6ae 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -8,8 +8,8 @@
 #define MAX_DMA_ADDRESS 0xffffffffUL
 #else
 #define MAX_DMA_ADDRESS ({ \
-   extern unsigned long arm_dma_zone_size; \
-   arm_dma_zone_size ? \
+   extern phys_addr_t arm_dma_zone_size; \
+   arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \
        (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
 #endif
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
index c9f03eccc9d8..f4882553fbb0 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -25,7 +25,7 @@
 #define fd_inb(port) inb((port))
 #define fd_request_irq() request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
-                   IRQF_DISABLED,"floppy",NULL)
+                   0,"floppy",NULL)
 #define fd_free_irq() free_irq(IRQ_FLOPPYDISK,NULL)
 #define fd_disable_irq() disable_irq(IRQ_FLOPPYDISK)
 #define fd_enable_irq() enable_irq(IRQ_FLOPPYDISK)
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf597f6e6..53e69dae796f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -3,11 +3,6 @@

 #ifdef __KERNEL__

-#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
-/* ARM doesn't provide unprivileged exclusive memory accessors */
-#include <asm-generic/futex.h>
-#else
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <asm/errno.h>
@@ -28,6 +23,7 @@

 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)   \
    smp_mb();                                                      \
+   prefetchw(uaddr);                                              \
    __asm__ __volatile__(                                          \
    "1: ldrex %1, [%3]\n"                                          \
    "   " insn "\n"                                                \
@@ -51,6 +47,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        return -EFAULT;

    smp_mb();
+   /* Prefetching cannot fault */
+   prefetchw(uaddr);
    __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
    "1: ldrex %1, [%4]\n"
    "   teq %1, %2\n"
@@ -164,6 +162,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
    return ret;
 }

-#endif /* !(CPU_USE_DOMAINS && SMP) */
 #endif /* __KERNEL__ */
 #endif /* _ASM_ARM_FUTEX_H */
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 3b2c40b5bfa2..6795ff743b3d 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -131,6 +131,7 @@ struct l2x0_regs {
    unsigned long prefetch_ctrl;
    unsigned long pwr_ctrl;
    unsigned long ctrl;
+   unsigned long aux2_ctrl;
 };

 extern struct l2x0_regs l2x0_saved_regs;
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index eef55ea9ef00..8e427c7b4425 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -51,6 +51,7 @@ static inline void decode_ctrl_reg(u32 reg,
 #define ARM_DEBUG_ARCH_V7_ECP14 3
 #define ARM_DEBUG_ARCH_V7_MM 4
 #define ARM_DEBUG_ARCH_V7_1 5
+#define ARM_DEBUG_ARCH_V8 6

 /* Breakpoint */
 #define ARM_BREAKPOINT_EXECUTE 0
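
Aside (hypothetical usage): the new Cortex-A12 part number is matched against the MIDR the same way the existing entries are in errata checks; read_cpuid_part_number() in this header masks the MIDR down to the part field:

#include <linux/types.h>
#include <asm/cputype.h>

/* Illustration only: detect a Cortex-A12 from its MIDR part number. */
static bool is_cortex_a12(void)
{
    return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A12;
}
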
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index 6ff56eca3f1f..6e183fd269fb 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -9,6 +9,7 @@
  * instruction set this cpu supports.
  */
 #define ELF_HWCAP (elf_hwcap)
-extern unsigned int elf_hwcap;
+#define ELF_HWCAP2 (elf_hwcap2)
+extern unsigned int elf_hwcap, elf_hwcap2;
 #endif
 #endif
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 3c597c222ef2..8aa4cca74501 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -38,6 +38,12 @@
 #define isa_bus_to_virt phys_to_virt

 /*
+ * Atomic MMIO-wide IO modify
+ */
+extern void atomic_io_modify(void __iomem *reg, u32 mask, u32 set);
+extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set);
+
+/*
  * Generic IO read/write.  These perform native-endian accesses.  Note
  * that some architectures will want to re-define __raw_{read,write}w.
  */
@@ -329,7 +335,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  */
 #define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
 #define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
 #define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)
 #define iounmap __arm_iounmap
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 863c892b4aaa..70f9b9bfb1f9 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -4,7 +4,6 @@
 #ifdef __KERNEL__

 #include <linux/types.h>
-#include <asm/system.h>

 #define JUMP_LABEL_NOP_SIZE 4
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 8a6f6db14ee4..098f7dd6d564 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -225,4 +225,7 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);

+u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
+int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
+
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 77de4a41cc50..2d122adcdb22 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -140,6 +140,7 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
 }

 #define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
+#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))

 #endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 2fe141fcc8d6..f98c7f32c9c8 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -22,18 +22,21 @@ struct map_desc {
 };

 /* types 0-3 are defined in asm/io.h */
-#define MT_UNCACHED 4
-#define MT_CACHECLEAN 5
-#define MT_MINICLEAN 6
-#define MT_LOW_VECTORS 7
-#define MT_HIGH_VECTORS 8
-#define MT_MEMORY 9
-#define MT_ROM 10
-#define MT_MEMORY_NONCACHED 11
-#define MT_MEMORY_DTCM 12
-#define MT_MEMORY_ITCM 13
-#define MT_MEMORY_SO 14
-#define MT_MEMORY_DMA_READY 15
+enum {
+   MT_UNCACHED = 4,
+   MT_CACHECLEAN,
+   MT_MINICLEAN,
+   MT_LOW_VECTORS,
+   MT_HIGH_VECTORS,
+   MT_MEMORY_RWX,
+   MT_MEMORY_RW,
+   MT_ROM,
+   MT_MEMORY_RWX_NONCACHED,
+   MT_MEMORY_RW_DTCM,
+   MT_MEMORY_RWX_ITCM,
+   MT_MEMORY_RW_SO,
+   MT_MEMORY_DMA_READY,
+};

 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
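
Aside: atomic_io_modify() is only declared in io.h above; the implementation lives outside this diff. Semantically it is a locked read-modify-write of an MMIO register, roughly like this sketch (names prefixed ex_ are invented; the real implementation may differ):

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(ex_io_lock);

/* Sketch only: clear the bits in @mask and OR in @set, as one
 * atomic update of the register at @reg. */
static void ex_atomic_io_modify(void __iomem *reg, u32 mask, u32 set)
{
    unsigned long flags;
    u32 value;

    raw_spin_lock_irqsave(&ex_io_lock, flags);
    value = readl_relaxed(reg) & ~mask;
    value |= (set & mask);
    writel_relaxed(value, reg);
    raw_spin_unlock_irqrestore(&ex_io_lock, flags);
}
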
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 9ecccc865046..02fa2558f662 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -30,14 +30,15 @@
  */
 #define UL(x) _AC(x, UL)

+/* PAGE_OFFSET - the virtual address of the start of the kernel image */
+#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+
 #ifdef CONFIG_MMU

 /*
- * PAGE_OFFSET - the virtual address of the start of the kernel image
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
-#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
 #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
 #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)

@@ -100,23 +101,15 @@
 #define TASK_UNMAPPED_BASE UL(0x00000000)
 #endif

-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
-#endif
-
 #ifndef END_MEM
 #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif

-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET (PHYS_OFFSET)
-#endif
-
 /*
  * The module can be at any place in ram in nommu mode.
  */
 #define MODULES_END (END_MEM)
-#define MODULES_VADDR (PHYS_OFFSET)
+#define MODULES_VADDR PAGE_OFFSET

 #define XIP_VIRT_ADDR(physaddr) (physaddr)

@@ -157,15 +150,33 @@
 #endif
 #define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)

+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory.  This is used for XIP and NoMMU kernels, or by kernels which
+ * have their own mach/memory.h.  Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#ifndef PLAT_PHYS_OFFSET
+#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
+#endif
+
 #ifndef __ASSEMBLY__

 /*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ *
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
  */
-#ifndef __virt_to_phys
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#if defined(__virt_to_phys)
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+
+#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)

 /*
  * Constants used to force the right instruction encodings and shifts
@@ -174,12 +185,17 @@
 #define __PV_BITS_31_24 0x81000000
 #define __PV_BITS_7_0 0x81

-extern u64 __pv_phys_offset;
+extern unsigned long __pv_phys_pfn_offset;
 extern u64 __pv_offset;
 extern void fixup_pv_table(const void *, unsigned long);
 extern const void *__pv_table_begin, *__pv_table_end;

-#define PHYS_OFFSET __pv_phys_offset
+#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
+#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)
+
+#define virt_to_pfn(kaddr) \
+   ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+    PHYS_PFN_OFFSET)

 #define __pv_stub(from,to,instr,type) \
    __asm__("@ __pv_stub\n" \
@@ -239,6 +255,9 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)

 #else

+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
 static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
    return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
@@ -249,30 +268,12 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
    return x - PHYS_OFFSET + PAGE_OFFSET;
 }

-#endif
-#endif
-#endif /* __ASSEMBLY__ */
+#define virt_to_pfn(kaddr) \
+   ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+    PHYS_PFN_OFFSET)

-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
-#define PHYS_OFFSET PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
-#endif
 #endif

-#ifndef __ASSEMBLY__
-
-/*
- * PFNs are used to describe any physical page; this means
- * PFN 0 == physical address 0.
- *
- * This is the PFN of the first RAM page in the kernel
- * direct-mapped view.  We assume this is the first page
- * of RAM in the mem_map as well.
- */
-#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
-
 /*
  * These are *only* valid on the kernel direct mapped RAM memory.
  * Note: Drivers should NOT use these.  They are the wrong
@@ -349,8 +350,9 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
  */
 #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET

-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
+                   && pfn_valid(virt_to_pfn(kaddr)))

 #endif
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h
index a98a2e112fae..680a83e94467 100644
--- a/arch/arm/include/asm/pci.h
+++ b/arch/arm/include/asm/pci.h
@@ -57,12 +57,9 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                   enum pci_mmap_state mmap_state, int write_combine);

-/*
- * Dummy implementation; always return 0.
- */
 static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 {
-   return 0;
+   return channel ? 15 : 14;
 }

 #endif /* __KERNEL__ */
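
Aside (worked example, values assumed): with a 3G/1G split (PAGE_OFFSET 0xc0000000), RAM at physical 0x80000000 and 4K pages, PHYS_PFN_OFFSET is 0x80000, so virt_to_pfn(0xc0100000) = ((0xc0100000 - 0xc0000000) >> 12) + 0x80000 = 0x80100, i.e. physical address 0x80100000. A C model of the macro under those assumptions:

/* Model of virt_to_pfn() with assumed example values. */
#define EX_PAGE_OFFSET     0xc0000000UL
#define EX_PHYS_PFN_OFFSET 0x80000UL    /* RAM at 0x80000000, 4K pages */
#define EX_PAGE_SHIFT      12

static unsigned long ex_virt_to_pfn(unsigned long kaddr)
{
    return ((kaddr - EX_PAGE_OFFSET) >> EX_PAGE_SHIFT) + EX_PHYS_PFN_OFFSET;
}
/* ex_virt_to_pfn(0xc0100000) == 0x80100 -> physical 0x80100000 */
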
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 86a659a19526..219ac88a9542 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -140,6 +140,7 @@
 #define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
 #define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
 #define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
+#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */
 #define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)

 #ifndef __ASSEMBLY__
@@ -160,6 +161,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
    return (pmd_t *)pud;
 }

+#define pmd_large(pmd) (pmd_val(pmd) & 2)
 #define pmd_bad(pmd) (pmd_val(pmd) & 2)

 #define copy_pmd(pmdpd,pmdps) \
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 4f9503908dca..85c60adc8b60 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -120,13 +120,16 @@
 /*
  * 2nd stage PTE definitions for LPAE.
  */
-#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
+#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
+#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
+#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
+#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)

-#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
+#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
+#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+
+#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */

 /*
  * Hyp-mode PL2 PTE definitions for LPAE.
@@ -142,6 +145,7 @@
             PMD_TYPE_TABLE)
 #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
             PMD_TYPE_SECT)
+#define pmd_large(pmd) pmd_sect(pmd)

 #define pud_clear(pudp) \
    do { \
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 1571d126e9dd..5478e5d6ad89 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -216,13 +216,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)

 #define pte_none(pte) (!pte_val(pte))
 #define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
+#define pte_valid(pte) (pte_val(pte) & L_PTE_VALID)
+#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
 #define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
 #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
 #define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte) (0)

-#define pte_present_user(pte) (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
+#define pte_valid_user(pte) \
+   (pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))

 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -237,7 +240,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 {
    unsigned long ext = 0;

-   if (addr < TASK_SIZE && pte_present_user(pteval)) {
+   if (addr < TASK_SIZE && pte_valid_user(pteval)) {
        __sync_icache_dcache(pteval);
        ext |= PTE_EXT_NG;
    }
@@ -254,6 +257,8 @@ PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
 PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
 PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
 PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
+PTE_BIT_FUNC(mkexec,   &= ~L_PTE_XN);
+PTE_BIT_FUNC(mknexec,   |= L_PTE_XN);

 static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
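
Aside: PTE_BIT_FUNC() stamps out single-bit pte helpers; the two new ones toggle the execute-never bit, backing the set_memory_x()/set_memory_nx() interfaces declared in cacheflush.h earlier in this diff. Hand-expanded for illustration, assuming the macro's usual shape in this header:

/* What PTE_BIT_FUNC(mknexec, |= L_PTE_XN) roughly expands to: */
static inline pte_t pte_mknexec(pte_t pte)
{
    pte_val(pte) |= L_PTE_XN;   /* set execute-never */
    return pte;
}
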
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ef3c6072aa45..ac4bfae26702 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -37,18 +37,9 @@

 static inline void dsb_sev(void)
 {
-#if __LINUX_ARM_ARCH__ >= 7
-   __asm__ __volatile__ (
-       "dsb ishst\n"
-       SEV
-   );
-#else
-   __asm__ __volatile__ (
-       "mcr p15, 0, %0, c7, c10, 4\n"
-       SEV
-       : : "r" (0)
-   );
-#endif
+
+   dsb(ishst);
+   __asm__(SEV);
 }

 /*
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 63479eecbf76..9732b8e11e63 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -2,7 +2,6 @@
 #define __ASM_SYNC_BITOPS_H__

 #include <asm/bitops.h>
-#include <asm/system.h>

 /* sync_bitops functions are equivalent to the SMP implementation of the
  * original functions, independently from CONFIG_SMP being defined.
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
deleted file mode 100644
index 368165e33c1c..000000000000
--- a/arch/arm/include/asm/system.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
-#include <asm/barrier.h>
-#include <asm/compiler.h>
-#include <asm/cmpxchg.h>
-#include <asm/switch_to.h>
-#include <asm/system_info.h>
-#include <asm/system_misc.h>
diff --git a/arch/arm/include/asm/trusted_foundations.h b/arch/arm/include/asm/trusted_foundations.h
new file mode 100644
index 000000000000..3bd36e2c5f2e
--- /dev/null
+++ b/arch/arm/include/asm/trusted_foundations.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * Support for the Trusted Foundations secure monitor.
+ *
+ * Trusted Foundation comes active on some ARM consumer devices (most
+ * Tegra-based devices sold on the market are concerned).  Such devices can only
+ * perform some basic operations, like setting the CPU reset vector, through
+ * SMC calls to the secure monitor.  The calls are completely specific to
+ * Trusted Foundations, and do *not* follow the SMC calling convention or the
+ * PSCI standard.
+ */
+
+#ifndef __ASM_ARM_TRUSTED_FOUNDATIONS_H
+#define __ASM_ARM_TRUSTED_FOUNDATIONS_H
+
+#include <linux/kconfig.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+#include <linux/of.h>
+
+struct trusted_foundations_platform_data {
+   unsigned int version_major;
+   unsigned int version_minor;
+};
+
+#if IS_ENABLED(CONFIG_TRUSTED_FOUNDATIONS)
+
+void register_trusted_foundations(struct trusted_foundations_platform_data *pd);
+void of_register_trusted_foundations(void);
+
+#else /* CONFIG_TRUSTED_FOUNDATIONS */
+
+static inline void register_trusted_foundations(
+                  struct trusted_foundations_platform_data *pd)
+{
+   /*
+    * If we try to register TF, this means the system needs it to continue.
+    * Its absence is thus a fatal error.
+    */
+   panic("No support for Trusted Foundations, stopping...\n");
+}
+
+static inline void of_register_trusted_foundations(void)
+{
+   /*
+    * If we find the target should enable TF but does not support it,
+    * fail as the system won't be able to do much anyway
+    */
+   if (of_find_compatible_node(NULL, NULL, "tl,trusted-foundations"))
+       register_trusted_foundations(NULL);
+}
+#endif /* CONFIG_TRUSTED_FOUNDATIONS */
+
+#endif
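
Aside (hypothetical board code, not from the patch): a platform that knows its firmware interface version would register it at init time. The version numbers below are invented for illustration:

#include <linux/init.h>
#include <asm/trusted_foundations.h>

/* Invented board-init snippet: tell the kernel which Trusted
 * Foundations interface version the firmware implements. */
static void __init example_board_init_firmware(void)
{
    static struct trusted_foundations_platform_data pdata = {
        .version_major = 2,
        .version_minor = 0,
    };

    register_trusted_foundations(&pdata);
}
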
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 72abdc541f38..12c3a5decc60 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,7 +19,7 @@
 #include <asm/unified.h>
 #include <asm/compiler.h>

-#if __LINUX_ARM_ARCH__ < 6
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 #include <asm-generic/uaccess-unaligned.h>
 #else
 #define __get_user_unaligned __get_user
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 141baa3f9a72..43876245fc57 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -15,7 +15,7 @@

 #include <uapi/asm/unistd.h>

-#define __NR_syscalls (380)
+#define __NR_syscalls (384)
 #define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)

 #define __ARCH_WANT_STAT64
@@ -48,6 +48,5 @@
  */
 #define __IGNORE_fadvise64_64
 #define __IGNORE_migrate_pages
-#define __IGNORE_kcmp

 #endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
index 4d52f92967a6..a6d0a29861e7 100644
--- a/arch/arm/include/asm/word-at-a-time.h
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -48,10 +48,14 @@ static inline unsigned long find_zero(unsigned long mask)
    return ret;
 }

-#ifdef CONFIG_DCACHE_WORD_ACCESS
-
 #define zero_bytemask(mask) (mask)

+#else /* __ARMEB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
 /*
  * Load an unaligned word from kernel space.
  *
@@ -73,7 +77,11 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
    "   bic %2, %2, #0x3\n"
    "   ldr %0, [%2]\n"
    "   lsl %1, %1, #0x3\n"
+#ifndef __ARMEB__
    "   lsr %0, %0, %1\n"
+#else
+   "   lsl %0, %0, %1\n"
+#endif
    "   b   2b\n"
    "   .popsection\n"
    "   .pushsection __ex_table,\"a\"\n"
@@ -86,11 +94,5 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)

    return ret;
 }
-
 #endif /* DCACHE_WORD_ACCESS */
-
-#else /* __ARMEB__ */
-#include <asm-generic/word-at-a-time.h>
-#endif
-
 #endif /* __ASM_ARM_WORD_AT_A_TIME_H */
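
Aside: the fixup path rounds the faulting address down to a word boundary, reloads, then shifts the word so the bytes that were actually requested land at the low end — a right shift on little-endian, and (with the change above) a left shift on big-endian, where the first-addressed bytes occupy the register's high end. A C model of the little-endian case, illustration only (names invented):

#include <stdint.h>

/* Model (assumed values, little-endian): a load at base4 + offset
 * faulted, was realigned to base4, and is shifted so that byte
 * base4[offset] ends up in bits 7:0 of the result. */
static uint32_t le_fixup(const uint8_t *base4, unsigned offset)
{
    uint32_t word = (uint32_t)base4[0] | base4[1] << 8 |
            base4[2] << 16 | (uint32_t)base4[3] << 24;

    return word >> (offset * 8);    /* big-endian shifts left instead */
}
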
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 75579a9d6f76..e0965abacb7d 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -117,6 +117,7 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
    return __set_phys_to_machine(pfn, mfn);
 }

-#define xen_remap(cookie, size) ioremap_cached((cookie), (size));
+#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
+#define xen_unmap(cookie) iounmap((cookie))

 #endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/include/debug/imx-uart.h b/arch/arm/include/debug/imx-uart.h
index 29da84e183f4..42b823cd2d22 100644
--- a/arch/arm/include/debug/imx-uart.h
+++ b/arch/arm/include/debug/imx-uart.h
@@ -43,6 +43,14 @@
 #define IMX35_UART_BASE_ADDR(n) IMX35_UART##n##_BASE_ADDR
 #define IMX35_UART_BASE(n) IMX35_UART_BASE_ADDR(n)

+#define IMX50_UART1_BASE_ADDR 0x53fbc000
+#define IMX50_UART2_BASE_ADDR 0x53fc0000
+#define IMX50_UART3_BASE_ADDR 0x5000c000
+#define IMX50_UART4_BASE_ADDR 0x53ff0000
+#define IMX50_UART5_BASE_ADDR 0x63f90000
+#define IMX50_UART_BASE_ADDR(n) IMX50_UART##n##_BASE_ADDR
+#define IMX50_UART_BASE(n) IMX50_UART_BASE_ADDR(n)
+
 #define IMX51_UART1_BASE_ADDR 0x73fbc000
 #define IMX51_UART2_BASE_ADDR 0x73fc0000
 #define IMX51_UART3_BASE_ADDR 0x7000c000
@@ -85,6 +93,8 @@
 #define UART_PADDR IMX_DEBUG_UART_BASE(IMX31)
 #elif defined(CONFIG_DEBUG_IMX35_UART)
 #define UART_PADDR IMX_DEBUG_UART_BASE(IMX35)
+#elif defined(CONFIG_DEBUG_IMX50_UART)
+#define UART_PADDR IMX_DEBUG_UART_BASE(IMX50)
 #elif defined(CONFIG_DEBUG_IMX51_UART)
 #define UART_PADDR IMX_DEBUG_UART_BASE(IMX51)
 #elif defined(CONFIG_DEBUG_IMX53_UART)
diff --git a/arch/arm/include/debug/tegra.S b/arch/arm/include/debug/tegra.S
index be6a720dd183..f98763f0bc17 100644
--- a/arch/arm/include/debug/tegra.S
+++ b/arch/arm/include/debug/tegra.S
@@ -46,10 +46,10 @@
 #define TEGRA_APB_MISC_GP_HIDREV (TEGRA_APB_MISC_BASE + 0x804)

 /*
- * Must be 1MB-aligned since a 1MB mapping is used early on.
+ * Must be section-aligned since a section mapping is used early on.
  * Must not overlap with regions in mach-tegra/io.c:tegra_io_desc[].
  */
-#define UART_VIRTUAL_BASE 0xfe100000
+#define UART_VIRTUAL_BASE 0xfe800000

 #define checkuart(rp, rv, lhu, bit, uart) \
    /* Load address of CLK_RST register */ \
@@ -156,28 +156,6 @@
 92:    and \rv, \rp, #0xffffff     @ offset within 1MB section
    add \rv, \rv, #UART_VIRTUAL_BASE
    str \rv, [\tmp, #8]         @ Store in tegra_uart_virt
-   movw    \rv, #TEGRA_APB_MISC_GP_HIDREV & 0xffff
-   movt    \rv, #TEGRA_APB_MISC_GP_HIDREV >> 16
-   ldr \rv, [\rv, #0]          @ Load HIDREV
-   ubfx    \rv, \rv, #8, #8        @ 15:8 are SoC version
-   cmp \rv, #0x20              @ Tegra20?
-   moveq   \rv, #0x75          @ Tegra20 divisor
-   movne   \rv, #0xdd          @ Tegra30 divisor
-   str \rv, [\tmp, #12]        @ Save divisor to scratch
-   /* uart[UART_LCR] = UART_LCR_WLEN8 | UART_LCR_DLAB; */
-   mov \rv, #UART_LCR_WLEN8 | UART_LCR_DLAB
-   str \rv, [\rp, #UART_LCR << UART_SHIFT]
-   /* uart[UART_DLL] = div & 0xff; */
-   ldr \rv, [\tmp, #12]
-   and \rv, \rv, #0xff
-   str \rv, [\rp, #UART_DLL << UART_SHIFT]
-   /* uart[UART_DLM] = div >> 8; */
-   ldr \rv, [\tmp, #12]
-   lsr \rv, \rv, #8
-   str \rv, [\rp, #UART_DLM << UART_SHIFT]
-   /* uart[UART_LCR] = UART_LCR_WLEN8; */
-   mov \rv, #UART_LCR_WLEN8
-   str \rv, [\rp, #UART_LCR << UART_SHIFT]
    b   100f

    .align
@@ -205,8 +183,8 @@
    cmp \rx, #0
    beq 1002f
 1001:  ldrb    \rd, [\rx, #UART_LSR << UART_SHIFT]
-   and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
-   teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
+   and \rd, \rd, #UART_LSR_THRE
+   teq \rd, #UART_LSR_THRE
    bne 1001b
 1002:
    .endm
@@ -225,7 +203,7 @@
 /*
  * Storage for the state maintained by the macros above.
  *
- * In the kernel proper, this data is located in arch/arm/mach-tegra/common.c.
+ * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
  * That's because this header is included from multiple files, and we only
  * want a single copy of the data.  In particular, the UART probing code above
  * assumes it's running using physical addresses.  This is true when this file
@@ -247,6 +225,4 @@
 tegra_uart_config:
    .word 0
    /* Debug UART virtual address */
    .word 0
-   /* Scratch space for debug macro */
-   .word 0
 #endif
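
Aside: the waituart change polls only THRE (transmit holding register empty) rather than THRE together with TEMT (transmitter fully idle), so back-to-back characters no longer wait for the shift register to drain between bytes. The equivalent hypothetical polled transmit in C (illustration only):

#include <linux/io.h>
#include <linux/serial_reg.h>

/* Sketch of a polled 8250-style transmit: wait until the holding
 * register is free (THRE), not until the whole line is idle. */
static void ex_putc(void __iomem *uart, int shift, char c)
{
    while (!(readb(uart + (UART_LSR << shift)) & UART_LSR_THRE))
        cpu_relax();
    writeb(c, uart + (UART_TX << shift));
}
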
diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h
index 7dcc10d67253..20d12f230a2f 100644
--- a/arch/arm/include/uapi/asm/hwcap.h
+++ b/arch/arm/include/uapi/asm/hwcap.h
@@ -28,4 +28,13 @@
 #define HWCAP_LPAE (1 << 20)
 #define HWCAP_EVTSTRM (1 << 21)

+/*
+ * HWCAP2 flags - for elf_hwcap2 (in kernel) and AT_HWCAP2
+ */
+#define HWCAP2_AES (1 << 0)
+#define HWCAP2_PMULL (1 << 1)
+#define HWCAP2_SHA1 (1 << 2)
+#define HWCAP2_SHA2 (1 << 3)
+#define HWCAP2_CRC32 (1 << 4)
+
 #endif /* _UAPI__ASMARM_HWCAP_H */
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index c498b60c0505..ef0c8785ba16 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -119,6 +119,26 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800
 #define KVM_REG_ARM_32_CRN_SHIFT 11

+#define ARM_CP15_REG_SHIFT_MASK(x,n) \
+   (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
+
+#define __ARM_CP15_REG(op1,crn,crm,op2) \
+   (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \
+   ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \
+   ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \
+   ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \
+   ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2))
+
+#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32)
+
+#define __ARM_CP15_REG64(op1,crm) \
+   (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
+#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
+
+#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1)
+#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
+#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
+
 /* Normal registers are mapped as coprocessor 16. */
 #define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
 #define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4)
@@ -143,6 +163,14 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_VFP_FPINST 0x1009
 #define KVM_REG_ARM_VFP_FPINST2 0x100A

+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
+#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
+#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
+#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT 24
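
Aside (userspace view, not from the patch): the HWCAP2 bits surface to programs through the new AT_HWCAP2 auxiliary-vector entry. A minimal probe, assuming a libc recent enough to provide getauxval() and AT_HWCAP2:

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>

int main(void)
{
    unsigned long hwcap2 = getauxval(AT_HWCAP2);

    printf("AES:   %s\n", (hwcap2 & HWCAP2_AES)   ? "yes" : "no");
    printf("CRC32: %s\n", (hwcap2 & HWCAP2_CRC32) ? "yes" : "no");
    return 0;
}
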
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index af33b44990ed..fb5584d0cc05 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -406,6 +406,8 @@
 #define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
 #define __NR_kcmp (__NR_SYSCALL_BASE+378)
 #define __NR_finit_module (__NR_SYSCALL_BASE+379)
+#define __NR_sched_setattr (__NR_SYSCALL_BASE+380)
+#define __NR_sched_getattr (__NR_SYSCALL_BASE+381)

 /*
  * This may need to be greater than __NR_last_syscall+1 in order to
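
Aside (hypothetical userspace, not from the patch): the two new syscall numbers back the SCHED_DEADLINE attribute interface. With no libc wrapper at the time, a raw invocation would look like the following; the struct layout shown is the one proposed alongside the syscalls and should be checked against include/uapi/linux/sched.h before use:

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>

#ifndef __NR_sched_getattr
#define __NR_sched_getattr 381  /* ARM EABI, per the diff above */
#endif

/* Assumed layout of the kernel's struct sched_attr at this point. */
struct ex_sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t  sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
};

int main(void)
{
    struct ex_sched_attr attr = { .size = sizeof(attr) };

    /* Query the scheduling attributes of the calling thread (pid 0). */
    if (syscall(__NR_sched_getattr, 0, &attr, sizeof(attr), 0) == 0)
        printf("policy=%u\n", attr.sched_policy);
    return 0;
}
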