Diffstat (limited to 'arch/arm/include/asm')
32 files changed, 674 insertions, 321 deletions
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 749bb6622404..bc2d2d75f706 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -18,6 +18,7 @@
 #endif
 
 #include <asm/ptrace.h>
+#include <asm/domain.h>
 
 /*
  * Endian independent macros for shifting bytes within registers.
@@ -157,16 +158,24 @@
 #ifdef CONFIG_SMP
 #define ALT_SMP(instr...)	\
 9998:	instr
+/*
+ * Note: if you get assembler errors from ALT_UP() when building with
+ * CONFIG_THUMB2_KERNEL, you almost certainly need to use
+ * ALT_SMP( W(instr) ... )
+ */
 #define ALT_UP(instr...)	\
 	.pushsection ".alt.smp.init", "a"	;\
 	.long	9998b	;\
-	instr	;\
+9997:	instr	;\
+	.if . - 9997b != 4	;\
+	.error "ALT_UP() content must assemble to exactly 4 bytes";\
+	.endif	;\
 	.popsection
 #define ALT_UP_B(label)	\
 	.equ	up_b_offset, label - 9998b	;\
 	.pushsection ".alt.smp.init", "a"	;\
 	.long	9998b	;\
-	b	. + up_b_offset	;\
+	W(b)	. + up_b_offset	;\
 	.popsection
 #else
 #define ALT_SMP(instr...)
@@ -177,16 +186,24 @@
 /*
  * SMP data memory barrier
  */
-	.macro	smp_dmb
+	.macro	smp_dmb mode
 #ifdef CONFIG_SMP
 #if __LINUX_ARM_ARCH__ >= 7
+	.ifeqs "\mode","arm"
 	ALT_SMP(dmb)
+	.else
+	ALT_SMP(W(dmb))
+	.endif
 #elif __LINUX_ARM_ARCH__ == 6
 	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
 #else
 #error Incompatible SMP platform
 #endif
+	.ifeqs "\mode","arm"
 	ALT_UP(nop)
+	.else
+	ALT_UP(W(nop))
+	.endif
 #endif
 	.endm
@@ -206,12 +223,12 @@
  */
 #ifdef CONFIG_THUMB2_KERNEL
-	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort
+	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
 9999:
 	.if	\inc == 1
-	\instr\cond\()bt \reg, [\ptr, #\off]
+	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
 	.elseif	\inc == 4
-	\instr\cond\()t \reg, [\ptr, #\off]
+	\instr\cond\()\t\().w \reg, [\ptr, #\off]
 	.else
 	.error	"Unsupported inc macro argument"
 	.endif
@@ -246,13 +263,13 @@
 #else	/* !CONFIG_THUMB2_KERNEL */
-	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
+	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
 	.rept	\rept
 9999:
 	.if	\inc == 1
-	\instr\cond\()bt \reg, [\ptr], #\inc
+	\instr\cond\()b\()\t \reg, [\ptr], #\inc
 	.elseif	\inc == 4
-	\instr\cond\()t \reg, [\ptr], #\inc
+	\instr\cond\()\t \reg, [\ptr], #\inc
 	.else
 	.error	"Unsupported inc macro argument"
 	.endif
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 9d6122096fbe..75fe66bc02b4 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -23,4 +23,6 @@
 #define ARCH_SLAB_MINALIGN 8
 #endif
 
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
 #endif
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index b56c1389b6fa..765d33222369 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -12,23 +12,13 @@
 #ifndef __ASM_CLKDEV_H
 #define __ASM_CLKDEV_H
 
-struct clk;
-struct device;
+#include <linux/slab.h>
 
-struct clk_lookup {
-	struct list_head	node;
-	const char		*dev_id;
-	const char		*con_id;
-	struct clk		*clk;
-};
+#include <mach/clkdev.h>
 
-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
-	const char *dev_fmt, ...);
-
-void clkdev_add(struct clk_lookup *cl);
-void clkdev_drop(struct clk_lookup *cl);
-
-void clkdev_add_table(struct clk_lookup *, size_t);
-int clk_add_alias(const char *, const char *, char *, struct device *);
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+	return kzalloc(size, GFP_KERNEL);
+}
 
 #endif
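Context for the ALT_UP() change above: the ".alt.smp.init" section collects (address, replacement) pairs, and on a uniprocessor system the boot code walks that section and overwrites each SMP instruction in place with its UP variant. That is why every ALT_UP() body must assemble to exactly one 4-byte instruction. A minimal C sketch of that fixup walk, under the assumption that entries are 8-byte pairs as the macros lay them out (struct and function names are illustrative, not the kernel's; the real fixup lives in ARM assembly):

/* Conceptual sketch only - not the kernel's actual implementation. */
struct alt_smp_entry {
	unsigned long insn_addr;	/* the ".long 9998b" patch address */
	unsigned int up_insn;		/* the 4-byte UP replacement */
};

static void __init fixup_smp_on_up(struct alt_smp_entry *start,
				   struct alt_smp_entry *end)
{
	struct alt_smp_entry *e;

	for (e = start; e < end; e++)
		*(unsigned int *)e->insn_addr = e->up_insn;
	/* real code must also flush caches before executing patched text */
}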
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index c568da7dcae4..4fff837363ed 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,24 +5,29 @@
 
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
+#ifdef __arch_page_to_dma
+#error Please update to __arch_pfn_to_dma
+#endif
+
 /*
- * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
- * used internally by the DMA-mapping API to provide DMA addresses. They
- * must not be used by drivers.
+ * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
+ * functions used internally by the DMA-mapping API to provide DMA
+ * addresses. They must not be used by drivers.
  */
-#ifndef __arch_page_to_dma
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+#ifndef __arch_pfn_to_dma
+static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
-	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
+	return (dma_addr_t)__pfn_to_bus(pfn);
 }
 
-static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 {
-	return pfn_to_page(__bus_to_pfn(addr));
+	return __bus_to_pfn(addr);
 }
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
@@ -35,14 +40,14 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
 }
 #else
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
-	return __arch_page_to_dma(dev, page);
+	return __arch_pfn_to_dma(dev, pfn);
 }
 
-static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 {
-	return __arch_dma_to_page(dev, addr);
+	return __arch_dma_to_pfn(dev, addr);
 }
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
@@ -293,13 +298,13 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 
 /*
  * The DMA API, implemented by dmabounce.c.  See below for descriptions.
  */
-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
 		enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);
-extern dma_addr_t dma_map_page(struct device *, struct page *,
+extern dma_addr_t __dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);
 
 /*
@@ -323,6 +328,34 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }
 
+static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
+	return virt_to_dma(dev, cpu_addr);
+}
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+		handle & ~PAGE_MASK, size, dir);
+}
+#endif /* CONFIG_DMABOUNCE */
+
 /**
  * dma_map_single - map a single buffer for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -340,11 +373,16 @@
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
 	BUG_ON(!valid_dma_direction(dir));
 
-	__dma_single_cpu_to_dev(cpu_addr, size, dir);
+	addr = __dma_map_single(dev, cpu_addr, size, dir);
+	debug_dma_map_page(dev, virt_to_page(cpu_addr),
+			(unsigned long)cpu_addr & ~PAGE_MASK, size,
+			dir, addr, true);
 
-	return virt_to_dma(dev, cpu_addr);
+	return addr;
 }
 
 /**
@@ -364,11 +402,14 @@
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
 	BUG_ON(!valid_dma_direction(dir));
 
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	addr = __dma_map_page(dev, page, offset, size, dir);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
-	return page_to_dma(dev, page) + offset;
+	return addr;
 }
 
 /**
@@ -388,7 +429,8 @@
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+	debug_dma_unmap_page(dev, handle, size, dir, true);
+	__dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
@@ -408,10 +450,9 @@
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
-		size, dir);
+	debug_dma_unmap_page(dev, handle, size, dir, false);
+	__dma_unmap_page(dev, handle, size, dir);
 }
-#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_sync_single_range_for_cpu
@@ -437,6 +478,8 @@
 {
 	BUG_ON(!valid_dma_direction(dir));
 
+	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+
 	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
 		return;
 
@@ -449,6 +492,8 @@
 {
 	BUG_ON(!valid_dma_direction(dir));
 
+	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
+
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
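The __dma_map_*/__dma_unmap_* split above lets the public entry points add dma-debug bookkeeping regardless of whether dmabounce supplies the implementation. Drivers see no change; a typical streaming mapping (illustrative names, error handling trimmed) now gets tracked automatically when CONFIG_DMA_API_DEBUG is enabled:

#include <linux/dma-mapping.h>

static int my_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* debug_dma_map_page() has recorded this mapping */

	/* ... hand 'handle' to the device and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	/* debug_dma_unmap_page() checks size/direction against the map */
	return 0;
}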
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index cc7ef4080711..af18ceaacf5d 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -45,13 +45,17 @@
  */
 #define DOMAIN_NOACCESS	0
 #define DOMAIN_CLIENT	1
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define DOMAIN_MANAGER	3
+#else
+#define DOMAIN_MANAGER	1
+#endif
 
 #define domain_val(dom,type)	((type) << (2*(dom)))
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define set_domain(x)	\
 	do {	\
 	__asm__ __volatile__(	\
@@ -74,5 +78,28 @@
 #define modify_domain(dom,type)	do { } while (0)
 #endif
 
+/*
+ * Generate the T (user) versions of the LDR/STR and related
+ * instructions (inline assembly)
+ */
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define T(instr)	#instr "t"
+#else
+#define T(instr)	#instr
 #endif
-#endif /* !__ASSEMBLY__ */
+
+#else /* __ASSEMBLY__ */
+
+/*
+ * Generate the T (user) versions of the LDR/STR and related
+ * instructions
+ */
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define T(instr)	instr ## t
+#else
+#define T(instr)	instr
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* !__ASM_PROC_DOMAIN_H */
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 8bb66bca2e3e..c3cd8755e648 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -99,6 +99,8 @@
 struct elf32_hdr;
 extern int elf_check_arch(const struct elf32_hdr *);
 #define elf_check_arch elf_check_arch
 
+#define vmcore_elf64_check_arch(x) (0)
+
 extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
 #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
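How the C-side T() macro above composes: inside an inline-asm string, T(ldr) pastes to "ldr" "t" (i.e. ldrt, a forced user-mode access) when CONFIG_CPU_USE_DOMAINS is set, and to plain "ldr" otherwise, where protection comes from the page tables instead of DACR domains. A stripped-down illustration; a real accessor also needs a __ex_table fixup entry, omitted here:

#include <asm/domain.h>

static inline int read_user_word_sketch(const int __user *p)
{
	int x;

	__asm__ __volatile__(
	"	" T(ldr) "	%0, [%1]\n"	/* "ldrt" or "ldr" */
	: "=r" (x)
	: "r" (p));
	return x;	/* a fault here would oops without a fixup entry */
}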
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
new file mode 100644
index 000000000000..ec0bbf79c71f
--- /dev/null
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -0,0 +1,44 @@
+/*
+ * Interrupt handling.  Preserves r7, r8, r9
+ */
+	.macro	arch_irq_handler_default
+	get_irqnr_preamble r5, lr
+1:	get_irqnr_and_base r0, r6, r5, lr
+	movne	r1, sp
+	@
+	@ routine called with r0 = irq number, r1 = struct pt_regs *
+	@
+	adrne	lr, BSYM(1b)
+	bne	asm_do_IRQ
+
+#ifdef CONFIG_SMP
+	/*
+	 * XXX
+	 *
+	 * this macro assumes that irqstat (r6) and base (r5) are
+	 * preserved from get_irqnr_and_base above
+	 */
+	ALT_SMP(test_for_ipi r0, r6, r5, lr)
+	ALT_UP_B(9997f)
+	movne	r1, sp
+	adrne	lr, BSYM(1b)
+	bne	do_IPI
+
+#ifdef CONFIG_LOCAL_TIMERS
+	test_for_ltirq r0, r6, r5, lr
+	movne	r0, sp
+	adrne	lr, BSYM(1b)
+	bne	do_local_timer
+#endif
+#endif
+9997:
+	.endm
+
+	.macro	arch_irq_handler, symbol_name
+	.align	5
+	.global \symbol_name
+\symbol_name:
+	mov	r4, lr
+	arch_irq_handler_default
+	mov	pc, r4
+	.endm
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 540a044153a5..b33fe7065b38 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -13,12 +13,13 @@
 #include <linux/preempt.h>
 #include <linux/uaccess.h>
 #include <asm/errno.h>
+#include <asm/domain.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
 	__asm__ __volatile__(	\
-	"1:	ldrt	%1, [%2]\n"	\
+	"1:	" T(ldr) "	%1, [%2]\n"	\
 	"	" insn "\n"	\
-	"2:	strt	%0, [%2]\n"	\
+	"2:	" T(str) "	%0, [%2]\n"	\
 	"	mov	%0, #0\n"	\
 	"3:\n"	\
 	"	.pushsection __ex_table,\"a\"\n"	\
@@ -97,10 +98,10 @@
 	pagefault_disable();	/* implies preempt_disable() */
 
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
-	"1:	ldrt	%0, [%3]\n"
+	"1:	" T(ldr) "	%0, [%3]\n"
 	"	teq	%0, %1\n"
 	"	it	eq	@ explicit IT needed for the 2b label\n"
-	"2:	streqt	%2, [%3]\n"
+	"2:	" T(streq) "	%2, [%3]\n"
 	"3:\n"
 	"	.pushsection __ex_table,\"a\"\n"
 	"	.align	3\n"
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 6d7485aff955..89ad1805e579 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,13 +5,31 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
+#define NR_IPI	5
+
 typedef struct {
 	unsigned int __softirq_pending;
+#ifdef CONFIG_LOCAL_TIMERS
 	unsigned int local_timer_irqs;
+#endif
+#ifdef CONFIG_SMP
+	unsigned int ipi_irqs[NR_IPI];
+#endif
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
 
+#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
+#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
+
+#ifdef CONFIG_SMP
+u64 smp_irq_stat_cpu(unsigned int cpu);
+#else
+#define smp_irq_stat_cpu(cpu)	0
+#endif
+
+#define arch_irq_stat_cpu	smp_irq_stat_cpu
+
 #if NR_IRQS > 512
 #define HARDIRQ_BITS	10
 #elif NR_IRQS > 256
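Given the declarations above, summing the new per-CPU IPI counters is straightforward; a plausible smp_irq_stat_cpu() body (a sketch consistent with the header, not quoted from this patch set) would be:

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

#ifdef CONFIG_LOCAL_TIMERS
	sum += __get_irq_stat(cpu, local_timer_irqs);
#endif
	return sum;
}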
diff --git a/arch/arm/include/asm/hardware/entry-macro-gic.S b/arch/arm/include/asm/hardware/entry-macro-gic.S
new file mode 100644
index 000000000000..c115b82fe80a
--- /dev/null
+++ b/arch/arm/include/asm/hardware/entry-macro-gic.S
@@ -0,0 +1,75 @@
+/*
+ * arch/arm/include/asm/hardware/entry-macro-gic.S
+ *
+ * Low-level IRQ helper macros for GIC
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <asm/hardware/gic.h>
+
+#ifndef HAVE_GET_IRQNR_PREAMBLE
+	.macro	get_irqnr_preamble, base, tmp
+	ldr	\base, =gic_cpu_base_addr
+	ldr	\base, [\base]
+	.endm
+#endif
+
+/*
+ * The interrupt numbering scheme is defined in the
+ * interrupt controller spec.  To wit:
+ *
+ * Interrupts 0-15 are IPI
+ * 16-28 are reserved
+ * 29-31 are local.  We allow 30 to be used for the watchdog.
+ * 32-1020 are global
+ * 1021-1022 are reserved
+ * 1023 is "spurious" (no interrupt)
+ *
+ * For now, we ignore all local interrupts so only return an interrupt if it's
+ * between 30 and 1020.  The test_for_ipi routine below will pick up on IPIs.
+ *
+ * A simple read from the controller will tell us the number of the highest
+ * priority enabled interrupt.  We then just need to check whether it is in the
+ * valid range for an IRQ (30-1020 inclusive).
+ */
+
+	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
+
+	ldr	\irqstat, [\base, #GIC_CPU_INTACK]
+	/* bits 12-10 = src CPU, 9-0 = int # */
+
+	ldr	\tmp, =1021
+	bic	\irqnr, \irqstat, #0x1c00
+	cmp	\irqnr, #29
+	cmpcc	\irqnr, \irqnr
+	cmpne	\irqnr, \tmp
+	cmpcs	\irqnr, \irqnr
+	.endm
+
+/* We assume that irqstat (the raw value of the IRQ acknowledge
+ * register) is preserved from the macro above.
+ * If there is an IPI, we immediately signal end of interrupt on the
+ * controller, since this requires the original irqstat value which
+ * we won't easily be able to recreate later.
+ */
+
+	.macro test_for_ipi, irqnr, irqstat, base, tmp
+	bic	\irqnr, \irqstat, #0x1c00
+	cmp	\irqnr, #16
+	strcc	\irqstat, [\base, #GIC_CPU_EOI]
+	cmpcs	\irqnr, \irqnr
+	.endm
+
+/* As above, this assumes that irqstat and base are preserved.. */
+
+	.macro test_for_ltirq, irqnr, irqstat, base, tmp
+	bic	\irqnr, \irqstat, #0x1c00
+	mov	\tmp, #0
+	cmp	\irqnr, #29
+	moveq	\tmp, #1
+	streq	\irqstat, [\base, #GIC_CPU_EOI]
+	cmp	\tmp, #0
+	.endm
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 7f34333bb545..84557d321001 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -33,10 +33,13 @@
 #define GIC_DIST_SOFTINT	0xf00
 
 #ifndef __ASSEMBLY__
-void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start);
-void gic_cpu_init(unsigned int gic_nr, void __iomem *base);
+extern void __iomem *gic_cpu_base_addr;
+
+void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
+void gic_secondary_init(unsigned int);
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
+void gic_enable_ppi(unsigned int);
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 21fa272301f8..b2f95c72287c 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -76,6 +76,7 @@ extern unsigned long it8152_base_address;
   IT8152_PD_IRQ(0)  Audio controller (ACR)
  */
 #define IT8152_IRQ(x)   (IRQ_BOARD_START + (x))
+#define IT8152_LAST_IRQ	(IRQ_BOARD_START + 40)
 
 /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
 #define IT8152_LD_IRQ_COUNT     9
diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h
new file mode 100644
index 000000000000..21e75e30d497
--- /dev/null
+++ b/arch/arm/include/asm/hardware/timer-sp.h
@@ -0,0 +1,2 @@
+void sp804_clocksource_init(void __iomem *);
+void sp804_clockevents_init(void __iomem *, unsigned int);
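The flag games in get_irqnr_and_base are easier to read in C. What the macro computes, branch-free with condition codes, is roughly the following (illustrative helper, not part of the patch):

static inline int gic_irq_is_valid(u32 irqstat)
{
	u32 irqnr = irqstat & ~0x1c00;	/* strip bits 12-10 (source CPU) */

	/* only 30-1020 is returned as an IRQ; IPIs (<16) and local
	 * interrupts are picked up by test_for_ipi/test_for_ltirq */
	return irqnr >= 30 && irqnr < 1021;
}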
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 1fc684e70ab6..7080e2c8fa62 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -25,9 +25,6 @@
 extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
-extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
-
 /*
  * The following functions are already defined by <linux/highmem.h>
  * when CONFIG_HIGHMEM is not set.
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index 4d8ae9d67abe..f389b2704d82 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -20,8 +20,8 @@ struct arch_hw_breakpoint_ctrl {
 struct arch_hw_breakpoint {
 	u32	address;
 	u32	trigger;
-	struct	perf_event *suspended_wp;
-	struct	arch_hw_breakpoint_ctrl ctrl;
+	struct	arch_hw_breakpoint_ctrl step_ctrl;
+	struct	arch_hw_breakpoint_ctrl ctrl;
 };
 
 static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index 8ec9ef5c3c7b..c0094d8edae4 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -33,10 +33,20 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 	if (oldregs) {
 		memcpy(newregs, oldregs, sizeof(*newregs));
 	} else {
-		__asm__ __volatile__ ("stmia %0, {r0 - r15}"
-				      : : "r" (&newregs->ARM_r0));
-		__asm__ __volatile__ ("mrs %0, cpsr"
-				      : "=r" (newregs->ARM_cpsr));
+		__asm__ __volatile__ (
+			"stmia	%[regs_base], {r0-r12}\n\t"
+			"mov	%[_ARM_sp], sp\n\t"
+			"str	lr, %[_ARM_lr]\n\t"
+			"adr	%[_ARM_pc], 1f\n\t"
+			"mrs	%[_ARM_cpsr], cpsr\n\t"
+		"1:"
+			: [_ARM_pc] "=r" (newregs->ARM_pc),
+			  [_ARM_cpsr] "=r" (newregs->ARM_cpsr),
+			  [_ARM_sp] "=r" (newregs->ARM_sp),
+			  [_ARM_lr] "=o" (newregs->ARM_lr)
+			: [regs_base] "r" (&newregs->ARM_r0)
+			: "memory"
+		);
 	}
 }
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
index 50c7e7cfd670..6bc63ab498ce 100644
--- a/arch/arm/include/asm/localtimer.h
+++ b/arch/arm/include/asm/localtimer.h
@@ -30,7 +30,6 @@ asmlinkage void do_local_timer(struct pt_regs *);
 #include "smp_twd.h"
 
 #define local_timer_ack()	twd_timer_ack()
-#define local_timer_stop()	twd_timer_stop()
 
 #else
 
@@ -40,11 +39,6 @@ asmlinkage void do_local_timer(struct pt_regs *);
  */
 int local_timer_ack(void);
 
-/*
- * Stop a local timer interrupt.
- */
-void local_timer_stop(void);
-
 #endif
 
 /*
@@ -52,12 +46,6 @@ void local_timer_stop(void);
  */
 void local_timer_setup(struct clock_event_device *);
 
-#else
-
-static inline void local_timer_stop(void)
-{
-}
-
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index d97a964207fa..3a0893a76a3b 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -37,12 +37,21 @@ struct machine_desc {
					   struct meminfo *);
	void		(*reserve)(void);/* reserve mem blocks	*/
	void		(*map_io)(void);/* IO mapping function	*/
+	void		(*init_early)(void);
	void		(*init_irq)(void);
	struct sys_timer	*timer;		/* system tick timer	*/
	void		(*init_machine)(void);
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	void		(*handle_irq)(struct pt_regs *);
+#endif
 };
 
 /*
+ * Current machine - only accessible during boot.
+ */
+extern struct machine_desc *machine_desc;
+
+/*
  * Set of macros to define architecture features.  This is built into
  * a table by the linker.
  */
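The new machine_desc hooks slot into a board file through the existing MACHINE_START/MACHINE_END macros; a hypothetical user of init_early and handle_irq (all myboard_* names are illustrative, not from this patch set):

MACHINE_START(MYBOARD, "My Example Board")
	.map_io		= myboard_map_io,
	.init_early	= myboard_init_early,	/* runs before init_irq */
	.init_irq	= myboard_init_irq,
#ifdef CONFIG_MULTI_IRQ_HANDLER
	.handle_irq	= myboard_handle_irq,	/* C low-level IRQ entry */
#endif
	.timer		= &myboard_timer,
	.init_machine	= myboard_init,
MACHINE_END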
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index ce3eee9fe26c..22ac140edd9e 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -17,10 +17,12 @@ struct seq_file;
 /*
  * This is internal.  Do not use it.
  */
-extern unsigned int arch_nr_irqs;
-extern void (*init_arch_irq)(void);
 extern void init_FIQ(void);
-extern int show_fiq_list(struct seq_file *, void *);
+extern int show_fiq_list(struct seq_file *, int);
+
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+extern void (*handle_arch_irq)(struct pt_regs *);
+#endif
 
 /*
  * This is for easy migration, but should be changed in the source
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index 35d408f6dccf..883f6be5117a 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -43,7 +43,6 @@ struct sys_timer {
 #endif
 };
 
-extern struct sys_timer *system_timer;
 extern void timer_tick(void);
 
 #endif
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index cbb0bc295d2b..12c8e680cbff 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -8,11 +8,6 @@ struct unwind_table;
 
 #ifdef CONFIG_ARM_UNWIND
-struct arm_unwind_mapping {
-	Elf_Shdr *unw_sec;
-	Elf_Shdr *sec_text;
-	struct unwind_table *unwind;
-};
 enum {
	ARM_SEC_INIT,
	ARM_SEC_DEVINIT,
@@ -21,13 +16,13 @@ enum {
	ARM_SEC_DEVEXIT,
	ARM_SEC_MAX,
 };
+#endif
+
 struct mod_arch_specific {
-	struct arm_unwind_mapping map[ARM_SEC_MAX];
-};
-#else
-struct mod_arch_specific {
-};
+#ifdef CONFIG_ARM_UNWIND
+	struct unwind_table *unwind[ARM_SEC_MAX];
 #endif
+};
 
 /*
  * Include the ARM architecture version.
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index a485ac3c8696..f51a69595f6e 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -151,13 +151,15 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
 
+typedef unsigned long pteval_t;
+
 #undef STRICT_MM_TYPECHECKS
 #ifdef STRICT_MM_TYPECHECKS
 /*
  * These are used to make use of C type-checking..
  */
-typedef struct { unsigned long pte; } pte_t;
+typedef struct { pteval_t pte; } pte_t;
 typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd[2]; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
@@ -175,7 +177,7 @@
 /*
  * .. while these make it easier on the compiler
  */
-typedef unsigned long pte_t;
+typedef pteval_t pte_t;
 typedef unsigned long pmd_t;
 typedef unsigned long pgd_t[2];
 typedef unsigned long pgprot_t;
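The pteval_t typedef above gives PTE bit masks a real type in C while keeping them usable from assembler; pgtable.h (below) pairs it with _AT() from <linux/const.h>. The idiom, with an illustrative bit name:

/* In C,  _AT(pteval_t, 1) expands to ((pteval_t)(1));
 * in asm it expands to a bare 1, so one definition serves both. */
#define L_PTE_EXAMPLE	(_AT(pteval_t, 1) << 0)	/* illustrative only */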
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index b12cc98bbe04..9763be04f77e 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -30,14 +30,16 @@
 #define pmd_free(mm, pmd)		do { } while (0)
 #define pgd_populate(mm,pmd,pte)	BUG()
 
-extern pgd_t *get_pgd_slow(struct mm_struct *mm);
-extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
-
-#define pgd_alloc(mm)			get_pgd_slow(mm)
-#define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
 #define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
 
+static inline void clean_pte_table(pte_t *pte)
+{
+	clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE);
+}
+
 /*
  * Allocate one PTE table.
 *
@@ -45,14 +47,14 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 * into one table thus:
 *
 *  +------------+
-*  | h/w pt 0   |
-*  +------------+
-*  | h/w pt 1   |
-*  +------------+
 *  | Linux pt 0 |
 *  +------------+
 *  | Linux pt 1 |
 *  +------------+
+*  | h/w pt 0   |
+*  +------------+
+*  | h/w pt 1   |
+*  +------------+
 */
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
@@ -60,10 +62,8 @@
 	pte_t *pte;
 
 	pte = (pte_t *)__get_free_page(PGALLOC_GFP);
-	if (pte) {
-		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
-		pte += PTRS_PER_PTE;
-	}
+	if (pte)
+		clean_pte_table(pte);
 
 	return pte;
 }
@@ -79,10 +79,8 @@
 	pte = alloc_pages(PGALLOC_GFP, 0);
 #endif
 	if (pte) {
-		if (!PageHighMem(pte)) {
-			void *page = page_address(pte);
-			clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
-		}
+		if (!PageHighMem(pte))
+			clean_pte_table(page_address(pte));
 		pgtable_page_ctor(pte);
 	}
@@ -94,10 +92,8 @@
  */
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	if (pte) {
-		pte -= PTRS_PER_PTE;
+	if (pte)
 		free_page((unsigned long)pte);
-	}
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -106,8 +102,10 @@
 	__free_page(pte);
 }
 
-static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
+static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+	unsigned long prot)
 {
+	unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot;
 	pmdp[0] = __pmd(pmdval);
 	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
 	flush_pmd_entry(pmdp);
@@ -122,20 +120,16 @@
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 {
-	unsigned long pte_ptr = (unsigned long)ptep;
-
 	/*
-	 * The pmd must be loaded with the physical
-	 * address of the PTE table
+	 * The pmd must be loaded with the physical address of the PTE table
 	 */
-	pte_ptr -= PTRS_PER_PTE * sizeof(void *);
-	__pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
+	__pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE);
 }
 
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
 {
-	__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
+	__pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)
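In numbers, the reordered PTE page now places the software entries first; a small illustration of the pointer arithmetic the pgalloc changes rely on (the function itself is illustrative):

static void show_pte_layout(pte_t *pte)
{
	/* 512 Linux (software) entries occupy the first half of the page */
	pte_t *linux_pt = pte;
	/* the two 256-entry hardware tables sit in the second half */
	u32 *hw_pt = (u32 *)(pte + PTE_HWTABLE_PTRS);

	printk("linux=%p hw=%p (hw offset %lu bytes)\n",
	       linux_pt, hw_pt, (unsigned long)PTE_HWTABLE_OFF);
	/* hence __pmd_populate() adds PTE_HWTABLE_OFF to __pa(pte), and
	 * clean_pte_table() cleans only the hardware half */
}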
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 53d1d5deb111..ebcb6432f45f 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -10,6 +10,7 @@
 #ifndef _ASMARM_PGTABLE_H
 #define _ASMARM_PGTABLE_H
 
+#include <linux/const.h>
 #include <asm-generic/4level-fixup.h>
 #include <asm/proc-fns.h>
 
@@ -54,7 +55,7 @@
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
- * hardware PTE tables arranged contiguously, followed by Linux versions
+ * hardware PTE tables arranged contiguously, preceded by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
@@ -62,15 +63,15 @@
 *
 *    pgd             pte
 * |        |
- * +--------+ +0
- * |        |-----> +------------+ +0
+ * +--------+
+ * |        |       +------------+ +0
+ * +- - - - +       | Linux pt 0 |
+ * |        |       +------------+ +1024
+ * +--------+ +0    | Linux pt 1 |
+ * |        |-----> +------------+ +2048
 * +- - - - + +4    | h/w pt 0   |
- * |        |-----> +------------+ +1024
+ * |        |-----> +------------+ +3072
 * +--------+ +8    | h/w pt 1   |
- * |        |       +------------+ +2048
- * +- - - - +       | Linux pt 0 |
- * |        |       +------------+ +3072
- * +--------+       | Linux pt 1 |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
@@ -102,6 +103,10 @@
 #define PTRS_PER_PMD	1
 #define PTRS_PER_PGD	2048
 
+#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
+#define PTE_HWTABLE_OFF	(PTE_HWTABLE_PTRS * sizeof(pte_t))
+#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))
+
 /*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
@@ -112,13 +117,13 @@
 #define LIBRARY_TEXT_START	0x0c000000
 
 #ifndef __ASSEMBLY__
-extern void __pte_error(const char *file, int line, unsigned long val);
-extern void __pmd_error(const char *file, int line, unsigned long val);
-extern void __pgd_error(const char *file, int line, unsigned long val);
+extern void __pte_error(const char *file, int line, pte_t);
+extern void __pmd_error(const char *file, int line, pmd_t);
+extern void __pgd_error(const char *file, int line, pgd_t);
 
-#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
-#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
-#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
+#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
+#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
+#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)
 #endif /* !__ASSEMBLY__ */
 
 #define PMD_SIZE		(1UL << PMD_SHIFT)
@@ -133,8 +138,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  */
 #define FIRST_USER_ADDRESS	PAGE_SIZE
 
-#define FIRST_USER_PGD_NR	1
-#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
+#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
 
 /*
 * section address mask and size definitions.
@@ -161,30 +165,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 * The PTE table pointer refers to the hardware entries; the "Linux"
 * entries are stored 1024 bytes below.
  */
-#define L_PTE_PRESENT		(1 << 0)
-#define L_PTE_YOUNG		(1 << 1)
-#define L_PTE_FILE		(1 << 2)	/* only when !PRESENT */
-#define L_PTE_DIRTY		(1 << 6)
-#define L_PTE_WRITE		(1 << 7)
-#define L_PTE_USER		(1 << 8)
-#define L_PTE_EXEC		(1 << 9)
-#define L_PTE_SHARED		(1 << 10)	/* shared(v6), coherent(xsc3) */
+#define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
+#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
+#define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */
+#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 6)
+#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)
+#define L_PTE_USER		(_AT(pteval_t, 1) << 8)
+#define L_PTE_XN		(_AT(pteval_t, 1) << 9)
+#define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */
 
 /*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
 */
-#define L_PTE_MT_UNCACHED	(0x00 << 2)	/* 0000 */
-#define L_PTE_MT_BUFFERABLE	(0x01 << 2)	/* 0001 */
-#define L_PTE_MT_WRITETHROUGH	(0x02 << 2)	/* 0010 */
-#define L_PTE_MT_WRITEBACK	(0x03 << 2)	/* 0011 */
-#define L_PTE_MT_MINICACHE	(0x06 << 2)	/* 0110 (sa1100, xscale) */
-#define L_PTE_MT_WRITEALLOC	(0x07 << 2)	/* 0111 */
-#define L_PTE_MT_DEV_SHARED	(0x04 << 2)	/* 0100 */
-#define L_PTE_MT_DEV_NONSHARED	(0x0c << 2)	/* 1100 */
-#define L_PTE_MT_DEV_WC	(0x09 << 2)	/* 1001 */
-#define L_PTE_MT_DEV_CACHED	(0x0b << 2)	/* 1011 */
-#define L_PTE_MT_MASK		(0x0f << 2)
+#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
+#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
+#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 0x02) << 2)	/* 0010 */
+#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 0x03) << 2)	/* 0011 */
+#define L_PTE_MT_MINICACHE	(_AT(pteval_t, 0x06) << 2)	/* 0110 (sa1100, xscale) */
+#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 0x07) << 2)	/* 0111 */
+#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 0x04) << 2)	/* 0100 */
+#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */
+#define L_PTE_MT_DEV_WC	(_AT(pteval_t, 0x09) << 2)	/* 1001 */
+#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
+#define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)
 
 #ifndef __ASSEMBLY__
 
@@ -201,23 +205,44 @@
 extern pgprot_t		pgprot_kernel;
 
 #define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
 
-#define PAGE_NONE		pgprot_user
-#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
-#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
-#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
-#define PAGE_KERNEL		pgprot_kernel
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_kernel, L_PTE_EXEC)
-
-#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT)
-#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
-#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
-#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
-#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
-#define __PAGE_READONLY	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
-#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
+#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
+#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
+#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
+#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
+#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
+#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
+#define PAGE_KERNEL_EXEC	pgprot_kernel
+
+#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
+#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
+#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
+#define __PAGE_READONLY	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
+
+#define __pgprot_modify(prot,mask,bits)	\
+	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
+#define pgprot_noncached(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+
+#define pgprot_writecombine(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
+
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
+#else
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
+#endif
 
 #endif /* __ASSEMBLY__ */
 
@@ -255,26 +280,84 @@ extern pgprot_t pgprot_kernel;
 extern struct page *empty_zero_page;
 #define ZERO_PAGE(vaddr)	(empty_zero_page)
 
-#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
-#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
-#define pte_none(pte)		(!pte_val(pte))
-#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
-#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
-#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
+
+#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+#define pgd_none(pgd)		(0)
+#define pgd_bad(pgd)		(0)
+#define pgd_present(pgd)	(1)
+#define pgd_clear(pgdp)		do { } while (0)
+#define set_pgd(pgd,pgdp)	do { } while (0)
+
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir, addr)	((pmd_t *)(dir))
+
+#define pmd_none(pmd)		(!pmd_val(pmd))
+#define pmd_present(pmd)	(pmd_val(pmd))
+#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
+
+#define copy_pmd(pmdpd,pmdps)	\
+	do {	\
+		pmdpd[0] = pmdps[0];	\
+		pmdpd[1] = pmdps[1];	\
+		flush_pmd_entry(pmdpd);	\
+	} while (0)
+
+#define pmd_clear(pmdp)	\
+	do {	\
+		pmdp[0] = __pmd(0);	\
+		pmdp[1] = __pmd(0);	\
+		clean_pmd_entry(pmdp);	\
+	} while (0)
+
+static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+{
+	return __va(pmd_val(pmd) & PAGE_MASK);
+}
+
+#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
+
+/* we don't need complex calculations here as the pmd is folded into the pgd */
+#define pmd_addr_end(addr,end)	(end)
+
-#define pte_offset_map(dir,addr)	(__pte_map(dir) + __pte_index(addr))
-#define pte_unmap(pte)			__pte_unmap(pte)
 
 #ifndef CONFIG_HIGHPTE
-#define __pte_map(dir)		pmd_page_vaddr(*(dir))
+#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
 #define __pte_unmap(pte)	do { } while (0)
 #else
-#define __pte_map(dir)		((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE)
-#define __pte_unmap(pte)	kunmap_atomic((pte - PTRS_PER_PTE))
+#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
+#define __pte_unmap(pte)	kunmap_atomic(pte)
 #endif
 
+#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))
+
+#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
+#define pte_unmap(pte)			__pte_unmap(pte)
+
+#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn,prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
+#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)
+
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -295,15 +378,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	}
 }
 
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
+#define pte_none(pte)		(!pte_val(pte))
 #define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
+#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
 #define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
-#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC)
+#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte)	(0)
 
 #define pte_present_user(pte) \
@@ -313,8 +393,8 @@
 #define PTE_BIT_FUNC(fn,op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
 
-PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
-PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
+PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
+PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
 PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
 PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
 PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
@@ -322,101 +402,13 @@ PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
 
 static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
-#define __pgprot_modify(prot,mask,bits)	\
-	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
-
-/*
- * Mark the prot value as uncacheable and unbufferable.
- */
-#define pgprot_noncached(prot) \
-	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
-#define pgprot_writecombine(prot) \
-	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
-#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
-#define pgprot_dmacoherent(prot) \
-	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-				     unsigned long size, pgprot_t vma_prot);
-#else
-#define pgprot_dmacoherent(prot) \
-	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
-#endif
-
-#define pmd_none(pmd)		(!pmd_val(pmd))
-#define pmd_present(pmd)	(pmd_val(pmd))
-#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
-
-#define copy_pmd(pmdpd,pmdps)	\
-	do {	\
-		pmdpd[0] = pmdps[0];	\
-		pmdpd[1] = pmdps[1];	\
-		flush_pmd_entry(pmdpd);	\
-	} while (0)
-
-#define pmd_clear(pmdp)	\
-	do {	\
-		pmdp[0] = __pmd(0);	\
-		pmdp[1] = __pmd(0);	\
-		clean_pmd_entry(pmdp);	\
-	} while (0)
-
-static inline pte_t *pmd_page_vaddr(pmd_t pmd)
-{
-	unsigned long ptr;
-
-	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
-	ptr += PTRS_PER_PTE * sizeof(void *);
-
-	return __va(ptr);
-}
-
-#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
-
-/* we don't need complex calculations here as the pmd is folded into the pgd */
-#define pmd_addr_end(addr,end)	(end)
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
-
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-#define pgd_none(pgd)		(0)
-#define pgd_bad(pgd)		(0)
-#define pgd_present(pgd)	(1)
-#define pgd_clear(pgdp)		do { } while (0)
-#define set_pgd(pgd,pgdp)	do { } while (0)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
-
-#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, addr)	((pmd_t *)(dir))
-
-/* Find an entry in the third-level page table.. */
-#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
+	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
 /*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
@@ -481,6 +473,9 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 #define pgtable_cache_init() do { } while (0)
 
+void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
+void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
new file mode 100644
index 000000000000..a84628be1a7b
--- /dev/null
+++ b/arch/arm/include/asm/sched_clock.h
@@ -0,0 +1,118 @@
+/*
+ * sched_clock.h: support for extending counters to full 64-bit ns counter
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_SCHED_CLOCK
+#define ASM_SCHED_CLOCK
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+struct clock_data {
+	u64 epoch_ns;
+	u32 epoch_cyc;
+	u32 epoch_cyc_copy;
+	u32 mult;
+	u32 shift;
+};
+
+#define DEFINE_CLOCK_DATA(name)	struct clock_data name
+
+static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+{
+	return (cyc * mult) >> shift;
+}
+
+/*
+ * Atomically update the sched_clock epoch.  Your update callback will
+ * be called from a timer before the counter wraps - read the current
+ * counter value, and call this function to safely move the epochs
+ * forward.  Only use this from the update callback.
+ */
+static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
+{
+	unsigned long flags;
+	u64 ns = cd->epoch_ns +
+		cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);
+
+	/*
+	 * Write epoch_cyc and epoch_ns in a way that the update is
+	 * detectable in cyc_to_fixed_sched_clock().
+	 */
+	raw_local_irq_save(flags);
+	cd->epoch_cyc = cyc;
+	smp_wmb();
+	cd->epoch_ns = ns;
+	smp_wmb();
+	cd->epoch_cyc_copy = cyc;
+	raw_local_irq_restore(flags);
+}
+
+/*
+ * If your clock rate is known at compile time, using this will allow
+ * you to optimize the mult/shift loads away.  This is paired with
+ * init_fixed_sched_clock() to ensure that your mult/shift are correct.
+ */
+static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
+	u32 cyc, u32 mask, u32 mult, u32 shift)
+{
+	u64 epoch_ns;
+	u32 epoch_cyc;
+
+	/*
+	 * Load the epoch_cyc and epoch_ns atomically.  We do this by
+	 * ensuring that we always write epoch_cyc, epoch_ns and
+	 * epoch_cyc_copy in strict order, and read them in strict order.
+	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
+	 * the middle of an update, and we should repeat the load.
+	 */
+	do {
+		epoch_cyc = cd->epoch_cyc;
+		smp_rmb();
+		epoch_ns = cd->epoch_ns;
+		smp_rmb();
+	} while (epoch_cyc != cd->epoch_cyc_copy);
+
+	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
+}
+
+/*
+ * Otherwise, you need to use this, which will obtain the mult/shift
+ * from the clock_data structure.  Use init_sched_clock() with this.
+ */
+static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
+	u32 cyc, u32 mask)
+{
+	return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
+}
+
+/*
+ * Initialize the clock data - calculate the appropriate multiplier
+ * and shift.  Also setup a timer to ensure that the epoch is refreshed
+ * at the appropriate time interval, which will call your update
+ * handler.
+ */
+void init_sched_clock(struct clock_data *, void (*)(void),
+	unsigned int, unsigned long);
+
+/*
+ * Use this initialization function rather than init_sched_clock() if
+ * you're using cyc_to_fixed_sched_clock, which will warn if your
+ * constants are incorrect.
+ */
+static inline void init_fixed_sched_clock(struct clock_data *cd,
+	void (*update)(void), unsigned int bits, unsigned long rate,
+	u32 mult, u32 shift)
+{
+	init_sched_clock(cd, update, bits, rate);
+	if (cd->mult != mult || cd->shift != shift) {
+		pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
+			"sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
+			mult, shift, cd->mult, cd->shift);
+	}
+}
+
+#endif
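A platform adopts this header by wrapping its free-running counter in a sched_clock() and an update callback; a hedged sketch, where MY_COUNTER (an ioremapped 32-bit counter register) and the my_* names are assumptions:

#include <linux/io.h>
#include <asm/sched_clock.h>

static DEFINE_CLOCK_DATA(cd);

unsigned long long notrace sched_clock(void)
{
	u32 cyc = readl(MY_COUNTER);		/* hypothetical register */
	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
}

static void notrace my_update_sched_clock(void)
{
	u32 cyc = readl(MY_COUNTER);
	update_sched_clock(&cd, cyc, (u32)~0);	/* called before wrap */
}

static void __init my_sched_clock_init(unsigned long rate)
{
	init_sched_clock(&cd, my_update_sched_clock, 32, rate);
}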
diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h
index 4fc1565e4f93..316bb2b2be3d 100644
--- a/arch/arm/include/asm/sizes.h
+++ b/arch/arm/include/asm/sizes.h
@@ -13,9 +13,6 @@
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
-/* DO NOT EDIT!! - this file automatically generated
- *                 from .s file by awk -f s2h.awk
- */
 /*  Size definitions
 *  Copyright (C) ARM Limited 1998.  All rights reserved.
 */
@@ -25,6 +22,9 @@
 
 /* handy sizes */
 #define SZ_16				0x00000010
+#define SZ_32				0x00000020
+#define SZ_64				0x00000040
+#define SZ_128				0x00000080
 #define SZ_256				0x00000100
 #define SZ_512				0x00000200
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 3d05190797cb..96ed521f2408 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -33,27 +33,23 @@ struct seq_file;
 /*
  * generate IPI list text
  */
-extern void show_ipi_list(struct seq_file *p);
+extern void show_ipi_list(struct seq_file *, int);
 
 /*
  * Called from assembly code, this handles an IPI.
  */
-asmlinkage void do_IPI(struct pt_regs *regs);
+asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
 
 /*
  * Setup the set of possible CPUs (via set_cpu_possible)
  */
 extern void smp_init_cpus(void);
 
-/*
- * Move global data into per-processor storage.
- */
-extern void smp_store_cpu_info(unsigned int cpuid);
 
 /*
  * Raise an IPI cross call on CPUs in callmap.
  */
-extern void smp_cross_call(const struct cpumask *mask);
+extern void smp_cross_call(const struct cpumask *mask, int ipi);
 
 /*
  * Boot a secondary CPU, and assign it the specified idle task.
@@ -73,6 +69,11 @@ asmlinkage void secondary_start_kernel(void);
 extern void platform_secondary_init(unsigned int cpu);
 
 /*
+ * Initialize cpu_possible map, and enable coherency
+ */
+extern void platform_smp_prepare_cpus(unsigned int);
+
+/*
  * Initial data for bringing up a secondary CPU.
  */
 struct secondary_data {
@@ -97,6 +98,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 /*
  * show local interrupt info
  */
-extern void show_local_irqs(struct seq_file *);
+extern void show_local_irqs(struct seq_file *, int);
 
 #endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/arch/arm/include/asm/smp_mpidr.h b/arch/arm/include/asm/smp_mpidr.h
deleted file mode 100644
index 6a9307d64900..000000000000
--- a/arch/arm/include/asm/smp_mpidr.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef ASMARM_SMP_MIDR_H
-#define ASMARM_SMP_MIDR_H
-
-#define hard_smp_processor_id()	\
-	({	\
-		unsigned int cpunum;	\
-		__asm__("\n"	\
-			"1:	mrc p15, 0, %0, c0, c0, 5\n"	\
-			"	.pushsection \".alt.smp.init\", \"a\"\n"\
-			"	.long	1b\n"	\
-			"	mov	%0, #0\n"	\
-			"	.popsection"	\
-			: "=r" (cpunum));	\
-		cpunum &= 0x0F;	\
-	})
-
-#endif
diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h
index 634f357be6bb..fed9981fba08 100644
--- a/arch/arm/include/asm/smp_twd.h
+++ b/arch/arm/include/asm/smp_twd.h
@@ -22,7 +22,6 @@ struct clock_event_device;
 
 extern void __iomem *twd_base;
 
-void twd_timer_stop(void);
 int twd_timer_ack(void);
 void twd_timer_setup(struct clock_event_device *);
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 1120f18a6b17..97f6d60297d5 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -63,6 +63,11 @@
 #include <asm/outercache.h>
 
 #define __exception	__attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry	__irq_entry
+#else
+#define __exception_irq_entry	__exception
+#endif
 
 struct thread_info;
 struct task_struct;
@@ -119,6 +124,13 @@ extern unsigned int user_debug;
 #define vectors_high()	(0)
 #endif
 
+#if __LINUX_ARM_ARCH__ >= 7 ||	\
+	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
+#define sev()	__asm__ __volatile__ ("sev" : : : "memory")
+#define wfe()	__asm__ __volatile__ ("wfe" : : : "memory")
+#define wfi()	__asm__ __volatile__ ("wfi" : : : "memory")
+#endif
+
 #if __LINUX_ARM_ARCH__ >= 7
 #define isb() __asm__ __volatile__ ("isb" : : : "memory")
 #define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
@@ -150,6 +162,7 @@
 #define rmb()		dmb()
 #define wmb()		mb()
 #else
+#include <asm/memory.h>
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 491960bf4260..1b960d5ef6a5 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -15,16 +15,37 @@ struct undef_hook {
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	extern char __irqentry_text_start[];
+	extern char __irqentry_text_end[];
+
+	return ptr >= (unsigned long)&__irqentry_text_start &&
+	       ptr < (unsigned long)&__irqentry_text_end;
+}
+#else
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	return 0;
+}
+#endif
+
 static inline int in_exception_text(unsigned long ptr)
 {
 	extern char __exception_text_start[];
 	extern char __exception_text_end[];
+	int in;
 
-	return ptr >= (unsigned long)&__exception_text_start &&
-	       ptr < (unsigned long)&__exception_text_end;
+	in = ptr >= (unsigned long)&__exception_text_start &&
+	     ptr < (unsigned long)&__exception_text_end;
+
+	return in ? : __in_irqentry_text(ptr);
 }
 
 extern void __init early_trap_init(void);
 extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
 
+extern void *vectors_page;
+
 #endif
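The new sev()/wfe() helpers in system.h are the building blocks for low-power wait loops, the pattern ARM lock code uses; an illustrative producer/consumer pair (names and the bare-flag protocol are for demonstration only):

static void wait_for_flag(volatile int *flag)
{
	while (!*flag)
		wfe();		/* sleep until an event or interrupt */
}

static void set_flag(volatile int *flag)
{
	*flag = 1;
	dsb();			/* order the store before the event */
	sev();			/* wake CPUs parked in wfe() */
}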
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 33e4a48fe103..b293616a1a1a 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -227,7 +227,7 @@ do {	\
 
 #define __get_user_asm_byte(x,addr,err)	\
 	__asm__ __volatile__(	\
-	"1:	ldrbt	%1,[%2]\n"	\
+	"1:	" T(ldrb) "	%1,[%2],#0\n"	\
 	"2:\n"	\
 	"	.pushsection .fixup,\"ax\"\n"	\
 	"	.align	2\n"	\
@@ -263,7 +263,7 @@
 
 #define __get_user_asm_word(x,addr,err)	\
 	__asm__ __volatile__(	\
-	"1:	ldrt	%1,[%2]\n"	\
+	"1:	" T(ldr) "	%1,[%2],#0\n"	\
 	"2:\n"	\
 	"	.pushsection .fixup,\"ax\"\n"	\
 	"	.align	2\n"	\
@@ -308,7 +308,7 @@
 
 #define __put_user_asm_byte(x,__pu_addr,err)	\
 	__asm__ __volatile__(	\
-	"1:	strbt	%1,[%2]\n"	\
+	"1:	" T(strb) "	%1,[%2],#0\n"	\
 	"2:\n"	\
 	"	.pushsection .fixup,\"ax\"\n"	\
 	"	.align	2\n"	\
@@ -341,7 +341,7 @@
 
 #define __put_user_asm_word(x,__pu_addr,err)	\
 	__asm__ __volatile__(	\
-	"1:	strt	%1,[%2]\n"	\
+	"1:	" T(str) "	%1,[%2],#0\n"	\
 	"2:\n"	\
 	"	.pushsection .fixup,\"ax\"\n"	\
 	"	.align	2\n"	\
@@ -366,10 +366,10 @@
 
 #define __put_user_asm_dword(x,__pu_addr,err)	\
 	__asm__ __volatile__(	\
-	ARM(	"1:	strt	" __reg_oper1 ", [%1], #4\n"	)	\
-	ARM(	"2:	strt	" __reg_oper0 ", [%1]\n"	)	\
-	THUMB(	"1:	strt	" __reg_oper1 ", [%1]\n"	)	\
-	THUMB(	"2:	strt	" __reg_oper0 ", [%1, #4]\n"	)	\
+	ARM(	"1:	" T(str) "	" __reg_oper1 ", [%1], #4\n"	)	\
+	ARM(	"2:	" T(str) "	" __reg_oper0 ", [%1]\n"	)	\
+	THUMB(	"1:	" T(str) "	" __reg_oper1 ", [%1]\n"	)	\
+	THUMB(	"2:	" T(str) "	" __reg_oper0 ", [%1, #4]\n"	)	\
 	"3:\n"	\
 	"	.pushsection .fixup,\"ax\"\n"	\
 	"	.align	2\n"	\
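Driver-visible behaviour of the uaccess macros is unchanged; the T() rewrite only selects between the -t variants and plain loads/stores (the ",#0" post-index appears to keep one addressing form that both variants accept). Standard usage, for reference (names illustrative):

#include <linux/uaccess.h>

static int read_user_value(int __user *uptr, int *out)
{
	int val;

	if (get_user(val, uptr))	/* built on __get_user_asm_word */
		return -EFAULT;
	*out = val;
	return 0;
}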