Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig | 2
-rw-r--r--  arch/arm/Kconfig.debug | 6
-rw-r--r--  arch/arm/Makefile | 4
-rw-r--r--  arch/arm/boot/compressed/head.S | 4
-rw-r--r--  arch/arm/boot/dts/at91sam9260.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/at91sam9263.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/at91sam9g45.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/at91sam9n12.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/at91sam9x5.dtsi | 4
-rw-r--r--  arch/arm/include/asm/assembler.h | 8
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 7
-rw-r--r--  arch/arm/include/asm/memory.h | 3
-rw-r--r--  arch/arm/include/asm/tlb.h | 4
-rw-r--r--  arch/arm/include/asm/uaccess.h | 58
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 62
-rw-r--r--  arch/arm/kernel/traps.c | 11
-rw-r--r--  arch/arm/lib/delay.c | 1
-rw-r--r--  arch/arm/lib/getuser.S | 23
-rw-r--r--  arch/arm/lib/putuser.S | 6
-rw-r--r--  arch/arm/mach-imx/clk-imx25.c | 8
-rw-r--r--  arch/arm/mach-imx/clk-imx35.c | 6
-rw-r--r--  arch/arm/mach-imx/mach-armadillo5x0.c | 3
-rw-r--r--  arch/arm/mach-kirkwood/common.c | 7
-rw-r--r--  arch/arm/mach-omap2/Kconfig | 3
-rw-r--r--  arch/arm/mach-omap2/Makefile | 2
-rw-r--r--  arch/arm/mach-omap2/clock33xx_data.c | 14
-rw-r--r--  arch/arm/mach-omap2/clockdomain2xxx_3xxx.c | 50
-rw-r--r--  arch/arm/mach-omap2/cm-regbits-34xx.h | 1
-rw-r--r--  arch/arm/mach-omap2/omap-wakeupgen.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 1
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 15
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 12
-rw-r--r--  arch/arm/mach-omap2/timer.c | 7
-rw-r--r--  arch/arm/mach-shmobile/board-kzm9g.c | 4
-rw-r--r--  arch/arm/mm/context.c | 7
-rw-r--r--  arch/arm/mm/dma-mapping.c | 114
-rw-r--r--  arch/arm/mm/mm.h | 3
-rw-r--r--  arch/arm/mm/mmu.c | 8
-rw-r--r--  arch/arm/plat-mxc/include/mach/mx25.h | 1
-rw-r--r--  arch/arm/plat-omap/sram.c | 11
-rw-r--r--  arch/arm/plat-samsung/clock.c | 10
-rw-r--r--  arch/blackfin/Kconfig | 1
-rw-r--r--  arch/blackfin/Makefile | 1
-rw-r--r--  arch/blackfin/include/asm/smp.h | 2
-rw-r--r--  arch/blackfin/mach-common/smp.c | 223
-rw-r--r--  arch/s390/include/asm/hugetlb.h | 24
-rw-r--r--  arch/s390/include/asm/tlbflush.h | 2
-rw-r--r--  arch/s390/kernel/setup.c | 2
-rw-r--r--  arch/s390/lib/uaccess_pt.c | 142
-rw-r--r--  arch/s390/oprofile/init.c | 10
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S | 2
-rw-r--r--  arch/sh/kernel/entry-common.S | 2
-rw-r--r--  arch/sparc/kernel/module.c | 13
-rw-r--r--  arch/x86/Kconfig | 8
-rw-r--r--  arch/x86/include/asm/xen/page.h | 3
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 25
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c | 14
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c | 3
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 6
-rw-r--r--  arch/x86/kernel/microcode_core.c | 3
-rw-r--r--  arch/x86/kvm/i8259.c | 2
-rw-r--r--  arch/x86/kvm/vmx.c | 23
-rw-r--r--  arch/x86/kvm/x86.c | 13
-rw-r--r--  arch/x86/mm/init.c | 2
-rw-r--r--  arch/x86/xen/enlighten.c | 4
-rw-r--r--  arch/x86/xen/p2m.c | 27
68 files changed, 671 insertions(+), 409 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c5f9ae5dbd1a..2f88d8d97701 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,7 +6,7 @@ config ARM
select HAVE_DMA_API_DEBUG
select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_DMA_ATTRS
- select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+ select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f15f82bf3a50..e968a52e4881 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -356,15 +356,15 @@ choice
is nothing connected to read from the DCC.
config DEBUG_SEMIHOSTING
- bool "Kernel low-level debug output via semihosting I"
+ bool "Kernel low-level debug output via semihosting I/O"
help
Semihosting enables code running on an ARM target to use
the I/O facilities on a host debugger/emulator through a
- simple SVC calls. The host debugger or emulator must have
+ simple SVC call. The host debugger or emulator must have
semihosting enabled for the special svc call to be trapped
otherwise the kernel will crash.
- This is known to work with OpenOCD, as wellas
+ This is known to work with OpenOCD, as well as
ARM's Fast Models, or any other controlling environment
that implements semihosting.
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 30eae87ead6d..a051dfbdd7db 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
zinstall uinstall install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
-%.dtb:
+%.dtb: scripts
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
-dtbs:
+dtbs: scripts
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
# We use MRPROPER_FILES and CLEAN_FILES now
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index b8c64b80bafc..81769c1341fa 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -659,10 +659,14 @@ __armv7_mmu_cache_on:
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r0, r0, #1 << 25 @ big-endian page tables
#endif
+ mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
orrne r0, r0, #1 @ MMU enabled
movne r1, #0xfffffffd @ domain 0 = client
+ bic r6, r6, #1 << 31 @ 32-bit translation system
+ bic r6, r6, #3 << 0 @ use only ttbr0
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
+ mcrne p15, 0, r6, c2, c0, 2 @ load ttb control
#endif
mcr p15, 0, r0, c7, c5, 4 @ ISB
mcr p15, 0, r0, c1, c0, 0 @ load control register
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 66389c1c6f62..7c95f76398de 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -104,6 +104,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff600 {
@@ -113,6 +114,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff800 {
@@ -122,6 +124,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@fffff200 {
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index b460d6ce9eb5..195019b7ca0e 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -95,6 +95,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff400 {
@@ -104,6 +105,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff600 {
@@ -113,6 +115,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioD: gpio@fffff800 {
@@ -122,6 +125,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioE: gpio@fffffa00 {
@@ -131,6 +135,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@ffffee00 {
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index bafa8806fc17..63751b1e744b 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -113,6 +113,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff400 {
@@ -122,6 +123,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff600 {
@@ -131,6 +133,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioD: gpio@fffff800 {
@@ -140,6 +143,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioE: gpio@fffffa00 {
@@ -149,6 +153,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@ffffee00 {
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index bfac0dfc332c..ef9336ae9614 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -107,6 +107,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff600 {
@@ -116,6 +117,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff800 {
@@ -125,6 +127,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioD: gpio@fffffa00 {
@@ -134,6 +137,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@fffff200 {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 4a18c393b136..8a387a8d61b7 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -115,6 +115,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff600 {
@@ -124,6 +125,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff800 {
@@ -133,6 +135,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioD: gpio@fffffa00 {
@@ -142,6 +145,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@fffff200 {
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 03fb93621d0d..5c8b3bf4d825 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -320,4 +320,12 @@
.size \name , . - \name
.endm
+ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+ adds \tmp, \addr, #\size - 1
+ sbcccs \tmp, \tmp, \limit
+ bcs \bad
+#endif
+ .endm
+
#endif /* __ASM_ASSEMBLER_H__ */
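The check_uaccess macro added above implements the software limit check used by the __get_user_*/__put_user_* fast paths when CONFIG_CPU_USE_DOMAINS is disabled. A minimal C model of what the adds/sbcccs/bcs sequence computes (an illustration, not the kernel's code; the limit argument is the inclusive addr_limit - 1 that the uaccess.h hunk below passes in r1):

/* Model of check_uaccess: reject the access when addr + size - 1
 * wraps around ('adds' sets the carry) or lands above the limit. */
static int access_bad(unsigned long addr, unsigned long size,
		      unsigned long limit)
{
	unsigned long last = addr + size - 1;	/* last byte touched */

	if (last < addr)			/* 32-bit wraparound */
		return 1;
	return last > limit;			/* beyond addr_limit - 1 */
}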
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 2ae842df4551..5c44dcb0987b 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -203,6 +203,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
}
/*
+ * This can be called during early boot to increase the size of the atomic
+ * coherent DMA pool above the default value of 256KiB. It must be called
+ * before postcore_initcall.
+ */
+extern void __init init_dma_coherent_pool_size(unsigned long size);
+
+/*
* This can be called during boot to increase the size of the consistent
* DMA region above its default value of 2MB. It must be called before the
* memory allocator is initialised, i.e. before any core_initcall.
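A usage sketch of the new hook (the board hook name is hypothetical; the 1 MiB figure mirrors the Kirkwood hunk further down): a platform enlarges the pool from its init_early callback, which runs before the postcore_initcall that actually allocates the pool.

/* hypothetical machine init_early hook; the size is illustrative */
static void __init my_board_init_early(void)
{
	init_dma_coherent_pool_size(SZ_1M);	/* default is 256 KiB */
}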
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e965f1b560f1..5f6ddcc56452 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
#endif
#endif
+#endif /* __ASSEMBLY__ */
#ifndef PHYS_OFFSET
#ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
#endif
#endif
+#ifndef __ASSEMBLY__
+
/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 314d4664eae7..99a19512ee26 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
{
pgtable_page_dtor(pte);
+#ifdef CONFIG_ARM_LPAE
+ tlb_add_flush(tlb, addr);
+#else
/*
* With the classic ARM MMU, a pte page has two corresponding pmd
* entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
addr &= PMD_MASK;
tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
tlb_add_flush(tlb, addr + SZ_1M);
+#endif
tlb_remove_page(tlb, pte);
}
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 479a6352e0b5..77bd79f2ffdb 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
-#define __get_user_x(__r2,__p,__e,__s,__i...) \
+#define __GUP_CLOBBER_1 "lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2 "ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4 "lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%1", "r2") \
+ __asmeq("%3", "r1") \
"bl __get_user_" #__s \
: "=&r" (__e), "=r" (__r2) \
- : "0" (__p) \
- : __i, "cc")
+ : "0" (__p), "r" (__l) \
+ : __GUP_CLOBBER_##__s)
-#define get_user(x,p) \
+#define __get_user_check(x,p) \
({ \
+ unsigned long __limit = current_thread_info()->addr_limit - 1; \
register const typeof(*(p)) __user *__p asm("r0") = (p);\
register unsigned long __r2 asm("r2"); \
+ register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
- __get_user_x(__r2, __p, __e, 1, "lr"); \
- break; \
+ __get_user_x(__r2, __p, __e, __l, 1); \
+ break; \
case 2: \
- __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \
+ __get_user_x(__r2, __p, __e, __l, 2); \
break; \
case 4: \
- __get_user_x(__r2, __p, __e, 4, "lr"); \
+ __get_user_x(__r2, __p, __e, __l, 4); \
break; \
default: __e = __get_user_bad(); break; \
} \
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
__e; \
})
+#define get_user(x,p) \
+ ({ \
+ might_fault(); \
+ __get_user_check(x,p); \
+ })
+
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
-#define __put_user_x(__r2,__p,__e,__s) \
+#define __put_user_x(__r2,__p,__e,__l,__s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%2", "r2") \
+ __asmeq("%3", "r1") \
"bl __put_user_" #__s \
: "=&r" (__e) \
- : "0" (__p), "r" (__r2) \
+ : "0" (__p), "r" (__r2), "r" (__l) \
: "ip", "lr", "cc")
-#define put_user(x,p) \
+#define __put_user_check(x,p) \
({ \
+ unsigned long __limit = current_thread_info()->addr_limit - 1; \
register const typeof(*(p)) __r2 asm("r2") = (x); \
register const typeof(*(p)) __user *__p asm("r0") = (p);\
+ register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
- __put_user_x(__r2, __p, __e, 1); \
+ __put_user_x(__r2, __p, __e, __l, 1); \
break; \
case 2: \
- __put_user_x(__r2, __p, __e, 2); \
+ __put_user_x(__r2, __p, __e, __l, 2); \
break; \
case 4: \
- __put_user_x(__r2, __p, __e, 4); \
+ __put_user_x(__r2, __p, __e, __l, 4); \
break; \
case 8: \
- __put_user_x(__r2, __p, __e, 8); \
+ __put_user_x(__r2, __p, __e, __l, 8); \
break; \
default: __e = __put_user_bad(); break; \
} \
__e; \
})
+#define put_user(x,p) \
+ ({ \
+ might_fault(); \
+ __put_user_check(x,p); \
+ })
+
#else /* CONFIG_MMU */
/*
@@ -219,6 +245,7 @@ do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
+ might_fault(); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
@@ -300,6 +327,7 @@ do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
+ might_fault(); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
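For callers, the visible change is that get_user()/put_user() can now fail on the limit check alone, before any user-space access is attempted. A hedged C model of the new contract for the 2-byte case (the real check and load are in assembly in getuser.S; the function below is illustrative only):

static int model_get_user_2(const unsigned short *uptr, unsigned long limit)
{
	unsigned long addr = (unsigned long)uptr;

	/* limit is the inclusive addr_limit - 1 passed in r1 */
	if (addr + 1 < addr || addr + 1 > limit)
		return -EFAULT;		/* rejected before touching memory */
	return 0;			/* real helper does the guarded load */
}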
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index ba386bd94107..281bf3301241 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -159,6 +159,12 @@ static int debug_arch_supported(void)
arch >= ARM_DEBUG_ARCH_V7_1;
}
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+ return 0;
+}
+
/* Determine number of WRP registers available. */
static int get_num_wrp_resources(void)
{
@@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
/* Aligned */
break;
case 1:
- /* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
- break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
+ case 3:
+ /* Allow single byte watchpoint. */
+ if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ break;
default:
ret = -EINVAL;
goto out;
@@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
info->address &= ~alignment_mask;
info->ctrl.len <<= offset;
- /*
- * Currently we rely on an overflow handler to take
- * care of single-stepping the breakpoint when it fires.
- * In the case of userspace breakpoints on a core with V7 debug,
- * we can use the mismatch feature as a poor-man's hardware
- * single-step, but this only works for per-task breakpoints.
- */
- if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
- !core_has_mismatch_brps() || !bp->hw.bp_target)) {
- pr_warning("overflow handler required but none found\n");
- ret = -EINVAL;
+ if (!bp->overflow_handler) {
+ /*
+ * Mismatch breakpoints are required for single-stepping
+ * breakpoints.
+ */
+ if (!core_has_mismatch_brps())
+ return -EINVAL;
+
+ /* We don't allow mismatch breakpoints in kernel space. */
+ if (arch_check_bp_in_kernelspace(bp))
+ return -EPERM;
+
+ /*
+ * Per-cpu breakpoints are not supported by our stepping
+ * mechanism.
+ */
+ if (!bp->hw.bp_target)
+ return -EINVAL;
+
+ /*
+ * We only support specific access types if the fsr
+ * reports them.
+ */
+ if (!debug_exception_updates_fsr() &&
+ (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+ info->ctrl.type == ARM_BREAKPOINT_STORE))
+ return -EINVAL;
}
+
out:
return ret;
}
@@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
goto unlock;
/* Check that the access type matches. */
- access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
- HW_BREAKPOINT_R;
- if (!(access & hw_breakpoint_type(wp)))
- goto unlock;
+ if (debug_exception_updates_fsr()) {
+ access = (fsr & ARM_FSR_ACCESS_MASK) ?
+ HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+ if (!(access & hw_breakpoint_type(wp)))
+ goto unlock;
+ }
/* We have a winner. */
info->trigger = addr;
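For context, a hedged user-side sketch of a request that the reordered validation above must classify: a byte-wide write watchpoint described through the perf ABI (perf_event_open() plumbing and error handling omitted; watched_byte is hypothetical):

struct perf_event_attr attr = {
	.type    = PERF_TYPE_BREAKPOINT,
	.size    = sizeof(attr),
	.bp_type = HW_BREAKPOINT_W,			/* write access */
	.bp_addr = (__u64)(unsigned long)&watched_byte,
	.bp_len  = HW_BREAKPOINT_LEN_1,			/* single byte */
};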
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f7945218b8c6..b0179b89a04c 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
#endif
instr = *(u32 *) pc;
} else if (thumb_mode(regs)) {
- get_user(instr, (u16 __user *)pc);
+ if (get_user(instr, (u16 __user *)pc))
+ goto die_sig;
if (is_wide_instruction(instr)) {
unsigned int instr2;
- get_user(instr2, (u16 __user *)pc+1);
+ if (get_user(instr2, (u16 __user *)pc+1))
+ goto die_sig;
instr <<= 16;
instr |= instr2;
}
- } else {
- get_user(instr, (u32 __user *)pc);
+ } else if (get_user(instr, (u32 __user *)pc)) {
+ goto die_sig;
}
if (call_undef_hook(regs, instr) == 0)
return;
+die_sig:
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_UNDEFINED) {
printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index d6dacc69254e..395d5fbb8fa2 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq)
{
pr_info("Switching to timer-based delay loop\n");
lpj_fine = freq / HZ;
+ loops_per_jiffy = lpj_fine;
arm_delay_ops.delay = __timer_delay;
arm_delay_ops.const_udelay = __timer_const_udelay;
arm_delay_ops.udelay = __timer_udelay;
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 11093a7c3e32..9b06bb41fca6 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -16,8 +16,9 @@
* __get_user_X
*
* Inputs: r0 contains the address
+ * r1 contains the address limit, which must be preserved
* Outputs: r0 is the error code
- * r2, r3 contains the zero-extended value
+ * r2 contains the zero-extended value
* lr corrupted
*
* No other registers must be altered. (see <asm/uaccess.h>
@@ -27,33 +28,39 @@
* Note also that it is intended that __get_user_bad is not global.
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
ENTRY(__get_user_1)
+ check_uaccess r0, 1, r1, r2, __get_user_bad
1: TUSER(ldrb) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__get_user_1)
ENTRY(__get_user_2)
-#ifdef CONFIG_THUMB2_KERNEL
-2: TUSER(ldrb) r2, [r0]
-3: TUSER(ldrb) r3, [r0, #1]
+ check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb .req ip
+2: ldrbt r2, [r0], #1
+3: ldrbt rb, [r0], #0
#else
-2: TUSER(ldrb) r2, [r0], #1
-3: TUSER(ldrb) r3, [r0]
+rb .req r0
+2: ldrb r2, [r0]
+3: ldrb rb, [r0, #1]
#endif
#ifndef __ARMEB__
- orr r2, r2, r3, lsl #8
+ orr r2, r2, rb, lsl #8
#else
- orr r2, r3, r2, lsl #8
+ orr r2, rb, r2, lsl #8
#endif
mov r0, #0
mov pc, lr
ENDPROC(__get_user_2)
ENTRY(__get_user_4)
+ check_uaccess r0, 4, r1, r2, __get_user_bad
4: TUSER(ldr) r2, [r0]
mov r0, #0
mov pc, lr
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 7db25990c589..3d73dcb959b0 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -16,6 +16,7 @@
* __put_user_X
*
* Inputs: r0 contains the address
+ * r1 contains the address limit, which must be preserved
* r2, r3 contains the value
* Outputs: r0 is the error code
* lr corrupted
@@ -27,16 +28,19 @@
* Note also that it is intended that __put_user_bad is not global.
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
ENTRY(__put_user_1)
+ check_uaccess r0, 1, r1, ip, __put_user_bad
1: TUSER(strb) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__put_user_1)
ENTRY(__put_user_2)
+ check_uaccess r0, 2, r1, ip, __put_user_bad
mov ip, r2, lsr #8
#ifdef CONFIG_THUMB2_KERNEL
#ifndef __ARMEB__
@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
ENDPROC(__put_user_2)
ENTRY(__put_user_4)
+ check_uaccess r0, 4, r1, ip, __put_user_bad
4: TUSER(str) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__put_user_4)
ENTRY(__put_user_8)
+ check_uaccess r0, 8, r1, ip, __put_user_bad
#ifdef CONFIG_THUMB2_KERNEL
5: TUSER(str) r2, [r0]
6: TUSER(str) r3, [r0, #4]
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
index fdd8cc87c9fe..d20d4795f4ea 100644
--- a/arch/arm/mach-imx/clk-imx25.c
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -222,10 +222,8 @@ int __init mx25_clocks_init(void)
clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
- clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
- clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
- clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
- clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+ clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1");
clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
@@ -243,6 +241,6 @@ int __init mx25_clocks_init(void)
clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
clk_register_clkdev(clk[iim_ipg], "iim", NULL);
- mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
+ mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), MX25_INT_GPT1);
return 0;
}
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index c6422fb10bae..65fb8bcd86cb 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -230,10 +230,8 @@ int __init mx35_clocks_init()
clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
- clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
- clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
- clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
- clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+ clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
/* i.mx35 has the i.mx21 type uart */
clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c
index 2c6ab3273f9e..5985ed1b8c98 100644
--- a/arch/arm/mach-imx/mach-armadillo5x0.c
+++ b/arch/arm/mach-imx/mach-armadillo5x0.c
@@ -526,7 +526,8 @@ static void __init armadillo5x0_init(void)
imx31_add_mxc_nand(&armadillo5x0_nand_board_info);
/* set NAND page size to 2k if not configured via boot mode pins */
- __raw_writel(__raw_readl(MXC_CCM_RCSR) | (1 << 30), MXC_CCM_RCSR);
+ __raw_writel(__raw_readl(mx3_ccm_base + MXC_CCM_RCSR) |
+ (1 << 30), mx3_ccm_base + MXC_CCM_RCSR);
/* RTC */
/* Get RTC IRQ and register the chip */
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 3226077735b1..1201191d7f1b 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void)
void __init kirkwood_init_early(void)
{
orion_time_set_base(TIMER_VIRT_BASE);
+
+ /*
+ * Some Kirkwood devices allocate their coherent buffers from atomic
+ * context. Increase size of atomic coherent pool to make sure such
+ * the allocations won't fail.
+ */
+ init_dma_coherent_pool_size(SZ_1M);
}
int kirkwood_tclk;
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index fcd4e85c4ddc..346fd26f3aa6 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -232,10 +232,11 @@ config MACH_OMAP3_PANDORA
select OMAP_PACKAGE_CBB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
-config MACH_OMAP3_TOUCHBOOK
+config MACH_TOUCHBOOK
bool "OMAP3 Touch Book"
depends on ARCH_OMAP3
default y
+ select OMAP_PACKAGE_CBB
config MACH_OMAP_3430SDP
bool "OMAP 3430 SDP board"
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index f6a24b3f9c4f..34c2c7f59f0a 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -255,7 +255,7 @@ obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-display.o
obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o
obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o
obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o
-obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o
+obj-$(CONFIG_MACH_TOUCHBOOK) += board-omap3touchbook.o
obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o
obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o
diff --git a/arch/arm/mach-omap2/clock33xx_data.c b/arch/arm/mach-omap2/clock33xx_data.c
index 25bbcc7ca4dc..ae27de8899a6 100644
--- a/arch/arm/mach-omap2/clock33xx_data.c
+++ b/arch/arm/mach-omap2/clock33xx_data.c
@@ -1036,13 +1036,13 @@ static struct omap_clk am33xx_clks[] = {
CLK(NULL, "mmu_fck", &mmu_fck, CK_AM33XX),
CLK(NULL, "smartreflex0_fck", &smartreflex0_fck, CK_AM33XX),
CLK(NULL, "smartreflex1_fck", &smartreflex1_fck, CK_AM33XX),
- CLK(NULL, "gpt1_fck", &timer1_fck, CK_AM33XX),
- CLK(NULL, "gpt2_fck", &timer2_fck, CK_AM33XX),
- CLK(NULL, "gpt3_fck", &timer3_fck, CK_AM33XX),
- CLK(NULL, "gpt4_fck", &timer4_fck, CK_AM33XX),
- CLK(NULL, "gpt5_fck", &timer5_fck, CK_AM33XX),
- CLK(NULL, "gpt6_fck", &timer6_fck, CK_AM33XX),
- CLK(NULL, "gpt7_fck", &timer7_fck, CK_AM33XX),
+ CLK(NULL, "timer1_fck", &timer1_fck, CK_AM33XX),
+ CLK(NULL, "timer2_fck", &timer2_fck, CK_AM33XX),
+ CLK(NULL, "timer3_fck", &timer3_fck, CK_AM33XX),
+ CLK(NULL, "timer4_fck", &timer4_fck, CK_AM33XX),
+ CLK(NULL, "timer5_fck", &timer5_fck, CK_AM33XX),
+ CLK(NULL, "timer6_fck", &timer6_fck, CK_AM33XX),
+ CLK(NULL, "timer7_fck", &timer7_fck, CK_AM33XX),
CLK(NULL, "usbotg_fck", &usbotg_fck, CK_AM33XX),
CLK(NULL, "ieee5000_fck", &ieee5000_fck, CK_AM33XX),
CLK(NULL, "wdt1_fck", &wdt1_fck, CK_AM33XX),
diff --git a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
index a0d68dbecfa3..f99e65cfb862 100644
--- a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
@@ -241,6 +241,52 @@ static void omap3_clkdm_deny_idle(struct clockdomain *clkdm)
_clkdm_del_autodeps(clkdm);
}
+static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)
+{
+ bool hwsup = false;
+
+ if (!clkdm->clktrctrl_mask)
+ return 0;
+
+ hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+ clkdm->clktrctrl_mask);
+
+ if (hwsup) {
+ /* Disable HW transitions when we are changing deps */
+ _disable_hwsup(clkdm);
+ _clkdm_add_autodeps(clkdm);
+ _enable_hwsup(clkdm);
+ } else {
+ if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+ omap3_clkdm_wakeup(clkdm);
+ }
+
+ return 0;
+}
+
+static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)
+{
+ bool hwsup = false;
+
+ if (!clkdm->clktrctrl_mask)
+ return 0;
+
+ hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+ clkdm->clktrctrl_mask);
+
+ if (hwsup) {
+ /* Disable HW transitions when we are changing deps */
+ _disable_hwsup(clkdm);
+ _clkdm_del_autodeps(clkdm);
+ _enable_hwsup(clkdm);
+ } else {
+ if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
+ omap3_clkdm_sleep(clkdm);
+ }
+
+ return 0;
+}
+
struct clkdm_ops omap2_clkdm_operations = {
.clkdm_add_wkdep = omap2_clkdm_add_wkdep,
.clkdm_del_wkdep = omap2_clkdm_del_wkdep,
@@ -267,6 +313,6 @@ struct clkdm_ops omap3_clkdm_operations = {
.clkdm_wakeup = omap3_clkdm_wakeup,
.clkdm_allow_idle = omap3_clkdm_allow_idle,
.clkdm_deny_idle = omap3_clkdm_deny_idle,
- .clkdm_clk_enable = omap2_clkdm_clk_enable,
- .clkdm_clk_disable = omap2_clkdm_clk_disable,
+ .clkdm_clk_enable = omap3xxx_clkdm_clk_enable,
+ .clkdm_clk_disable = omap3xxx_clkdm_clk_disable,
};
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index 766338fe4d34..975f6bda0e0b 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -67,6 +67,7 @@
#define OMAP3430_EN_IVA2_DPLL_MASK (0x7 << 0)
/* CM_IDLEST_IVA2 */
+#define OMAP3430_ST_IVA2_SHIFT 0
#define OMAP3430_ST_IVA2_MASK (1 << 0)
/* CM_IDLEST_PLL_IVA2 */
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 05fdebfaa195..330d4c6e746b 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -46,7 +46,7 @@
static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_SPINLOCK(wakeupgen_lock);
-static unsigned int irq_target_cpu[NR_IRQS];
+static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = MAX_NR_REG_BANKS;
static unsigned int max_irqs = MAX_IRQS;
static unsigned int omap_secure_apis;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 6ca8e519968d..37afbd173c2c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1889,6 +1889,7 @@ static int _enable(struct omap_hwmod *oh)
_enable_sysc(oh);
}
} else {
+ _omap4_disable_module(oh);
_disable_clocks(oh);
pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
oh->name, r);
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index c9e38200216b..ce7e6068768f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -100,9 +100,9 @@ static struct omap_hwmod omap3xxx_mpu_hwmod = {
/* IVA2 (IVA2) */
static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = {
- { .name = "logic", .rst_shift = 0 },
- { .name = "seq0", .rst_shift = 1 },
- { .name = "seq1", .rst_shift = 2 },
+ { .name = "logic", .rst_shift = 0, .st_shift = 8 },
+ { .name = "seq0", .rst_shift = 1, .st_shift = 9 },
+ { .name = "seq1", .rst_shift = 2, .st_shift = 10 },
};
static struct omap_hwmod omap3xxx_iva_hwmod = {
@@ -112,6 +112,15 @@ static struct omap_hwmod omap3xxx_iva_hwmod = {
.rst_lines = omap3xxx_iva_resets,
.rst_lines_cnt = ARRAY_SIZE(omap3xxx_iva_resets),
.main_clk = "iva2_ck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = OMAP3430_IVA2_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT,
+ }
+ },
};
/* timer class */
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 242aee498ceb..afb60917a948 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -4210,7 +4210,7 @@ static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
};
/* dsp -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_dsp__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_dsp__sl2if = {
.master = &omap44xx_dsp_hwmod,
.slave = &omap44xx_sl2if_hwmod,
.clk = "dpll_iva_m5x2_ck",
@@ -4828,7 +4828,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
};
/* iva -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_iva__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_iva__sl2if = {
.master = &omap44xx_iva_hwmod,
.slave = &omap44xx_sl2if_hwmod,
.clk = "dpll_iva_m5x2_ck",
@@ -5362,7 +5362,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = {
};
/* l3_main_2 -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = {
.master = &omap44xx_l3_main_2_hwmod,
.slave = &omap44xx_sl2if_hwmod,
.clk = "l3_div_ck",
@@ -6032,7 +6032,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_abe__dmic,
&omap44xx_l4_abe__dmic_dma,
&omap44xx_dsp__iva,
- &omap44xx_dsp__sl2if,
+ /* &omap44xx_dsp__sl2if, */
&omap44xx_l4_cfg__dsp,
&omap44xx_l3_main_2__dss,
&omap44xx_l4_per__dss,
@@ -6068,7 +6068,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_per__i2c4,
&omap44xx_l3_main_2__ipu,
&omap44xx_l3_main_2__iss,
- &omap44xx_iva__sl2if,
+ /* &omap44xx_iva__sl2if, */
&omap44xx_l3_main_2__iva,
&omap44xx_l4_wkup__kbd,
&omap44xx_l4_cfg__mailbox,
@@ -6099,7 +6099,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_cfg__cm_core,
&omap44xx_l4_wkup__prm,
&omap44xx_l4_wkup__scrm,
- &omap44xx_l3_main_2__sl2if,
+ /* &omap44xx_l3_main_2__sl2if, */
&omap44xx_l4_abe__slimbus1,
&omap44xx_l4_abe__slimbus1_dma,
&omap44xx_l4_per__slimbus2,
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 2ff6d41ec6c6..2ba4f57dda86 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -260,6 +260,7 @@ static u32 notrace dmtimer_read_sched_clock(void)
return 0;
}
+#ifdef CONFIG_OMAP_32K_TIMER
/* Setup free-running counter for clocksource */
static int __init omap2_sync32k_clocksource_init(void)
{
@@ -299,6 +300,12 @@ static int __init omap2_sync32k_clocksource_init(void)
return ret;
}
+#else
+static inline int omap2_sync32k_clocksource_init(void)
+{
+ return -ENODEV;
+}
+#endif
static void __init omap2_gptimer_clocksource_init(int gptimer_id,
const char *fck_source)
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index 53b7ea92c32c..3b8a0171c3cb 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -346,11 +346,11 @@ static struct resource sh_mmcif_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gic_spi(141),
+ .start = gic_spi(140),
.flags = IORESOURCE_IRQ,
},
[2] = {
- .start = gic_spi(140),
+ .start = gic_spi(141),
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 119bc52ab93e..4e07eec1270d 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
pid = task_pid_nr(thread->task) << ASID_BITS;
asm volatile(
" mrc p15, 0, %0, c13, c0, 1\n"
- " bfi %1, %0, #0, %2\n"
- " mcr p15, 0, %1, c13, c0, 1\n"
+ " and %0, %0, %2\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c13, c0, 1\n"
: "=r" (contextidr), "+r" (pid)
- : "I" (ASID_BITS));
+ : "I" (~ASID_MASK));
isb();
return NOTIFY_OK;
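A C model of what the rewritten inline asm computes, assuming ASID_MASK covers the non-ASID bits so that ~ASID_MASK preserves the hardware ASID (model only; the real update must stay in one asm block, followed by the isb):

u32 asid   = contextidr & ~ASID_MASK;	/* keep the live hardware ASID */
contextidr = asid | pid;		/* pid is task_pid_nr() << ASID_BITS */
/* the mcr p15, 0, ..., c13, c0, 1 then writes CONTEXTIDR back */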
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4e7d1182e8a3..e59c4ab71bcb 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
vunmap(cpu_addr);
}
+#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
+
struct dma_pool {
size_t size;
spinlock_t lock;
unsigned long *bitmap;
unsigned long nr_pages;
void *vaddr;
- struct page *page;
+ struct page **pages;
};
static struct dma_pool atomic_pool = {
- .size = SZ_256K,
+ .size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};
static int __init early_coherent_pool(char *p)
@@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
}
early_param("coherent_pool", early_coherent_pool);
+void __init init_dma_coherent_pool_size(unsigned long size)
+{
+ /*
+ * Catch any attempt to set the pool size too late.
+ */
+ BUG_ON(atomic_pool.vaddr);
+
+ /*
+ * Set architecture specific coherent pool size only if
+ * it has not been changed by kernel command line parameter.
+ */
+ if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+ atomic_pool.size = size;
+}
+
/*
* Initialise the coherent pool for atomic allocations.
*/
@@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
unsigned long *bitmap;
struct page *page;
+ struct page **pages;
void *ptr;
int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
@@ -304,21 +322,31 @@ static int __init atomic_pool_init(void)
if (!bitmap)
goto no_bitmap;
+ pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto no_pages;
+
if (IS_ENABLED(CONFIG_CMA))
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
else
ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
&page, NULL);
if (ptr) {
+ int i;
+
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = page + i;
+
spin_lock_init(&pool->lock);
pool->vaddr = ptr;
- pool->page = page;
+ pool->pages = pages;
pool->bitmap = bitmap;
pool->nr_pages = nr_pages;
pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
(unsigned)pool->size / 1024);
return 0;
}
+no_pages:
kfree(bitmap);
no_bitmap:
pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
if (pageno < pool->nr_pages) {
bitmap_set(pool->bitmap, pageno, count);
ptr = pool->vaddr + PAGE_SIZE * pageno;
- *ret_page = pool->page + pageno;
+ *ret_page = pool->pages[pageno];
+ } else {
+ pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
+ "Please increase it with coherent_pool= kernel parameter!\n",
+ (unsigned)pool->size / 1024);
}
spin_unlock_irqrestore(&pool->lock, flags);
return ptr;
}
+static bool __in_atomic_pool(void *start, size_t size)
+{
+ struct dma_pool *pool = &atomic_pool;
+ void *end = start + size;
+ void *pool_start = pool->vaddr;
+ void *pool_end = pool->vaddr + pool->size;
+
+ if (start < pool_start || start >= pool_end)
+ return false;
+
+ if (end <= pool_end)
+ return true;
+
+ WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
+ start, end - 1, pool_start, pool_end - 1);
+
+ return false;
+}
+
static int __free_from_pool(void *start, size_t size)
{
struct dma_pool *pool = &atomic_pool;
unsigned long pageno, count;
unsigned long flags;
- if (start < pool->vaddr || start > pool->vaddr + pool->size)
+ if (!__in_atomic_pool(start, size))
return 0;
- if (start + size > pool->vaddr + pool->size) {
- WARN(1, "freeing wrong coherent size from pool\n");
- return 0;
- }
-
pageno = (start - pool->vaddr) >> PAGE_SHIFT;
count = size >> PAGE_SHIFT;
@@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
return 0;
}
+static struct page **__atomic_get_pages(void *addr)
+{
+ struct dma_pool *pool = &atomic_pool;
+ struct page **pages = pool->pages;
+ int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+ return pages + offs;
+}
+
static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
struct vm_struct *area;
+ if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+ return __atomic_get_pages(cpu_addr);
+
if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
return cpu_addr;
@@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
return NULL;
}
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+ dma_addr_t *handle)
+{
+ struct page *page;
+ void *addr;
+
+ addr = __alloc_from_pool(size, &page);
+ if (!addr)
+ return NULL;
+
+ *handle = __iommu_create_mapping(dev, &page, size);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_mapping;
+
+ return addr;
+
+err_mapping:
+ __free_from_pool(addr, size);
+ return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+ dma_addr_t handle, size_t size)
+{
+ __iommu_remove_mapping(dev, handle, size);
+ __free_from_pool(page_address(pages[0]), size);
+}
+
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
@@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
+ if (gfp & GFP_ATOMIC)
+ return __iommu_alloc_atomic(dev, size, handle);
+
pages = __iommu_alloc_buffer(dev, size, gfp);
if (!pages)
return NULL;
@@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
return;
}
+ if (__in_atomic_pool(cpu_addr, size)) {
+ __iommu_free_atomic(dev, pages, handle, size);
+ return;
+ }
+
if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
unmap_kernel_range((unsigned long)cpu_addr, size);
vunmap(cpu_addr);
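From a driver's point of view, the combined effect is that atomic coherent allocations on the IOMMU path are now served from the preallocated pool. A hedged usage sketch ('dev' is a hypothetical IOMMU-attached device):

dma_addr_t handle;
void *buf;

/* may not sleep here, so the buffer must come from the atomic pool */
buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_ATOMIC);
if (!buf)
	return -ENOMEM;	/* pool exhausted; boot with a larger coherent_pool= */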
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6776160618ef..a8ee92da3544 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
/* permanent static mappings from iotable_init() */
#define VM_ARM_STATIC_MAPPING 0x40000000
+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING 0x20000000
+
/* mapping type (attributes) for permanent static mappings */
#define VM_ARM_MTYPE(mt) ((mt) << 20)
#define VM_ARM_MTYPE_MASK (0x1f << 20)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4c2d0451e84a..c2fa21d0103e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
vm->addr = (void *)addr;
vm->size = SECTION_SIZE;
- vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+ vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
vm->caller = pmd_empty_section_gap;
vm_area_add_early(vm);
}
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)
/* we're still single threaded hence no lock needed here */
for (vm = vmlist; vm; vm = vm->next) {
- if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+ if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
continue;
addr = (unsigned long)vm->addr;
if (addr < next)
@@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void)
* Check whether this memory bank would partially overlap
* the vmalloc area.
*/
- if (__va(bank->start + bank->size) > vmalloc_min ||
- __va(bank->start + bank->size) < __va(bank->start)) {
+ if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+ __va(bank->start + bank->size - 1) <= __va(bank->start)) {
unsigned long newsize = vmalloc_min - __va(bank->start);
printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
"to -%.8llx (vmalloc region overlap).\n",
diff --git a/arch/arm/plat-mxc/include/mach/mx25.h b/arch/arm/plat-mxc/include/mach/mx25.h
index 627d94f1b010..ec466400a200 100644
--- a/arch/arm/plat-mxc/include/mach/mx25.h
+++ b/arch/arm/plat-mxc/include/mach/mx25.h
@@ -98,6 +98,7 @@
#define MX25_INT_UART1 (NR_IRQS_LEGACY + 45)
#define MX25_INT_GPIO2 (NR_IRQS_LEGACY + 51)
#define MX25_INT_GPIO1 (NR_IRQS_LEGACY + 52)
+#define MX25_INT_GPT1 (NR_IRQS_LEGACY + 54)
#define MX25_INT_FEC (NR_IRQS_LEGACY + 57)
#define MX25_DMA_REQ_SSI2_RX1 22
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 766181cb5c95..024f3b08db29 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -68,6 +68,7 @@
static unsigned long omap_sram_start;
static void __iomem *omap_sram_base;
+static unsigned long omap_sram_skip;
static unsigned long omap_sram_size;
static void __iomem *omap_sram_ceil;
@@ -106,6 +107,7 @@ static int is_sram_locked(void)
*/
static void __init omap_detect_sram(void)
{
+ omap_sram_skip = SRAM_BOOTLOADER_SZ;
if (cpu_class_is_omap2()) {
if (is_sram_locked()) {
if (cpu_is_omap34xx()) {
@@ -113,6 +115,7 @@ static void __init omap_detect_sram(void)
if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||
(omap_type() == OMAP2_DEVICE_TYPE_SEC)) {
omap_sram_size = 0x7000; /* 28K */
+ omap_sram_skip += SZ_16K;
} else {
omap_sram_size = 0x8000; /* 32K */
}
@@ -175,8 +178,10 @@ static void __init omap_map_sram(void)
return;
#ifdef CONFIG_OMAP4_ERRATA_I688
+ if (cpu_is_omap44xx()) {
omap_sram_start += PAGE_SIZE;
omap_sram_size -= SZ_16K;
+ }
#endif
if (cpu_is_omap34xx()) {
/*
@@ -203,8 +208,8 @@ static void __init omap_map_sram(void)
* Looks like we need to preserve some bootloader code at the
* beginning of SRAM for jumping to flash for reboot to work...
*/
- memset_io(omap_sram_base + SRAM_BOOTLOADER_SZ, 0,
- omap_sram_size - SRAM_BOOTLOADER_SZ);
+ memset_io(omap_sram_base + omap_sram_skip, 0,
+ omap_sram_size - omap_sram_skip);
}
/*
@@ -218,7 +223,7 @@ void *omap_sram_push_address(unsigned long size)
{
unsigned long available, new_ceil = (unsigned long)omap_sram_ceil;
- available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ);
+ available = omap_sram_ceil - (omap_sram_base + omap_sram_skip);
if (size > available) {
pr_err("Not enough space in SRAM\n");
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 65c5eca475e7..d1116e2dfbea 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -144,6 +144,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
int clk_set_rate(struct clk *clk, unsigned long rate)
{
+ unsigned long flags;
int ret;
if (IS_ERR(clk))
@@ -159,9 +160,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
if (clk->ops == NULL || clk->ops->set_rate == NULL)
return -EINVAL;
- spin_lock(&clocks_lock);
+ spin_lock_irqsave(&clocks_lock, flags);
ret = (clk->ops->set_rate)(clk, rate);
- spin_unlock(&clocks_lock);
+ spin_unlock_irqrestore(&clocks_lock, flags);
return ret;
}
@@ -173,17 +174,18 @@ struct clk *clk_get_parent(struct clk *clk)
int clk_set_parent(struct clk *clk, struct clk *parent)
{
+ unsigned long flags;
int ret = 0;
if (IS_ERR(clk))
return -EINVAL;
- spin_lock(&clocks_lock);
+ spin_lock_irqsave(&clocks_lock, flags);
if (clk->ops && clk->ops->set_parent)
ret = (clk->ops->set_parent)(clk, parent);
- spin_unlock(&clocks_lock);
+ spin_unlock_irqrestore(&clocks_lock, flags);
return ret;
}
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index f34861920634..c7092e6057c5 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -38,6 +38,7 @@ config BLACKFIN
select GENERIC_ATOMIC64
select GENERIC_IRQ_PROBE
select IRQ_PER_CPU if SMP
+ select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
select GENERIC_SMP_IDLE_THREAD
select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index d3d7e64ca96d..66cf00095b84 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -20,7 +20,6 @@ endif
KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
KBUILD_CFLAGS_MODULE += -mlong-calls
LDFLAGS += -m elf32bfin
-KALLSYMS += --symbol-prefix=_
KBUILD_DEFCONFIG := BF537-STAMP_defconfig
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
index dc3d144b4bb5..9631598dcc5d 100644
--- a/arch/blackfin/include/asm/smp.h
+++ b/arch/blackfin/include/asm/smp.h
@@ -18,6 +18,8 @@
#define raw_smp_processor_id() blackfin_core_id()
extern void bfin_relocate_coreb_l1_mem(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 00bbe672b3b3..a40151306b77 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
-#define BFIN_IPI_TIMER 0
-#define BFIN_IPI_RESCHEDULE 1
-#define BFIN_IPI_CALL_FUNC 2
-#define BFIN_IPI_CPU_STOP 3
+enum ipi_message_type {
+ BFIN_IPI_TIMER,
+ BFIN_IPI_RESCHEDULE,
+ BFIN_IPI_CALL_FUNC,
+ BFIN_IPI_CALL_FUNC_SINGLE,
+ BFIN_IPI_CPU_STOP,
+};
struct blackfin_flush_data {
unsigned long start;
@@ -60,35 +63,20 @@ struct blackfin_flush_data {
void *secondary_stack;
-
-struct smp_call_struct {
- void (*func)(void *info);
- void *info;
- int wait;
- cpumask_t *waitmask;
-};
-
static struct blackfin_flush_data smp_flush_data;
static DEFINE_SPINLOCK(stop_lock);
-struct ipi_message {
- unsigned long type;
- struct smp_call_struct call_struct;
-};
-
/* A magic number - stress test shows this is safe for common cases */
#define BFIN_IPI_MSGQ_LEN 5
/* Simple FIFO buffer, overflow leads to panic */
-struct ipi_message_queue {
- spinlock_t lock;
+struct ipi_data {
unsigned long count;
- unsigned long head; /* head of the queue */
- struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
+ unsigned long bits;
};
-static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
+static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
static void ipi_cpu_stop(unsigned int cpu)
{
@@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
blackfin_icache_flush_range(fdata->start, fdata->end);
}
-static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
-{
- int wait;
- void (*func)(void *info);
- void *info;
- func = msg->call_struct.func;
- info = msg->call_struct.info;
- wait = msg->call_struct.wait;
- func(info);
- if (wait) {
-#ifdef __ARCH_SYNC_CORE_DCACHE
- /*
- * 'wait' usually means synchronization between CPUs.
- * Invalidate D cache in case shared data was changed
- * by func() to ensure cache coherence.
- */
- resync_core_dcache();
-#endif
- cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
- }
-}
-
/* Use IRQ_SUPPLE_0 to request reschedule.
* When returning from interrupt to user space,
* there is chance to reschedule */
@@ -172,152 +138,95 @@ void ipi_timer(void)
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
- struct ipi_message *msg;
- struct ipi_message_queue *msg_queue;
+ struct ipi_data *bfin_ipi_data;
unsigned int cpu = smp_processor_id();
- unsigned long flags;
+ unsigned long pending;
+ unsigned long msg;
platform_clear_ipi(cpu, IRQ_SUPPLE_1);
- msg_queue = &__get_cpu_var(ipi_msg_queue);
-
- spin_lock_irqsave(&msg_queue->lock, flags);
-
- while (msg_queue->count) {
- msg = &msg_queue->ipi_message[msg_queue->head];
- switch (msg->type) {
- case BFIN_IPI_TIMER:
- ipi_timer();
- break;
- case BFIN_IPI_RESCHEDULE:
- scheduler_ipi();
- break;
- case BFIN_IPI_CALL_FUNC:
- ipi_call_function(cpu, msg);
- break;
- case BFIN_IPI_CPU_STOP:
- ipi_cpu_stop(cpu);
- break;
- default:
- printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
- cpu, msg->type);
- break;
- }
- msg_queue->head++;
- msg_queue->head %= BFIN_IPI_MSGQ_LEN;
- msg_queue->count--;
+ bfin_ipi_data = &__get_cpu_var(bfin_ipi);
+
+ while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+ msg = 0;
+ do {
+ msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
+ switch (msg) {
+ case BFIN_IPI_TIMER:
+ ipi_timer();
+ break;
+ case BFIN_IPI_RESCHEDULE:
+ scheduler_ipi();
+ break;
+ case BFIN_IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
+
+ case BFIN_IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
+
+ case BFIN_IPI_CPU_STOP:
+ ipi_cpu_stop(cpu);
+ break;
+ }
+ } while (msg < BITS_PER_LONG);
+
+ smp_mb();
}
- spin_unlock_irqrestore(&msg_queue->lock, flags);
return IRQ_HANDLED;
}
-static void ipi_queue_init(void)
+static void bfin_ipi_init(void)
{
unsigned int cpu;
- struct ipi_message_queue *msg_queue;
+ struct ipi_data *bfin_ipi_data;
for_each_possible_cpu(cpu) {
- msg_queue = &per_cpu(ipi_msg_queue, cpu);
- spin_lock_init(&msg_queue->lock);
- msg_queue->count = 0;
- msg_queue->head = 0;
+ bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+ bfin_ipi_data->bits = 0;
+ bfin_ipi_data->count = 0;
}
}
-static inline void smp_send_message(cpumask_t callmap, unsigned long type,
- void (*func) (void *info), void *info, int wait)
+void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
{
unsigned int cpu;
- struct ipi_message_queue *msg_queue;
- struct ipi_message *msg;
- unsigned long flags, next_msg;
- cpumask_t waitmask; /* waitmask is shared by all cpus */
-
- cpumask_copy(&waitmask, &callmap);
- for_each_cpu(cpu, &callmap) {
- msg_queue = &per_cpu(ipi_msg_queue, cpu);
- spin_lock_irqsave(&msg_queue->lock, flags);
- if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
- next_msg = (msg_queue->head + msg_queue->count)
- % BFIN_IPI_MSGQ_LEN;
- msg = &msg_queue->ipi_message[next_msg];
- msg->type = type;
- if (type == BFIN_IPI_CALL_FUNC) {
- msg->call_struct.func = func;
- msg->call_struct.info = info;
- msg->call_struct.wait = wait;
- msg->call_struct.waitmask = &waitmask;
- }
- msg_queue->count++;
- } else
- panic("IPI message queue overflow\n");
- spin_unlock_irqrestore(&msg_queue->lock, flags);
+ struct ipi_data *bfin_ipi_data;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for_each_cpu(cpu, cpumask) {
+ bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+ smp_mb();
+ set_bit(msg, &bfin_ipi_data->bits);
+ bfin_ipi_data->count++;
platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
}
- if (wait) {
- while (!cpumask_empty(&waitmask))
- blackfin_dcache_invalidate_range(
- (unsigned long)(&waitmask),
- (unsigned long)(&waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
- /*
- * Invalidate D cache in case shared data was changed by
- * other processors to ensure cache coherence.
- */
- resync_core_dcache();
-#endif
- }
+ local_irq_restore(flags);
}
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- cpumask_t callmap;
-
- preempt_disable();
- cpumask_copy(&callmap, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), &callmap);
- if (!cpumask_empty(&callmap))
- smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
- preempt_enable();
-
- return 0;
+ send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
}
-EXPORT_SYMBOL_GPL(smp_call_function);
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
- int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- unsigned int cpu = cpuid;
- cpumask_t callmap;
-
- if (cpu_is_offline(cpu))
- return 0;
- cpumask_clear(&callmap);
- cpumask_set_cpu(cpu, &callmap);
-
- smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
- return 0;
+ send_ipi(mask, BFIN_IPI_CALL_FUNC);
}
-EXPORT_SYMBOL_GPL(smp_call_function_single);
void smp_send_reschedule(int cpu)
{
- cpumask_t callmap;
- /* simply trigger an ipi */
-
- cpumask_clear(&callmap);
- cpumask_set_cpu(cpu, &callmap);
-
- smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
+ send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
return;
}
void smp_send_msg(const struct cpumask *mask, unsigned long type)
{
- smp_send_message(*mask, type, NULL, NULL, 0);
+ send_ipi(mask, type);
}
void smp_timer_broadcast(const struct cpumask *mask)
@@ -333,7 +242,7 @@ void smp_send_stop(void)
cpumask_copy(&callmap, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &callmap);
if (!cpumask_empty(&callmap))
- smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+ send_ipi(&callmap, BFIN_IPI_CPU_STOP);
preempt_enable();
@@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
platform_prepare_cpus(max_cpus);
- ipi_queue_init();
+ bfin_ipi_init();
platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
}
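A condensed model of the new lock-free IPI scheme (names simplified; the real code uses a per-CPU struct and kicks IRQ_SUPPLE_1 on the target CPU):

static unsigned long ipi_bits;			/* per-CPU in the real code */

static void model_send_ipi(int msg)
{
	set_bit(msg, &ipi_bits);		/* atomic: no queue, no lock */
	/* ...raise the supplemental interrupt on the target CPU... */
}

static void model_ipi_handler(void)
{
	unsigned long pending = xchg(&ipi_bits, 0);	/* grab and clear */
	int msg;

	for_each_set_bit(msg, &pending, BITS_PER_LONG)
		handle_message(msg);		/* hypothetical dispatcher */
}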
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 799ed0f1643d..2d6e6e380564 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -66,16 +66,6 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return pte;
}
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- pte_t pte = huge_ptep_get(ptep);
-
- mm->context.flush_mm = 1;
- pmd_clear((pmd_t *) ptep);
- return pte;
-}
-
static inline void __pmd_csp(pmd_t *pmdp)
{
register unsigned long reg2 asm("2") = pmd_val(*pmdp);
@@ -117,6 +107,15 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
__pmd_csp(pmdp);
}
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = huge_ptep_get(ptep);
+
+ huge_ptep_invalidate(mm, addr, ptep);
+ return pte;
+}
+
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({ \
int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \
@@ -131,10 +130,7 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
({ \
pte_t __pte = huge_ptep_get(__ptep); \
if (pte_write(__pte)) { \
- (__mm)->context.flush_mm = 1; \
- if (atomic_read(&(__mm)->context.attach_count) > 1 || \
- (__mm) != current->active_mm) \
- huge_ptep_invalidate(__mm, __addr, __ptep); \
+ huge_ptep_invalidate(__mm, __addr, __ptep); \
set_huge_pte_at(__mm, __addr, __ptep, \
huge_pte_wrprotect(__pte)); \
} \
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 9fde315f3a7c..1d8fe2b17ef6 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -90,12 +90,10 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
{
- spin_lock(&mm->page_table_lock);
if (mm->context.flush_mm) {
__tlb_flush_mm(mm);
mm->context.flush_mm = 0;
}
- spin_unlock(&mm->page_table_lock);
}
/*
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f86c81e13c37..40b57693de38 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -974,11 +974,13 @@ static void __init setup_hwcaps(void)
if (MACHINE_HAS_HPAGE)
elf_hwcap |= HWCAP_S390_HPAGE;
+#if defined(CONFIG_64BIT)
/*
* 64-bit register support for 31-bit processes
* HWCAP_S390_HIGH_GPRS is bit 9.
*/
elf_hwcap |= HWCAP_S390_HIGH_GPRS;
+#endif
get_cpu_id(&cpu_id);
switch (cpu_id.machine) {
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 60ee2b883797..2d37bb861faf 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -2,69 +2,82 @@
* User access functions based on page table walks for enhanced
* system layout without hardware support.
*
- * Copyright IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2012
* Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
*/
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
+#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"
-static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
+
+/*
+ * Returns kernel address for user virtual address. If the returned address is
+ * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the address
+ * contains the (negative) exception code.
+ */
+static __always_inline unsigned long follow_table(struct mm_struct *mm,
+ unsigned long addr, int write)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
+ pte_t *ptep;
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- return (pte_t *) 0x3a;
+ return -0x3aUL;
pud = pud_offset(pgd, addr);
if (pud_none(*pud) || unlikely(pud_bad(*pud)))
- return (pte_t *) 0x3b;
+ return -0x3bUL;
pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
- return (pte_t *) 0x10;
+ if (pmd_none(*pmd))
+ return -0x10UL;
+ if (pmd_huge(*pmd)) {
+ if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
+ return -0x04UL;
+ return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
+ }
+ if (unlikely(pmd_bad(*pmd)))
+ return -0x10UL;
+
+ ptep = pte_offset_map(pmd, addr);
+ if (!pte_present(*ptep))
+ return -0x11UL;
+ if (write && !pte_write(*ptep))
+ return -0x04UL;
- return pte_offset_map(pmd, addr);
+ return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
}
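For reference, the encoding documented above matches the generic <linux/err.h> convention (restated here; not part of this patch): the top 4095 values of an unsigned long are reserved for errors, so a single return value can carry either a translated kernel address or a negated exception code.

	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

Thus -0x3aUL, -0x3bUL, -0x10UL, -0x11UL and -0x04UL all satisfy IS_ERR_VALUE(), and the callers below hand the positive DAT exception code to __handle_fault() by negating again (-kaddr).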
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
size_t n, int write_user)
{
struct mm_struct *mm = current->mm;
- unsigned long offset, pfn, done, size;
- pte_t *pte;
+ unsigned long offset, done, size, kaddr;
void *from, *to;
done = 0;
retry:
spin_lock(&mm->page_table_lock);
do {
- pte = follow_table(mm, uaddr);
- if ((unsigned long) pte < 0x1000)
+ kaddr = follow_table(mm, uaddr, write_user);
+ if (IS_ERR_VALUE(kaddr))
goto fault;
- if (!pte_present(*pte)) {
- pte = (pte_t *) 0x11;
- goto fault;
- } else if (write_user && !pte_write(*pte)) {
- pte = (pte_t *) 0x04;
- goto fault;
- }
- pfn = pte_pfn(*pte);
- offset = uaddr & (PAGE_SIZE - 1);
+ offset = uaddr & ~PAGE_MASK;
size = min(n - done, PAGE_SIZE - offset);
if (write_user) {
- to = (void *)((pfn << PAGE_SHIFT) + offset);
+ to = (void *) kaddr;
from = kptr + done;
} else {
- from = (void *)((pfn << PAGE_SHIFT) + offset);
+ from = (void *) kaddr;
to = kptr + done;
}
memcpy(to, from, size);
@@ -75,7 +88,7 @@ retry:
return n - done;
fault:
spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, (unsigned long) pte, write_user))
+ if (__handle_fault(uaddr, -kaddr, write_user))
return n - done;
goto retry;
}
@@ -84,27 +97,22 @@ fault:
* Do DAT for user address by page table walk, return kernel address.
* This function needs to be called with current->mm->page_table_lock held.
*/
-static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
+static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
+ int write)
{
struct mm_struct *mm = current->mm;
- unsigned long pfn;
- pte_t *pte;
+ unsigned long kaddr;
int rc;
retry:
- pte = follow_table(mm, uaddr);
- if ((unsigned long) pte < 0x1000)
- goto fault;
- if (!pte_present(*pte)) {
- pte = (pte_t *) 0x11;
+ kaddr = follow_table(mm, uaddr, write);
+ if (IS_ERR_VALUE(kaddr))
goto fault;
- }
- pfn = pte_pfn(*pte);
- return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
+ return kaddr;
fault:
spin_unlock(&mm->page_table_lock);
- rc = __handle_fault(uaddr, (unsigned long) pte, 0);
+ rc = __handle_fault(uaddr, -kaddr, write);
spin_lock(&mm->page_table_lock);
if (!rc)
goto retry;
@@ -159,11 +167,9 @@ static size_t clear_user_pt(size_t n, void __user *to)
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
- char *addr;
unsigned long uaddr = (unsigned long) src;
struct mm_struct *mm = current->mm;
- unsigned long offset, pfn, done, len;
- pte_t *pte;
+ unsigned long offset, done, len, kaddr;
size_t len_str;
if (segment_eq(get_fs(), KERNEL_DS))
@@ -172,19 +178,13 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
retry:
spin_lock(&mm->page_table_lock);
do {
- pte = follow_table(mm, uaddr);
- if ((unsigned long) pte < 0x1000)
- goto fault;
- if (!pte_present(*pte)) {
- pte = (pte_t *) 0x11;
+ kaddr = follow_table(mm, uaddr, 0);
+ if (IS_ERR_VALUE(kaddr))
goto fault;
- }
- pfn = pte_pfn(*pte);
- offset = uaddr & (PAGE_SIZE-1);
- addr = (char *)(pfn << PAGE_SHIFT) + offset;
+ offset = uaddr & ~PAGE_MASK;
len = min(count - done, PAGE_SIZE - offset);
- len_str = strnlen(addr, len);
+ len_str = strnlen((char *) kaddr, len);
done += len_str;
uaddr += len_str;
} while ((len_str == len) && (done < count));
@@ -192,7 +192,7 @@ retry:
return done + 1;
fault:
spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, (unsigned long) pte, 0))
+ if (__handle_fault(uaddr, -kaddr, 0))
return 0;
goto retry;
}
@@ -225,11 +225,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
const void __user *from)
{
struct mm_struct *mm = current->mm;
- unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
- uaddr, done, size, error_code;
+ unsigned long offset_max, uaddr, done, size, error_code;
unsigned long uaddr_from = (unsigned long) from;
unsigned long uaddr_to = (unsigned long) to;
- pte_t *pte_from, *pte_to;
+ unsigned long kaddr_to, kaddr_from;
int write_user;
if (segment_eq(get_fs(), KERNEL_DS)) {
@@ -242,38 +241,23 @@ retry:
do {
write_user = 0;
uaddr = uaddr_from;
- pte_from = follow_table(mm, uaddr_from);
- error_code = (unsigned long) pte_from;
- if (error_code < 0x1000)
- goto fault;
- if (!pte_present(*pte_from)) {
- error_code = 0x11;
+ kaddr_from = follow_table(mm, uaddr_from, 0);
+ error_code = kaddr_from;
+ if (IS_ERR_VALUE(error_code))
goto fault;
- }
write_user = 1;
uaddr = uaddr_to;
- pte_to = follow_table(mm, uaddr_to);
- error_code = (unsigned long) pte_to;
- if (error_code < 0x1000)
- goto fault;
- if (!pte_present(*pte_to)) {
- error_code = 0x11;
+ kaddr_to = follow_table(mm, uaddr_to, 1);
+ error_code = (unsigned long) kaddr_to;
+ if (IS_ERR_VALUE(error_code))
goto fault;
- } else if (!pte_write(*pte_to)) {
- error_code = 0x04;
- goto fault;
- }
- pfn_from = pte_pfn(*pte_from);
- pfn_to = pte_pfn(*pte_to);
- offset_from = uaddr_from & (PAGE_SIZE-1);
- offset_to = uaddr_from & (PAGE_SIZE-1);
- offset_max = max(offset_from, offset_to);
+ offset_max = max(uaddr_from & ~PAGE_MASK,
+ uaddr_to & ~PAGE_MASK);
size = min(n - done, PAGE_SIZE - offset_max);
- memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
- (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
+ memcpy((void *) kaddr_to, (void *) kaddr_from, size);
done += size;
uaddr_from += size;
uaddr_to += size;
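A worked example of the chunking logic above (illustrative numbers, not from the patch): with PAGE_SIZE = 0x1000, uaddr_from & ~PAGE_MASK = 0xf00 and uaddr_to & ~PAGE_MASK = 0xffc give offset_max = 0xffc, so size is capped at 0x1000 - 0xffc = 4 bytes. Each iteration copies only up to the nearer of the two page boundaries, so kaddr_from and kaddr_to both stay inside the pages that follow_table() just translated.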
@@ -282,7 +266,7 @@ retry:
return n - done;
fault:
spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, error_code, write_user))
+ if (__handle_fault(uaddr, -error_code, write_user))
return n - done;
goto retry;
}
@@ -341,7 +325,7 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
return __futex_atomic_op_pt(op, uaddr, oparg, old);
spin_lock(&current->mm->page_table_lock);
uaddr = (u32 __force __user *)
- __dat_user_addr((__force unsigned long) uaddr);
+ __dat_user_addr((__force unsigned long) uaddr, 1);
if (!uaddr) {
spin_unlock(&current->mm->page_table_lock);
return -EFAULT;
@@ -378,7 +362,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
spin_lock(&current->mm->page_table_lock);
uaddr = (u32 __force __user *)
- __dat_user_addr((__force unsigned long) uaddr);
+ __dat_user_addr((__force unsigned long) uaddr, 1);
if (!uaddr) {
spin_unlock(&current->mm->page_table_lock);
return -EFAULT;
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index a1e9d69a9c90..584b93674ea4 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -169,7 +169,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf,
if (*offset)
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val < oprofile_min_interval)
oprofile_hw_interval = oprofile_min_interval;
@@ -212,7 +212,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val != 0)
return -EINVAL;
@@ -243,7 +243,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val != 0 && val != 1)
@@ -278,7 +278,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val != 0 && val != 1)
@@ -317,7 +317,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val != 0 && val != 1)
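All five hunks in this file track one interface change, assumed from context since the oprofilefs side is not part of this diff: oprofilefs_ulong_from_user() previously returned 0 on success and now returns the number of bytes consumed. A sketch of the convention the new checks rely on:

	/* assumed convention (not shown in this patch):
	 *   > 0 : success, bytes consumed from the user buffer
	 *     0 : nothing was written
	 *   < 0 : -errno
	 */
	retval = oprofilefs_ulong_from_user(&val, buf, count);
	if (retval <= 0)	/* plain "if (retval)" would reject every success */
		return retval;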
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index b7cf6a547f11..7e605b95592a 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -933,7 +933,7 @@ ret_with_reschedule:
pta restore_all, tr1
- movi _TIF_SIGPENDING, r8
+ movi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
and r8, r7, r8
pta work_notifysig, tr0
bne r8, ZERO, tr0
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index f67601cb3f1f..b96489d8b27d 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -139,7 +139,7 @@ work_pending:
! r8: current_thread_info
! t: result of "tst #_TIF_NEED_RESCHED, r0"
bf/s work_resched
- tst #_TIF_SIGPENDING, r0
+ tst #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME), r0
work_notifysig:
bt/s __restore_all
mov r15, r4
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 15e0a1693976..f1ddc0d23679 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -48,9 +48,7 @@ void *module_alloc(unsigned long size)
return NULL;
ret = module_map(size);
- if (!ret)
- ret = ERR_PTR(-ENOMEM);
- else
+ if (ret)
memset(ret, 0, size);
return ret;
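The ERR_PTR() is dropped because the generic module loader treats NULL as the failure value from module_alloc() and supplies -ENOMEM itself; an ERR_PTR(-ENOMEM) cookie is non-NULL and would be used as a real allocation. Paraphrasing the caller in kernel/module.c:

	ptr = module_alloc(size);
	if (!ptr)
		return -ENOMEM;	/* an ERR_PTR would slip straight past this check */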
@@ -116,6 +114,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
v = sym->st_value + rel[i].r_addend;
switch (ELF_R_TYPE(rel[i].r_info) & 0xff) {
+ case R_SPARC_DISP32:
+ v -= (Elf_Addr) location;
+ *loc32 = v;
+ break;
#ifdef CONFIG_SPARC64
case R_SPARC_64:
location[0] = v >> 56;
@@ -128,11 +130,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
location[7] = v >> 0;
break;
- case R_SPARC_DISP32:
- v -= (Elf_Addr) location;
- *loc32 = v;
- break;
-
case R_SPARC_WDISP19:
v -= (Elf_Addr) location;
*loc32 = (*loc32 & ~0x7ffff) |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8ec3a1aa4abd..50a1d1f9b6d3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -746,10 +746,10 @@ config SWIOTLB
def_bool y if X86_64
---help---
Support for software bounce buffers used on x86-64 systems
- which don't have a hardware IOMMU (e.g. the current generation
- of Intel's x86-64 CPUs). Using this PCI devices which can only
- access 32-bits of memory can be used on systems with more than
- 3 GB of memory. If unsure, say Y.
+	  which don't have a hardware IOMMU. With this, PCI devices
+	  that can only address 32 bits of memory can be used on
+	  systems with more than 3 GB of memory.
+ If unsure, say Y.
config IOMMU_HELPER
def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 93971e841dd5..472b9b783019 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -51,7 +51,8 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
extern int m2p_add_override(unsigned long mfn, struct page *page,
struct gnttab_map_grant_ref *kmap_op);
-extern int m2p_remove_override(struct page *page, bool clear_pte);
+extern int m2p_remove_override(struct page *page,
+ struct gnttab_map_grant_ref *kmap_op);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 6605a81ba339..8b6defe7eefc 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -586,6 +586,8 @@ extern struct event_constraint intel_westmere_pebs_event_constraints[];
extern struct event_constraint intel_snb_pebs_event_constraints[];
+extern struct event_constraint intel_ivb_pebs_event_constraints[];
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event);
void intel_pmu_pebs_enable(struct perf_event *event);
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 7bfb5bec8630..eebd5ffe1bba 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -209,6 +209,15 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
return -EOPNOTSUPP;
}
+static const struct perf_event_attr ibs_notsupp = {
+ .exclude_user = 1,
+ .exclude_kernel = 1,
+ .exclude_hv = 1,
+ .exclude_idle = 1,
+ .exclude_host = 1,
+ .exclude_guest = 1,
+};
+
static int perf_ibs_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -229,6 +238,9 @@ static int perf_ibs_init(struct perf_event *event)
if (event->pmu != &perf_ibs->pmu)
return -ENOENT;
+ if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
+ return -EINVAL;
+
if (config & ~perf_ibs->config_mask)
return -EINVAL;
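The mask trick above works because all of the exclude_* flags sit in the single bitfield word that follows read_format in struct perf_event_attr, and perf_flags() reads that word directly. Its definition is assumed here from perf_event.h; it is not shown in this diff:

	#define perf_flags(attr)	(*(&(attr)->read_format + 1))

ANDing the caller's flag word against a template with only the unsupported bits set is nonzero exactly when the caller requested something IBS cannot do.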
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 7f2739e03e79..6bca492b8547 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2008,6 +2008,7 @@ __init int intel_pmu_init(void)
break;
case 28: /* Atom */
+	case 54: /* Cedarview */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -2047,7 +2048,6 @@ __init int intel_pmu_init(void)
case 42: /* SandyBridge */
	case 45: /* SandyBridge, "Romley-EP" */
x86_add_quirk(intel_sandybridge_quirk);
- case 58: /* IvyBridge */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
@@ -2072,6 +2072,29 @@ __init int intel_pmu_init(void)
pr_cont("SandyBridge events, ");
break;
+ case 58: /* IvyBridge */
+ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
+
+ intel_pmu_lbr_init_snb();
+
+ x86_pmu.event_constraints = intel_snb_event_constraints;
+ x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
+ x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
+ x86_pmu.extra_regs = intel_snb_extra_regs;
+ /* all extra regs are per-cpu when HT is on */
+ x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+
+ /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
+ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
+ X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
+
+ pr_cont("IvyBridge events, ");
+ break;
+
default:
switch (x86_pmu.version) {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index e38d97bf4259..826054a4f2ee 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -407,6 +407,20 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_ivb_pebs_event_constraints[] = {
+ INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+ INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+ INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
+ INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+ EVENT_CONSTRAINT_END
+};
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
struct event_constraint *c;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 520b4265fcd2..da02e9cc3754 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -686,7 +686,8 @@ void intel_pmu_lbr_init_atom(void)
* to have an operational LBR which can freeze
* on PMU interrupt
*/
- if (boot_cpu_data.x86_mask < 10) {
+ if (boot_cpu_data.x86_model == 28
+ && boot_cpu_data.x86_mask < 10) {
pr_cont("LBR disabled due to erratum");
return;
}
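This pairs with the Cedarview addition in perf_event_intel.c above:

	/* Model 54 now reaches intel_pmu_lbr_init_atom() too, and the
	 * LBR-freeze erratum applies only to early steppings of model 28,
	 * so an unqualified stepping test would wrongly disable LBR on
	 * newer Atoms. */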
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 0a5571080e74..38e4894165b9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -661,6 +661,11 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
}
}
+static struct uncore_event_desc snb_uncore_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
+ { /* end: all zeroes */ },
+};
+
static struct attribute *snb_uncore_formats_attr[] = {
&format_attr_event.attr,
&format_attr_umask.attr,
@@ -704,6 +709,7 @@ static struct intel_uncore_type snb_uncore_cbox = {
.constraints = snb_uncore_cbox_constraints,
.ops = &snb_uncore_msr_ops,
.format_group = &snb_uncore_format_group,
+ .event_descs = snb_uncore_events,
};
static struct intel_uncore_type *snb_msr_uncores[] = {
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 4873e62db6a1..9e5bcf1e2376 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -225,6 +225,9 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
if (do_microcode_update(buf, len) == 0)
ret = (ssize_t)len;
+ if (ret > 0)
+ perf_check_microcode();
+
mutex_unlock(&microcode_mutex);
put_online_cpus();
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index e498b18f010c..9fc9aa7ac703 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -318,7 +318,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
if (val & 0x10) {
u8 edge_irr = s->irr & ~s->elcr;
int i;
- bool found;
+ bool found = false;
struct kvm_vcpu *vcpu;
s->init4 = val & 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c00f03de1b79..b1eb202ee76a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3619,6 +3619,7 @@ static void seg_setup(int seg)
static int alloc_apic_access_page(struct kvm *kvm)
{
+ struct page *page;
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
@@ -3633,7 +3634,13 @@ static int alloc_apic_access_page(struct kvm *kvm)
if (r)
goto out;
- kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
+ page = gfn_to_page(kvm, 0xfee00);
+ if (is_error_page(page)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ kvm->arch.apic_access_page = page;
out:
mutex_unlock(&kvm->slots_lock);
return r;
@@ -3641,6 +3648,7 @@ out:
static int alloc_identity_pagetable(struct kvm *kvm)
{
+ struct page *page;
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
@@ -3656,8 +3664,13 @@ static int alloc_identity_pagetable(struct kvm *kvm)
if (r)
goto out;
- kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
- kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+ page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+ if (is_error_page(page)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ kvm->arch.ept_identity_pagetable = page;
out:
mutex_unlock(&kvm->slots_lock);
return r;
@@ -6575,7 +6588,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
/* Exposing INVPCID only when PCID is exposed */
best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
if (vmx_invpcid_supported() &&
- best && (best->ecx & bit(X86_FEATURE_INVPCID)) &&
+ best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
guest_cpuid_has_pcid(vcpu)) {
exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
@@ -6585,7 +6598,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
exec_control);
if (best)
- best->ecx &= ~bit(X86_FEATURE_INVPCID);
+ best->ebx &= ~bit(X86_FEATURE_INVPCID);
}
}
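The ecx-to-ebx swap is not cosmetic; INVPCID enumeration lives in EBX of CPUID leaf 7:

	/* CPUID.(EAX=07H, ECX=0):EBX[bit 10] = INVPCID.  Testing and
	 * clearing the bit in ECX could never match, so a guest would
	 * keep seeing a feature flag the VMM intended to hide. */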
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 148ed666e311..2966c847d489 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5113,17 +5113,20 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
!kvm_event_needs_reinjection(vcpu);
}
-static void vapic_enter(struct kvm_vcpu *vcpu)
+static int vapic_enter(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct page *page;
if (!apic || !apic->vapic_addr)
- return;
+ return 0;
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+ if (is_error_page(page))
+ return -EFAULT;
vcpu->arch.apic->vapic_page = page;
+ return 0;
}
static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -5430,7 +5433,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
}
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- vapic_enter(vcpu);
+ r = vapic_enter(vcpu);
+ if (r) {
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ return r;
+ }
r = 1;
while (r > 0) {
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index e0e6990723e9..ab1f6a93b527 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -319,7 +319,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
*/
int devmem_is_allowed(unsigned long pagenr)
{
- if (pagenr <= 256)
+ if (pagenr < 256)
return 1;
if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
return 0;
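The change fixes an off-by-one in the legacy-range test:

	/* The always-allowed legacy region is the first megabyte:
	 * 0x100000 / 0x1000 bytes per page = 256 pages, i.e. pagenr
	 * 0..255.  "pagenr <= 256" also admitted page 256 (physical
	 * 0x100000-0x100fff), skipping iomem_is_exclusive() for the
	 * first page above 1 MB. */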
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 9642d4a38602..1fbe75a95f15 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1452,6 +1452,10 @@ asmlinkage void __init xen_start_kernel(void)
pci_request_acs();
xen_acpi_sleep_register();
+
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
}
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 76ba0e97e530..72213da605f5 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -828,9 +828,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
- /* let's use dev_bus_addr to record the old mfn instead */
- kmap_op->dev_bus_addr = page->index;
- page->index = (unsigned long) kmap_op;
}
spin_lock_irqsave(&m2p_override_lock, flags);
list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
@@ -857,7 +854,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
return 0;
}
EXPORT_SYMBOL_GPL(m2p_add_override);
-int m2p_remove_override(struct page *page, bool clear_pte)
+int m2p_remove_override(struct page *page,
+ struct gnttab_map_grant_ref *kmap_op)
{
unsigned long flags;
unsigned long mfn;
@@ -887,10 +885,8 @@ int m2p_remove_override(struct page *page, bool clear_pte)
WARN_ON(!PagePrivate(page));
ClearPagePrivate(page);
- if (clear_pte) {
- struct gnttab_map_grant_ref *map_op =
- (struct gnttab_map_grant_ref *) page->index;
- set_phys_to_machine(pfn, map_op->dev_bus_addr);
+ set_phys_to_machine(pfn, page->index);
+ if (kmap_op != NULL) {
if (!PageHighMem(page)) {
struct multicall_space mcs;
struct gnttab_unmap_grant_ref *unmap_op;
@@ -902,13 +898,13 @@ int m2p_remove_override(struct page *page, bool clear_pte)
* issued. In this case handle is going to -1 because
* it hasn't been modified yet.
*/
- if (map_op->handle == -1)
+ if (kmap_op->handle == -1)
xen_mc_flush();
/*
- * Now if map_op->handle is negative it means that the
+ * Now if kmap_op->handle is negative it means that the
* hypercall actually returned an error.
*/
- if (map_op->handle == GNTST_general_error) {
+ if (kmap_op->handle == GNTST_general_error) {
printk(KERN_WARNING "m2p_remove_override: "
"pfn %lx mfn %lx, failed to modify kernel mappings",
pfn, mfn);
@@ -918,8 +914,8 @@ int m2p_remove_override(struct page *page, bool clear_pte)
mcs = xen_mc_entry(
sizeof(struct gnttab_unmap_grant_ref));
unmap_op = mcs.args;
- unmap_op->host_addr = map_op->host_addr;
- unmap_op->handle = map_op->handle;
+ unmap_op->host_addr = kmap_op->host_addr;
+ unmap_op->handle = kmap_op->handle;
unmap_op->dev_bus_addr = 0;
MULTI_grant_table_op(mcs.mc,
@@ -930,10 +926,9 @@ int m2p_remove_override(struct page *page, bool clear_pte)
set_pte_at(&init_mm, address, ptep,
pfn_pte(pfn, PAGE_KERNEL));
__flush_tlb_single(address);
- map_op->host_addr = 0;
+ kmap_op->host_addr = 0;
}
- } else
- set_phys_to_machine(pfn, page->index);
+ }
/* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
* somewhere in this domain, even before being added to the