Diffstat (limited to 'arch')
-rw-r--r--  arch/m32r/boot/compressed/Makefile    |  4
-rw-r--r--  arch/powerpc/Kconfig                  |  4
-rw-r--r--  arch/powerpc/kernel/time.c            | 15
-rw-r--r--  arch/powerpc/kvm/timing.h             |  4
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  |  4
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c  |  2
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c |  9
-rw-r--r--  arch/sh/include/asm/rwsem.h           |  2
-rw-r--r--  arch/sh/kernel/dwarf.c                |  2
-rw-r--r--  arch/sh/mm/cache-sh4.c                |  5
-rw-r--r--  arch/sparc/include/asm/system_64.h    |  4
-rw-r--r--  arch/sparc/kernel/prom_common.c       |  4
-rw-r--r--  arch/sparc/kernel/visemul.c           |  2
-rw-r--r--  arch/x86/include/asm/desc.h           |  2
-rw-r--r--  arch/x86/kvm/x86.c                    |  4
15 files changed, 51 insertions(+), 16 deletions(-)
diff --git a/arch/m32r/boot/compressed/Makefile b/arch/m32r/boot/compressed/Makefile
index 1003880d0dfd..177716b1d613 100644
--- a/arch/m32r/boot/compressed/Makefile
+++ b/arch/m32r/boot/compressed/Makefile
@@ -1,5 +1,5 @@
#
-# linux/arch/sh/boot/compressed/Makefile
+# linux/arch/m32r/boot/compressed/Makefile
#
# create a compressed vmlinux image from the original vmlinux
#
@@ -47,5 +47,5 @@ suffix_$(CONFIG_KERNEL_GZIP) = gz
suffix_$(CONFIG_KERNEL_BZIP2) = bz2
suffix_$(CONFIG_KERNEL_LZMA) = lzma
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
$(call if_changed,ld)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 10a0a5488a44..2ba14e77296c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -414,6 +414,10 @@ config ARCH_SPARSEMEM_DEFAULT
config ARCH_POPULATES_NODE_MAP
def_bool y
+config SYS_SUPPORTS_HUGETLBFS
+ def_bool y
+ depends on PPC_BOOK3S_64
+
source "mm/Kconfig"
config ARCH_MEMORY_PROBE
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 92dc844299b6..a136a11c490d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -777,7 +777,7 @@ int update_persistent_clock(struct timespec now)
return ppc_md.set_rtc_time(&tm);
}
-void read_persistent_clock(struct timespec *ts)
+static void __read_persistent_clock(struct timespec *ts)
{
struct rtc_time tm;
static int first = 1;
@@ -800,10 +800,23 @@ void read_persistent_clock(struct timespec *ts)
return;
}
ppc_md.get_rtc_time(&tm);
+
ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec);
}
+void read_persistent_clock(struct timespec *ts)
+{
+ __read_persistent_clock(ts);
+
+ /* Sanitize it in case real time clock is set below EPOCH */
+ if (ts->tv_sec < 0) {
+ ts->tv_sec = 0;
+ ts->tv_nsec = 0;
+ }
+
+}
+
/* clocksource code */
static cycle_t rtc_read(struct clocksource *cs)
{
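
The clamp added above guards against an RTC left at a pre-1970 date: the mktime() arithmetic then yields a negative seconds count, which the generic timekeeping code does not expect. A minimal userspace sketch of the failure mode, using a signed re-implementation of the kernel's mktime() formula (simple_mktime() is a name made up for illustration, not the kernel function):

    #include <stdio.h>

    static long long simple_mktime(long long year, long long mon, long long day,
                                   long long hour, long long min, long long sec)
    {
            /* Same arithmetic as the kernel's mktime(), kept signed so a
             * pre-1970 date visibly goes negative instead of wrapping. */
            if (mon <= 2) {         /* Jan/Feb count as months 11/12 of prev year */
                    mon += 10;
                    year -= 1;
            } else {
                    mon -= 2;
            }
            return ((((year/4 - year/100 + year/400 + 367*mon/12 + day)
                    + year*365 - 719499) * 24 + hour) * 60 + min) * 60 + sec;
    }

    int main(void)
    {
            long long t = simple_mktime(1969, 6, 1, 0, 0, 0);

            printf("1969-06-01 -> %lld\n", t);      /* negative: before the epoch */
            if (t < 0)                              /* the clamp the patch adds */
                    t = 0;
            printf("sanitized  -> %lld\n", t);
            return 0;
    }

Run, the first line prints -18489600 (214 days before the epoch) and the second the clamped zero.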
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
index bb13b1f3cd5a..806ef67868bd 100644
--- a/arch/powerpc/kvm/timing.h
+++ b/arch/powerpc/kvm/timing.h
@@ -48,7 +48,11 @@ static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) {}
static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
{
/* type has to be known at build time for optimization */
+
+ /* The BUILD_BUG_ON below breaks in funny ways, commented out
+ * for now ... -BenH
BUILD_BUG_ON(__builtin_constant_p(type));
+ */
switch (type) {
case EXT_INTR_EXITS:
vcpu->stat.ext_intr_exits++;
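
The assertion is commented out rather than fixed because __builtin_constant_p() inside an inline function is evaluated at a point that depends on optimization level and inlining decisions, so a build assertion fed from it passes or fires depending on compiler flags. A standalone sketch of the two pieces, with BUILD_BUG_ON re-implemented via the classic negative-array-size trick:

    #include <stdio.h>

    /* Compiles to nothing when 'cond' is false, and to an illegal
     * negative-size array when it is true. */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    static inline int constness(int type)
    {
            /* Reports whether GCC currently sees 'type' as a constant. */
            return __builtin_constant_p(type);
    }

    int main(void)
    {
            volatile int runtime = 7;

            BUILD_BUG_ON(sizeof(int) < 4);  /* safe: genuinely compile-time */

            /* Typically 1 at -O2 (after inlining) but 0 at -O0, so an
             * assertion built on it fires depending on compiler flags --
             * the "funny ways" the patch comment alludes to. */
            printf("constant arg : %d\n", constness(42));
            printf("runtime arg  : %d\n", constness(runtime));
            return 0;
    }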
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index c2f93dc470e6..be4f34c30a0b 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -25,8 +25,8 @@
* also clear mm->cpu_vm_mask bits when processes are migrated
*/
-#define DEBUG_MAP_CONSISTENCY
-#define DEBUG_CLAMP_LAST_CONTEXT 31
+//#define DEBUG_MAP_CONSISTENCY
+//#define DEBUG_CLAMP_LAST_CONTEXT 31
//#define DEBUG_HARDER
/* We don't use DEBUG because it tends to be compiled in always nowadays
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index bf2e1ac41308..1164c3430f2c 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -432,8 +432,6 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
/* Read config space back so we can restore after reset */
read_msi_msg(virq, &msg);
entry->msg = msg;
-
- unmask_msi_irq(virq);
}
return 0;
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 419f8a637ffe..b9bf0eedccf2 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>
+#include <linux/msi.h>
#include <linux/of.h>
#include <asm/firmware.h>
@@ -219,6 +220,14 @@ static void xics_unmask_irq(unsigned int virq)
static unsigned int xics_startup(unsigned int virq)
{
+ /*
+ * The generic MSI code returns with the interrupt disabled on the
+ * card, using the MSI mask bits. Firmware doesn't appear to unmask
+ * at that level, so we do it here by hand.
+ */
+ if (irq_to_desc(virq)->msi_desc)
+ unmask_msi_irq(virq);
+
/* unmask it */
xics_unmask_irq(virq);
return 0;
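
This pairs with the pseries/msi.c hunk above: the unmask_msi_irq() call moves out of rtas_setup_msi_irqs() and into the XICS startup hook, so every MSI vector gets its device-level mask bit cleared when the interrupt is started. A toy model of the resulting flow -- the toy_* types and functions are invented stand-ins, not the kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_irq_desc {
            bool has_msi_desc;      /* stands in for desc->msi_desc != NULL */
            bool device_masked;     /* MSI mask bit on the PCI function */
            bool xics_masked;       /* mask state in the XICS controller */
    };

    static void toy_unmask_msi(struct toy_irq_desc *d)  { d->device_masked = false; }
    static void toy_unmask_xics(struct toy_irq_desc *d) { d->xics_masked = false; }

    static unsigned int toy_xics_startup(struct toy_irq_desc *d)
    {
            if (d->has_msi_desc)    /* only MSI/MSI-X vectors carry a mask bit */
                    toy_unmask_msi(d);
            toy_unmask_xics(d);     /* then unmask at the controller */
            return 0;
    }

    int main(void)
    {
            struct toy_irq_desc d = { true, true, true };

            toy_xics_startup(&d);
            printf("device_masked=%d xics_masked=%d\n",
                   d.device_masked, d.xics_masked);
            return 0;
    }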
diff --git a/arch/sh/include/asm/rwsem.h b/arch/sh/include/asm/rwsem.h
index 1987f3ea7f1b..06e2251a5e48 100644
--- a/arch/sh/include/asm/rwsem.h
+++ b/arch/sh/include/asm/rwsem.h
@@ -41,7 +41,7 @@ struct rw_semaphore {
#endif
#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
LIST_HEAD_INIT((name).wait_list) \
__RWSEM_DEP_MAP_INIT(name) }
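
SPIN_LOCK_UNLOCKED is one shared static initializer, so every lock created from it lands in a single lockdep class; __SPIN_LOCK_UNLOCKED((name).wait_lock) embeds a distinct per-lock name/key instead. A plain-C toy illustration of that distinction (all toy_* names are made up):

    #include <stdio.h>

    struct toy_lock_key { int dummy; };
    struct toy_lock { struct toy_lock_key *key; };

    /* Old style: every lock points at the same key object. */
    static struct toy_lock_key shared_key;
    #define TOY_SPIN_LOCK_UNLOCKED          { &shared_key }

    /* New style: each lock carries its own key, as __SPIN_LOCK_UNLOCKED does. */
    #define TOY_SPIN_LOCK_UNLOCKED_NAMED(k) { k }

    static struct toy_lock_key a_key, b_key;
    static struct toy_lock a = TOY_SPIN_LOCK_UNLOCKED;
    static struct toy_lock b = TOY_SPIN_LOCK_UNLOCKED;
    static struct toy_lock c = TOY_SPIN_LOCK_UNLOCKED_NAMED(&a_key);
    static struct toy_lock d = TOY_SPIN_LOCK_UNLOCKED_NAMED(&b_key);

    int main(void)
    {
            printf("old: same class? %d\n", a.key == b.key);  /* 1: false sharing */
            printf("new: same class? %d\n", c.key == d.key);  /* 0: distinct */
            return 0;
    }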
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 2d07084e4882..d76a23170dbb 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -555,7 +555,7 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
* NOTE: the return address is guaranteed to be setup by the
* time this function makes its first function call.
*/
- if (!pc && !prev)
+ if (!pc || !prev)
pc = (unsigned long)current_text_addr();
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
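
The old test fell back to current_text_addr() only when both pc and prev were missing; the fix falls back when either one is. A quick truth-table check of the two conditions:

    #include <stdio.h>

    int main(void)
    {
            /* pc==0: caller supplied no PC; prev==0: no previous frame.
             * The '&&' form skips the fallback in two of the three cases
             * where it is needed. */
            for (int pc = 0; pc <= 1; pc++)
                    for (int prev = 0; prev <= 1; prev++)
                            printf("pc=%d prev=%d  &&:%d  ||:%d\n",
                                   pc, prev, !pc && !prev, !pc || !prev);
            return 0;
    }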
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 519e2d16cd06..b7f235c74d66 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -72,6 +72,7 @@ static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
for (v = start; v < end; v += L1_CACHE_BYTES) {
unsigned long icacheaddr;
+ int j, n;
__ocbwb(v);
@@ -79,8 +80,10 @@ static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
cpu_data->icache.entry_mask);
/* Clear i-cache line valid-bit */
+ n = boot_cpu_data.icache.n_aliases;
for (i = 0; i < cpu_data->icache.ways; i++) {
- __raw_writel(0, icacheaddr);
+ for (j = 0; j < n; j++)
+ __raw_writel(0, icacheaddr + (j * PAGE_SIZE));
icacheaddr += cpu_data->icache.way_incr;
}
}
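
With a virtually-indexed i-cache whose way size exceeds PAGE_SIZE, the same line is reachable at several index addresses, so writing one address per way misses the other aliases. A toy address calculation, assuming the usual definition n_aliases = way size / PAGE_SIZE and an SH-4-like geometry (the numbers are illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            unsigned long way_size   = 16 * 1024;            /* hypothetical 16KB way */
            unsigned long n_aliases  = way_size / PAGE_SIZE; /* -> 4 alias pages */
            unsigned long icacheaddr = 0xf0000000UL;         /* made-up array base */

            /* The patch's inner loop: clear the valid bit at every alias. */
            for (unsigned long j = 0; j < n_aliases; j++)
                    printf("clear valid bit at %#lx\n",
                           icacheaddr + j * PAGE_SIZE);
            return 0;
    }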
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index 25e848f0cad7..d47a98e66972 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -63,6 +63,10 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
: : : "memory"); \
} while (0)
+/* The kernel always executes in TSO memory model these days,
+ * and furthermore most sparc64 chips implement more stringent
+ * memory ordering than required by the specifications.
+ */
#define mb() membar_safe("#StoreLoad")
#define rmb() __asm__ __volatile__("":::"memory")
#define wmb() __asm__ __volatile__("":::"memory")
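
Under TSO the only reordering the hardware may still perform is a later load passing an earlier buffered store, hence the lone #StoreLoad membar in mb() while rmb() and wmb() reduce to compiler barriers. A C11 sketch of where that one full fence belongs, using the classic store-buffering litmus test:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Without the seq_cst fences, r1 == r2 == 0 is observable on TSO
     * machines (sparc64, x86): each CPU's load may pass its own buffered
     * store.  Loads never pass loads and stores never pass stores under
     * TSO, so rmb()/wmb() need no hardware fence. */
    atomic_int x, y;
    int r1, r2;

    static void *a(void *unused)
    {
            (void)unused;
            atomic_store_explicit(&x, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);  /* plays the role of mb() */
            r1 = atomic_load_explicit(&y, memory_order_relaxed);
            return NULL;
    }

    static void *b(void *unused)
    {
            (void)unused;
            atomic_store_explicit(&y, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);
            r2 = atomic_load_explicit(&x, memory_order_relaxed);
            return NULL;
    }

    int main(void)
    {
            pthread_t ta, tb;

            pthread_create(&ta, NULL, a, NULL);
            pthread_create(&tb, NULL, b, NULL);
            pthread_join(ta, NULL);
            pthread_join(tb, NULL);
            printf("r1=%d r2=%d (with fences, never both 0)\n", r1, r2);
            return 0;
    }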
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 138910c67206..d80a65d9e893 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -79,6 +79,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
err = -ENODEV;
+ mutex_lock(&of_set_property_mutex);
write_lock(&devtree_lock);
prevp = &dp->properties;
while (*prevp) {
@@ -88,9 +89,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
void *old_val = prop->value;
int ret;
- mutex_lock(&of_set_property_mutex);
ret = prom_setprop(dp->node, name, val, len);
- mutex_unlock(&of_set_property_mutex);
err = -EINVAL;
if (ret >= 0) {
@@ -109,6 +108,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
prevp = &(*prevp)->next;
}
write_unlock(&devtree_lock);
+ mutex_unlock(&of_set_property_mutex);
/* XXX Upate procfs if necessary... */
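
The mutex now brackets the whole read-modify-write instead of being taken under the devtree rwlock: a sleeping lock must not be acquired while a spinning lock is held, and the wider scope keeps concurrent of_set_property() calls from interleaving the firmware call with the tree update. A pthreads toy of the corrected ordering (names are hypothetical; the kernel's devtree_lock is a spinning rwlock, not a pthread one):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t prop_mutex = PTHREAD_MUTEX_INITIALIZER;  /* may sleep */
    static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER; /* spinning in the kernel */
    static int the_property;

    static void set_property(int val)
    {
            pthread_mutex_lock(&prop_mutex);   /* sleeping lock first... */
            pthread_rwlock_wrlock(&tree_lock); /* ...then the tree lock */

            the_property = val;  /* firmware call + tree update, atomically */

            pthread_rwlock_unlock(&tree_lock);
            pthread_mutex_unlock(&prop_mutex);
    }

    int main(void)
    {
            set_property(42);
            printf("property = %d\n", the_property);
            return 0;
    }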
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index b956fd71c131..d231cbd5c526 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -617,7 +617,7 @@ static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
rs2 = fps_regval(f, RS2(insn));
rd_val = 0;
- src2 = (rs2 >> (opf == FMUL8x16AU_OPF) ? 16 : 0);
+ src2 = rs2 >> (opf == FMUL8x16AU_OPF ? 16 : 0);
for (byte = 0; byte < 4; byte++) {
u16 src1 = (rs1 >> (byte * 8)) & 0x00ff;
u32 prod = src1 * src2;
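
The one-character fix moves a parenthesis: '>>' binds tighter than '?:', so the old expression shifted first and then collapsed the result into 16 or 0. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long rs2 = 0xdeadbeef12345678ULL;
            int au = 1;  /* stands in for opf == FMUL8x16AU_OPF */

            /* Buggy: parses as ((rs2 >> au) ? 16 : 0) -- the shifted value
             * is reduced to a boolean, so src2 is always 16 or 0. */
            unsigned long long buggy = (rs2 >> (au) ? 16 : 0);

            /* Fixed: pick the shift amount first, then shift. */
            unsigned long long fixed = rs2 >> (au ? 16 : 0);

            printf("buggy = %llu\n", buggy);    /* 16 */
            printf("fixed = %#llx\n", fixed);   /* 0xdeadbeef1234 */
            return 0;
    }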
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index e8de2f6f5ca5..617bd56b3070 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -288,7 +288,7 @@ static inline void load_LDT(mm_context_t *pc)
static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
- return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24);
+ return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}
static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
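
On 64-bit, base2 << 24 is computed as a signed int; when bit 31 of the assembled base ends up set, conversion to the unsigned long return type sign-extends it, yielding a base with all upper 32 bits set. A small demonstration of the cast's effect (field values chosen to trigger the bug):

    #include <stdio.h>

    int main(void)
    {
            /* Descriptor base 0xffff0000 split into the three fields. */
            unsigned short base0 = 0x0000;
            unsigned char  base1 = 0xff, base2 = 0xff;

            /* base2 promotes to int, so 0xff << 24 sets the sign bit and
             * the assignment sign-extends on LP64: */
            unsigned long bad  = base0 | (base1 << 16) | (base2 << 24);

            /* Truncating to unsigned first keeps the value 32-bit clean: */
            unsigned long good = (unsigned)(base0 | (base1 << 16) | (base2 << 24));

            printf("bad  = %#lx\n", bad);   /* 0xffffffffffff0000 on LP64 */
            printf("good = %#lx\n", good);  /* 0xffff0000 */
            return 0;
    }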
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9b9695322f56..ae07d261527c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1692,7 +1692,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
unsigned bank_num = mcg_cap & 0xff, bank;
r = -EINVAL;
- if (!bank_num)
+ if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
goto out;
if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
goto out;
@@ -4051,7 +4051,7 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
}
-static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
+static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
struct desc_struct *seg_desc)
{
u32 base_addr = get_desc_base(seg_desc);
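
bank_num comes straight from the guest via mcg_cap and is later used to walk the per-vcpu MCE bank array, so the missing upper bound let a guest push writes past the end of it. A sketch of the check's shape with stand-in names (MAX_MCE_BANKS mirrors KVM_MAX_MCE_BANKS; the 4-entry stride follows the bank layout used in x86.c):

    #include <stdio.h>

    #define MAX_MCE_BANKS 32  /* stands in for KVM_MAX_MCE_BANKS */

    static unsigned long long banks[MAX_MCE_BANKS * 4];

    static int setup_mce(unsigned long long mcg_cap)
    {
            unsigned bank_num = mcg_cap & 0xff;  /* guest-controlled */

            /* The old check only rejected zero; anything above the array
             * size would drive the loop below off the end of banks[]. */
            if (!bank_num || bank_num >= MAX_MCE_BANKS)
                    return -1;  /* -EINVAL in the kernel */

            for (unsigned bank = 0; bank < bank_num; bank++)
                    banks[bank * 4] = ~0ULL;  /* init each bank's MCi_CTL */
            return 0;
    }

    int main(void)
    {
            printf("bank_num=10  -> %d\n", setup_mce(10));   /* accepted */
            printf("bank_num=200 -> %d\n", setup_mce(200));  /* rejected */
            return 0;
    }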