Diffstat (limited to 'arch')
-rw-r--r-- arch/arm/mach-pxa/include/mach/pxafb.h | 1
-rw-r--r-- arch/arm/mach-pxa/reset.c | 7
-rw-r--r-- arch/arm/mach-pxa/spitz.c | 4
-rw-r--r-- arch/ia64/include/asm/intrinsics.h | 2
-rw-r--r-- arch/ia64/include/asm/paravirt_privop.h | 13
-rw-r--r-- arch/ia64/kernel/entry.S | 1
-rw-r--r-- arch/ia64/kernel/head.S | 2
-rw-r--r-- arch/ia64/kernel/mca.c | 2
-rw-r--r-- arch/ia64/kernel/paravirt.c | 2
-rw-r--r-- arch/ia64/kernel/pci-dma.c | 1
-rw-r--r-- arch/ia64/xen/hypercall.S | 2
-rw-r--r-- arch/mips/include/asm/mach-rc32434/gpio.h | 2
-rw-r--r-- arch/mips/include/asm/mach-rc32434/rb.h | 14
-rw-r--r-- arch/mips/include/asm/time.h | 2
-rw-r--r-- arch/mips/kernel/csrc-r4k.c | 2
-rw-r--r-- arch/mips/mm/sc-ip22.c | 2
-rw-r--r-- arch/mips/mti-malta/malta-amon.c | 6
-rw-r--r-- arch/mips/rb532/devices.c | 2
-rw-r--r-- arch/mips/rb532/gpio.c | 193
-rw-r--r-- arch/parisc/kernel/ptrace.c | 10
-rw-r--r-- arch/sparc/include/asm/unistd_32.h | 3
-rw-r--r-- arch/sparc/include/asm/unistd_64.h | 3
-rw-r--r-- arch/sparc/kernel/systbls.S | 2
-rw-r--r-- arch/sparc64/kernel/sys32.S | 13
-rw-r--r-- arch/sparc64/kernel/systbls.S | 4
-rw-r--r-- arch/x86/Kconfig | 9
-rw-r--r-- arch/x86/Kconfig.debug | 4
-rw-r--r-- arch/x86/include/asm/ftrace.h | 35
-rw-r--r-- arch/x86/include/asm/iomap.h | 30
-rw-r--r-- arch/x86/include/asm/mmzone_32.h | 4
-rw-r--r-- arch/x86/include/asm/thread_info.h | 31
-rw-r--r-- arch/x86/include/asm/uaccess_64.h | 2
-rw-r--r-- arch/x86/include/asm/unistd_64.h | 4
-rw-r--r-- arch/x86/kernel/Makefile | 6
-rw-r--r-- arch/x86/kernel/amd_iommu.c | 2
-rw-r--r-- arch/x86/kernel/amd_iommu_init.c | 6
-rw-r--r-- arch/x86/kernel/ds.c | 25
-rw-r--r-- arch/x86/kernel/entry_32.S | 42
-rw-r--r-- arch/x86/kernel/entry_64.S | 5
-rw-r--r-- arch/x86/kernel/es7000_32.c | 9
-rw-r--r-- arch/x86/kernel/ftrace.c | 311
-rw-r--r-- arch/x86/kernel/io_apic.c | 14
-rw-r--r-- arch/x86/kernel/reboot.c | 9
-rw-r--r-- arch/x86/kernel/setup.c | 2
-rw-r--r-- arch/x86/kernel/tsc_sync.c | 4
-rw-r--r-- arch/x86/kernel/vsyscall_64.c | 3
-rw-r--r-- arch/x86/mach-voyager/voyager_smp.c | 16
-rw-r--r-- arch/x86/mm/Makefile | 3
-rw-r--r-- arch/x86/mm/fault.c | 2
-rw-r--r-- arch/x86/mm/numa_32.c | 35
-rw-r--r-- arch/x86/power/hibernate_32.c | 4
-rw-r--r-- arch/x86/vdso/vclock_gettime.c | 3
52 files changed, 718 insertions, 197 deletions
diff --git a/arch/arm/mach-pxa/include/mach/pxafb.h b/arch/arm/mach-pxa/include/mach/pxafb.h
index 8e591118371e..cbda4d35c421 100644
--- a/arch/arm/mach-pxa/include/mach/pxafb.h
+++ b/arch/arm/mach-pxa/include/mach/pxafb.h
@@ -33,6 +33,7 @@
#define LCD_CONN_TYPE(_x) ((_x) & 0x0f)
#define LCD_CONN_WIDTH(_x) (((_x) >> 4) & 0x1f)
+#define LCD_TYPE_MASK 0xf
#define LCD_TYPE_UNKNOWN 0
#define LCD_TYPE_MONO_STN 1
#define LCD_TYPE_MONO_DSTN 2
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
index 1b2af575c40f..00b2dc2a1074 100644
--- a/arch/arm/mach-pxa/reset.c
+++ b/arch/arm/mach-pxa/reset.c
@@ -90,12 +90,13 @@ void arch_reset(char mode)
/* Jump into ROM at address 0 */
cpu_reset(0);
break;
- case 'h':
- do_hw_reset();
- break;
case 'g':
do_gpio_reset();
break;
+ case 'h':
+ default:
+ do_hw_reset();
+ break;
}
}
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index f0a5bbae0b45..3be76ee2bdbf 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -67,6 +67,7 @@
static unsigned long spitz_pin_config[] __initdata = {
/* Chip Selects */
GPIO78_nCS_2, /* SCOOP #2 */
+ GPIO79_nCS_3, /* NAND */
GPIO80_nCS_4, /* SCOOP #1 */
/* LCD - 16bpp Active TFT */
@@ -97,10 +98,10 @@ static unsigned long spitz_pin_config[] __initdata = {
GPIO51_nPIOW,
GPIO85_nPCE_1,
GPIO54_nPCE_2,
- GPIO79_PSKTSEL,
GPIO55_nPREG,
GPIO56_nPWAIT,
GPIO57_nIOIS16,
+ GPIO104_PSKTSEL,
/* MMC */
GPIO32_MMC_CLK,
@@ -686,7 +687,6 @@ static void __init akita_init(void)
spitz_pcmcia_config.num_devs = 1;
platform_scoop_config = &spitz_pcmcia_config;
- pxa_set_i2c_info(NULL);
i2c_register_board_info(0, ARRAY_AND_SIZE(akita_i2c_board_info));
common_init();
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
index 47d686dba1eb..a3e44a5ed497 100644
--- a/arch/ia64/include/asm/intrinsics.h
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -226,7 +226,7 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void);
/************************************************/
#define ia64_ssm IA64_INTRINSIC_MACRO(ssm)
#define ia64_rsm IA64_INTRINSIC_MACRO(rsm)
-#define ia64_getreg IA64_INTRINSIC_API(getreg)
+#define ia64_getreg IA64_INTRINSIC_MACRO(getreg)
#define ia64_setreg IA64_INTRINSIC_API(setreg)
#define ia64_set_rr IA64_INTRINSIC_API(set_rr)
#define ia64_get_rr IA64_INTRINSIC_API(get_rr)
diff --git a/arch/ia64/include/asm/paravirt_privop.h b/arch/ia64/include/asm/paravirt_privop.h
index d577aac11835..0b597424fcfc 100644
--- a/arch/ia64/include/asm/paravirt_privop.h
+++ b/arch/ia64/include/asm/paravirt_privop.h
@@ -78,6 +78,19 @@ extern unsigned long ia64_native_getreg_func(int regnum);
ia64_native_rsm(mask); \
} while (0)
+/* returned ip value should be the one in the caller,
+ * not in __paravirt_getreg() */
+#define paravirt_getreg(reg) \
+ ({ \
+ unsigned long res; \
+ BUILD_BUG_ON(!__builtin_constant_p(reg)); \
+ if ((reg) == _IA64_REG_IP) \
+ res = ia64_native_getreg(_IA64_REG_IP); \
+ else \
+ res = pv_cpu_ops.getreg(reg); \
+ res; \
+ })
+
/******************************************************************************
* replacement of hand written assembly codes.
*/
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 7ef0c594f5ed..d435f4a7a96c 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -499,6 +499,7 @@ GLOBAL_ENTRY(prefetch_stack)
END(prefetch_stack)
GLOBAL_ENTRY(kernel_execve)
+ rum psr.ac
mov r15=__NR_execve // put syscall number in place
break __BREAK_SYSCALL
br.ret.sptk.many rp
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 66e491d8baac..59301c472800 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -260,7 +260,7 @@ start_ap:
* Switch into virtual mode:
*/
movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
- |IA64_PSR_DI)
+ |IA64_PSR_DI|IA64_PSR_AC)
;;
mov cr.ipsr=r16
movl r17=1f
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7dd96c127177..bab1de2d2f6a 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1139,7 +1139,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
return previous_current;
no_mod:
- printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
+ mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
smp_processor_id(), type, msg);
return previous_current;
}
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
index de35d8e8b7d2..9f14c16f6369 100644
--- a/arch/ia64/kernel/paravirt.c
+++ b/arch/ia64/kernel/paravirt.c
@@ -130,7 +130,7 @@ ia64_native_getreg_func(int regnum)
unsigned long res = -1;
switch (regnum) {
CASE_GET_REG(GP);
- CASE_GET_REG(IP);
+ /*CASE_GET_REG(IP);*/ /* returned ip value shouldn't be constant */
CASE_GET_REG(PSR);
CASE_GET_REG(TP);
CASE_GET_REG(SP);
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index dbdb778efa05..2a92f637431d 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <asm/page.h>
-#include <asm/iommu.h>
dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
index d4ff0b9e79f1..45e02bb64a92 100644
--- a/arch/ia64/xen/hypercall.S
+++ b/arch/ia64/xen/hypercall.S
@@ -58,7 +58,7 @@ __HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)
#ifdef CONFIG_IA32_SUPPORT
-__HCALL1(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
+__HCALL0(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
__HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG) // refer SDM vol1 3.1.8
#endif /* CONFIG_IA32_SUPPORT */
diff --git a/arch/mips/include/asm/mach-rc32434/gpio.h b/arch/mips/include/asm/mach-rc32434/gpio.h
index c8e554eafce3..b5cf6457305a 100644
--- a/arch/mips/include/asm/mach-rc32434/gpio.h
+++ b/arch/mips/include/asm/mach-rc32434/gpio.h
@@ -84,5 +84,7 @@ extern void set_434_reg(unsigned reg_offs, unsigned bit, unsigned len, unsigned
extern unsigned get_434_reg(unsigned reg_offs);
extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask);
extern unsigned char get_latch_u5(void);
+extern void rb532_gpio_set_ilevel(int bit, unsigned gpio);
+extern void rb532_gpio_set_istat(int bit, unsigned gpio);
#endif /* _RC32434_GPIO_H_ */
diff --git a/arch/mips/include/asm/mach-rc32434/rb.h b/arch/mips/include/asm/mach-rc32434/rb.h
index 79e8ef67d0d3..f25a84916703 100644
--- a/arch/mips/include/asm/mach-rc32434/rb.h
+++ b/arch/mips/include/asm/mach-rc32434/rb.h
@@ -40,12 +40,14 @@
#define BTCS 0x010040
#define BTCOMPARE 0x010044
#define GPIOBASE 0x050000
-#define GPIOCFG 0x050004
-#define GPIOD 0x050008
-#define GPIOILEVEL 0x05000C
-#define GPIOISTAT 0x050010
-#define GPIONMIEN 0x050014
-#define IMASK6 0x038038
+/* Offsets relative to GPIOBASE */
+#define GPIOFUNC 0x00
+#define GPIOCFG 0x04
+#define GPIOD 0x08
+#define GPIOILEVEL 0x0C
+#define GPIOISTAT 0x10
+#define GPIONMIEN 0x14
+#define IMASK6 0x38
#define LO_WPX (1 << 0)
#define LO_ALE (1 << 1)
#define LO_CLE (1 << 2)
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index d3bd5c5aa2ec..9601ea950542 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -63,7 +63,7 @@ static inline int mips_clockevent_init(void)
/*
* Initialize the count register as a clocksource
*/
-#ifdef CONFIG_CEVT_R4K
+#ifdef CONFIG_CSRC_R4K
extern int init_mips_clocksource(void);
#else
static inline int init_mips_clocksource(void)
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index 86e026f067bc..74fb74583b4e 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -27,7 +27,7 @@ int __init init_mips_clocksource(void)
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
- /* Calclate a somewhat reasonable rating value */
+ /* Calculate a somewhat reasonable rating value */
clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index 1f602a110e10..13adb5782110 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -161,7 +161,7 @@ static inline int __init indy_sc_probe(void)
/* XXX Check with wje if the Indy caches can differenciate between
writeback + invalidate and just invalidate. */
-struct bcache_ops indy_sc_ops = {
+static struct bcache_ops indy_sc_ops = {
.bc_enable = indy_sc_enable,
.bc_disable = indy_sc_disable,
.bc_wback_inv = indy_sc_wback_invalidate,
diff --git a/arch/mips/mti-malta/malta-amon.c b/arch/mips/mti-malta/malta-amon.c
index 96236bf33838..df9e526312a2 100644
--- a/arch/mips/mti-malta/malta-amon.c
+++ b/arch/mips/mti-malta/malta-amon.c
@@ -22,9 +22,9 @@
#include <linux/init.h>
#include <linux/smp.h>
-#include <asm-mips/addrspace.h>
-#include <asm-mips/mips-boards/launch.h>
-#include <asm-mips/mipsmtregs.h>
+#include <asm/addrspace.h>
+#include <asm/mips-boards/launch.h>
+#include <asm/mipsmtregs.h>
int amon_cpu_avail(int cpu)
{
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 2f22d714d5b0..c1c29181bd46 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -118,7 +118,7 @@ static struct platform_device cf_slot0 = {
/* Resources and device for NAND */
static int rb532_dev_ready(struct mtd_info *mtd)
{
- return readl(IDT434_REG_BASE + GPIOD) & GPIO_RDY;
+ return gpio_get_value(GPIO_RDY);
}
static void rb532_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index 70c4a6726377..0e84c8ab6a39 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -39,10 +39,6 @@
struct rb532_gpio_chip {
struct gpio_chip chip;
void __iomem *regbase;
- void (*set_int_level)(struct gpio_chip *chip, unsigned offset, int value);
- int (*get_int_level)(struct gpio_chip *chip, unsigned offset);
- void (*set_int_status)(struct gpio_chip *chip, unsigned offset, int value);
- int (*get_int_status)(struct gpio_chip *chip, unsigned offset);
};
struct mpmc_device dev3;
@@ -111,15 +107,47 @@ unsigned char get_latch_u5(void)
}
EXPORT_SYMBOL(get_latch_u5);
+/* rb532_set_bit - sanely set a bit
+ *
+ * bitval: new value for the bit
+ * offset: bit index in the 4 byte address range
+ * ioaddr: 4 byte aligned address being altered
+ */
+static inline void rb532_set_bit(unsigned bitval,
+ unsigned offset, void __iomem *ioaddr)
+{
+ unsigned long flags;
+ u32 val;
+
+ bitval = !!bitval; /* map parameter to {0,1} */
+
+ local_irq_save(flags);
+
+ val = readl(ioaddr);
+ val &= ~( ~bitval << offset ); /* unset bit if bitval == 0 */
+ val |= ( bitval << offset ); /* set bit if bitval == 1 */
+ writel(val, ioaddr);
+
+ local_irq_restore(flags);
+}
+
+/* rb532_get_bit - read a bit
+ *
+ * returns the boolean state of the bit, which may be > 1
+ */
+static inline int rb532_get_bit(unsigned offset, void __iomem *ioaddr)
+{
+ return (readl(ioaddr) & (1 << offset));
+}
+
/*
* Return GPIO level */
static int rb532_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- u32 mask = 1 << offset;
struct rb532_gpio_chip *gpch;
gpch = container_of(chip, struct rb532_gpio_chip, chip);
- return readl(gpch->regbase + GPIOD) & mask;
+ return rb532_get_bit(offset, gpch->regbase + GPIOD);
}
/*
@@ -128,23 +156,10 @@ static int rb532_gpio_get(struct gpio_chip *chip, unsigned offset)
static void rb532_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
- unsigned long flags;
- u32 mask = 1 << offset;
- u32 tmp;
struct rb532_gpio_chip *gpch;
- void __iomem *gpvr;
gpch = container_of(chip, struct rb532_gpio_chip, chip);
- gpvr = gpch->regbase + GPIOD;
-
- local_irq_save(flags);
- tmp = readl(gpvr);
- if (value)
- tmp |= mask;
- else
- tmp &= ~mask;
- writel(tmp, gpvr);
- local_irq_restore(flags);
+ rb532_set_bit(value, offset, gpch->regbase + GPIOD);
}
/*
@@ -152,21 +167,14 @@ static void rb532_gpio_set(struct gpio_chip *chip,
*/
static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
- unsigned long flags;
- u32 mask = 1 << offset;
- u32 value;
struct rb532_gpio_chip *gpch;
- void __iomem *gpdr;
gpch = container_of(chip, struct rb532_gpio_chip, chip);
- gpdr = gpch->regbase + GPIOCFG;
- local_irq_save(flags);
- value = readl(gpdr);
- value &= ~mask;
- writel(value, gpdr);
- local_irq_restore(flags);
+ if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC))
+ return 1; /* alternate function, GPIOCFG is ignored */
+ rb532_set_bit(0, offset, gpch->regbase + GPIOCFG);
return 0;
}
@@ -176,117 +184,60 @@ static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
static int rb532_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
- unsigned long flags;
- u32 mask = 1 << offset;
- u32 tmp;
struct rb532_gpio_chip *gpch;
- void __iomem *gpdr;
gpch = container_of(chip, struct rb532_gpio_chip, chip);
- writel(mask, gpch->regbase + GPIOD);
- gpdr = gpch->regbase + GPIOCFG;
- local_irq_save(flags);
- tmp = readl(gpdr);
- tmp |= mask;
- writel(tmp, gpdr);
- local_irq_restore(flags);
+ if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC))
+ return 1; /* alternate function, GPIOCFG is ignored */
+ /* set the initial output value */
+ rb532_set_bit(value, offset, gpch->regbase + GPIOD);
+
+ rb532_set_bit(1, offset, gpch->regbase + GPIOCFG);
return 0;
}
-/*
- * Set the GPIO interrupt level
- */
-static void rb532_gpio_set_int_level(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- unsigned long flags;
- u32 mask = 1 << offset;
- u32 tmp;
- struct rb532_gpio_chip *gpch;
- void __iomem *gpil;
-
- gpch = container_of(chip, struct rb532_gpio_chip, chip);
- gpil = gpch->regbase + GPIOILEVEL;
-
- local_irq_save(flags);
- tmp = readl(gpil);
- if (value)
- tmp |= mask;
- else
- tmp &= ~mask;
- writel(tmp, gpil);
- local_irq_restore(flags);
-}
+static struct rb532_gpio_chip rb532_gpio_chip[] = {
+ [0] = {
+ .chip = {
+ .label = "gpio0",
+ .direction_input = rb532_gpio_direction_input,
+ .direction_output = rb532_gpio_direction_output,
+ .get = rb532_gpio_get,
+ .set = rb532_gpio_set,
+ .base = 0,
+ .ngpio = 32,
+ },
+ },
+};
/*
- * Get the GPIO interrupt level
+ * Set GPIO interrupt level
*/
-static int rb532_gpio_get_int_level(struct gpio_chip *chip, unsigned offset)
+void rb532_gpio_set_ilevel(int bit, unsigned gpio)
{
- u32 mask = 1 << offset;
- struct rb532_gpio_chip *gpch;
-
- gpch = container_of(chip, struct rb532_gpio_chip, chip);
- return readl(gpch->regbase + GPIOILEVEL) & mask;
+ rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOILEVEL);
}
+EXPORT_SYMBOL(rb532_gpio_set_ilevel);
/*
- * Set the GPIO interrupt status
+ * Set GPIO interrupt status
*/
-static void rb532_gpio_set_int_status(struct gpio_chip *chip,
- unsigned offset, int value)
+void rb532_gpio_set_istat(int bit, unsigned gpio)
{
- unsigned long flags;
- u32 mask = 1 << offset;
- u32 tmp;
- struct rb532_gpio_chip *gpch;
- void __iomem *gpis;
-
- gpch = container_of(chip, struct rb532_gpio_chip, chip);
- gpis = gpch->regbase + GPIOISTAT;
-
- local_irq_save(flags);
- tmp = readl(gpis);
- if (value)
- tmp |= mask;
- else
- tmp &= ~mask;
- writel(tmp, gpis);
- local_irq_restore(flags);
+ rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOISTAT);
}
+EXPORT_SYMBOL(rb532_gpio_set_istat);
/*
- * Get the GPIO interrupt status
+ * Configure GPIO alternate function
*/
-static int rb532_gpio_get_int_status(struct gpio_chip *chip, unsigned offset)
+static void rb532_gpio_set_func(int bit, unsigned gpio)
{
- u32 mask = 1 << offset;
- struct rb532_gpio_chip *gpch;
-
- gpch = container_of(chip, struct rb532_gpio_chip, chip);
- return readl(gpch->regbase + GPIOISTAT) & mask;
+ rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOFUNC);
}
-static struct rb532_gpio_chip rb532_gpio_chip[] = {
- [0] = {
- .chip = {
- .label = "gpio0",
- .direction_input = rb532_gpio_direction_input,
- .direction_output = rb532_gpio_direction_output,
- .get = rb532_gpio_get,
- .set = rb532_gpio_set,
- .base = 0,
- .ngpio = 32,
- },
- .get_int_level = rb532_gpio_get_int_level,
- .set_int_level = rb532_gpio_set_int_level,
- .get_int_status = rb532_gpio_get_int_status,
- .set_int_status = rb532_gpio_set_int_status,
- },
-};
-
int __init rb532_gpio_init(void)
{
struct resource *r;
@@ -310,9 +261,11 @@ int __init rb532_gpio_init(void)
return -ENXIO;
}
- /* Set the interrupt status and level for the CF pin */
- rb532_gpio_set_int_level(&rb532_gpio_chip->chip, CF_GPIO_NUM, 1);
- rb532_gpio_set_int_status(&rb532_gpio_chip->chip, CF_GPIO_NUM, 0);
+ /* configure CF_GPIO_NUM as CFRDY IRQ source */
+ rb532_gpio_set_func(0, CF_GPIO_NUM);
+ rb532_gpio_direction_input(&rb532_gpio_chip->chip, CF_GPIO_NUM);
+ rb532_gpio_set_ilevel(1, CF_GPIO_NUM);
+ rb532_gpio_set_istat(0, CF_GPIO_NUM);
return 0;
}
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 90904f9dfc50..927db3668b6f 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -183,10 +183,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
* being 64 bit in both cases.
*/
-static long translate_usr_offset(long offset)
+static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
{
if (offset < 0)
- return -1;
+ return sizeof(struct pt_regs);
else if (offset <= 32*4) /* gr[0..31] */
return offset * 2 + 4;
else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */
@@ -194,7 +194,7 @@ static long translate_usr_offset(long offset)
else if (offset < sizeof(struct pt_regs)/2 + 32*4)
return offset * 2 + 4 - 32*8;
else
- return -1;
+ return sizeof(struct pt_regs);
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
@@ -209,7 +209,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (addr & (sizeof(compat_uint_t)-1))
break;
addr = translate_usr_offset(addr);
- if (addr < 0)
+ if (addr >= sizeof(struct pt_regs))
break;
tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
@@ -236,7 +236,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if (addr & (sizeof(compat_uint_t)-1))
break;
addr = translate_usr_offset(addr);
- if (addr < 0)
+ if (addr >= sizeof(struct pt_regs))
break;
if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
/* Special case, fp regs are 64 bits anyway */
diff --git a/arch/sparc/include/asm/unistd_32.h b/arch/sparc/include/asm/unistd_32.h
index 648643a9f139..0d13d2a4c76f 100644
--- a/arch/sparc/include/asm/unistd_32.h
+++ b/arch/sparc/include/asm/unistd_32.h
@@ -338,8 +338,9 @@
#define __NR_dup3 320
#define __NR_pipe2 321
#define __NR_inotify_init1 322
+#define __NR_accept4 323
-#define NR_SYSCALLS 323
+#define NR_SYSCALLS 324
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
* it never had the plain ones and there is no value to adding those
diff --git a/arch/sparc/include/asm/unistd_64.h b/arch/sparc/include/asm/unistd_64.h
index c5cc0e052321..fa5d3c0343c7 100644
--- a/arch/sparc/include/asm/unistd_64.h
+++ b/arch/sparc/include/asm/unistd_64.h
@@ -340,8 +340,9 @@
#define __NR_dup3 320
#define __NR_pipe2 321
#define __NR_inotify_init1 322
+#define __NR_accept4 323
-#define NR_SYSCALLS 323
+#define NR_SYSCALLS 324
#ifdef __KERNEL__
#define __ARCH_WANT_IPC_PARSE_VERSION
diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S
index e1b9233b90ab..7d0807586442 100644
--- a/arch/sparc/kernel/systbls.S
+++ b/arch/sparc/kernel/systbls.S
@@ -81,4 +81,4 @@ sys_call_table:
/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1
+/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
index ade18ba0c686..f061c4dda9ef 100644
--- a/arch/sparc64/kernel/sys32.S
+++ b/arch/sparc64/kernel/sys32.S
@@ -150,7 +150,7 @@ sys32_mmap2:
sys32_socketcall: /* %o0=call, %o1=args */
cmp %o0, 1
bl,pn %xcc, do_einval
- cmp %o0, 17
+ cmp %o0, 18
bg,pn %xcc, do_einval
sub %o0, 1, %o0
sllx %o0, 5, %o0
@@ -319,6 +319,15 @@ do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int)
nop
nop
nop
+do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
+63: ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_accept4), %g1
+64: lduwa [%o1 + 0x8] %asi, %o2
+65: ldswa [%o1 + 0xc] %asi, %o3
+ jmpl %g1 + %lo(sys_accept4), %g0
+66: lduwa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
.section __ex_table,"a"
.align 4
@@ -353,4 +362,6 @@ do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int)
.word 57b, __retl_efault, 58b, __retl_efault
.word 59b, __retl_efault, 60b, __retl_efault
.word 61b, __retl_efault, 62b, __retl_efault
+ .word 63b, __retl_efault, 64b, __retl_efault
+ .word 65b, __retl_efault, 66b, __retl_efault
.previous
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index b2fa4c163638..9fc78cf354bd 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -82,7 +82,7 @@ sys_call_table32:
.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1
+/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4
#endif /* CONFIG_COMPAT */
@@ -156,4 +156,4 @@ sys_call_table:
.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1
+/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 93224b569187..7a146baaa990 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -29,6 +29,8 @@ config X86
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_RET_TRACER if X86_32
+ select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
select HAVE_ARCH_KGDB if !X86_VOYAGER
select HAVE_ARCH_TRACEHOOK
@@ -167,9 +169,12 @@ config GENERIC_PENDING_IRQ
config X86_SMP
bool
depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
- select USE_GENERIC_SMP_HELPERS
default y
+config USE_GENERIC_SMP_HELPERS
+ def_bool y
+ depends on SMP
+
config X86_32_SMP
def_bool y
depends on X86_32 && SMP
@@ -957,7 +962,7 @@ config ARCH_PHYS_ADDR_T_64BIT
config NUMA
bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)"
depends on SMP
- depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && BROKEN)
+ depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
default n if X86_PC
default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
help
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 2a3dfbd5e677..fa013f529b74 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -186,14 +186,10 @@ config IOMMU_LEAK
Add a simple leak tracer to the IOMMU code. This is useful when you
are debugging a buggy device driver that leaks IOMMU mappings.
-config MMIOTRACE_HOOKS
- bool
-
config MMIOTRACE
bool "Memory mapped IO tracing"
depends on DEBUG_KERNEL && PCI
select TRACING
- select MMIOTRACE_HOOKS
help
Mmiotrace traces Memory Mapped I/O access and is meant for
debugging and reverse engineering. It is called from the ioremap
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 9e8bc29b8b17..2bb43b433e07 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -17,8 +17,41 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
*/
return addr - 1;
}
-#endif
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+struct dyn_arch_ftrace {
+ /* No extra data needed for x86 */
+};
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
+#ifdef CONFIG_FUNCTION_RET_TRACER
+#define FTRACE_RET_STACK_SIZE 20
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+ unsigned long ret;
+ unsigned long func;
+ unsigned long long calltime;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relays on ftrace_return_to_handler.
+ * Defined in entry32.S
+ */
+extern void return_to_handler(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_RET_TRACER */
+
#endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
new file mode 100644
index 000000000000..c1f06289b14b
--- /dev/null
+++ b/arch/x86/include/asm/iomap.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright © 2008 Ingo Molnar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+void *
+iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void
+iounmap_atomic(void *kvaddr, enum km_type type);
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 485bdf059ffb..07f1af494ca5 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -34,10 +34,14 @@ static inline void get_memcfg_numa(void)
extern int early_pfn_to_nid(unsigned long pfn);
+extern void resume_map_numa_kva(pgd_t *pgd);
+
#else /* !CONFIG_NUMA */
#define get_memcfg_numa get_memcfg_numa_flat
+static inline void resume_map_numa_kva(pgd_t *pgd) {}
+
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DISCONTIGMEM
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e44d379faad2..e90e81ef6ab9 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -20,6 +20,8 @@
struct task_struct;
struct exec_domain;
#include <asm/processor.h>
+#include <asm/ftrace.h>
+#include <asm/atomic.h>
struct thread_info {
struct task_struct *task; /* main task structure */
@@ -38,8 +40,36 @@ struct thread_info {
*/
__u8 supervisor_stack[0];
#endif
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+ /* Index of current stored adress in ret_stack */
+ int curr_ret_stack;
+ /* Stack of return addresses for return function tracing */
+ struct ftrace_ret_stack ret_stack[FTRACE_RET_STACK_SIZE];
+ /*
+ * Number of functions that haven't been traced
+ * because of depth overrun.
+ */
+ atomic_t trace_overrun;
+#endif
};
+#ifdef CONFIG_FUNCTION_RET_TRACER
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = 1, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+ .curr_ret_stack = -1,\
+ .trace_overrun = ATOMIC_INIT(0) \
+}
+#else
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
@@ -52,6 +82,7 @@ struct thread_info {
.fn = do_no_restart_syscall, \
}, \
}
+#endif
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 664f15280f14..f8cfd00db450 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -46,7 +46,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
return ret;
case 10:
__get_user_asm(*(u64 *)dst, (u64 __user *)src,
- ret, "q", "", "=r", 16);
+ ret, "q", "", "=r", 10);
if (unlikely(ret))
return ret;
__get_user_asm(*(u16 *)(8 + (char *)dst),
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 834b2c1d89fb..d2e415e6666f 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -639,8 +639,8 @@ __SYSCALL(__NR_fallocate, sys_fallocate)
__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
#define __NR_timerfd_gettime 287
__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
-#define __NR_paccept 288
-__SYSCALL(__NR_paccept, sys_paccept)
+#define __NR_accept4 288
+__SYSCALL(__NR_accept4, sys_accept4)
#define __NR_signalfd4 289
__SYSCALL(__NR_signalfd4, sys_signalfd4)
#define __NR_eventfd2 290
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index e489ff9cb3e2..1d8ed95da846 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -14,6 +14,11 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
endif
+ifdef CONFIG_FUNCTION_RET_TRACER
+# Don't trace __switch_to() but let it for function tracer
+CFLAGS_REMOVE_process_32.o = -pg
+endif
+
#
# vsyscalls (which work on the user stack) should have
# no stack-protector checks:
@@ -65,6 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+obj-$(CONFIG_FUNCTION_RET_TRACER) += ftrace.o
obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 331b318304eb..e4899e0e8787 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -537,7 +537,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
address >>= PAGE_SHIFT;
iommu_area_free(dom->bitmap, address, pages);
- if (address + pages >= dom->next_bit)
+ if (address >= dom->next_bit)
dom->need_flush = true;
}
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 0cdcda35a05f..30ae2701b3df 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -121,7 +121,7 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
we find in ACPI */
unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
-int amd_iommu_isolate; /* if 1, device isolation is enabled */
+int amd_iommu_isolate = 1; /* if 1, device isolation is enabled */
bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
@@ -1213,7 +1213,9 @@ static int __init parse_amd_iommu_options(char *str)
for (; *str; ++str) {
if (strncmp(str, "isolate", 7) == 0)
amd_iommu_isolate = 1;
- if (strncmp(str, "fullflush", 11) == 0)
+ if (strncmp(str, "share", 5) == 0)
+ amd_iommu_isolate = 0;
+ if (strncmp(str, "fullflush", 9) == 0)
amd_iommu_unmap_flush = true;
}
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 2b69994fd3a8..d1a121443bde 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -236,17 +236,33 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
struct ds_context *context = *p_context;
if (!context) {
+ spin_unlock(&ds_lock);
+
context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
+ if (!context) {
+ spin_lock(&ds_lock);
return NULL;
+ }
context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
if (!context->ds) {
kfree(context);
+ spin_lock(&ds_lock);
return NULL;
}
+ spin_lock(&ds_lock);
+ /*
+ * Check for race - another CPU could have allocated
+ * it meanwhile:
+ */
+ if (*p_context) {
+ kfree(context->ds);
+ kfree(context);
+ return *p_context;
+ }
+
*p_context = context;
context->this = p_context;
@@ -384,14 +400,15 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
spin_lock(&ds_lock);
- if (!check_tracer(task))
- return -EPERM;
-
error = -ENOMEM;
context = ds_alloc_context(task);
if (!context)
goto out_unlock;
+ error = -EPERM;
+ if (!check_tracer(task))
+ goto out_unlock;
+
error = -EALREADY;
if (context->owner[qual] == current)
goto out_unlock;
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 28b597ef9ca1..74defe21ba42 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1157,6 +1157,9 @@ ENTRY(mcount)
END(mcount)
ENTRY(ftrace_caller)
+ cmpl $0, function_trace_stop
+ jne ftrace_stub
+
pushl %eax
pushl %ecx
pushl %edx
@@ -1180,8 +1183,15 @@ END(ftrace_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
+ cmpl $0, function_trace_stop
+ jne ftrace_stub
+
cmpl $ftrace_stub, ftrace_trace_function
jnz trace
+#ifdef CONFIG_FUNCTION_RET_TRACER
+ cmpl $ftrace_stub, ftrace_function_return
+ jnz ftrace_return_caller
+#endif
.globl ftrace_stub
ftrace_stub:
ret
@@ -1200,12 +1210,42 @@ trace:
popl %edx
popl %ecx
popl %eax
-
jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
+#ifdef CONFIG_FUNCTION_RET_TRACER
+ENTRY(ftrace_return_caller)
+ cmpl $0, function_trace_stop
+ jne ftrace_stub
+
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ movl 0xc(%esp), %edx
+ lea 0x4(%ebp), %eax
+ call prepare_ftrace_return
+ popl %edx
+ popl %ecx
+ popl %eax
+ ret
+END(ftrace_return_caller)
+
+.globl return_to_handler
+return_to_handler:
+ pushl $0
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ call ftrace_return_to_handler
+ movl %eax, 0xc(%esp)
+ popl %edx
+ popl %ecx
+ popl %eax
+ ret
+#endif
+
.section .rodata,"a"
#include "syscall_table_32.S"
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b86f332c96a6..08aa6b10933c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -68,6 +68,8 @@ ENTRY(mcount)
END(mcount)
ENTRY(ftrace_caller)
+ cmpl $0, function_trace_stop
+ jne ftrace_stub
/* taken from glibc */
subq $0x38, %rsp
@@ -103,6 +105,9 @@ END(ftrace_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
+ cmpl $0, function_trace_stop
+ jne ftrace_stub
+
cmpq $ftrace_stub, ftrace_trace_function
jnz trace
.globl ftrace_stub
diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c
index f454c78fcef6..0aa2c443d600 100644
--- a/arch/x86/kernel/es7000_32.c
+++ b/arch/x86/kernel/es7000_32.c
@@ -250,31 +250,24 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
{
struct acpi_table_header *header = NULL;
int i = 0;
- acpi_size tbl_size;
- while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) {
+ while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
struct oem_table *t = (struct oem_table *)header;
oem_addrX = t->OEMTableAddr;
oem_size = t->OEMTableSize;
- early_acpi_os_unmap_memory(header, tbl_size);
*oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
oem_size);
return 0;
}
- early_acpi_os_unmap_memory(header, tbl_size);
}
return -1;
}
void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
{
- if (!oem_addr)
- return;
-
- __acpi_unmap_table((char *)oem_addr, oem_size);
}
#endif
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 50ea0ac8c9bf..356bb1eb6e9a 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -14,14 +14,17 @@
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
+#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <asm/ftrace.h>
+#include <linux/ftrace.h>
#include <asm/nops.h>
+#include <asm/nmi.h>
-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+#ifdef CONFIG_DYNAMIC_FTRACE
union ftrace_code_union {
char code[MCOUNT_INSN_SIZE];
@@ -31,18 +34,12 @@ union ftrace_code_union {
} __attribute__((packed));
};
-
static int ftrace_calc_offset(long ip, long addr)
{
return (int)(addr - ip);
}
-unsigned char *ftrace_nop_replace(void)
-{
- return ftrace_nop;
-}
-
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
static union ftrace_code_union calc;
@@ -56,7 +53,143 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
return calc.code;
}
-int
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUS from exectuing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ * and the new code into the "code" buffer.
+ * 2) Set a flag that says we are modifying code
+ * 3) Wait for any running NMIs to finish.
+ * 4) Write the code
+ * 5) clear the flag.
+ * 6) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+
+static atomic_t in_nmi = ATOMIC_INIT(0);
+static int mod_code_status; /* holds return value of text write */
+static int mod_code_write; /* set when NMI should do the write */
+static void *mod_code_ip; /* holds the IP to write to */
+static void *mod_code_newcode; /* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+ int r;
+
+ r = snprintf(buf, size, "%u %u",
+ nmi_wait_count,
+ atomic_read(&nmi_update_count));
+ return r;
+}
+
+static void ftrace_mod_code(void)
+{
+ /*
+ * Yes, more than one CPU process can be writing to mod_code_status.
+ * (and the code itself)
+ * But if one were to fail, then they all should, and if one were
+ * to succeed, then they all should.
+ */
+ mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+ MCOUNT_INSN_SIZE);
+
+}
+
+void ftrace_nmi_enter(void)
+{
+ atomic_inc(&in_nmi);
+ /* Must have in_nmi seen before reading write flag */
+ smp_mb();
+ if (mod_code_write) {
+ ftrace_mod_code();
+ atomic_inc(&nmi_update_count);
+ }
+}
+
+void ftrace_nmi_exit(void)
+{
+ /* Finish all executions before clearing in_nmi */
+ smp_wmb();
+ atomic_dec(&in_nmi);
+}
+
+static void wait_for_nmi(void)
+{
+ int waited = 0;
+
+ while (atomic_read(&in_nmi)) {
+ waited = 1;
+ cpu_relax();
+ }
+
+ if (waited)
+ nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+ mod_code_ip = (void *)ip;
+ mod_code_newcode = new_code;
+
+ /* The buffers need to be visible before we let NMIs write them */
+ smp_wmb();
+
+ mod_code_write = 1;
+
+ /* Make sure write bit is visible before we wait on NMIs */
+ smp_mb();
+
+ wait_for_nmi();
+
+ /* Make sure all running NMIs have finished before we write the code */
+ smp_mb();
+
+ ftrace_mod_code();
+
+ /* Make sure the write happens before clearing the bit */
+ smp_wmb();
+
+ mod_code_write = 0;
+
+ /* make sure NMIs see the cleared bit */
+ smp_mb();
+
+ wait_for_nmi();
+
+ return mod_code_status;
+}
+
+
+
+
+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
+
+static unsigned char *ftrace_nop_replace(void)
+{
+ return ftrace_nop;
+}
+
+static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code)
{
@@ -81,7 +214,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
return -EINVAL;
/* replace the text with the new text */
- if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+ if (do_ftrace_mod_code(ip, new_code))
return -EPERM;
sync_core();
@@ -89,6 +222,29 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
return 0;
}
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned char *new, *old;
+ unsigned long ip = rec->ip;
+
+ old = ftrace_call_replace(ip, addr);
+ new = ftrace_nop_replace();
+
+ return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned char *new, *old;
+ unsigned long ip = rec->ip;
+
+ old = ftrace_nop_replace();
+ new = ftrace_call_replace(ip, addr);
+
+ return ftrace_modify_code(rec->ip, old, new);
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
@@ -165,3 +321,138 @@ int __init ftrace_dyn_arch_init(void *data)
return 0;
}
+#endif
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+
+/*
+ * These functions are picked from those used on
+ * this page for dynamic ftrace. They have been
+ * simplified to ignore all traces in NMI context.
+ */
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
+{
+ atomic_inc(&in_nmi);
+}
+
+void ftrace_nmi_exit(void)
+{
+ atomic_dec(&in_nmi);
+}
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/* Add a function return address to the trace stack on thread info.*/
+static int push_return_trace(unsigned long ret, unsigned long long time,
+ unsigned long func)
+{
+ int index;
+ struct thread_info *ti = current_thread_info();
+
+ /* The return trace stack is full */
+ if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
+ atomic_inc(&ti->trace_overrun);
+ return -EBUSY;
+ }
+
+ index = ++ti->curr_ret_stack;
+ barrier();
+ ti->ret_stack[index].ret = ret;
+ ti->ret_stack[index].func = func;
+ ti->ret_stack[index].calltime = time;
+
+ return 0;
+}
+
+/* Retrieve a function return address to the trace stack on thread info.*/
+static void pop_return_trace(unsigned long *ret, unsigned long long *time,
+ unsigned long *func, unsigned long *overrun)
+{
+ int index;
+
+ struct thread_info *ti = current_thread_info();
+ index = ti->curr_ret_stack;
+ *ret = ti->ret_stack[index].ret;
+ *func = ti->ret_stack[index].func;
+ *time = ti->ret_stack[index].calltime;
+ *overrun = atomic_read(&ti->trace_overrun);
+ ti->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+ struct ftrace_retfunc trace;
+ pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
+ &trace.overrun);
+ trace.rettime = cpu_clock(raw_smp_processor_id());
+ ftrace_function_return(&trace);
+
+ return trace.ret;
+}
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ unsigned long old;
+ unsigned long long calltime;
+ int faulted;
+ unsigned long return_hooker = (unsigned long)
+ &return_to_handler;
+
+ /* Nmi's are currently unsupported */
+ if (atomic_read(&in_nmi))
+ return;
+
+ /*
+ * Protect against fault, even if it shouldn't
+ * happen. This tool is too much intrusive to
+ * ignore such a protection.
+ */
+ asm volatile(
+ "1: movl (%[parent_old]), %[old]\n"
+ "2: movl %[return_hooker], (%[parent_replaced])\n"
+ " movl $0, %[faulted]\n"
+
+ ".section .fixup, \"ax\"\n"
+ "3: movl $1, %[faulted]\n"
+ ".previous\n"
+
+ ".section __ex_table, \"a\"\n"
+ " .long 1b, 3b\n"
+ " .long 2b, 3b\n"
+ ".previous\n"
+
+ : [parent_replaced] "=r" (parent), [old] "=r" (old),
+ [faulted] "=r" (faulted)
+ : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+ : "memory"
+ );
+
+ if (WARN_ON(faulted)) {
+ unregister_ftrace_return();
+ return;
+ }
+
+ if (WARN_ON(!__kernel_text_address(old))) {
+ unregister_ftrace_return();
+ *parent = old;
+ return;
+ }
+
+ calltime = cpu_clock(raw_smp_processor_id());
+
+ if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+ *parent = old;
+}
+
+#endif /* CONFIG_FUNCTION_RET_TRACER */
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 7a3f2028e2eb..c9513e1ff28d 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -1140,6 +1140,20 @@ static void __clear_irq_vector(int irq)
cfg->vector = 0;
cpus_clear(cfg->domain);
+
+ if (likely(!cfg->move_in_progress))
+ return;
+ cpus_and(mask, cfg->old_domain, cpu_online_map);
+ for_each_cpu_mask_nr(cpu, mask) {
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
+ vector++) {
+ if (per_cpu(vector_irq, cpu)[vector] != irq)
+ continue;
+ per_cpu(vector_irq, cpu)[vector] = -1;
+ break;
+ }
+ }
+ cfg->move_in_progress = 0;
}
void __setup_vector_irq(int cpu)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 724adfc63cb9..cc5a2545dd41 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -169,6 +169,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
},
},
+ { /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
+ .callback = set_bios_reboot,
+ .ident = "Dell OptiPlex 330",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
+ DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
+ },
+ },
{ /* Handle problems with rebooting on Dell 2400's */
.callback = set_bios_reboot,
.ident = "Dell PowerEdge 2400",
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0fa6790c1dd3..9d5674f7b6cc 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -764,7 +764,7 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
.callback = dmi_low_memory_corruption,
.ident = "Phoenix BIOS",
.matches = {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
},
},
#endif
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 9ffb01c31c40..1c0dfbca87c1 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void)
cycles_t start, now, prev, end;
int i;
+ rdtsc_barrier();
start = get_cycles();
+ rdtsc_barrier();
/*
* The measurement runs for 20 msecs:
*/
@@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void)
*/
__raw_spin_lock(&sync_lock);
prev = last_tsc;
+ rdtsc_barrier();
now = get_cycles();
+ rdtsc_barrier();
last_tsc = now;
__raw_spin_unlock(&sync_lock);
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 0b8b6690a86d..6f3d3d4cd973 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -17,6 +17,9 @@
* want per guest time just set the kernel.vsyscall64 sysctl to 0.
*/
+/* Disable profiling for userspace code: */
+#define DISABLE_BRANCH_PROFILING
+
#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 0e331652681e..52145007bd7e 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -7,6 +7,7 @@
* This file provides all the same external entries as smp.c but uses
* the voyager hal to provide the functionality
*/
+#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
@@ -1790,6 +1791,17 @@ void __init smp_setup_processor_id(void)
x86_write_percpu(cpu_number, hard_smp_processor_id());
}
+static void voyager_send_call_func(cpumask_t callmask)
+{
+ __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+ send_CPI(mask, VIC_CALL_FUNCTION_CPI);
+}
+
+static void voyager_send_call_func_single(int cpu)
+{
+ send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
+}
+
struct smp_ops smp_ops = {
.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
.smp_prepare_cpus = voyager_smp_prepare_cpus,
@@ -1799,6 +1811,6 @@ struct smp_ops smp_ops = {
.smp_send_stop = voyager_smp_send_stop,
.smp_send_reschedule = voyager_smp_send_reschedule,
- .send_call_func_ipi = native_send_call_func_ipi,
- .send_call_func_single_ipi = native_send_call_func_single_ipi,
+ .send_call_func_ipi = voyager_send_call_func,
+ .send_call_func_single_ipi = voyager_send_call_func_single,
};
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index fea4565ff576..d8cc96a2738f 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -8,9 +8,8 @@ obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o
obj-$(CONFIG_HIGHMEM) += highmem_32.o
-obj-$(CONFIG_MMIOTRACE_HOOKS) += kmmio.o
obj-$(CONFIG_MMIOTRACE) += mmiotrace.o
-mmiotrace-y := pf_in.o mmio-mod.o
+mmiotrace-y := kmmio.o pf_in.o mmio-mod.o
obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
obj-$(CONFIG_NUMA) += numa_$(BITS).o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 31e8730fa246..4152d3c3b138 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -53,7 +53,7 @@
static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
-#ifdef CONFIG_MMIOTRACE_HOOKS
+#ifdef CONFIG_MMIOTRACE
if (unlikely(is_kmmio_active()))
if (kmmio_handler(regs, addr) == 1)
return -1;
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 847c164725f4..8518c678d83f 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -222,6 +222,41 @@ static void __init remap_numa_kva(void)
}
}
+#ifdef CONFIG_HIBERNATION
+/**
+ * resume_map_numa_kva - add KVA mapping to the temporary page tables created
+ * during resume from hibernation
+ * @pgd_base - temporary resume page directory
+ */
+void resume_map_numa_kva(pgd_t *pgd_base)
+{
+ int node;
+
+ for_each_online_node(node) {
+ unsigned long start_va, start_pfn, size, pfn;
+
+ start_va = (unsigned long)node_remap_start_vaddr[node];
+ start_pfn = node_remap_start_pfn[node];
+ size = node_remap_size[node];
+
+ printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node);
+
+ for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
+ unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
+ pgd_t *pgd = pgd_base + pgd_index(vaddr);
+ pud_t *pud = pud_offset(pgd, vaddr);
+ pmd_t *pmd = pmd_offset(pud, vaddr);
+
+ set_pmd(pmd, pfn_pmd(start_pfn + pfn,
+ PAGE_KERNEL_LARGE_EXEC));
+
+ printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
+ __FUNCTION__, vaddr, start_pfn + pfn);
+ }
+ }
+}
+#endif
+
static unsigned long calculate_numa_remap_pages(void)
{
int nid;
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index f2b6e3f11bfc..81197c62d5b3 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -12,6 +12,7 @@
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/mmzone.h>
/* Defined in hibernate_asm_32.S */
extern int restore_image(void);
@@ -127,6 +128,9 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
}
}
}
+
+ resume_map_numa_kva(pgd_base);
+
return 0;
}
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 1ef0f90813d6..d9d35824c56f 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -9,6 +9,9 @@
* Also alternative() doesn't work.
*/
+/* Disable profiling for userspace code: */
+#define DISABLE_BRANCH_PROFILING
+
#include <linux/kernel.h>
#include <linux/posix-timers.h>
#include <linux/time.h>