Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/cputable.c           |  2
-rw-r--r--  arch/powerpc/kernel/entry_64.S           | 44
-rw-r--r--  arch/powerpc/kernel/kgdb.c               |  6
-rw-r--r--  arch/powerpc/kernel/pci-common.c         |  2
-rw-r--r--  arch/powerpc/kernel/pci_64.c             |  2
-rw-r--r--  arch/powerpc/kernel/perf_event.c         | 17
-rw-r--r--  arch/powerpc/kernel/process.c            | 12
-rw-r--r--  arch/powerpc/kernel/setup_64.c           |  1
-rw-r--r--  arch/powerpc/kernel/vdso.c               | 11
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32.lds.S  |  4
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S        |  1
11 files changed, 58 insertions, 44 deletions
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 0b9c9135922e..03c862b6a9c4 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -711,6 +711,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_750,
.machine_check = machine_check_generic,
.platform = "ppc750",
+ .oprofile_cpu_type = "ppc/750",
+ .oprofile_type = PPC_OPROFILE_G4,
},
{ /* 745/755 */
.pvr_mask = 0xfffff000,
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 900e0eea0099..9763267e38b4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -658,42 +658,43 @@ do_work:
cmpdi r0,0
crandc eq,cr1*4+eq,eq
bne restore
- /* here we are preempting the current task */
-1:
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl .trace_hardirqs_on
- /* Note: we just clobbered r10 which used to contain the previous
- * MSR before the hard-disabling done by the caller of do_work.
- * We don't have that value anymore, but it doesn't matter as
- * we will hard-enable unconditionally, we can just reload the
- * current MSR into r10
+
+ /* Here we are preempting the current task.
+ *
+ * Ensure interrupts are soft-disabled. We also properly mark
+ * the PACA to reflect the fact that they are hard-disabled
+ * and trace the change
*/
- mfmsr r10
-#endif /* CONFIG_TRACE_IRQFLAGS */
- li r0,1
+ li r0,0
stb r0,PACASOFTIRQEN(r13)
stb r0,PACAHARDIRQEN(r13)
+ TRACE_DISABLE_INTS
+
+ /* Call the scheduler with soft IRQs off */
+1: bl .preempt_schedule_irq
+
+ /* Hard-disable interrupts again (and update PACA) */
#ifdef CONFIG_PPC_BOOK3E
- wrteei 1
- bl .preempt_schedule
wrteei 0
#else
- ori r10,r10,MSR_EE
- mtmsrd r10,1 /* reenable interrupts */
- bl .preempt_schedule
mfmsr r10
- clrrdi r9,r1,THREAD_SHIFT
- rldicl r10,r10,48,1 /* disable interrupts again */
+ rldicl r10,r10,48,1
rotldi r10,r10,16
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
+ li r0,0
+ stb r0,PACAHARDIRQEN(r13)
+
+ /* Re-test flags and eventually loop */
+ clrrdi r9,r1,THREAD_SHIFT
ld r4,TI_FLAGS(r9)
andi. r0,r4,_TIF_NEED_RESCHED
bne 1b
b restore
user_work:
-#endif
+#endif /* CONFIG_PREEMPT */
+
/* Enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
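
A C-level sketch of the loop the new assembly implements (illustrative only,
not the actual kernel code; the paca field names soft_enabled/hard_enabled are
assumptions about what PACASOFTIRQEN and PACAHARDIRQEN refer to here):

	/* Interrupts are soft- and hard-disabled before the call;
	 * preempt_schedule_irq() re-enables them itself around the context
	 * switch, so we hard-disable again afterwards and re-test the flag.
	 */
	do {
		get_paca()->soft_enabled = 0;	/* PACASOFTIRQEN */
		get_paca()->hard_enabled = 0;	/* PACAHARDIRQEN */
		preempt_schedule_irq();
		hard_irq_disable();		/* the wrteei 0 / mtmsrd path above */
	} while (test_thread_flag(TIF_NEED_RESCHED));
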
@@ -1038,8 +1039,7 @@ _GLOBAL(mod_return_to_handler)
* We are in a module using the module's TOC.
* Switch to our TOC to run inside the core kernel.
*/
- LOAD_REG_IMMEDIATE(r4,ftrace_return_to_handler)
- ld r2, 8(r4)
+ ld r2, PACATOC(r13)
bl .ftrace_return_to_handler
nop
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index fe8f71dd0b3f..641c74bb8e27 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -282,12 +282,6 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
unsigned long *ptr = gdb_regs;
int reg;
-#ifdef CONFIG_SPE
- union {
- u32 v32[2];
- u64 v64;
- } acc;
-#endif
for (reg = 0; reg < 32; reg++)
UNPACK64(regs->gpr[reg], ptr);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index bb8209e34931..e8dfdbd9327a 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1190,7 +1190,7 @@ EXPORT_SYMBOL(pcibios_align_resource);
* Reparent resource children of pr that conflict with res
* under res, and make res replace those children.
*/
-static int __init reparent_resources(struct resource *parent,
+static int reparent_resources(struct resource *parent,
struct resource *res)
{
struct resource *p, **pp;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index ba949a2c93ac..ccf56ac92de5 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -97,7 +97,9 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
* to do an appropriate TLB flush here too
*/
if (bus->self) {
+#ifdef CONFIG_PPC_STD_MMU_64
struct resource *res = bus->resource[0];
+#endif
pr_debug("IO unmapping for PCI-PCI bridge %s\n",
pci_name(bus->self));
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index bbcbae183e92..87f1663584b0 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -116,20 +116,23 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
+ unsigned long sihv = MMCRA_SIHV;
+ unsigned long sipr = MMCRA_SIPR;
if (TRAP(regs) != 0xf00)
return 0; /* not a PMU interrupt */
if (ppmu->flags & PPMU_ALT_SIPR) {
- if (mmcra & POWER6_MMCRA_SIHV)
- return PERF_RECORD_MISC_HYPERVISOR;
- return (mmcra & POWER6_MMCRA_SIPR) ?
- PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL;
+ sihv = POWER6_MMCRA_SIHV;
+ sipr = POWER6_MMCRA_SIPR;
}
- if (mmcra & MMCRA_SIHV)
+
+ /* PR has priority over HV, so order below is important */
+ if (mmcra & sipr)
+ return PERF_RECORD_MISC_USER;
+ if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV))
return PERF_RECORD_MISC_HYPERVISOR;
- return (mmcra & MMCRA_SIPR) ? PERF_RECORD_MISC_USER :
- PERF_RECORD_MISC_KERNEL;
+ return PERF_RECORD_MISC_KERNEL;
}
/*
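
Read as plain C, the priority the new comment establishes looks like this
(sketch only; the function name and parameters are illustrative, not kernel
identifiers, and hv_frozen stands for freeze_events_kernel == MMCR0_FCHV):

	static u32 classify_sample(int sipr, int sihv, int hv_frozen)
	{
		if (sipr)			/* problem state set: user sample */
			return PERF_RECORD_MISC_USER;
		if (sihv && !hv_frozen)		/* trust HV only when not freezing
						   counters in hypervisor state */
			return PERF_RECORD_MISC_HYPERVISOR;
		return PERF_RECORD_MISC_KERNEL;
	}
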
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 1168c5f440ab..c930ac38e59f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1016,9 +1016,13 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int curr_frame = current->curr_ret_stack;
extern void return_to_handler(void);
- unsigned long addr = (unsigned long)return_to_handler;
+ unsigned long rth = (unsigned long)return_to_handler;
+ unsigned long mrth = -1;
#ifdef CONFIG_PPC64
- addr = *(unsigned long*)addr;
+ extern void mod_return_to_handler(void);
+ rth = *(unsigned long *)rth;
+ mrth = (unsigned long)mod_return_to_handler;
+ mrth = *(unsigned long *)mrth;
#endif
#endif
@@ -1044,7 +1048,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
if (!firstframe || ip != lr) {
printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if (ip == addr && curr_frame >= 0) {
+ if ((ip == rth || ip == mrth) && curr_frame >= 0) {
printk(" (%pS)",
(void *)current->ret_stack[curr_frame].ret);
curr_frame--;
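
The extra loads above are needed because, with the 64-bit ELFv1 ABI, a function
symbol such as return_to_handler names a function descriptor rather than the
code itself; the saved LR on the stack holds the entry address, so the
descriptor has to be dereferenced once before comparing. A minimal sketch
(struct and helper names are illustrative, not kernel identifiers):

	struct func_desc {
		unsigned long entry;	/* what actually ends up in the LR */
		unsigned long toc;
		unsigned long env;
	};

	static unsigned long entry_of(void *descriptor)
	{
		return ((struct func_desc *)descriptor)->entry;
	}
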
@@ -1168,7 +1172,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
unsigned long base = mm->brk;
unsigned long ret;
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
/*
* If we are using 1TB segments and we are allowed to randomise
* the heap, we can put it above 1TB so it is backed by a 1TB
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 797ea95aae2e..04f638d82fb3 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -57,7 +57,6 @@
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/mmu-hash64.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 94e2df3cae07..137dc22afa42 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -50,6 +50,9 @@
/* Max supported size for symbol names */
#define MAX_SYMNAME 64
+/* The alignment of the vDSO */
+#define VDSO_ALIGNMENT (1 << 16)
+
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
@@ -231,15 +234,21 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* pick a base address for the vDSO in process space. We try to put it
* at vdso_base which is the "natural" base for it, but we might fail
* and end up putting it elsewhere.
+ * Add enough to the size so that the result can be aligned.
*/
down_write(&mm->mmap_sem);
vdso_base = get_unmapped_area(NULL, vdso_base,
- vdso_pages << PAGE_SHIFT, 0, 0);
+ (vdso_pages << PAGE_SHIFT) +
+ ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+ 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
rc = vdso_base;
goto fail_mmapsem;
}
+ /* Add required alignment. */
+ vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
+
/*
* Put vDSO base into mm struct. We need to do this before calling
* install_special_mapping or the perf counter mmap tracking code
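
The extra ((VDSO_ALIGNMENT - 1) & PAGE_MASK) bytes requested above give the
mapping enough slack that its start can be rounded up to the next 64K boundary
while the whole vDSO still fits inside the reservation. A worked example under
assumed values (ALIGN_UP and the base address are illustrative, not kernel
identifiers; 4K pages assumed, so the slack is 0x10000 - 0x1000 = 60K):

	#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

	/* get_unmapped_area() returns a page-aligned base, say 0x10003000 */
	unsigned long base = 0x10003000;
	unsigned long vdso = ALIGN_UP(base, VDSO_ALIGNMENT);	/* 0x10010000 */
	/* vdso - base = 0xd000 (52K), within the 60K of slack reserved */
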
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 904ef1360dd7..0546bcd49cd0 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -25,7 +25,7 @@ SECTIONS
. = ALIGN(16);
.text : {
*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
- }
+ } :text
PROVIDE(__etext = .);
PROVIDE(_etext = .);
PROVIDE(etext = .);
@@ -56,7 +56,7 @@ SECTIONS
.fixup : { *(.fixup) }
.dynamic : { *(.dynamic) } :text :dynamic
- .got : { *(.got) }
+ .got : { *(.got) } :text
.plt : { *(.plt) }
_end = .;
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index f56429362a12..27735a7ac12b 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -236,6 +236,7 @@ SECTIONS
READ_MOSTLY_DATA(L1_CACHE_BYTES)
}
+ . = ALIGN(PAGE_SIZE);
.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
NOSAVE_DATA
}