Diffstat (limited to 'arch/powerpc/kernel')
 arch/powerpc/kernel/align.c          | 36
 arch/powerpc/kernel/asm-offsets.c    |  7
 arch/powerpc/kernel/cacheinfo.c      | 10
 arch/powerpc/kernel/dma-iommu.c      |  4
 arch/powerpc/kernel/ftrace.c         |  5
 arch/powerpc/kernel/head_64.S        |  9
 arch/powerpc/kernel/head_fsl_booke.S |  8
 arch/powerpc/kernel/iommu.c          | 12
 arch/powerpc/kernel/irq.c            |  2
 arch/powerpc/kernel/lparcfg.c        | 10
 arch/powerpc/kernel/machine_kexec.c  | 25
 arch/powerpc/kernel/pci-common.c     | 22
 arch/powerpc/kernel/pci_64.c         |  6
 arch/powerpc/kernel/prom.c           |  5
 arch/powerpc/kernel/setup_64.c       |  6
 arch/powerpc/kernel/vio.c            |  7
 arch/powerpc/kernel/vmlinux.lds.S    |  5
 17 files changed, 120 insertions(+), 59 deletions(-)
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 5af4e9b2dbe2..73cb6a3229ae 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -367,27 +367,24 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
unsigned int flags)
{
- char *ptr = (char *) &current->thread.TS_FPR(reg);
- int i, ret;
+ char *ptr0 = (char *) &current->thread.TS_FPR(reg);
+ char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
+ int i, ret, sw = 0;
if (!(flags & F))
return 0;
if (reg & 1)
return 0; /* invalid form: FRS/FRT must be even */
- if (!(flags & SW)) {
- /* not byte-swapped - easy */
- if (!(flags & ST))
- ret = __copy_from_user(ptr, addr, 16);
- else
- ret = __copy_to_user(addr, ptr, 16);
- } else {
- /* each FPR value is byte-swapped separately */
- ret = 0;
- for (i = 0; i < 16; ++i) {
- if (!(flags & ST))
- ret |= __get_user(ptr[i^7], addr + i);
- else
- ret |= __put_user(ptr[i^7], addr + i);
+ if (flags & SW)
+ sw = 7;
+ ret = 0;
+ for (i = 0; i < 8; ++i) {
+ if (!(flags & ST)) {
+ ret |= __get_user(ptr0[i^sw], addr + i);
+ ret |= __get_user(ptr1[i^sw], addr + i + 8);
+ } else {
+ ret |= __put_user(ptr0[i^sw], addr + i);
+ ret |= __put_user(ptr1[i^sw], addr + i + 8);
}
}
if (ret)
@@ -646,11 +643,16 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
unsigned int areg, struct pt_regs *regs,
unsigned int flags, unsigned int length)
{
- char *ptr = (char *) &current->thread.TS_FPR(reg);
+ char *ptr;
int ret = 0;
flush_vsx_to_thread(current);
+ if (reg < 32)
+ ptr = (char *) &current->thread.TS_FPR(reg);
+ else
+ ptr = (char *) &current->thread.vr[reg - 32];
+
if (flags & ST)
ret = __copy_to_user(addr, ptr, length);
else {
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9937fe44555f..19ee491e9e23 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -56,6 +56,10 @@
#include "head_booke.h"
#endif
+#if defined(CONFIG_FSL_BOOKE)
+#include "../mm/mmu_decl.h"
+#endif
+
int main(void)
{
DEFINE(THREAD, offsetof(struct task_struct, thread));
@@ -382,6 +386,9 @@ int main(void)
DEFINE(PGD_T_LOG2, PGD_T_LOG2);
DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
+#ifdef CONFIG_FSL_BOOKE
+ DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
+#endif
#ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index b33f0417a4bf..bb37b1d19a58 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -113,7 +113,7 @@ struct cache {
struct cache *next_local; /* next cache of >= level */
};
-static DEFINE_PER_CPU(struct cache_dir *, cache_dir);
+static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);
/* traversal/modification of this list occurs only at cpu hotplug time;
* access is serialized by cpu hotplug locking
@@ -468,9 +468,9 @@ static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_i
cache_dir->kobj = kobj;
- WARN_ON_ONCE(per_cpu(cache_dir, cpu_id) != NULL);
+ WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);
- per_cpu(cache_dir, cpu_id) = cache_dir;
+ per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;
return cache_dir;
err:
@@ -820,13 +820,13 @@ void cacheinfo_cpu_offline(unsigned int cpu_id)
/* Prevent userspace from seeing inconsistent state - remove
* the sysfs hierarchy first */
- cache_dir = per_cpu(cache_dir, cpu_id);
+ cache_dir = per_cpu(cache_dir_pcpu, cpu_id);
/* careful, sysfs population may have failed */
if (cache_dir)
remove_cache_dir(cache_dir);
- per_cpu(cache_dir, cpu_id) = NULL;
+ per_cpu(cache_dir_pcpu, cpu_id) = NULL;
/* clear the CPU's bit in its cache chain, possibly freeing
* cache objects */
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 14183af1b3fb..2983adac8cc3 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -79,10 +79,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
"Warning: IOMMU offset too big for device mask\n");
if (tbl)
printk(KERN_INFO
- "mask: 0x%08lx, table offset: 0x%08lx\n",
+ "mask: 0x%08llx, table offset: 0x%08lx\n",
mask, tbl->it_offset);
else
- printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+ printk(KERN_INFO "mask: 0x%08llx, table unavailable\n",
mask);
return 0;
} else
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 5355244c99ff..60c60ccf5e3c 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -195,8 +195,9 @@ __ftrace_make_nop(struct module *mod,
return -EINVAL;
}
- offset = (unsigned)((unsigned short)jmp[0]) << 16 |
- (unsigned)((unsigned short)jmp[1]);
+ /* The bottom half is sign-extended */
+ offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
+ (int)((short)jmp[1]);
DEBUGP(" %x ", offset);
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b4bcf5a930fa..ebaedafc8e67 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -1518,6 +1518,15 @@ _GLOBAL(pmac_secondary_start)
/* turn on 64-bit mode */
bl .enable_64b_mode
+ li r0,0
+ mfspr r3,SPRN_HID4
+ rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */
+ sync
+ mtspr SPRN_HID4,r3
+ isync
+ sync
+ slbia
+
/* get TOC pointer (real address) */
bl .relative_toc
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 11b549acc034..36ffb3504a4f 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -389,10 +389,6 @@ skpinv: addi r6,r6,1 /* Increment */
#endif
#endif
- mfspr r3,SPRN_TLB1CFG
- andi. r3,r3,0xfff
- lis r4,num_tlbcam_entries@ha
- stw r3,num_tlbcam_entries@l(r4)
/*
* Decide what sort of machine this is and initialize the MMU.
*/
@@ -711,7 +707,7 @@ interrupt_base:
EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
#ifdef CONFIG_PPC_E500MC
- EXCEPTION(0x2070, Doorbell, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2070, Doorbell, unknown_exception, EXC_XFER_STD)
#endif
/* Debug Interrupt */
@@ -909,7 +905,7 @@ KernelSPE:
_GLOBAL(loadcam_entry)
lis r4,TLBCAM@ha
addi r4,r4,TLBCAM@l
- mulli r5,r3,20
+ mulli r5,r3,TLBCAM_SIZE
add r3,r5,r4
lwz r4,0(r3)
mtspr SPRN_MAS0,r4
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 1bfa706b96e7..fd51578e29dd 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -239,12 +239,12 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
if (printk_ratelimit()) {
printk(KERN_INFO "iommu_free: invalid entry\n");
printk(KERN_INFO "\tentry = 0x%lx\n", entry);
- printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
- printk(KERN_INFO "\tTable = 0x%lx\n", (u64)tbl);
- printk(KERN_INFO "\tbus# = 0x%lx\n", (u64)tbl->it_busno);
- printk(KERN_INFO "\tsize = 0x%lx\n", (u64)tbl->it_size);
- printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
- printk(KERN_INFO "\tindex = 0x%lx\n", (u64)tbl->it_index);
+ printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
+ printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
+ printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
+ printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
+ printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
+ printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
WARN_ON(1);
}
return;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 23b8b5e36f98..ad1e5ac721d8 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
if (irq_desc[irq].status & IRQ_PER_CPU)
continue;
- cpus_and(mask, irq_desc[irq].affinity, map);
+ cpumask_and(&mask, irq_desc[irq].affinity, &map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);
mask = map;
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index d051e8cbcd03..182e0f642f36 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -240,7 +240,7 @@ static void parse_ppp_data(struct seq_file *m)
if (rc)
return;
- seq_printf(m, "partition_entitled_capacity=%ld\n",
+ seq_printf(m, "partition_entitled_capacity=%lld\n",
ppp_data.entitlement);
seq_printf(m, "group=%d\n", ppp_data.group_num);
seq_printf(m, "system_active_processors=%d\n",
@@ -265,7 +265,7 @@ static void parse_ppp_data(struct seq_file *m)
ppp_data.unallocated_weight);
seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
seq_printf(m, "capped=%d\n", ppp_data.capped);
- seq_printf(m, "unallocated_capacity=%ld\n",
+ seq_printf(m, "unallocated_capacity=%lld\n",
ppp_data.unallocated_entitlement);
}
@@ -509,10 +509,10 @@ static ssize_t update_ppp(u64 *entitlement, u8 *weight)
} else
return -EINVAL;
- pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
+ pr_debug("%s: current_entitled = %llu, current_weight = %u\n",
__func__, ppp_data.entitlement, ppp_data.weight);
- pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+ pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
__func__, new_entitled, new_weight);
retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
@@ -558,7 +558,7 @@ static ssize_t update_mpp(u64 *entitlement, u8 *weight)
pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
__func__, mpp_data.entitled_mem, mpp_data.mem_weight);
- pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+ pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
__func__, new_entitled, new_weight);
rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index b3abebb7ee64..d59e2b1bdcba 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -93,10 +93,35 @@ void __init reserve_crashkernel(void)
KDUMP_KERNELBASE);
crashk_res.start = KDUMP_KERNELBASE;
+#else
+ if (!crashk_res.start) {
+ /*
+ * unspecified address, choose a region of specified size
+ * can overlap with initrd (ignoring corruption when retained)
+ * ppc64 requires kernel and some stacks to be in first segment
+ */
+ crashk_res.start = KDUMP_KERNELBASE;
+ }
+
+ crash_base = PAGE_ALIGN(crashk_res.start);
+ if (crash_base != crashk_res.start) {
+ printk("Crash kernel base must be aligned to 0x%lx\n",
+ PAGE_SIZE);
+ crashk_res.start = crash_base;
+ }
+
#endif
crash_size = PAGE_ALIGN(crash_size);
crashk_res.end = crashk_res.start + crash_size - 1;
+ /* The crash region must not overlap the current kernel */
+ if (overlaps_crashkernel(__pa(_stext), _end - _stext)) {
+ printk(KERN_WARNING
+ "Crash kernel can not overlap current kernel\n");
+ crashk_res.start = crashk_res.end = 0;
+ return;
+ }
+
/* Crash kernel trumps memory limit */
if (memory_limit && memory_limit <= crashk_res.end) {
memory_limit = crashk_res.end + 1;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index da5a3855a0c4..0f4181272311 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -16,8 +16,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#define DEBUG
-
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
@@ -258,7 +256,8 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
} else {
pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
oirq.size, oirq.specifier[0], oirq.specifier[1],
- oirq.controller->full_name);
+ oirq.controller ? oirq.controller->full_name :
+ "<default>");
virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
@@ -562,8 +561,21 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
(unsigned long long)(offset + size - 1));
if (mmap_state == pci_mmap_mem) {
- if ((offset + size) > hose->isa_mem_size)
- return -ENXIO;
+ /* Hack alert !
+ *
+ * Because X is lame and can fail starting if it gets an error trying
+ * to mmap legacy_mem (instead of just moving on without legacy memory
+ * access) we fake it here by giving it anonymous memory, effectively
+ * behaving just like /dev/zero
+ */
+ if ((offset + size) > hose->isa_mem_size) {
+ printk(KERN_DEBUG
+ "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
+ current->comm, current->pid, pci_domain_nr(bus), bus->number);
+ if (vma->vm_flags & VM_SHARED)
+ return shmem_zero_setup(vma);
+ return 0;
+ }
offset += hose->isa_mem_phys;
} else {
unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 586962f65c2a..ea8eda8c87cf 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -470,7 +470,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
if (bus->self) {
pr_debug("IO mapping for PCI-PCI bridge %s\n",
pci_name(bus->self));
- pr_debug(" virt=0x%016lx...0x%016lx\n",
+ pr_debug(" virt=0x%016llx...0x%016llx\n",
bus->resource[0]->start + _IO_BASE,
bus->resource[0]->end + _IO_BASE);
return 0;
@@ -502,7 +502,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
hose->io_base_phys - phys_page);
pr_debug("IO mapping for PHB %s\n", hose->dn->full_name);
- pr_debug(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
+ pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
pr_debug(" size=0x%016lx (alloc=0x%016lx)\n",
hose->pci_io_size, size_page);
@@ -517,7 +517,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
hose->io_resource.start += io_virt_offset;
hose->io_resource.end += io_virt_offset;
- pr_debug(" hose->io_resource=0x%016lx...0x%016lx\n",
+ pr_debug(" hose->io_resource=0x%016llx...0x%016llx\n",
hose->io_resource.start, hose->io_resource.end);
return 0;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index c09cffafb6ee..f00f83109ab3 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -590,6 +590,11 @@ static void __init check_cpu_slb_size(unsigned long node)
{
u32 *slb_size_ptr;
+ slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
+ if (slb_size_ptr != NULL) {
+ mmu_slb_size = *slb_size_ptr;
+ return;
+ }
slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
if (slb_size_ptr != NULL) {
mmu_slb_size = *slb_size_ptr;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index d8bd2161e738..2d34196bba8c 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -434,8 +434,8 @@ void __init setup_system(void)
printk("Starting Linux PPC64 %s\n", init_utsname()->version);
printk("-----------------------------------------------------\n");
- printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
- printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size());
+ printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
+ printk("physicalMemorySize = 0x%llx\n", lmb_phys_mem_size());
if (ppc64_caches.dline_size != 0x80)
printk("ppc64_caches.dcache_line_size = 0x%x\n",
ppc64_caches.dline_size);
@@ -493,7 +493,7 @@ static void __init emergency_stack_init(void)
* bringup, we need to get at them in real mode. This means they
* must also be within the RMO region.
*/
- limit = min(0x10000000UL, lmb.rmo_size);
+ limit = min(0x10000000ULL, lmb.rmo_size);
for_each_possible_cpu(i) {
unsigned long sp;
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 94aa7b011b27..d3694498f3af 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -492,14 +492,14 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
struct vio_dev *viodev = to_vio_dev(dev);
void *ret;
- if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
+ if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
atomic_inc(&viodev->cmo.allocs_failed);
return NULL;
}
ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
if (unlikely(ret == NULL)) {
- vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+ vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
atomic_inc(&viodev->cmo.allocs_failed);
}
@@ -513,7 +513,7 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);
- vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+ vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}
static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
@@ -572,6 +572,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
if (unlikely(!ret)) {
vio_cmo_dealloc(viodev, alloc_size);
atomic_inc(&viodev->cmo.allocs_failed);
+ return ret;
}
for (sgl = sglist, count = 0; count < ret; count++, sgl++)
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 47bf15cd2c9e..295ccc5e86b1 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -87,7 +87,9 @@ SECTIONS
/* The dummy segment contents for the bug workaround mentioned above
near PHDRS. */
.dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
- LONG(0xf177)
+ LONG(0)
+ LONG(0)
+ LONG(0)
} :kernel :dummy
/*
@@ -182,6 +184,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
__per_cpu_start = .;
+ *(.data.percpu.page_aligned)
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;