Diffstat (limited to 'arch/hexagon/kernel')
-rw-r--r--  arch/hexagon/kernel/Makefile       |   2
-rw-r--r--  arch/hexagon/kernel/asm-offsets.c  |   5
-rw-r--r--  arch/hexagon/kernel/dma.c          |  27
-rw-r--r--  arch/hexagon/kernel/head.S         | 107
-rw-r--r--  arch/hexagon/kernel/kgdb.c         |   4
-rw-r--r--  arch/hexagon/kernel/process.c      |  42
-rw-r--r--  arch/hexagon/kernel/ptrace.c       |  26
-rw-r--r--  arch/hexagon/kernel/setup.c        |   9
-rw-r--r--  arch/hexagon/kernel/signal.c       |  45
-rw-r--r--  arch/hexagon/kernel/topology.c     |  52
-rw-r--r--  arch/hexagon/kernel/traps.c        |  36
-rw-r--r--  arch/hexagon/kernel/vm_entry.S     | 282
-rw-r--r--  arch/hexagon/kernel/vm_events.c    |   4
-rw-r--r--  arch/hexagon/kernel/vm_vectors.S   |   4
-rw-r--r--  arch/hexagon/kernel/vmlinux.lds.S  |  12
15 files changed, 439 insertions(+), 218 deletions(-)
diff --git a/arch/hexagon/kernel/Makefile b/arch/hexagon/kernel/Makefile
index 6c19501b487c..29fc933a7722 100644
--- a/arch/hexagon/kernel/Makefile
+++ b/arch/hexagon/kernel/Makefile
@@ -1,6 +1,6 @@
extra-y := head.o vmlinux.lds
-obj-$(CONFIG_SMP) += smp.o topology.o
+obj-$(CONFIG_SMP) += smp.o
obj-y += setup.o irq_cpu.o traps.o syscalltab.o signal.o time.o
obj-y += process.o trampoline.o reset.o ptrace.o vdso.o
diff --git a/arch/hexagon/kernel/asm-offsets.c b/arch/hexagon/kernel/asm-offsets.c
index 2d5e84d3b00d..308be68d4fb3 100644
--- a/arch/hexagon/kernel/asm-offsets.c
+++ b/arch/hexagon/kernel/asm-offsets.c
@@ -5,7 +5,7 @@
* Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc.
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -44,7 +44,8 @@ int main(void)
COMMENT("Hexagon pt_regs definitions");
OFFSET(_PT_SYSCALL_NR, pt_regs, syscall_nr);
- OFFSET(_PT_UGPGP, pt_regs, ugpgp);
+ OFFSET(_PT_GPUGP, pt_regs, gpugp);
+ OFFSET(_PT_CS1CS0, pt_regs, cs1cs0);
OFFSET(_PT_R3130, pt_regs, r3130);
OFFSET(_PT_R2928, pt_regs, r2928);
OFFSET(_PT_R2726, pt_regs, r2726);
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index 65c7bdcf565e..b74f9bae31a3 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -1,7 +1,7 @@
/*
* DMA implementation for Hexagon
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,12 +23,18 @@
#include <linux/genalloc.h>
#include <asm/dma-mapping.h>
#include <linux/module.h>
+#include <asm/page.h>
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
int bad_dma_address; /* globals are automatically initialized to zero */
+static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
+{
+ return phys_to_virt((unsigned long) dma_addr);
+}
+
int dma_supported(struct device *dev, u64 mask)
{
if (mask == DMA_BIT_MASK(32))
@@ -60,6 +66,12 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
{
void *ret;
+ /*
+ * Our max_low_pfn should have been backed off by 16MB in
+ * mm/init.c to create DMA coherent space. Use that as the VA
+ * for the pool.
+ */
+
if (coherent_pool == NULL) {
coherent_pool = gen_pool_create(PAGE_SHIFT, -1);
@@ -67,7 +79,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
panic("Can't create %s() memory pool!", __func__);
else
gen_pool_add(coherent_pool,
- (PAGE_OFFSET + (max_low_pfn << PAGE_SHIFT)),
+ pfn_to_virt(max_low_pfn),
hexagon_coherent_pool_size, -1);
}
@@ -75,7 +87,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
if (ret) {
memset(ret, 0, size);
- *dma_addr = (dma_addr_t) (ret - PAGE_OFFSET);
+ *dma_addr = (dma_addr_t) virt_to_phys(ret);
} else
*dma_addr = ~0;
@@ -118,8 +130,8 @@ static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
s->dma_length = s->length;
- flush_dcache_range(PAGE_OFFSET + s->dma_address,
- PAGE_OFFSET + s->dma_address + s->length);
+ flush_dcache_range(dma_addr_to_virt(s->dma_address),
+ dma_addr_to_virt(s->dma_address + s->length));
}
return nents;
@@ -149,11 +161,6 @@ static inline void dma_sync(void *addr, size_t size,
}
}
-static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
-{
- return phys_to_virt((unsigned long) dma_addr);
-}
-
/**
* hexagon_map_page() - maps an address for device DMA
* @dev: pointer to DMA device
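The dma.c changes above swap open-coded "address - PAGE_OFFSET" arithmetic for the generic virt_to_phys()/phys_to_virt()/pfn_to_virt() helpers. As a rough sketch only (assuming the usual Hexagon page.h definitions, where the kernel linear map is offset by PHYS_OFFSET once CONFIG_HEXAGON_PHYS_OFFSET comes into play), the equivalence being relied on is approximately:

    /* Sketch, not kernel source: how a kernel linear-map virtual address
     * and the DMA (physical/bus) address relate. With a nonzero
     * PHYS_OFFSET the old "ret - PAGE_OFFSET" computation would be wrong,
     * which is what the helpers above account for.
     */
    static inline unsigned long sketch_virt_to_dma(void *vaddr)
    {
            return (unsigned long)vaddr - PAGE_OFFSET + PHYS_OFFSET;
    }

    static inline void *sketch_dma_to_virt(unsigned long dma_addr)
    {
            return (void *)(dma_addr - PHYS_OFFSET + PAGE_OFFSET);
    }
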
diff --git a/arch/hexagon/kernel/head.S b/arch/hexagon/kernel/head.S
index d859402c73ba..b9b63d085db2 100644
--- a/arch/hexagon/kernel/head.S
+++ b/arch/hexagon/kernel/head.S
@@ -1,7 +1,7 @@
/*
* Early kernel startup code for Hexagon
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
*
* This program is free software; you can redistribute it and/or modify
@@ -25,6 +25,9 @@
#include <asm/mem-layout.h>
#include <asm/vm_mmu.h>
#include <asm/page.h>
+#include <asm/hexagon_vm.h>
+
+#define SEGTABLE_ENTRIES #0x0e0
__INIT
ENTRY(stext)
@@ -43,40 +46,93 @@ ENTRY(stext)
* Symbol is kernel segment address, but we need
* the logical/physical address.
*/
- r24 = asl(r24, #2)
- r24 = lsr(r24, #2)
+ r25 = pc;
+ r2.h = #0xffc0;
+ r2.l = #0x0000;
+ r25 = and(r2,r25); /* R25 holds PHYS_OFFSET now */
+ r1.h = #HI(PAGE_OFFSET);
+ r1.l = #LO(PAGE_OFFSET);
+ r24 = sub(r24,r1); /* swapper_pg_dir - PAGE_OFFSET */
+ r24 = add(r24,r25); /* + PHYS_OFFSET */
- r0 = r24
+ r0 = r24; /* aka __pa(swapper_pg_dir) */
/*
- * Initialize a 16MB PTE to make the virtual and physical
+ * Initialize page dir to make the virtual and physical
* addresses where the kernel was loaded be identical.
+ * Done in 4MB chunks.
*/
#define PTE_BITS ( __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
| __HEXAGON_C_WB_L2 << 6 \
| __HVM_PDE_S_4MB)
- r1 = pc
- r2.H = #0xffc0
- r2.L = #0x0000
- r1 = and(r1,r2) /* round PC to 4MB boundary */
+ /*
+ * Get number of VA=PA entries; only really needed for jump
+ * to hyperspace; gets blown away immediately after
+ */
+
+ {
+ r1.l = #LO(_end);
+ r2.l = #LO(stext);
+ r3 = #1;
+ }
+ {
+ r1.h = #HI(_end);
+ r2.h = #HI(stext);
+ r3 = asl(r3, #22);
+ }
+ {
+ r1 = sub(r1, r2);
+ r3 = add(r3, #-1);
+ } /* r1 = _end - stext */
+ r1 = add(r1, r3); /* + (4M-1) */
+ r26 = lsr(r1, #22); /* / 4M = # of entries */
+
+ r1 = r25;
+ r2.h = #0xffc0;
+ r2.l = #0x0000; /* round back down to 4MB boundary */
+ r1 = and(r1,r2);
r2 = lsr(r1, #22) /* 4MB page number */
r2 = asl(r2, #2) /* times sizeof(PTE) (4bytes) */
r0 = add(r0,r2) /* r0 = address of correct PTE */
r2 = #PTE_BITS
r1 = add(r1,r2) /* r1 = 4MB PTE for the first entry */
r2.h = #0x0040
- r2.l = #0x0000 /* 4MB */
- memw(r0 ++ #4) = r1
- r1 = add(r1, r2)
+ r2.l = #0x0000 /* 4MB increments */
+ loop0(1f,r26);
+1:
memw(r0 ++ #4) = r1
+ { r1 = add(r1, r2); } :endloop0
+
+ /* Also need to overwrite the initial 0xc0000000 entries */
+ /* PAGE_OFFSET >> (4MB shift - 4 bytes per entry shift) */
+ R1.H = #HI(PAGE_OFFSET >> (22 - 2))
+ R1.L = #LO(PAGE_OFFSET >> (22 - 2))
+
+ r0 = add(r1, r24); /* advance to 0xc0000000 entry */
+ r1 = r25;
+ r2.h = #0xffc0;
+ r2.l = #0x0000; /* round back down to 4MB boundary */
+ r1 = and(r1,r2); /* for huge page */
+ r2 = #PTE_BITS
+ r1 = add(r1,r2);
+ r2.h = #0x0040
+ r2.l = #0x0000 /* 4MB increments */
- r0 = r24
+ loop0(1f,SEGTABLE_ENTRIES);
+1:
+ memw(r0 ++ #4) = r1;
+ { r1 = add(r1,r2); } :endloop0
+
+ r0 = r24;
/*
* The subroutine wrapper around the virtual instruction touches
* no memory, so we should be able to use it even here.
+ * Note that in this version, R1 and R2 get "clobbered"; see
+ * vm_ops.S
*/
+ r1 = #VM_TRANS_TYPE_TABLE
call __vmnewmap;
/* Jump into virtual address range. */
@@ -90,17 +146,29 @@ ENTRY(stext)
__head_s_vaddr_target:
/*
* Tear down VA=PA translation now that we are running
- * in the desgnated kernel segments.
+ * in kernel virtual space.
*/
r0 = #__HVM_PDE_S_INVALID
- r1 = r24
- loop0(1f,#0x100)
+
+ r1.h = #0xffc0;
+ r1.l = #0x0000;
+ r2 = r25; /* phys_offset */
+ r2 = and(r1,r2);
+
+ r1.l = #lo(swapper_pg_dir)
+ r1.h = #hi(swapper_pg_dir)
+ r2 = lsr(r2, #22) /* 4MB page number */
+ r2 = asl(r2, #2) /* times sizeof(PTE) (4bytes) */
+ r1 = add(r1,r2);
+ loop0(1f,r26)
+
1:
{
memw(R1 ++ #4) = R0
}:endloop0
r0 = r24
+ r1 = #VM_TRANS_TYPE_TABLE
call __vmnewmap
/* Go ahead and install the trap0 return so angel calls work */
@@ -143,6 +211,13 @@ __head_s_vaddr_target:
r2 = sub(r2,r0);
call memset;
+ /* Set PHYS_OFFSET; should be in R25 */
+#ifdef CONFIG_HEXAGON_PHYS_OFFSET
+ r0.l = #LO(__phys_offset);
+ r0.h = #HI(__phys_offset);
+ memw(r0) = r25;
+#endif
+
/* Time to make the doughnuts. */
call start_kernel
diff --git a/arch/hexagon/kernel/kgdb.c b/arch/hexagon/kernel/kgdb.c
index 344645370646..82d5c2593323 100644
--- a/arch/hexagon/kernel/kgdb.c
+++ b/arch/hexagon/kernel/kgdb.c
@@ -1,7 +1,7 @@
/*
* arch/hexagon/kernel/kgdb.c - Hexagon KGDB Support
*
- * Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -70,6 +70,8 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
{ "lc1", GDB_SIZEOF_REG, offsetof(struct pt_regs, lc1)},
{ " gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
{ "ugp", GDB_SIZEOF_REG, offsetof(struct pt_regs, ugp)},
+ { "cs0", GDB_SIZEOF_REG, offsetof(struct pt_regs, cs0)},
+ { "cs1", GDB_SIZEOF_REG, offsetof(struct pt_regs, cs1)},
{ "psp", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmpsp)},
{ "elr", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmel)},
{ "est", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmest)},
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index 9b948c619a03..0a0dd5c05b46 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -24,6 +24,7 @@
#include <linux/tick.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/tracehook.h>
/*
* Program thread launch. Often defined as a macro in processor.h,
@@ -95,7 +96,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
/* r24 <- fn, r25 <- arg */
- ss->r2524 = usp | ((u64)arg << 32);
+ ss->r24 = usp;
+ ss->r25 = arg;
pt_set_kmode(childregs);
return 0;
}
@@ -185,3 +187,41 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
return 0;
}
+
+
+/*
+ * Called on the exit path of event entry; see vm_entry.S
+ *
+ * Interrupts will already be disabled.
+ *
+ * Returns 0 if there's no need to re-check for more work.
+ */
+
+int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
+{
+ if (!(thread_info_flags & _TIF_WORK_MASK)) {
+ return 0;
+ } /* shortcut -- no work to be done */
+
+ local_irq_enable();
+
+ if (thread_info_flags & _TIF_NEED_RESCHED) {
+ schedule();
+ return 1;
+ }
+
+ if (thread_info_flags & _TIF_SIGPENDING) {
+ do_signal(regs);
+ return 1;
+ }
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ return 1;
+ }
+
+ /* Should not even reach here */
+ panic("%s: bad thread_info flags 0x%08x\n", __func__,
+ thread_info_flags);
+}
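The new do_work_pending() above pairs with the reworked return path in vm_entry.S further down: the assembly re-disables interrupts and calls it again for as long as it returns nonzero. A rough C rendering of that loop, given only as a sketch of the calling contract (not code from this patch):

    /* Sketch of the check_work_pending loop in vm_entry.S: interrupts are
     * disabled before each call, the current thread_info flags are
     * re-read every iteration, and the loop exits once do_work_pending()
     * returns 0 (no more work).
     */
    do {
            local_irq_disable();
    } while (do_work_pending(regs, current_thread_info()->flags));
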
diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c
index 670b1b0bee63..de829eb7f185 100644
--- a/arch/hexagon/kernel/ptrace.c
+++ b/arch/hexagon/kernel/ptrace.c
@@ -1,7 +1,7 @@
/*
* Ptrace support for Hexagon
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -32,6 +32,21 @@
#include <asm/user.h>
+#if arch_has_single_step()
+/* Both called from ptrace_resume */
+void user_enable_single_step(struct task_struct *child)
+{
+ pt_set_singlestep(task_pt_regs(child));
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ pt_clr_singlestep(task_pt_regs(child));
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+#endif
+
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
@@ -76,6 +91,10 @@ static int genregs_get(struct task_struct *target,
dummy = pt_cause(regs);
ONEXT(&dummy, cause);
ONEXT(&pt_badva(regs), badva);
+#if CONFIG_HEXAGON_ARCH_VERSION >=4
+ ONEXT(&regs->cs0, cs0);
+ ONEXT(&regs->cs1, cs1);
+#endif
/* Pad the rest with zeros, if needed */
if (!ret)
@@ -123,6 +142,11 @@ static int genregs_set(struct task_struct *target,
INEXT(&bucket, cause);
INEXT(&bucket, badva);
+#if CONFIG_HEXAGON_ARCH_VERSION >=4
+ INEXT(&regs->cs0, cs0);
+ INEXT(&regs->cs1, cs1);
+#endif
+
/* Ignore the rest, if needed */
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
index 94a387835008..bfe13311d70d 100644
--- a/arch/hexagon/kernel/setup.c
+++ b/arch/hexagon/kernel/setup.c
@@ -1,7 +1,7 @@
/*
* Arch related setup for Hexagon
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -68,6 +68,8 @@ void __init setup_arch(char **cmdline_p)
*/
__vmsetvec(_K_VM_event_vector);
+ printk(KERN_INFO "PHYS_OFFSET=0x%08x\n", PHYS_OFFSET);
+
/*
* Simulator has a few differences from the hardware.
* For now, check uninitialized-but-mapped memory
@@ -128,6 +130,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
{
int cpu = (unsigned long) v - 1;
+#ifdef CONFIG_SMP
+ if (!cpu_online(cpu))
+ return 0;
+#endif
+
seq_printf(m, "processor\t: %d\n", cpu);
seq_printf(m, "model name\t: Hexagon Virtual Machine\n");
seq_printf(m, "BogoMips\t: %lu.%02lu\n",
diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c
index 60fa2ca3202b..d7c73874b515 100644
--- a/arch/hexagon/kernel/signal.c
+++ b/arch/hexagon/kernel/signal.c
@@ -1,7 +1,7 @@
/*
* Signal support for Hexagon processor
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -41,6 +41,10 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
{
unsigned long sp = regs->r29;
+ /* check if we would overflow the alt stack */
+ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
+ return (void __user __force *)-1UL;
+
/* Switch to signal stack if appropriate */
if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0))
sp = current->sas_ss_sp + current->sas_ss_size;
@@ -66,7 +70,10 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
err |= __put_user(regs->preds, &sc->sc_regs.p3_0);
err |= __put_user(regs->gp, &sc->sc_regs.gp);
err |= __put_user(regs->ugp, &sc->sc_regs.ugp);
-
+#if CONFIG_HEXAGON_ARCH_VERSION >= 4
+ err |= __put_user(regs->cs0, &sc->sc_regs.cs0);
+ err |= __put_user(regs->cs1, &sc->sc_regs.cs1);
+#endif
tmp = pt_elr(regs); err |= __put_user(tmp, &sc->sc_regs.pc);
tmp = pt_cause(regs); err |= __put_user(tmp, &sc->sc_regs.cause);
tmp = pt_badva(regs); err |= __put_user(tmp, &sc->sc_regs.badva);
@@ -93,7 +100,10 @@ static int restore_sigcontext(struct pt_regs *regs,
err |= __get_user(regs->preds, &sc->sc_regs.p3_0);
err |= __get_user(regs->gp, &sc->sc_regs.gp);
err |= __get_user(regs->ugp, &sc->sc_regs.ugp);
-
+#if CONFIG_HEXAGON_ARCH_VERSION >= 4
+ err |= __get_user(regs->cs0, &sc->sc_regs.cs0);
+ err |= __get_user(regs->cs1, &sc->sc_regs.cs1);
+#endif
err |= __get_user(tmp, &sc->sc_regs.pc); pt_set_elr(regs, tmp);
return err;
@@ -193,7 +203,7 @@ static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
/*
* Called from return-from-event code.
*/
-static void do_signal(struct pt_regs *regs)
+void do_signal(struct pt_regs *regs)
{
struct k_sigaction sigact;
siginfo_t info;
@@ -210,8 +220,9 @@ static void do_signal(struct pt_regs *regs)
}
/*
- * If we came from a system call, handle the restart.
+ * No (more) signals; if we came from a system call, handle the restart.
*/
+
if (regs->syscall_nr >= 0) {
switch (regs->r00) {
case -ERESTARTNOHAND:
@@ -234,17 +245,6 @@ no_restart:
restore_saved_sigmask();
}
-void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
-{
- if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(regs);
-
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- }
-}
-
/*
* Architecture-specific wrappers for signal-related system calls
*/
@@ -272,21 +272,12 @@ asmlinkage int sys_rt_sigreturn(void)
/* Restore the user's stack as well */
pt_psp(regs) = regs->r29;
- /*
- * Leave a trace in the stack frame that this was a sigreturn.
- * If the system call is to replay, we've already restored the
- * number in the GPR slot and it will be regenerated on the
- * new system call trap entry. Note that if restore_sigcontext()
- * did something other than a bulk copy of the pt_regs struct,
- * we could avoid this assignment by simply not overwriting
- * regs->syscall_nr.
- */
- regs->syscall_nr = __NR_rt_sigreturn;
+ regs->syscall_nr = -1;
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
- return 0;
+ return regs->r00;
badframe:
force_sig(SIGSEGV, current);
diff --git a/arch/hexagon/kernel/topology.c b/arch/hexagon/kernel/topology.c
deleted file mode 100644
index 352f27e809fd..000000000000
--- a/arch/hexagon/kernel/topology.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * CPU topology for Hexagon
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
-#include <linux/init.h>
-#include <linux/node.h>
-#include <linux/nodemask.h>
-#include <linux/percpu.h>
-
-/* Swiped from MIPS. */
-
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
-
-static int __init topology_init(void)
-{
- int i, ret;
-
- for_each_present_cpu(i) {
-
- /*
- * register_cpu takes a per_cpu pointer and
- * just points it at another per_cpu struct...
- */
-
- ret = register_cpu(&per_cpu(cpu_devices, i), i);
- if (ret)
- printk(KERN_WARNING "topology_init: register_cpu %d "
- "failed (%d)\n", i, ret);
- }
-
- return 0;
-}
-
-subsys_initcall(topology_init);
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index cc2171b2aa04..7858663352b9 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -1,7 +1,7 @@
/*
* Kernel traps/events for Hexagon processor
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -65,6 +65,10 @@ static const char *ex_name(int ex)
return "Write protection fault";
case HVM_GE_C_XMAL:
return "Misaligned instruction";
+ case HVM_GE_C_WREG:
+ return "Multiple writes to same register in packet";
+ case HVM_GE_C_PCAL:
+ return "Program counter values that are not properly aligned";
case HVM_GE_C_RMAL:
return "Misaligned data load";
case HVM_GE_C_WMAL:
@@ -316,6 +320,12 @@ void do_genex(struct pt_regs *regs)
case HVM_GE_C_XMAL:
misaligned_instruction(regs);
break;
+ case HVM_GE_C_WREG:
+ illegal_instruction(regs);
+ break;
+ case HVM_GE_C_PCAL:
+ misaligned_instruction(regs);
+ break;
case HVM_GE_C_RMAL:
misaligned_data_load(regs);
break;
@@ -348,7 +358,6 @@ long sys_syscall(void)
void do_trap0(struct pt_regs *regs)
{
- unsigned long syscallret = 0;
syscall_fn syscall;
switch (pt_cause(regs)) {
@@ -388,21 +397,11 @@ void do_trap0(struct pt_regs *regs)
} else {
syscall = (syscall_fn)
(sys_call_table[regs->syscall_nr]);
- syscallret = syscall(regs->r00, regs->r01,
+ regs->r00 = syscall(regs->r00, regs->r01,
regs->r02, regs->r03,
regs->r04, regs->r05);
}
- /*
- * If it was a sigreturn system call, don't overwrite
- * r0 value in stack frame with return value.
- *
- * __NR_sigreturn doesn't seem to exist in new unistd.h
- */
-
- if (regs->syscall_nr != __NR_rt_sigreturn)
- regs->r00 = syscallret;
-
/* allow strace to get the syscall return state */
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE)))
tracehook_report_syscall_exit(regs, 0);
@@ -444,3 +443,14 @@ void do_machcheck(struct pt_regs *regs)
/* Halt and catch fire */
__vmstop();
}
+
+/*
+ * Treat this like the old 0xdb trap.
+ */
+
+void do_debug_exception(struct pt_regs *regs)
+{
+ regs->hvmer.vmest &= ~HVM_VMEST_CAUSE_MSK;
+ regs->hvmer.vmest |= (TRAP_DEBUG << HVM_VMEST_CAUSE_SFT);
+ do_trap0(regs);
+}
diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S
index 425e50c694f7..e3086185fc9f 100644
--- a/arch/hexagon/kernel/vm_entry.S
+++ b/arch/hexagon/kernel/vm_entry.S
@@ -1,7 +1,7 @@
/*
* Event entry/exit for Hexagon
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -45,48 +45,88 @@
* number in the case where we decode a system call (trap0(#1)).
*/
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define save_pt_regs()\
- memd(R0 + #_PT_R3130) = R31:30; \
+ memd(R0 + #_PT_R3130) = R31:30; \
+ { memw(R0 + #_PT_R2928) = R28; \
+ R31 = memw(R0 + #_PT_ER_VMPSP); }\
+ { memw(R0 + #(_PT_R2928 + 4)) = R31; \
+ R31 = ugp; } \
+ { memd(R0 + #_PT_R2726) = R27:26; \
+ R30 = gp ; } \
+ memd(R0 + #_PT_R2524) = R25:24; \
+ memd(R0 + #_PT_R2322) = R23:22; \
+ memd(R0 + #_PT_R2120) = R21:20; \
+ memd(R0 + #_PT_R1918) = R19:18; \
+ memd(R0 + #_PT_R1716) = R17:16; \
+ memd(R0 + #_PT_R1514) = R15:14; \
+ memd(R0 + #_PT_R1312) = R13:12; \
+ { memd(R0 + #_PT_R1110) = R11:10; \
+ R15 = lc0; } \
+ { memd(R0 + #_PT_R0908) = R9:8; \
+ R14 = sa0; } \
+ { memd(R0 + #_PT_R0706) = R7:6; \
+ R13 = lc1; } \
+ { memd(R0 + #_PT_R0504) = R5:4; \
+ R12 = sa1; } \
+ { memd(R0 + #_PT_GPUGP) = R31:30; \
+ R11 = m1; \
+ R2.H = #HI(_THREAD_SIZE); } \
+ { memd(R0 + #_PT_LC0SA0) = R15:14; \
+ R10 = m0; \
+ R2.L = #LO(_THREAD_SIZE); } \
+ { memd(R0 + #_PT_LC1SA1) = R13:12; \
+ R15 = p3:0; \
+ R2 = neg(R2); } \
+ { memd(R0 + #_PT_M1M0) = R11:10; \
+ R14 = usr; \
+ R2 = and(R0,R2); } \
+ { memd(R0 + #_PT_PREDSUSR) = R15:14; \
+ THREADINFO_REG = R2; } \
+ { r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
+ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
+ R2 = #-1; } \
+ { memw(R0 + #_PT_SYSCALL_NR) = R2; \
+ R30 = #0; }
+#else
+/* V4+ */
+/* the # ## # syntax inserts a literal ## */
+#define save_pt_regs()\
+ { memd(R0 + #_PT_R3130) = R31:30; \
+ R30 = memw(R0 + #_PT_ER_VMPSP); }\
{ memw(R0 + #_PT_R2928) = R28; \
- R31 = memw(R0 + #_PT_ER_VMPSP); }\
- { memw(R0 + #(_PT_R2928 + 4)) = R31; \
- R31 = ugp; } \
- { memd(R0 + #_PT_R2726) = R27:26; \
- R30 = gp ; } \
- memd(R0 + #_PT_R2524) = R25:24; \
- memd(R0 + #_PT_R2322) = R23:22; \
- memd(R0 + #_PT_R2120) = R21:20; \
- memd(R0 + #_PT_R1918) = R19:18; \
- memd(R0 + #_PT_R1716) = R17:16; \
- memd(R0 + #_PT_R1514) = R15:14; \
- memd(R0 + #_PT_R1312) = R13:12; \
+ memw(R0 + #(_PT_R2928 + 4)) = R30; }\
+ { R31:30 = C11:10; \
+ memd(R0 + #_PT_R2726) = R27:26; \
+ memd(R0 + #_PT_R2524) = R25:24; }\
+ { memd(R0 + #_PT_R2322) = R23:22; \
+ memd(R0 + #_PT_R2120) = R21:20; }\
+ { memd(R0 + #_PT_R1918) = R19:18; \
+ memd(R0 + #_PT_R1716) = R17:16; }\
+ { memd(R0 + #_PT_R1514) = R15:14; \
+ memd(R0 + #_PT_R1312) = R13:12; \
+ R17:16 = C13:12; }\
{ memd(R0 + #_PT_R1110) = R11:10; \
- R15 = lc0; } \
- { memd(R0 + #_PT_R0908) = R9:8; \
- R14 = sa0; } \
+ memd(R0 + #_PT_R0908) = R9:8; \
+ R15:14 = C1:0; } \
{ memd(R0 + #_PT_R0706) = R7:6; \
- R13 = lc1; } \
- { memd(R0 + #_PT_R0504) = R5:4; \
- R12 = sa1; } \
- { memd(R0 + #_PT_UGPGP) = R31:30; \
- R11 = m1; \
- R2.H = #HI(_THREAD_SIZE); } \
- { memd(R0 + #_PT_LC0SA0) = R15:14; \
- R10 = m0; \
- R2.L = #LO(_THREAD_SIZE); } \
- { memd(R0 + #_PT_LC1SA1) = R13:12; \
- R15 = p3:0; \
- R2 = neg(R2); } \
+ memd(R0 + #_PT_R0504) = R5:4; \
+ R13:12 = C3:2; } \
+ { memd(R0 + #_PT_GPUGP) = R31:30; \
+ memd(R0 + #_PT_LC0SA0) = R15:14; \
+ R11:10 = C7:6; }\
+ { THREADINFO_REG = and(R0, # ## #-_THREAD_SIZE); \
+ memd(R0 + #_PT_LC1SA1) = R13:12; \
+ R15 = p3:0; }\
{ memd(R0 + #_PT_M1M0) = R11:10; \
- R14 = usr; \
- R2 = and(R0,R2); } \
- { memd(R0 + #_PT_PREDSUSR) = R15:14; \
- THREADINFO_REG = R2; } \
+ memw(R0 + #_PT_PREDSUSR + 4) = R15; }\
{ r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
R2 = #-1; } \
{ memw(R0 + #_PT_SYSCALL_NR) = R2; \
+ memd(R0 + #_PT_CS1CS0) = R17:16; \
R30 = #0; }
+#endif
/*
* Restore registers and thread_info.regs state. THREADINFO_REG
@@ -94,6 +134,7 @@
* preserved. Don't restore R29 (SP) until later.
*/
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define restore_pt_regs() \
{ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
R15:14 = memd(R0 + #_PT_PREDSUSR); } \
@@ -121,11 +162,44 @@
R23:22 = memd(R0 + #_PT_R2322); } \
{ R25:24 = memd(R0 + #_PT_R2524); \
R27:26 = memd(R0 + #_PT_R2726); } \
- R31:30 = memd(R0 + #_PT_UGPGP); \
+ R31:30 = memd(R0 + #_PT_GPUGP); \
{ R28 = memw(R0 + #_PT_R2928); \
ugp = R31; } \
{ R31:30 = memd(R0 + #_PT_R3130); \
gp = R30; }
+#else
+/* V4+ */
+#define restore_pt_regs() \
+ { memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
+ R15:14 = memd(R0 + #_PT_PREDSUSR); } \
+ { R11:10 = memd(R0 + #_PT_M1M0); \
+ R13:12 = memd(R0 + #_PT_LC1SA1); \
+ p3:0 = R15; } \
+ { R15:14 = memd(R0 + #_PT_LC0SA0); \
+ R3:2 = memd(R0 + #_PT_R0302); \
+ usr = R14; } \
+ { R5:4 = memd(R0 + #_PT_R0504); \
+ R7:6 = memd(R0 + #_PT_R0706); \
+ C7:6 = R11:10; }\
+ { R9:8 = memd(R0 + #_PT_R0908); \
+ R11:10 = memd(R0 + #_PT_R1110); \
+ C3:2 = R13:12; }\
+ { R13:12 = memd(R0 + #_PT_R1312); \
+ R15:14 = memd(R0 + #_PT_R1514); \
+ C1:0 = R15:14; }\
+ { R17:16 = memd(R0 + #_PT_R1716); \
+ R19:18 = memd(R0 + #_PT_R1918); } \
+ { R21:20 = memd(R0 + #_PT_R2120); \
+ R23:22 = memd(R0 + #_PT_R2322); } \
+ { R25:24 = memd(R0 + #_PT_R2524); \
+ R27:26 = memd(R0 + #_PT_R2726); } \
+ R31:30 = memd(R0 + #_PT_CS1CS0); \
+ { C13:12 = R31:30; \
+ R31:30 = memd(R0 + #_PT_GPUGP) ; \
+ R28 = memw(R0 + #_PT_R2928); }\
+ { C11:10 = R31:30; \
+ R31:30 = memd(R0 + #_PT_R3130); }
+#endif
/*
* Clears off enough space for the rest of pt_regs; evrec is a part
@@ -139,6 +213,7 @@
* Need to save off R0, R1, R2, R3 immediately.
*/
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define vm_event_entry(CHandler) \
{ \
R29 = add(R29, #-(_PT_REGS_SIZE)); \
@@ -158,6 +233,34 @@
R1.H = #HI(CHandler); \
jump event_dispatch; \
}
+#else
+/* V4+ */
+/* turn on I$ prefetch early */
+/* the # ## # syntax inserts a literal ## */
+#define vm_event_entry(CHandler) \
+ { \
+ R29 = add(R29, #-(_PT_REGS_SIZE)); \
+ memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
+ memd(R29 + #(_PT_R0302 + -_PT_REGS_SIZE)) = R3:2; \
+ R0 = usr; \
+ } \
+ { \
+ memw(R29 + #_PT_PREDSUSR) = R0; \
+ R0 = setbit(R0, #16); \
+ } \
+ usr = R0; \
+ R1:0 = G1:0; \
+ { \
+ memd(R29 + #_PT_ER_VMEL) = R1:0; \
+ R1 = # ## #(CHandler); \
+ R3:2 = G3:2; \
+ } \
+ { \
+ R0 = R29; \
+ memd(R29 + #_PT_ER_VMPSP) = R3:2; \
+ jump event_dispatch; \
+ }
+#endif
.text
/*
@@ -171,6 +274,9 @@ event_dispatch:
callr r1
/*
+ * Coming back from the C-world, our thread info pointer
+ * should be in the designated register (usually R19)
+ *
* If we were in kernel mode, we don't need to check scheduler
* or signals if CONFIG_PREEMPT is not set. If set, then it has
* to jump to a need_resched kind of block.
@@ -183,69 +289,68 @@ event_dispatch:
#endif
/* "Nested control path" -- if the previous mode was kernel */
- R0 = memw(R29 + #_PT_ER_VMEST);
- P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
- if !P0 jump restore_all;
- /*
- * Returning from system call, normally coming back from user mode
- */
-return_from_syscall:
- /* Disable interrupts while checking TIF */
- R0 = #VM_INT_DISABLE
- trap1(#HVM_TRAP1_VMSETIE)
-
- /*
- * Coming back from the C-world, our thread info pointer
- * should be in the designated register (usually R19)
- */
- R1.L = #LO(_TIF_ALLWORK_MASK)
{
- R1.H = #HI(_TIF_ALLWORK_MASK);
- R0 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
+ R0 = memw(R29 + #_PT_ER_VMEST);
+ R16.L = #LO(do_work_pending);
+ }
+ {
+ P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
+ if (!P0.new) jump:nt restore_all;
+ R16.H = #HI(do_work_pending);
+ R0 = #VM_INT_DISABLE;
}
/*
- * Compare against the "return to userspace" _TIF_WORK_MASK
+ * Check also the return from fork/system call, normally coming back from
+ * user mode
+ *
+ * R16 needs to have do_work_pending, and R0 should have VM_INT_DISABLE
*/
- R1 = and(R1,R0);
- { P0 = cmp.eq(R1,#0); if (!P0.new) jump:t work_pending;}
- jump restore_all; /* we're outta here! */
-work_pending:
+check_work_pending:
+ /* Disable interrupts while checking TIF */
+ trap1(#HVM_TRAP1_VMSETIE)
{
- P0 = tstbit(R1, #TIF_NEED_RESCHED);
- if (!P0.new) jump:nt work_notifysig;
+ R0 = R29; /* regs should still be at top of stack */
+ R1 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
+ callr R16;
}
- call schedule
- jump return_from_syscall; /* check for more work */
-work_notifysig:
- /* this is the part that's kind of fuzzy. */
- R1 = and(R0, #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME));
- P0 = cmp.eq(R1, #0);
- if P0 jump restore_all
- R1 = R0; /* unsigned long thread_info_flags */
- R0 = R29; /* regs should still be at top of stack */
- call do_notify_resume
+ {
+ P0 = cmp.eq(R0, #0); if (!P0.new) jump:nt check_work_pending;
+ R0 = #VM_INT_DISABLE;
+ }
restore_all:
- /* Disable interrupts, if they weren't already, before reg restore. */
- R0 = #VM_INT_DISABLE
+ /*
+ * Disable interrupts, if they weren't already, before reg restore.
+ * R0 gets preloaded with #VM_INT_DISABLE before we get here.
+ */
trap1(#HVM_TRAP1_VMSETIE)
/* do the setregs here for VM 0.5 */
/* R29 here should already be pointing at pt_regs */
- R1:0 = memd(R29 + #_PT_ER_VMEL);
- R3:2 = memd(R29 + #_PT_ER_VMPSP);
+ {
+ R1:0 = memd(R29 + #_PT_ER_VMEL);
+ R3:2 = memd(R29 + #_PT_ER_VMPSP);
+ }
+#if CONFIG_HEXAGON_ARCH_VERSION < 4
trap1(#HVM_TRAP1_VMSETREGS);
+#else
+ G1:0 = R1:0;
+ G3:2 = R3:2;
+#endif
R0 = R29
restore_pt_regs()
- R1:0 = memd(R29 + #_PT_R0100);
- R29 = add(R29, #_PT_REGS_SIZE);
+ {
+ R1:0 = memd(R29 + #_PT_R0100);
+ R29 = add(R29, #_PT_REGS_SIZE);
+ }
trap1(#HVM_TRAP1_VMRTE)
/* Notreached */
+
.globl _K_enter_genex
_K_enter_genex:
vm_event_entry(do_genex)
@@ -262,12 +367,27 @@ _K_enter_trap0:
_K_enter_machcheck:
vm_event_entry(do_machcheck)
+ .globl _K_enter_debug
+_K_enter_debug:
+ vm_event_entry(do_debug_exception)
.globl ret_from_fork
ret_from_fork:
- call schedule_tail
- P0 = cmp.eq(R24, #0);
- if P0 jump return_from_syscall
- R0 = R25;
- callr R24
- jump return_from_syscall
+ {
+ call schedule_tail
+ R16.H = #HI(do_work_pending);
+ }
+ {
+ P0 = cmp.eq(R24, #0);
+ R16.L = #LO(do_work_pending);
+ R0 = #VM_INT_DISABLE;
+ }
+ if P0 jump check_work_pending
+ {
+ R0 = R25;
+ callr R24
+ }
+ {
+ jump check_work_pending
+ R0 = #VM_INT_DISABLE;
+ }
diff --git a/arch/hexagon/kernel/vm_events.c b/arch/hexagon/kernel/vm_events.c
index f337281ebe67..741aaa917cda 100644
--- a/arch/hexagon/kernel/vm_events.c
+++ b/arch/hexagon/kernel/vm_events.c
@@ -1,7 +1,7 @@
/*
* Mostly IRQ support for Hexagon
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -44,6 +44,8 @@ void show_regs(struct pt_regs *regs)
regs->lc1, regs->sa1, regs->m1);
printk(KERN_EMERG "gp: \t0x%08lx ugp: 0x%08lx usr: 0x%08lx\n",
regs->gp, regs->ugp, regs->usr);
+ printk(KERN_EMERG "cs0: \t0x%08lx cs1: 0x%08lx\n",
+ regs->cs0, regs->cs1);
printk(KERN_EMERG "r0: \t0x%08lx %08lx %08lx %08lx\n", regs->r00,
regs->r01,
regs->r02,
diff --git a/arch/hexagon/kernel/vm_vectors.S b/arch/hexagon/kernel/vm_vectors.S
index 620f42cc582a..791a7422dde4 100644
--- a/arch/hexagon/kernel/vm_vectors.S
+++ b/arch/hexagon/kernel/vm_vectors.S
@@ -1,7 +1,7 @@
/*
* Event jump tables
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2012,2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -41,7 +41,7 @@ _K_VM_event_vector:
jump 1b; /* Reset */
jump _K_enter_machcheck;
jump _K_enter_genex;
- jump 1b; /* 3 Rsvd */
+ jump _K_enter_debug;
jump 1b; /* 4 Rsvd */
jump _K_enter_trap0;
jump 1b; /* 6 Rsvd */
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 14e793f6abbf..44d8c47bae2f 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -1,7 +1,7 @@
/*
* Linker script for Hexagon kernel
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,8 +18,6 @@
* 02110-1301, USA.
*/
-#define LOAD_OFFSET PAGE_OFFSET
-
#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h> /* Most of the kernel defines are here */
#include <asm/mem-layout.h> /* except for page_offset */
@@ -36,13 +34,9 @@ See asm-generic/sections.h for seemingly required labels.
#define PAGE_SIZE _PAGE_SIZE
-/* This LOAD_OFFSET is temporary for debugging on the simulator; it may change
- for hypervisor pseudo-physical memory. */
-
-
SECTIONS
{
- . = PAGE_OFFSET + LOAD_ADDRESS;
+ . = PAGE_OFFSET;
__init_begin = .;
HEAD_TEXT_SECTION
@@ -52,7 +46,7 @@ SECTIONS
. = ALIGN(_PAGE_SIZE);
_stext = .;
- .text : AT(ADDR(.text) - LOAD_OFFSET) {
+ .text : AT(ADDR(.text)) {
_text = .;
TEXT_TEXT
SCHED_TEXT