Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/e820.c       35
-rw-r--r--  arch/x86_64/kernel/entry.S       3
-rw-r--r--  arch/x86_64/kernel/head.S        1
-rw-r--r--  arch/x86_64/kernel/init_task.c   5
-rw-r--r--  arch/x86_64/kernel/setup.c       6
-rw-r--r--  arch/x86_64/kernel/setup64.c     3
-rw-r--r--  arch/x86_64/kernel/traps.c      30
7 files changed, 36 insertions(+), 47 deletions(-)
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index e56c2adf57a4..764bf23c7103 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -71,7 +71,11 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
#endif
/* kernel code + 640k memory hole (later should not be needed, but
be paranoid for now) */
- if (last >= 640*1024 && addr < __pa_symbol(&_end)) {
+ if (last >= 640*1024 && addr < 1024*1024) {
+ *addrp = 1024*1024;
+ return 1;
+ }
+ if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) {
*addrp = __pa_symbol(&_end);
return 1;
}
@@ -104,35 +108,6 @@ e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
return 0;
}
-/*
- * This function checks if the entire range <start,end> is mapped with type.
- *
- * Note: this function only works correct if the e820 table is sorted and
- * not-overlapping, which is the case
- */
-int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
-{
- int i;
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- if (type && ei->type != type)
- continue;
- /* is the region (part) in overlap with the current region ?*/
- if (ei->addr >= end || ei->addr + ei->size <= start)
- continue;
-
- /* if the region is at the beginning of <start,end> we move
- * start to the end of the region since it's ok until there
- */
- if (ei->addr <= start)
- start = ei->addr + ei->size;
- /* if start is now at or beyond end, we're done, full coverage */
- if (start >= end)
- return 1; /* we're done */
- }
- return 0;
-}
-
/*
* Find a free area in a specific range.
*/
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 6f810424df44..aa8d8939abc1 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -973,6 +973,8 @@ ENTRY(kernel_thread)
ENDPROC(kernel_thread)
child_rip:
+ pushq $0 # fake return address
+ CFI_STARTPROC
/*
* Here we are in the child and the registers are set as they were
* at kernel_thread() invocation in the parent.
@@ -983,6 +985,7 @@ child_rip:
# exit
xorl %edi, %edi
call do_exit
+ CFI_ENDPROC
ENDPROC(child_rip)
/*
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 6df05e6034fa..c9739ca81d06 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -191,6 +191,7 @@ startup_64:
* jump
*/
movq initial_code(%rip),%rax
+ pushq $0 # fake return address
jmp *%rax
/* SMP bootup changes these two */
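Both assembly hunks (child_rip in entry.S and the jump through initial_code in head.S) push a literal 0 before entering C code, so a later stack walk finds a terminating return address instead of stale data; the CFI_STARTPROC/CFI_ENDPROC pair additionally gives child_rip its own unwind annotations. A rough C illustration of why a zero sentinel helps, with an invented fake_stack array standing in for real stack frames:

#include <stdio.h>

/* Invented stand-in for a stack of saved return addresses; a real
 * unwinder walks actual stack frames, not an array like this. */
static unsigned long fake_stack[] = {
	0xffffffff8010a000UL,	/* pretend callee                  */
	0xffffffff80102340UL,	/* pretend caller                  */
	0,			/* the "pushq $0" sentinel         */
	0xdeadbeefUL,		/* garbage beyond the top of stack */
};

/* A backtrace loop in this style stops cleanly at the zero sentinel
 * instead of wandering into whatever lies past the stack top. */
static void show_fake_trace(void)
{
	for (size_t i = 0; i < sizeof(fake_stack) / sizeof(fake_stack[0]); i++) {
		if (!fake_stack[i])
			break;		/* fake return address: end of trace */
		printf(" [<%016lx>]\n", fake_stack[i]);
	}
}

int main(void)
{
	show_fake_trace();
	return 0;
}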
diff --git a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c
index ce31d904d601..3dc5854ba21e 100644
--- a/arch/x86_64/kernel/init_task.c
+++ b/arch/x86_64/kernel/init_task.c
@@ -46,4 +46,9 @@ EXPORT_SYMBOL(init_task);
*/
DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+/* Copies of the original ist values from the tss are only accessed during
+ * debugging, no special alignment required.
+ */
+DEFINE_PER_CPU(struct orig_ist, orig_ist);
+
#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 8a099ff1f8bc..34afad704824 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -521,8 +521,6 @@ static void discover_ebda(void)
void __init setup_arch(char **cmdline_p)
{
- unsigned long kernel_end;
-
ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
screen_info = SCREEN_INFO;
edid_info = EDID_INFO;
@@ -596,8 +594,8 @@ void __init setup_arch(char **cmdline_p)
(table_end - table_start) << PAGE_SHIFT);
/* reserve kernel */
- kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);
- reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);
+ reserve_bootmem_generic(__pa_symbol(&_text),
+ __pa_symbol(&_end) - __pa_symbol(&_text));
/*
* reserve physical page 0 - it's a special BIOS page on many boxes,
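The setup.c hunk reserves exactly the physical range the kernel image occupies, [__pa_symbol(&_text), __pa_symbol(&_end)), instead of a fixed window from HIGH_MEMORY (1MB) up to the page-rounded end of the image. A tiny arithmetic sketch with invented TEXT_PA/END_PA values showing how the two ranges differ when the image is not sitting right above 1MB:

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define HIGH_MEMORY 0x100000UL			/* 1MB, the old fixed base */

/* Hypothetical physical addresses for _text/_end; the real ones depend
 * on where the image was linked and loaded. */
#define TEXT_PA 0x200000UL
#define END_PA  0x7f4321UL

static unsigned long round_up_to(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);		/* a must be a power of two */
}

int main(void)
{
	/* Old reservation: from 1MB up to the page-rounded end of the image. */
	printf("old: [%#lx, %#lx)\n",
	       HIGH_MEMORY, round_up_to(END_PA, PAGE_SIZE));
	/* New reservation: exactly the range the image actually occupies. */
	printf("new: [%#lx, %#lx)\n", TEXT_PA, END_PA);
	return 0;
}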
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 6fe58a634b5f..417de564456e 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -189,6 +189,7 @@ void __cpuinit cpu_init (void)
{
int cpu = stack_smp_processor_id();
struct tss_struct *t = &per_cpu(init_tss, cpu);
+ struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
unsigned long v;
char *estacks = NULL;
struct task_struct *me;
@@ -256,7 +257,7 @@ void __cpuinit cpu_init (void)
estacks += EXCEPTION_STKSZ;
break;
}
- t->ist[v] = (unsigned long)estacks;
+ orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
}
t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
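The init_task.c and setup64.c hunks introduce a per-CPU orig_ist and fill it in cpu_init() alongside the live TSS entries; the traps.c hunk below then reads per_cpu(orig_ist, cpu).ist[k] when classifying exception stacks. A stripped-down sketch of that "keep a pristine copy for the debug paths" idea, using plain arrays and invented sizes (all fake_* names are made up) rather than real per-CPU data or the actual IST layout:

#include <stdio.h>

#define NR_CPUS   2
#define N_STACKS  7		/* invented count, not the real IST layout */

struct fake_tss  { unsigned long ist[N_STACKS]; };
struct fake_orig { unsigned long ist[N_STACKS]; };

static struct fake_tss  tss[NR_CPUS];		/* may change at runtime   */
static struct fake_orig orig_ist[NR_CPUS];	/* untouched reference copy */

/* Mirrors the setup64.c idea: record the original stack top in both the
 * live TSS slot and the side copy in one assignment. */
static void fake_cpu_init(int cpu)
{
	for (int v = 0; v < N_STACKS; v++) {
		unsigned long estack = 0x100000UL +
				       (cpu * N_STACKS + v + 1) * 0x1000UL;
		orig_ist[cpu].ist[v] = tss[cpu].ist[v] = estack;
	}
}

/* Mirrors the traps.c side: stack classification consults the copy, so it
 * still works even if the live TSS entries were changed after boot. */
static int on_fake_exception_stack(int cpu, unsigned long sp, int v)
{
	unsigned long end = orig_ist[cpu].ist[v];
	return sp < end && sp >= end - 0x1000UL;
}

int main(void)
{
	fake_cpu_init(0);
	tss[0].ist[3] = 0;	/* pretend some code cleared the live entry */
	printf("%d\n", on_fake_exception_stack(0, orig_ist[0].ist[3] - 16, 3));
	return 0;
}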
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 14052f089814..b1249774d1e8 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -107,7 +107,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
}
static int kstack_depth_to_print = 12;
+#ifdef CONFIG_STACK_UNWIND
static int call_trace = 1;
+#else
+#define call_trace (-1)
+#endif
#ifdef CONFIG_KALLSYMS
# include <linux/kallsyms.h>
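With CONFIG_STACK_UNWIND disabled, call_trace is no longer a variable but the constant -1, so the unwinder branches in show_trace() below become dead code and the call_trace= boot option (last hunk of this file) is compiled out as well. The same pattern in miniature, with an invented CONFIG_FAKE_UNWIND macro:

#include <stdio.h>

/* Toggle this invented macro to see the pattern: when the feature is off,
 * the tunable collapses to a compile-time constant. */
/* #define CONFIG_FAKE_UNWIND 1 */

#ifdef CONFIG_FAKE_UNWIND
static int call_trace = 1;	/* runtime-tunable when the unwinder exists */
#else
#define call_trace (-1)		/* constant: unwinder branches become dead  */
#endif

int main(void)
{
	if (call_trace >= 1)
		printf("would use the DWARF2 unwinder\n");
	else
		printf("old-style backtrace only\n");
	return 0;
}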
@@ -174,7 +178,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
break;
#endif
default:
- end = per_cpu(init_tss, cpu).ist[k];
+ end = per_cpu(orig_ist, cpu).ist[k];
break;
}
/*
@@ -274,21 +278,21 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
if (unwind_init_blocked(&info, tsk) == 0)
unw_ret = show_trace_unwind(&info, NULL);
}
- if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
-#ifdef CONFIG_STACK_UNWIND
- unsigned long rip = info.regs.rip;
- print_symbol("DWARF2 unwinder stuck at %s\n", rip);
- if (call_trace == 1) {
- printk("Leftover inexact backtrace:\n");
- stack = (unsigned long *)info.regs.rsp;
- } else if (call_trace > 1)
+ if (unw_ret > 0) {
+ if (call_trace == 1 && !arch_unw_user_mode(&info)) {
+ print_symbol("DWARF2 unwinder stuck at %s\n",
+ UNW_PC(&info));
+ if ((long)UNW_SP(&info) < 0) {
+ printk("Leftover inexact backtrace:\n");
+ stack = (unsigned long *)UNW_SP(&info);
+ } else
+ printk("Full inexact backtrace again:\n");
+ } else if (call_trace >= 1)
return;
else
printk("Full inexact backtrace again:\n");
-#else
+ } else
printk("Inexact backtrace:\n");
-#endif
- }
}
/*
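The reworked block above decides between unwinder output and the old stack scan from three inputs: whether the unwinder produced frames (unw_ret > 0), whether it stopped while still in kernel mode, and the call_trace setting; a negative UNW_SP() value (kernel addresses have the top bit set) additionally lets the rescan restart from the point where the unwinder got stuck. A condensed restatement of that decision as a standalone function (the pick_fallback() name is made up), with plain parameters standing in for the real unwind_frame_info state:

#include <stdio.h>

enum fallback { NONE, LEFTOVER, FULL, INEXACT };

/* Condensed restatement of the branch structure in the hunk above; the
 * arguments stand in for unw_ret, call_trace, arch_unw_user_mode() and
 * UNW_SP(), which are not reproduced here. */
static enum fallback pick_fallback(int unw_ret, int call_trace,
				   int user_mode, long sp)
{
	if (unw_ret > 0) {
		if (call_trace == 1 && !user_mode) {
			/* unwinder got stuck in the kernel: fall back */
			if (sp < 0)
				return LEFTOVER; /* rescan from stuck SP   */
			return FULL;		 /* rescan the whole stack */
		}
		if (call_trace >= 1)
			return NONE;		 /* trust the unwinder alone */
		return FULL;
	}
	return INEXACT;				 /* unwinder produced nothing */
}

int main(void)
{
	printf("%d\n", pick_fallback(1, 1, 0, -42));	/* LEFTOVER */
	printf("%d\n", pick_fallback(0, 1, 0, 0));	/* INEXACT  */
	return 0;
}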
@@ -1120,6 +1124,7 @@ static int __init kstack_setup(char *s)
}
__setup("kstack=", kstack_setup);
+#ifdef CONFIG_STACK_UNWIND
static int __init call_trace_setup(char *s)
{
if (strcmp(s, "old") == 0)
@@ -1133,3 +1138,4 @@ static int __init call_trace_setup(char *s)
return 1;
}
__setup("call_trace=", call_trace_setup);
+#endif