Diffstat (limited to 'arch/i386/power')
-rw-r--r--   arch/i386/power/Makefile |   2
-rw-r--r--   arch/i386/power/cpu.c    | 152
-rw-r--r--   arch/i386/power/swsusp.S |  73
3 files changed, 227 insertions, 0 deletions
diff --git a/arch/i386/power/Makefile b/arch/i386/power/Makefile
new file mode 100644
index 000000000000..8cfa4e8a719d
--- /dev/null
+++ b/arch/i386/power/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_PM) += cpu.o
+obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
new file mode 100644
index 000000000000..cf337c673d92
--- /dev/null
+++ b/arch/i386/power/cpu.c
@@ -0,0 +1,152 @@
+/*
+ * Suspend support specific for i386.
+ *
+ * Distribute under GPLv2
+ *
+ * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
+ * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/sysrq.h>
+#include <linux/proc_fs.h>
+#include <linux/irq.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/suspend.h>
+#include <linux/acpi.h>
+#include <asm/uaccess.h>
+#include <asm/acpi.h>
+#include <asm/tlbflush.h>
+
+static struct saved_context saved_context;
+
+unsigned long saved_context_ebx;
+unsigned long saved_context_esp, saved_context_ebp;
+unsigned long saved_context_esi, saved_context_edi;
+unsigned long saved_context_eflags;
+
+extern void enable_sep_cpu(void *);
+
+void __save_processor_state(struct saved_context *ctxt)
+{
+	kernel_fpu_begin();
+
+	/*
+	 * descriptor tables
+	 */
+	asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
+	asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
+	asm volatile ("sldt %0" : "=m" (ctxt->ldt));
+	asm volatile ("str %0" : "=m" (ctxt->tr));
+
+	/*
+	 * segment registers
+	 */
+	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
+	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
+	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
+	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
+
+	/*
+	 * control registers
+	 */
+	asm volatile ("movl %%cr0, %0" : "=r" (ctxt->cr0));
+	asm volatile ("movl %%cr2, %0" : "=r" (ctxt->cr2));
+	asm volatile ("movl %%cr3, %0" : "=r" (ctxt->cr3));
+	asm volatile ("movl %%cr4, %0" : "=r" (ctxt->cr4));
+}
+
+void save_processor_state(void)
+{
+	__save_processor_state(&saved_context);
+}
+
+static void
+do_fpu_end(void)
+{
+	/* restore FPU regs if necessary */
+	/* Do it out of line so that gcc does not move cr0 load to some stupid place */
+	kernel_fpu_end();
+	mxcsr_feature_mask_init();
+}
+
+
+static void fix_processor_context(void)
+{
+	int cpu = smp_processor_id();
+	struct tss_struct * t = &per_cpu(init_tss, cpu);
+
+	set_tss_desc(cpu, t);	/* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has a concept of a busy TSS or some similar stupidity. */
+	per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TSS].b &= 0xfffffdff;
+
+	load_TR_desc();				/* This does ltr */
+	load_LDT(&current->active_mm->context);	/* This does lldt */
+
+	/*
+	 * Now maybe reload the debug registers
+	 */
+	if (current->thread.debugreg[7]){
+		loaddebug(&current->thread, 0);
+		loaddebug(&current->thread, 1);
+		loaddebug(&current->thread, 2);
+		loaddebug(&current->thread, 3);
+		/* no 4 and 5 */
+		loaddebug(&current->thread, 6);
+		loaddebug(&current->thread, 7);
+	}
+
+}
+
+void __restore_processor_state(struct saved_context *ctxt)
+{
+
+	/*
+	 * control registers
+	 */
+	asm volatile ("movl %0, %%cr4" :: "r" (ctxt->cr4));
+	asm volatile ("movl %0, %%cr3" :: "r" (ctxt->cr3));
+	asm volatile ("movl %0, %%cr2" :: "r" (ctxt->cr2));
+	asm volatile ("movl %0, %%cr0" :: "r" (ctxt->cr0));
+
+	/*
+	 * segment registers
+	 */
+	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
+	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
+	asm volatile ("movw %0, %%gs" :: "r" (ctxt->gs));
+	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
+
+	/*
+	 * now restore the descriptor tables to their proper values
+	 * ltr is done in fix_processor_context().
+	 */
+	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
+	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
+	asm volatile ("lldt %0" :: "m" (ctxt->ldt));
+
+	/*
+	 * sysenter MSRs
+	 */
+	if (boot_cpu_has(X86_FEATURE_SEP))
+		enable_sep_cpu(NULL);
+
+	fix_processor_context();
+	do_fpu_end();
+}
+
+void restore_processor_state(void)
+{
+	__restore_processor_state(&saved_context);
+}
+
+/* Needed by apm.c */
+EXPORT_SYMBOL(save_processor_state);
+EXPORT_SYMBOL(restore_processor_state);
diff --git a/arch/i386/power/swsusp.S b/arch/i386/power/swsusp.S
new file mode 100644
index 000000000000..c4105286ff26
--- /dev/null
+++ b/arch/i386/power/swsusp.S
@@ -0,0 +1,73 @@
+.text
+
+/* Originally gcc generated, modified by hand
+ *
+ * This may not use any stack, nor any variable that is not "NoSave":
+ *
+ * It's rewriting one kernel image with another. What is stack in the "old"
+ * image could very well be a data page in the "new" image, and overwriting
+ * your own stack out from under you is a bad idea.
+ */
+
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/asm_offsets.h>
+
+	.text
+
+ENTRY(swsusp_arch_suspend)
+
+	movl %esp, saved_context_esp
+	movl %ebx, saved_context_ebx
+	movl %ebp, saved_context_ebp
+	movl %esi, saved_context_esi
+	movl %edi, saved_context_edi
+	pushfl ; popl saved_context_eflags
+
+	call swsusp_save
+	ret
+
+ENTRY(swsusp_arch_resume)
+	movl $swsusp_pg_dir-__PAGE_OFFSET, %ecx
+	movl %ecx, %cr3
+
+	movl pagedir_nosave, %edx
+	.p2align 4,,7
+
+copy_loop:
+	testl %edx, %edx
+	jz done
+
+	movl pbe_address(%edx), %esi
+	movl pbe_orig_address(%edx), %edi
+
+	movl $1024, %ecx
+	rep
+	movsl
+
+	movl pbe_next(%edx), %edx
+	jmp copy_loop
+	.p2align 4,,7
+
+done:
+	/* Flush TLB, including "global" things (vmalloc) */
+	movl mmu_cr4_features, %eax
+	movl %eax, %edx
+	andl $~(1<<7), %edx;	# PGE
+	movl %edx, %cr4;	# turn off PGE
+	movl %cr3, %ecx;	# flush TLB
+	movl %ecx, %cr3
+	movl %eax, %cr4;	# turn PGE back on
+
+	movl saved_context_esp, %esp
+	movl saved_context_ebp, %ebp
+	movl saved_context_ebx, %ebx
+	movl saved_context_esi, %esi
+	movl saved_context_edi, %edi
+
+	pushl saved_context_eflags ; popfl
+
+	xorl %eax, %eax
+
+	ret
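
A note on struct saved_context: cpu.c above stores into ctxt->gdt_limit, ctxt->idt_limit, ctxt->ldt, ctxt->tr, the segment selectors, and cr0-cr4, but the structure itself is defined elsewhere in the tree (include/asm-i386/suspend.h). The sketch below shows a layout consistent with those accesses; field widths, ordering, and padding here are assumptions inferred from the code, not the authoritative definition.

/* Sketch only -- the real definition lives in include/asm-i386/suspend.h.
 * "sgdt"/"sidt" store a 16-bit limit immediately followed by a 32-bit
 * base, so each limit field must be directly followed by its base; the
 * pad words keeping the bases aligned are an assumption.
 */
#include <linux/types.h>

struct saved_context {
	u16 es, fs, gs, ss;			/* segment selectors */
	unsigned long cr0, cr2, cr3, cr4;	/* control registers */
	u16 gdt_pad, gdt_limit;
	unsigned long gdt_base;			/* filled by "sgdt" */
	u16 idt_pad, idt_limit;
	unsigned long idt_base;			/* filled by "sidt" */
	u16 ldt;				/* LDT selector, from "sldt" */
	u16 tr;					/* task register, from "str" */
};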
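The copy_loop in swsusp_arch_resume walks a NULL-terminated list of page backup entries through the pbe_address, pbe_orig_address, and pbe_next offsets generated into asm_offsets.h. In C it reads roughly as below; struct pbe really lives in include/linux/suspend.h, only the fields the assembly touches are shown here, and the helper function name is hypothetical.

#include <linux/string.h>		/* memcpy() */

struct pbe {				/* see include/linux/suspend.h */
	unsigned long address;		/* address of the saved copy */
	unsigned long orig_address;	/* where the page lived before suspend */
	/* ... fields the copy loop does not touch ... */
	struct pbe *next;		/* NULL terminates the list */
};

/* Hypothetical C rendering of copy_loop above. */
static void copy_pages_back(struct pbe *p)
{
	for (; p; p = p->next)		/* testl %edx,%edx ; jz done */
		memcpy((void *)p->orig_address,
		       (void *)p->address,
		       1024 * 4);	/* "movl $1024,%ecx ; rep movsl": one 4 KB page */
}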
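Finally, the done: epilogue flushes the TLB by toggling CR4.PGE (bit 7) around a CR3 reload: a bare CR3 reload leaves "global" entries such as vmalloc mappings cached, so PGE is cleared first and restored afterwards. A C sketch of the same sequence follows; the function name is hypothetical, and mmu_cr4_features is the boot-time CR4 value the assembly also reads.

extern unsigned long mmu_cr4_features;	/* declared in asm/processor.h */

static inline void flush_tlb_including_global(void)	/* hypothetical name */
{
	unsigned long cr4 = mmu_cr4_features;
	unsigned long tmp;

	asm volatile ("movl %1, %%cr4\n\t"	/* turn off PGE */
		      "movl %%cr3, %0\n\t"	/* reload CR3: flush TLB */
		      "movl %0, %%cr3\n\t"
		      "movl %2, %%cr4"		/* turn PGE back on */
		      : "=&r" (tmp)
		      : "r" (cr4 & ~(1UL << 7)), "r" (cr4)
		      : "memory");
}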