author	David Gibson <david@gibson.dropbear.id.au>	2005-10-21 15:45:50 +1000
committer	Paul Mackerras <paulus@samba.org>	2005-10-21 22:47:23 +1000
commit	6cb7bfebb145af5ea1d052512a2ae7ff07a47202 (patch)
tree	677ce52e6ad423f8a652ec3e16f98c3ad33fcc54 /arch/powerpc/kernel/entry_32.S
parent	b0faa28493f97b55b36ff5b1a2b8c81bf253a460 (diff)
[PATCH] powerpc: Merge thread_info.h
Merge ppc32 and ppc64 versions of thread_info.h. They were pretty similar already, the chief changes are:

- Instead of inline asm to implement current_thread_info(), which needs to be different for ppc32 and ppc64, we use C with an asm("r1") register variable. gcc turns it into the same asm as we used to have for both platforms.

- We replace ppc32's 'local_flags' with the ppc64 'syscall_noerror' field. The noerror flag was in fact the only thing in the local_flags field anyway, so the ppc64 approach is simpler, and means we only need a load-immediate/store instead of load/mask/store when clearing the flag.

- In readiness for 64k pages, when THREAD_SIZE will be less than a page, ppc64 used kmalloc() rather than get_free_pages() to allocate the kernel stack. With this patch we do the same for ppc32, since there's no strong reason not to.

- For ppc64, we no longer export THREAD_SHIFT and THREAD_SIZE via asm-offsets; thread_info.h can now be safely included in asm, as on ppc32.

Built and booted on G4 Powerbook (ARCH=ppc and ARCH=powerpc) and Power5 (ARCH=ppc64 and ARCH=powerpc).

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
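The current_thread_info() point above is what the (31-THREAD_SHIFT) masks in the diff below rely on: with 8KB ppc32 kernel stacks (THREAD_SHIFT = 13), 31-THREAD_SHIFT evaluates to 18, matching the hard-coded "rlwinm rN,r1,0,0,18" masks being replaced. The following is a minimal C sketch of the register-variable technique the commit message describes; it is not the verbatim merged thread_info.h, and the THREAD_SHIFT value and the forward declaration are illustrative assumptions.

	/* Sketch only: illustrative THREAD_SHIFT, not the merged header verbatim. */
	#define THREAD_SHIFT	13			/* assumed 8KB kernel stacks */
	#define THREAD_SIZE	(1 << THREAD_SHIFT)

	struct thread_info;				/* real definition lives in thread_info.h */

	static inline struct thread_info *current_thread_info(void)
	{
		/* Bind a C variable to the stack pointer register, r1. */
		register unsigned long sp asm("r1");

		/*
		 * thread_info sits at the base of the kernel stack, so clearing
		 * the low THREAD_SHIFT bits of r1 gives its address.  gcc turns
		 * this into the same single instruction the old inline asm used:
		 * rlwinm on ppc32, clrrdi on ppc64.
		 */
		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	}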
Diffstat (limited to 'arch/powerpc/kernel/entry_32.S')
-rw-r--r--	arch/powerpc/kernel/entry_32.S	25
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index fc9dded9ac04..37b4396ca978 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -199,10 +199,9 @@ _GLOBAL(DoSyscall)
#ifdef SHOW_SYSCALLS
bl do_show_syscall
#endif /* SHOW_SYSCALLS */
- rlwinm r10,r1,0,0,18 /* current_thread_info() */
- lwz r11,TI_LOCAL_FLAGS(r10)
- rlwinm r11,r11,0,~_TIFL_FORCE_NOERROR
- stw r11,TI_LOCAL_FLAGS(r10)
+ rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
+ li r11,0
+ stb r11,TI_SC_NOERR(r10)
lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace
@@ -225,10 +224,10 @@ ret_from_syscall:
mr r6,r3
li r11,-_LAST_ERRNO
cmplw 0,r3,r11
- rlwinm r12,r1,0,0,18 /* current_thread_info() */
+ rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
blt+ 30f
- lwz r11,TI_LOCAL_FLAGS(r12)
- andi. r11,r11,_TIFL_FORCE_NOERROR
+ lbz r11,TI_SC_NOERR(r12)
+ cmpwi r11,0
bne 30f
neg r3,r3
lwz r10,_CCR(r1) /* Set SO bit in CR */
@@ -315,7 +314,7 @@ syscall_exit_work:
LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
SYNC
MTMSRD(r10) /* disable interrupts again */
- rlwinm r12,r1,0,0,18 /* current_thread_info() */
+ rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
lwz r9,TI_FLAGS(r12)
5:
andi. r0,r9,_TIF_NEED_RESCHED
@@ -630,7 +629,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
.globl sigreturn_exit
sigreturn_exit:
subi r1,r3,STACK_FRAME_OVERHEAD
- rlwinm r12,r1,0,0,18 /* current_thread_info() */
+ rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
lwz r9,TI_FLAGS(r12)
andi. r0,r9,_TIF_SYSCALL_T_OR_A
beq+ ret_from_except_full
@@ -657,7 +656,7 @@ ret_from_except:
user_exc_return: /* r10 contains MSR_KERNEL here */
/* Check current_thread_info()->flags */
- rlwinm r9,r1,0,0,18
+ rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r9,TI_FLAGS(r9)
andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
bne do_work
@@ -677,7 +676,7 @@ restore_user:
/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
/* check current_thread_info->preempt_count */
- rlwinm r9,r1,0,0,18
+ rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r0,TI_PREEMPT(r9)
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
@@ -687,7 +686,7 @@ resume_kernel:
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
1: bl preempt_schedule_irq
- rlwinm r9,r1,0,0,18
+ rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r3,TI_FLAGS(r9)
andi. r0,r3,_TIF_NEED_RESCHED
bne- 1b
@@ -889,7 +888,7 @@ recheck:
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC
MTMSRD(r10) /* disable interrupts */
- rlwinm r9,r1,0,0,18
+ rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r9,TI_FLAGS(r9)
andi. r0,r9,_TIF_NEED_RESCHED
bne- do_resched