Diffstat (limited to 'arch/tile/kernel/intvec_32.S')
-rw-r--r--  arch/tile/kernel/intvec_32.S  114
1 file changed, 33 insertions(+), 81 deletions(-)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index cb52d66343ed..088d5c141e68 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -28,20 +28,10 @@
#include <arch/interrupts.h>
#include <arch/spr_def.h>
-#ifdef CONFIG_PREEMPT
-# error "No support for kernel preemption currently"
-#endif
-
#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
-#if !CHIP_HAS_WH64()
- /* By making this an empty macro, we can use wh64 in the code. */
- .macro wh64 reg
- .endm
-#endif
-
.macro push_reg reg, ptr=sp, delta=-4
{
sw \ptr, \reg
@@ -189,7 +179,7 @@ intvec_\vecname:
* point sp at the top aligned address on the actual stack page.
*/
mfspr r0, SPR_SYSTEM_SAVE_K_0
- mm r0, r0, zero, LOG2_THREAD_SIZE, 31
+ mm r0, r0, zero, LOG2_NR_CPU_IDS, 31
0:
/*
@@ -207,6 +197,9 @@ intvec_\vecname:
* cache line 1: r14...r29
* cache line 0: 2 x frame, r0..r13
*/
+#if STACK_TOP_DELTA != 64
+#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
+#endif
andi r0, r0, -64
/*
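
[Note: the "andi r0, r0, -64" above is plain cache-line rounding; a minimal C sketch of the same step, with a hypothetical function name:

    #include <stdint.h>

    /* Round the stack-top value down to a 64-byte cache-line boundary,
     * so the register save area spans whole cache lines as described
     * in the layout comment above.  Mirrors "andi r0, r0, -64". */
    static inline uintptr_t align_stack_top(uintptr_t top)
    {
            return top & ~(uintptr_t)63;
    }
]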
@@ -326,18 +319,14 @@ intvec_\vecname:
movei r3, -1 /* not used, but set for consistency */
}
.else
-#if CHIP_HAS_AUX_PERF_COUNTERS()
.ifc \c_routine, op_handle_aux_perf_interrupt
{
mfspr r2, AUX_PERF_COUNT_STS
movei r3, -1 /* not used, but set for consistency */
}
.else
-#endif
movei r3, 0
-#if CHIP_HAS_AUX_PERF_COUNTERS()
.endif
-#endif
.endif
.endif
.endif
@@ -354,7 +343,7 @@ intvec_\vecname:
#ifdef __COLLECT_LINKER_FEEDBACK__
.pushsection .text.intvec_feedback,"ax"
.org (\vecnum << 5)
- FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
+ FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
jrp lr
.popsection
#endif
@@ -468,7 +457,7 @@ intvec_\vecname:
}
{
auli r21, r21, ha16(__per_cpu_offset)
- mm r20, r20, zero, 0, LOG2_THREAD_SIZE-1
+ mm r20, r20, zero, 0, LOG2_NR_CPU_IDS-1
}
s2a r20, r20, r21
lw tp, r20
@@ -562,7 +551,6 @@ intvec_\vecname:
.endif
mtspr INTERRUPT_CRITICAL_SECTION, zero
-#if CHIP_HAS_WH64()
/*
* Prepare the first 256 stack bytes to be rapidly accessible
* without having to fetch the background data. We don't really
@@ -583,7 +571,6 @@ intvec_\vecname:
addi r52, r52, -64
}
wh64 r52
-#endif
#ifdef CONFIG_TRACE_IRQFLAGS
.ifnc \function,handle_nmi
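
[Note: the wh64 loop retained above primes the top of the new stack; a rough C rendering of the intent, assuming a tile-gcc __insn_wh64() intrinsic is available (treat the intrinsic name as an assumption):

    /* Mark the four cache lines (256 bytes) at the top of the stack as
     * write-allocated via "write hint 64", so the upcoming register
     * saves do not stall fetching stale background data. */
    static inline void prime_stack_top(char *sp_top)
    {
            int i;

            for (i = 1; i <= 4; i++)
                    __insn_wh64(sp_top - 64 * i);
    }
]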
@@ -762,7 +749,7 @@ intvec_\vecname:
.macro dc_dispatch vecnum, vecname
.org (\vecnum << 8)
intvec_\vecname:
- j hv_downcall_dispatch
+ j _hv_downcall_dispatch
ENDPROC(intvec_\vecname)
.endm
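
[Note: the ".org (\vecnum << 8)" directive pins each downcall vector to a fixed 256-byte slot; the implied address arithmetic, as a trivial C sketch with hypothetical names:

    #include <stdint.h>

    /* Vector N lives at section base + N * 256 bytes. */
    static inline uintptr_t vec_addr(uintptr_t section_base, unsigned int vecnum)
    {
            return section_base + ((uintptr_t)vecnum << 8);
    }
]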
@@ -812,17 +799,34 @@ STD_ENTRY(interrupt_return)
}
lw r29, r29
andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
+ bzt r29, .Lresume_userspace
+
+#ifdef CONFIG_PREEMPT
+ /* Returning to kernel space. Check if we need preemption. */
+ GET_THREAD_INFO(r29)
+ addli r28, r29, THREAD_INFO_FLAGS_OFFSET
{
- bzt r29, .Lresume_userspace
- PTREGS_PTR(r29, PTREGS_OFFSET_PC)
+ lw r28, r28
+ addli r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
}
+ {
+ andi r28, r28, _TIF_NEED_RESCHED
+ lw r29, r29
+ }
+ bzt r28, 1f
+ bnz r29, 1f
+ jal preempt_schedule_irq
+ FEEDBACK_REENTER(interrupt_return)
+1:
+#endif
/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
{
- lw r28, r29
+ PTREGS_PTR(r29, PTREGS_OFFSET_PC)
moveli r27, lo16(_cpu_idle_nap)
}
{
+ lw r28, r29
auli r27, r27, ha16(_cpu_idle_nap)
}
{
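
[Note: the CONFIG_PREEMPT block added in the hunk above is the usual kernel-preemption check on return to kernel space. Roughly, as a C sketch of the control flow, not the actual implementation:

    /* Preempt only when returning to kernel space with TIF_NEED_RESCHED
     * set and a preempt count of zero. */
    if (!user_mode(regs)) {                         /* bzt r29, .Lresume_userspace */
            struct thread_info *ti = current_thread_info();

            if ((ti->flags & _TIF_NEED_RESCHED) &&  /* bzt r28, 1f */
                ti->preempt_count == 0)             /* bnz r29, 1f */
                    preempt_schedule_irq();         /* jal preempt_schedule_irq */
    }
]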
@@ -1420,7 +1424,6 @@ handle_ill:
{
lw r0, r0 /* indirect thru thread_info to get task_info*/
addi r1, sp, C_ABI_SAVE_AREA_SIZE /* put ptregs pointer into r1 */
- move r2, zero /* load error code into r2 */
}
jal send_sigtrap /* issue a SIGTRAP */
@@ -1518,12 +1521,10 @@ STD_ENTRY(_sys_clone)
__HEAD
.align 64
/* Align much later jump on the start of a cache line. */
-#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
nop
#if PAGE_SIZE >= 0x10000
nop
#endif
-#endif
ENTRY(sys_cmpxchg)
/*
@@ -1557,45 +1558,6 @@ ENTRY(sys_cmpxchg)
# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
#endif
-#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
- {
- /* Check for unaligned input. */
- bnz sp, .Lcmpxchg_badaddr
- mm r25, r0, zero, 3, PAGE_SHIFT-1
- }
- {
- crc32_32 r25, zero, r25
- moveli r21, lo16(atomic_lock_ptr)
- }
- {
- auli r21, r21, ha16(atomic_lock_ptr)
- auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */
- }
- {
- shri r20, r25, 32 - ATOMIC_HASH_L1_SHIFT
- slt_u r23, r0, r23
- lw r26, r0 /* see comment in the "#else" for the "lw r26". */
- }
- {
- s2a r21, r20, r21
- bbns r23, .Lcmpxchg_badaddr
- }
- {
- lw r21, r21
- seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
- andi r25, r25, ATOMIC_HASH_L2_SIZE - 1
- }
- {
- /* Branch away at this point if we're doing a 64-bit cmpxchg. */
- bbs r23, .Lcmpxchg64
- andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */
- }
- {
- s2a ATOMIC_LOCK_REG_NAME, r25, r21
- j .Lcmpxchg32_tns /* see comment in the #else for the jump. */
- }
-
-#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
{
/* Check for unaligned input. */
bnz sp, .Lcmpxchg_badaddr
@@ -1609,7 +1571,7 @@ ENTRY(sys_cmpxchg)
* Because of C pointer arithmetic, we want to compute this:
*
* ((char*)atomic_locks +
- * (((r0 >> 3) & (1 << (ATOMIC_HASH_SIZE - 1))) << 2))
+ * (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2))
*
* Instead of two shifts we just ">> 1", and use 'mm'
* to ignore the low and high bits we don't want.
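
[Note: the comment above spells out the lock-address formula; the equivalent C, using the identifiers from the comment (the array type is assumed to be 32-bit locks, per the "<< 2" scaling):

    extern int atomic_locks[1 << ATOMIC_HASH_SHIFT];

    /* Hash the user address down to word granularity and index the
     * flat lock table; matches the expression in the comment above. */
    static inline int *atomic_lock_for(unsigned long addr)
    {
            unsigned long idx = (addr >> 3) & ((1UL << ATOMIC_HASH_SHIFT) - 1);

            return &atomic_locks[idx];
    }
]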
@@ -1620,12 +1582,9 @@ ENTRY(sys_cmpxchg)
/*
* Ensure that the TLB is loaded before we take out the lock.
- * On tilepro, this will start fetching the value all the way
- * into our L1 as well (and if it gets modified before we
- * grab the lock, it will be invalidated from our cache
- * before we reload it). On tile64, we'll start fetching it
- * into our L1 if we're the home, and if we're not, we'll
- * still at least start fetching it into the home's L2.
+ * This will start fetching the value all the way into our L1
+ * as well (and if it gets modified before we grab the lock,
+ * it will be invalidated from our cache before we reload it).
*/
lw r26, r0
}
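
[Note: the "lw r26, r0" above is executed purely for its side effects; in C terms it is just an early touch of the target word (a sketch, variable names hypothetical):

    /* Read the word before taking the lock so the TLB entry and the L1
     * line are already resident; if it changes before the lock is held,
     * it is simply re-read under the lock. */
    int prefetched = *(volatile int *)addr;
]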
@@ -1668,8 +1627,6 @@ ENTRY(sys_cmpxchg)
j .Lcmpxchg32_tns
}
-#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-
/* Symbol for do_page_fault_ics() to use to compare against the PC. */
.global __sys_cmpxchg_grab_lock
__sys_cmpxchg_grab_lock:
@@ -1807,9 +1764,6 @@ __sys_cmpxchg_grab_lock:
.align 64
.Lcmpxchg64:
{
-#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
- s2a ATOMIC_LOCK_REG_NAME, r25, r21
-#endif
bzt r23, .Lcmpxchg64_tns
}
j .Lcmpxchg_badaddr
@@ -1875,8 +1829,8 @@ int_unalign:
push_extra_callee_saves r0
j do_trap
-/* Include .intrpt1 array of interrupt vectors */
- .section ".intrpt1", "ax"
+/* Include .intrpt array of interrupt vectors */
+ .section ".intrpt", "ax"
#define op_handle_perf_interrupt bad_intr
#define op_handle_aux_perf_interrupt bad_intr
@@ -1944,10 +1898,8 @@ int_unalign:
do_page_fault
int_hand INT_SN_CPL, SN_CPL, bad_intr
int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
-#if CHIP_HAS_AUX_PERF_COUNTERS()
int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
op_handle_aux_perf_interrupt, handle_nmi
-#endif
/* Synthetic interrupt delivered only by the simulator */
int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint