Diffstat (limited to 'arch/arm64/include/asm/assembler.h')
-rw-r--r-- | arch/arm64/include/asm/assembler.h | 170
1 file changed, 138 insertions, 32 deletions
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 3c78835bba94..0bcc98dbba56 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -202,25 +202,15 @@ lr      .req    x30             // link register
 
 /*
  * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
- * <symbol> is within the range +/- 4 GB of the PC when running
- * in core kernel context. In module context, a movz/movk sequence
- * is used, since modules may be loaded far away from the kernel
- * when KASLR is in effect.
+ * <symbol> is within the range +/- 4 GB of the PC.
  */
        /*
         * @dst: destination register (64 bit wide)
         * @sym: name of the symbol
         */
        .macro  adr_l, dst, sym
-#ifndef MODULE
        adrp    \dst, \sym
        add     \dst, \dst, :lo12:\sym
-#else
-       movz    \dst, #:abs_g3:\sym
-       movk    \dst, #:abs_g2_nc:\sym
-       movk    \dst, #:abs_g1_nc:\sym
-       movk    \dst, #:abs_g0_nc:\sym
-#endif
        .endm
 
        /*
@@ -231,7 +221,6 @@ lr      .req    x30             // link register
         * the address
         */
        .macro  ldr_l, dst, sym, tmp=
-#ifndef MODULE
        .ifb    \tmp
        adrp    \dst, \sym
        ldr     \dst, [\dst, :lo12:\sym]
@@ -239,15 +228,6 @@ lr      .req    x30             // link register
        adrp    \tmp, \sym
        ldr     \dst, [\tmp, :lo12:\sym]
        .endif
-#else
-       .ifb    \tmp
-       adr_l   \dst, \sym
-       ldr     \dst, [\dst]
-       .else
-       adr_l   \tmp, \sym
-       ldr     \dst, [\tmp]
-       .endif
-#endif
        .endm
 
        /*
@@ -257,28 +237,18 @@ lr      .req    x30             // link register
         * while <src> needs to be preserved.
         */
        .macro  str_l, src, sym, tmp
-#ifndef MODULE
        adrp    \tmp, \sym
        str     \src, [\tmp, :lo12:\sym]
-#else
-       adr_l   \tmp, \sym
-       str     \src, [\tmp]
-#endif
        .endm
 
        /*
-        * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for
-        *       non-module code
+        * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
         * @sym: The name of the per-cpu variable
         * @tmp: scratch register
         */
        .macro adr_this_cpu, dst, sym, tmp
-#ifndef MODULE
        adrp    \tmp, \sym
        add     \dst, \tmp, #:lo12:\sym
-#else
-       adr_l   \dst, \sym
-#endif
 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
        mrs     \tmp, tpidr_el1
 alternative_else
@@ -595,4 +565,140 @@ USER(\label, ic ivau, \tmp2)           // invalidate I line PoU
 #endif
        .endm
 
+       /*
+        * frame_push - Push @regcount callee saved registers to the stack,
+        *              starting at x19, as well as x29/x30, and set x29 to
+        *              the new value of sp. Add @extra bytes of stack space
+        *              for locals.
+        */
+       .macro          frame_push, regcount:req, extra
+       __frame         st, \regcount, \extra
+       .endm
+
+       /*
+        * frame_pop  - Pop the callee saved registers from the stack that were
+        *              pushed in the most recent call to frame_push, as well
+        *              as x29/x30 and any extra stack space that may have been
+        *              allocated.
+        */
+       .macro          frame_pop
+       __frame         ld
+       .endm
+
+       .macro          __frame_regs, reg1, reg2, op, num
+       .if             .Lframe_regcount == \num
+       \op\()r         \reg1, [sp, #(\num + 1) * 8]
+       .elseif         .Lframe_regcount > \num
+       \op\()p         \reg1, \reg2, [sp, #(\num + 1) * 8]
+       .endif
+       .endm
+
+       .macro          __frame, op, regcount, extra=0
+       .ifc            \op, st
+       .if             (\regcount) < 0 || (\regcount) > 10
+       .error          "regcount should be in the range [0 ... 10]"
+       .endif
+       .if             ((\extra) % 16) != 0
+       .error          "extra should be a multiple of 16 bytes"
+       .endif
+       .ifdef          .Lframe_regcount
+       .if             .Lframe_regcount != -1
+       .error          "frame_push/frame_pop may not be nested"
+       .endif
+       .endif
+       .set            .Lframe_regcount, \regcount
+       .set            .Lframe_extra, \extra
+       .set            .Lframe_local_offset, ((\regcount + 3) / 2) * 16
+       stp             x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
+       mov             x29, sp
+       .endif
+
+       __frame_regs    x19, x20, \op, 1
+       __frame_regs    x21, x22, \op, 3
+       __frame_regs    x23, x24, \op, 5
+       __frame_regs    x25, x26, \op, 7
+       __frame_regs    x27, x28, \op, 9
+
+       .ifc            \op, ld
+       .if             .Lframe_regcount == -1
+       .error          "frame_push/frame_pop may not be nested"
+       .endif
+       ldp             x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
+       .set            .Lframe_regcount, -1
+       .endif
+       .endm
+
+/*
+ * Check whether to yield to another runnable task from kernel mode NEON code
+ * (which runs with preemption disabled).
+ *
+ * if_will_cond_yield_neon
+ *        // pre-yield patchup code
+ * do_cond_yield_neon
+ *        // post-yield patchup code
+ * endif_yield_neon    <label>
+ *
+ * where <label> is optional, and marks the point where execution will resume
+ * after a yield has been performed. If omitted, execution resumes right after
+ * the endif_yield_neon invocation. Note that the entire sequence, including
+ * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
+ * is not defined.
+ *
+ * As a convenience, in the case where no patchup code is required, the above
+ * sequence may be abbreviated to
+ *
+ * cond_yield_neon    <label>
+ *
+ * Note that the patchup code does not support assembler directives that change
+ * the output section, any use of such directives is undefined.
+ *
+ * The yield itself consists of the following:
+ * - Check whether the preempt count is exactly 1, in which case disabling
+ *   preemption once will make the task preemptible. If this is not the case,
+ *   yielding is pointless.
+ * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
+ *   kernel mode NEON (which will trigger a reschedule), and branch to the
+ *   yield fixup code.
+ *
+ * This macro sequence may clobber all CPU state that is not guaranteed by the
+ * AAPCS to be preserved across an ordinary function call.
+ */
+
+       .macro          cond_yield_neon, lbl
+       if_will_cond_yield_neon
+       do_cond_yield_neon
+       endif_yield_neon        \lbl
+       .endm
+
+       .macro          if_will_cond_yield_neon
+#ifdef CONFIG_PREEMPT
+       get_thread_info x0
+       ldr             w1, [x0, #TSK_TI_PREEMPT]
+       ldr             x0, [x0, #TSK_TI_FLAGS]
+       cmp             w1, #PREEMPT_DISABLE_OFFSET
+       csel            x0, x0, xzr, eq
+       tbnz            x0, #TIF_NEED_RESCHED, .Lyield_\@       // needs rescheduling?
+       /* fall through to endif_yield_neon */
+       .subsection     1
+.Lyield_\@ :
+#else
+       .section        ".discard.cond_yield_neon", "ax"
+#endif
+       .endm
+
+       .macro          do_cond_yield_neon
+       bl              kernel_neon_end
+       bl              kernel_neon_begin
+       .endm
+
+       .macro          endif_yield_neon, lbl
+       .ifnb           \lbl
+       b               \lbl
+       .else
+       b               .Lyield_out_\@
+       .endif
+       .previous
+.Lyield_out_\@ :
+       .endm
+
 #endif /* __ASM_ASSEMBLER_H */
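For context on the first hunks: with the module-specific movz/movk sequences removed, the pseudo-ops behave the same in kernel and module code. A minimal usage sketch follows; the symbol names (example_sym, example_percpu_var) and register choices are made up for illustration and are not part of this patch:

       adr_l           x0, example_sym                 // x0 = address of example_sym (PC-relative, +/- 4 GB)
       ldr_l           x1, example_sym                 // x1 = value of example_sym, x1 doubles as the scratch register
       ldr_l           x2, example_sym, x3             // same load, but use x3 as the scratch register
       str_l           x2, example_sym, x3             // store x2 to example_sym, x3 is clobbered
       adr_this_cpu    x4, example_percpu_var, x5      // x4 = this CPU's copy of a per-cpu variable, x5 is scratch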
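The frame_push/frame_pop pair replaces open-coded stp/ldp boilerplate in assembly routines that need callee-saved registers. A minimal sketch, assuming a hypothetical routine and helper (neither appears in this patch):

       ENTRY(example_sum)                              // hypothetical routine
               frame_push      2                       // save x19/x20 plus x29/x30, no extra locals
               mov             x19, x0                 // x19/x20 survive the call below
               mov             x20, x1
               bl              example_helper          // hypothetical call that may clobber x0-x18
               add             x0, x19, x20
               frame_pop                               // restore x19/x20 and x29/x30, rebalance sp
               ret
       ENDPROC(example_sum)

frame_push 2 stores x29/x30 at the new sp with x19/x20 just above them, and points x29 at sp so the routine appears correctly in frame-pointer unwinds; frame_pop undoes exactly that.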
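The comment block in the last hunk documents the yield sequence itself; below is a hedged sketch of how it would typically sit in the inner loop of kernel-mode NEON code that did frame_push on entry (so x30 survives the calls made by do_cond_yield_neon). The register assignments (x19 = state buffer, x20 = input pointer, w21 = remaining block count) and the choice of v0-v3 as the state to spill are assumptions for the example, not code from this patch:

       // assumed: x19 = state buffer, x20 = input, w21 = remaining blocks
       0:      ld1             {v4.16b}, [x20], #16    // load the next input block
               // ... mix v4 into the running state in v0-v3 ...
               subs            w21, w21, #1            // all blocks consumed?
               b.eq            2f                      // yes: store the result and return

               if_will_cond_yield_neon
               st1             {v0.4s-v3.4s}, [x19]    // pre-yield: spill the partial state
               do_cond_yield_neon
               ld1             {v0.4s-v3.4s}, [x19]    // post-yield: NEON state was lost, reload it
               endif_yield_neon
               b               0b

       2:      st1             {v0.4s-v3.4s}, [x19]    // store the final state
               frame_pop                               // restores x29/x30 saved by frame_push at entry
               ret

Because do_cond_yield_neon calls kernel_neon_end and kernel_neon_begin, the vector registers do not survive a yield, which is exactly what the pre-/post-yield patchup code is for.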