author | Ralf Baechle <ralf@linux-mips.org> | 2014-04-29 15:21:24 +0200 |
---|---|---|
committer | Ralf Baechle <ralf@linux-mips.org> | 2014-05-23 15:12:37 +0200 |
commit | 76fbfc318de2eb0eb4823095ece020f999a17c63 (patch) | |
tree | 376004537e00adf3459c1e32a5ba82b5692a6a9f /arch/mips/kernel | |
parent | 377cb1b6c16a17e2560a9288f9c8ed5dcdbeba39 (diff) | |
download | blackbird-op-linux-76fbfc318de2eb0eb4823095ece020f999a17c63.tar.gz blackbird-op-linux-76fbfc318de2eb0eb4823095ece020f999a17c63.zip |
MIPS: Sort out mm_isBranchInstr.
mm_isBranchInstr() resided in the math emu code even though it is logically
separate and is also used outside the math emu code. In addition, GCC 4.9.0
leaves the following unnecessarily bloated function body for a non-microMIPS
configuration:
<mm_isBranchInstr>:
105c: afa50004 sw a1,4(sp)
1060: afa60008 sw a2,8(sp)
1064: afa7000c sw a3,12(sp)
1068: 03e00008 jr ra
106c: 00001021 move v0,zero
which stores arguments that are never going to be used on the stack frame.
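For illustration, a minimal sketch (an assumption, not code from this patch) of the pre-move shape in cp1emu.c that produces the stub above: because the cpu_has_mmips check sits inside an ordinary out-of-line function, a non-microMIPS build still has to emit and call it even though its body reduces to "return 0".

int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                     unsigned long *contpc)
{
        if (!cpu_has_mmips)     /* constant 0 on non-microMIPS configurations */
                return 0;
        /* ... full microMIPS branch decoding (see __mm_isBranchInstr() below) ... */
        return 0;
}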
Move mm_isBranchInstr() from cp1emu.c to branch.c, then split mm_isBranchInstr()
into a __mm_isBranchInstr() core and a mm_isBranchInstr() wrapper inline function
which only invokes __mm_isBranchInstr() on microMIPS configurations.
This shaves 112 bytes off the kernel and improves code flow a bit.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
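For reference, a minimal sketch of the static-inline wrapper the commit message describes; the real wrapper lives in a header outside arch/mips/kernel and therefore does not appear in the diffstat below, so treat the exact prototype as an assumption modelled on __mm_isBranchInstr() in the diff:

static inline int mm_isBranchInstr(struct pt_regs *regs,
                                   struct mm_decoded_insn dec_insn,
                                   unsigned long *contpc)
{
        if (!cpu_has_mmips)
                return 0;

        return __mm_isBranchInstr(regs, dec_insn, contpc);
}

With cpu_has_mmips evaluating to a constant 0 on non-microMIPS configurations, the compiler can drop both the call and the stub shown above.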
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r-- | arch/mips/kernel/branch.c | 196 |
1 file changed, 196 insertions, 0 deletions
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 4d78bf445a9c..84888d9332b9 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -48,6 +48,202 @@ int __isa_exception_epc(struct pt_regs *regs)
         return epc;
 }
 
+/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
+static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
+
+int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+                       unsigned long *contpc)
+{
+        union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+        int bc_false = 0;
+        unsigned int fcr31;
+        unsigned int bit;
+
+        if (!cpu_has_mmips)
+                return 0;
+
+        switch (insn.mm_i_format.opcode) {
+        case mm_pool32a_op:
+                if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
+                    mm_pool32axf_op) {
+                        switch (insn.mm_i_format.simmediate >>
+                                MM_POOL32A_MINOR_SHIFT) {
+                        case mm_jalr_op:
+                        case mm_jalrhb_op:
+                        case mm_jalrs_op:
+                        case mm_jalrshb_op:
+                                if (insn.mm_i_format.rt != 0)   /* Not mm_jr */
+                                        regs->regs[insn.mm_i_format.rt] =
+                                                regs->cp0_epc +
+                                                dec_insn.pc_inc +
+                                                dec_insn.next_pc_inc;
+                                *contpc = regs->regs[insn.mm_i_format.rs];
+                                return 1;
+                        }
+                }
+                break;
+        case mm_pool32i_op:
+                switch (insn.mm_i_format.rt) {
+                case mm_bltzals_op:
+                case mm_bltzal_op:
+                        regs->regs[31] = regs->cp0_epc +
+                                dec_insn.pc_inc +
+                                dec_insn.next_pc_inc;
+                        /* Fall through */
+                case mm_bltz_op:
+                        if ((long)regs->regs[insn.mm_i_format.rs] < 0)
+                                *contpc = regs->cp0_epc +
+                                        dec_insn.pc_inc +
+                                        (insn.mm_i_format.simmediate << 1);
+                        else
+                                *contpc = regs->cp0_epc +
+                                        dec_insn.pc_inc +
+                                        dec_insn.next_pc_inc;
+                        return 1;
+                case mm_bgezals_op:
+                case mm_bgezal_op:
+                        regs->regs[31] = regs->cp0_epc +
+                                dec_insn.pc_inc +
+                                dec_insn.next_pc_inc;
+                        /* Fall through */
+                case mm_bgez_op:
+                        if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
+                                *contpc = regs->cp0_epc +
+                                        dec_insn.pc_inc +
+                                        (insn.mm_i_format.simmediate << 1);
+                        else
+                                *contpc = regs->cp0_epc +
+                                        dec_insn.pc_inc +
+                                        dec_insn.next_pc_inc;
+                        return 1;
+                case mm_blez_op:
+                        if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+                                *contpc = regs->cp0_epc +
+                                        dec_insn.pc_inc +
+                                        (insn.mm_i_format.simmediate << 1);
+                        else
+                                *contpc = regs->cp0_epc +
+                                        dec_insn.pc_inc +
+                                        dec_insn.next_pc_inc;
+                        return 1;
+                case mm_bgtz_op:
+                        if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+                                *contpc = regs->cp0_epc +
+                                        dec_insn.pc_inc +
+                                        (insn.mm_i_format.simmediate << 1);
+                        else
+                                *contpc = regs->cp0_epc +
+                                        dec_insn.pc_inc +
+                                        dec_insn.next_pc_inc;
+                        return 1;
+                case mm_bc2f_op:
+                case mm_bc1f_op:
+                        bc_false = 1;
+                        /* Fall through */
+                case mm_bc2t_op:
+                case mm_bc1t_op:
+                        preempt_disable();
+                        if (is_fpu_owner())
+                                asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+                        else
+                                fcr31 = current->thread.fpu.fcr31;
+                        preempt_enable();
+
+                        if (bc_false)
+                                fcr31 = ~fcr31;
+
+                        bit = (insn.mm_i_format.rs >> 2);
+                        bit += (bit != 0);
+                        bit += 23;
+                        if (fcr31 & (1 << bit))
+                                *contpc = regs->cp0_epc +
+                                        dec_insn.pc_inc +
+                                        (insn.mm_i_format.simmediate << 1);
+                        else
+                                *contpc = regs->cp0_epc +
+                                        dec_insn.pc_inc + dec_insn.next_pc_inc;
+                        return 1;
+                }
+                break;
+        case mm_pool16c_op:
+                switch (insn.mm_i_format.rt) {
+                case mm_jalr16_op:
+                case mm_jalrs16_op:
+                        regs->regs[31] = regs->cp0_epc +
+                                dec_insn.pc_inc + dec_insn.next_pc_inc;
+                        /* Fall through */
+                case mm_jr16_op:
+                        *contpc = regs->regs[insn.mm_i_format.rs];
+                        return 1;
+                }
+                break;
+        case mm_beqz16_op:
+                if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
+                        *contpc = regs->cp0_epc +
+                                dec_insn.pc_inc +
+                                (insn.mm_b1_format.simmediate << 1);
+                else
+                        *contpc = regs->cp0_epc +
+                                dec_insn.pc_inc + dec_insn.next_pc_inc;
+                return 1;
+        case mm_bnez16_op:
+                if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
+                        *contpc = regs->cp0_epc +
+                                dec_insn.pc_inc +
+                                (insn.mm_b1_format.simmediate << 1);
+                else
+                        *contpc = regs->cp0_epc +
+                                dec_insn.pc_inc + dec_insn.next_pc_inc;
+                return 1;
+        case mm_b16_op:
+                *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                         (insn.mm_b0_format.simmediate << 1);
+                return 1;
+        case mm_beq32_op:
+                if (regs->regs[insn.mm_i_format.rs] ==
+                    regs->regs[insn.mm_i_format.rt])
+                        *contpc = regs->cp0_epc +
+                                dec_insn.pc_inc +
+                                (insn.mm_i_format.simmediate << 1);
+                else
+                        *contpc = regs->cp0_epc +
+                                dec_insn.pc_inc +
+                                dec_insn.next_pc_inc;
+                return 1;
+        case mm_bne32_op:
+                if (regs->regs[insn.mm_i_format.rs] !=
+                    regs->regs[insn.mm_i_format.rt])
+                        *contpc = regs->cp0_epc +
+                                dec_insn.pc_inc +
+                                (insn.mm_i_format.simmediate << 1);
+                else
+                        *contpc = regs->cp0_epc +
+                                dec_insn.pc_inc + dec_insn.next_pc_inc;
+                return 1;
+        case mm_jalx32_op:
+                regs->regs[31] = regs->cp0_epc +
+                        dec_insn.pc_inc + dec_insn.next_pc_inc;
+                *contpc = regs->cp0_epc + dec_insn.pc_inc;
+                *contpc >>= 28;
+                *contpc <<= 28;
+                *contpc |= (insn.j_format.target << 2);
+                return 1;
+        case mm_jals32_op:
+        case mm_jal32_op:
+                regs->regs[31] = regs->cp0_epc +
+                        dec_insn.pc_inc + dec_insn.next_pc_inc;
+                /* Fall through */
+        case mm_j32_op:
+                *contpc = regs->cp0_epc + dec_insn.pc_inc;
+                *contpc >>= 27;
+                *contpc <<= 27;
+                *contpc |= (insn.j_format.target << 1);
+                set_isa16_mode(*contpc);
+                return 1;
+        }
+        return 0;
+}
+
 /*
  * Compute return address and emulate branch in microMIPS mode after an
  * exception only. It does not handle compact branches/jumps and cannot
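A hypothetical call site, shown only to illustrate the return convention of the moved helper (the surrounding emulator code is assumed, not part of this diff): a non-zero return means the instruction was a branch or jump and *contpc holds the continuation PC; zero means it was not recognised as one.

        unsigned long contpc;

        if (mm_isBranchInstr(regs, dec_insn, &contpc))
                regs->cp0_epc = contpc;                 /* follow the branch */
        else
                regs->cp0_epc += dec_insn.pc_inc;       /* next sequential instruction */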