From a79ac14fa68297f9888bc70a10df5ed9b8864e38 Mon Sep 17 00:00:00 2001
From: David Blaikie
Date: Fri, 27 Feb 2015 21:17:42 +0000
Subject: [opaque pointer type] Add textual IR support for explicit type
 parameter to load instruction

Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few
more test-case improvements/changes were required this time around
(r229269-r229278):

import fileinput  # unused; the script reads from sys.stdin below
import sys
import re

# Match "load [atomic] [volatile] <ty>[ addrspace(N)]*" followed by the
# pointer operand, capturing the pointee type so it can be repeated as the
# new explicit result type of the load.
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))
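For reference, a minimal sketch of how the script is run (the file name
migrate-load.py is hypothetical; the script only assumes old-style IR on
stdin and writes the rewritten IR to stdout):

  python migrate-load.py < test.ll > test.ll.migrated

so that a line such as

  %0 = load i32* @x, align 4

is rewritten to

  %0 = load i32, i32* @x, align 4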
Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
---
 llvm/test/CodeGen/Mips/2008-07-15-SmallSection.ll | 4 +-
 llvm/test/CodeGen/Mips/2008-08-01-AsmInline.ll | 10 +-
 llvm/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll | 4 +-
 llvm/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll | 2 +-
 llvm/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll | 14 +-
 llvm/test/CodeGen/Mips/2010-07-20-Switch.ll | 2 +-
 llvm/test/CodeGen/Mips/Fast-ISel/br1.ll | 2 +-
 llvm/test/CodeGen/Mips/Fast-ISel/callabi.ll | 8 +-
 llvm/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll | 48 +-
 llvm/test/CodeGen/Mips/Fast-ISel/fpext.ll | 2 +-
 llvm/test/CodeGen/Mips/Fast-ISel/fpintconv.ll | 4 +-
 llvm/test/CodeGen/Mips/Fast-ISel/fptrunc.ll | 2 +-
 llvm/test/CodeGen/Mips/Fast-ISel/icmpa.ll | 40 +-
 llvm/test/CodeGen/Mips/Fast-ISel/loadstore2.ll | 10 +-
 llvm/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll | 30 +-
 llvm/test/CodeGen/Mips/Fast-ISel/overflt.ll | 6 +-
 llvm/test/CodeGen/Mips/Fast-ISel/retabi.ll | 10 +-
 llvm/test/CodeGen/Mips/Fast-ISel/shift.ll | 2 +-
 llvm/test/CodeGen/Mips/addi.ll | 8 +-
 llvm/test/CodeGen/Mips/addressing-mode.ll | 4 +-
 llvm/test/CodeGen/Mips/align16.ll | 4 +-
 llvm/test/CodeGen/Mips/alloca.ll | 14 +-
 llvm/test/CodeGen/Mips/alloca16.ll | 44 +-
 llvm/test/CodeGen/Mips/and1.ll | 4 +-
 llvm/test/CodeGen/Mips/atomic.ll | 4 +-
 llvm/test/CodeGen/Mips/atomicops.ll | 6 +-
 llvm/test/CodeGen/Mips/beqzc.ll | 2 +-
 llvm/test/CodeGen/Mips/beqzc1.ll | 2 +-
 llvm/test/CodeGen/Mips/biggot.ll | 2 +-
 llvm/test/CodeGen/Mips/brconeq.ll | 4 +-
 llvm/test/CodeGen/Mips/brconeqk.ll | 2 +-
 llvm/test/CodeGen/Mips/brconeqz.ll | 2 +-
 llvm/test/CodeGen/Mips/brconge.ll | 6 +-
 llvm/test/CodeGen/Mips/brcongt.ll | 4 +-
 llvm/test/CodeGen/Mips/brconle.ll | 6 +-
 llvm/test/CodeGen/Mips/brconlt.ll | 4 +-
 llvm/test/CodeGen/Mips/brconne.ll | 4 +-
 llvm/test/CodeGen/Mips/brconnek.ll | 2 +-
 llvm/test/CodeGen/Mips/brconnez.ll | 2 +-
 llvm/test/CodeGen/Mips/brdelayslot.ll | 12 +-
 llvm/test/CodeGen/Mips/brind.ll | 2 +-
 .../cconv/arguments-varargs-small-structs-byte.ll | 38 +-
 ...arguments-varargs-small-structs-combinations.ll | 20 +-
 ...rguments-varargs-small-structs-multiple-args.ll | 36 +-
 llvm/test/CodeGen/Mips/cconv/return-float.ll | 4 +-
 llvm/test/CodeGen/Mips/cconv/return-hard-float.ll | 6 +-
 llvm/test/CodeGen/Mips/cconv/return-hard-fp128.ll | 2 +-
 .../CodeGen/Mips/cconv/return-hard-struct-f128.ll | 2 +-
 llvm/test/CodeGen/Mips/cconv/return-struct.ll | 8 +-
 llvm/test/CodeGen/Mips/cconv/return.ll | 6 +-
 llvm/test/CodeGen/Mips/cfi_offset.ll | 4 +-
 llvm/test/CodeGen/Mips/ci2.ll | 2 +-
 llvm/test/CodeGen/Mips/cmov.ll | 6 +-
 llvm/test/CodeGen/Mips/cmplarge.ll | 4 +-
 llvm/test/CodeGen/Mips/const4a.ll | 2 +-
 llvm/test/CodeGen/Mips/ctlz.ll | 2 +-
 llvm/test/CodeGen/Mips/disable-tail-merge.ll | 6 +-
 llvm/test/CodeGen/Mips/div.ll | 4 +-
 llvm/test/CodeGen/Mips/div_rem.ll | 4 +-
 llvm/test/CodeGen/Mips/divrem.ll | 4 +-
 llvm/test/CodeGen/Mips/divu.ll | 4 +-
 llvm/test/CodeGen/Mips/divu_remu.ll | 4 +-
 llvm/test/CodeGen/Mips/dsp-patterns.ll | 6 +-
 llvm/test/CodeGen/Mips/dsp-vec-load-store.ll | 2 +-
 llvm/test/CodeGen/Mips/eh.ll | 2 +-
 llvm/test/CodeGen/Mips/emit-big-cst.ll | 2 +-
 llvm/test/CodeGen/Mips/ex2.ll | 2 +-
 llvm/test/CodeGen/Mips/extins.ll | 2 +-
 llvm/test/CodeGen/Mips/f16abs.ll | 4 +-
 llvm/test/CodeGen/Mips/fastcc.ll | 120 ++---
 llvm/test/CodeGen/Mips/fixdfsf.ll | 2 +-
 llvm/test/CodeGen/Mips/fp-indexed-ls.ll | 18 +-
 llvm/test/CodeGen/Mips/fp-spill-reload.ll | 16 +-
 llvm/test/CodeGen/Mips/fp16instrinsmc.ll | 60 +--
 llvm/test/CodeGen/Mips/fp16static.ll | 4 +-
 llvm/test/CodeGen/Mips/fpneeded.ll | 6 +-
 llvm/test/CodeGen/Mips/fpnotneeded.ll | 2 +-
 llvm/test/CodeGen/Mips/global-address.ll | 4 +-
 llvm/test/CodeGen/Mips/gpreg-lazy-binding.ll | 2 +-
 llvm/test/CodeGen/Mips/gprestore.ll | 6 +-
 llvm/test/CodeGen/Mips/hf16_1.ll | 80 ++--
 llvm/test/CodeGen/Mips/hf16call32.ll | 530 ++++++++++-----------
 llvm/test/CodeGen/Mips/hf16call32_body.ll | 54 +--
 llvm/test/CodeGen/Mips/hf1_body.ll | 2 +-
 llvm/test/CodeGen/Mips/hfptrcall.ll | 32 +-
 .../CodeGen/Mips/inlineasm-assembler-directives.ll | 2 +-
 llvm/test/CodeGen/Mips/inlineasm-operand-code.ll | 6 +-
 llvm/test/CodeGen/Mips/inlineasm64.ll | 4 +-
 llvm/test/CodeGen/Mips/internalfunc.ll | 4 +-
 llvm/test/CodeGen/Mips/jtstat.ll | 2 +-
 llvm/test/CodeGen/Mips/l3mc.ll | 32 +-
 llvm/test/CodeGen/Mips/lb1.ll | 4 +-
 llvm/test/CodeGen/Mips/lbu1.ll | 4 +-
 llvm/test/CodeGen/Mips/lcb2.ll | 16 +-
 llvm/test/CodeGen/Mips/lcb3c.ll | 4 +-
 llvm/test/CodeGen/Mips/lcb4a.ll | 4 +-
 llvm/test/CodeGen/Mips/lcb5.ll | 32 +-
 llvm/test/CodeGen/Mips/lh1.ll | 4 +-
 llvm/test/CodeGen/Mips/lhu1.ll | 4 +-
 llvm/test/CodeGen/Mips/llcarry.ll | 10 +-
 llvm/test/CodeGen/Mips/load-store-left-right.ll | 14 +-
 llvm/test/CodeGen/Mips/machineverifier.ll | 2 +-
 llvm/test/CodeGen/Mips/mbrsize4a.ll | 2 +-
 llvm/test/CodeGen/Mips/micromips-addiu.ll | 6 +-
 llvm/test/CodeGen/Mips/micromips-and16.ll | 4 +-
 llvm/test/CodeGen/Mips/micromips-andi.ll | 4 +-
 .../CodeGen/Mips/micromips-compact-branches.ll | 2 +-
 llvm/test/CodeGen/Mips/micromips-delay-slot-jr.ll | 2 +-
 llvm/test/CodeGen/Mips/micromips-delay-slot.ll | 2 +-
 llvm/test/CodeGen/Mips/micromips-gp-rc.ll | 2 +-
 llvm/test/CodeGen/Mips/micromips-jal.ll | 10 +-
 .../Mips/micromips-load-effective-address.ll | 8 +-
 llvm/test/CodeGen/Mips/micromips-or16.ll | 4 +-
 .../CodeGen/Mips/micromips-rdhwr-directives.ll | 2 +-
 llvm/test/CodeGen/Mips/micromips-shift.ll | 8 +-
 llvm/test/CodeGen/Mips/micromips-sw-lw-16.ll | 12 +-
 llvm/test/CodeGen/Mips/micromips-xor16.ll | 4 +-
 llvm/test/CodeGen/Mips/mips16_32_8.ll | 10 +-
 llvm/test/CodeGen/Mips/mips16_fpret.ll | 16 +-
 llvm/test/CodeGen/Mips/mips16ex.ll | 12 +-
 llvm/test/CodeGen/Mips/mips16fpe.ll | 112 ++---
 llvm/test/CodeGen/Mips/mips64-f128-call.ll | 4 +-
 llvm/test/CodeGen/Mips/mips64-f128.ll | 72 +--
 llvm/test/CodeGen/Mips/mips64directive.ll | 2 +-
 llvm/test/CodeGen/Mips/mips64fpldst.ll | 8 +-
 llvm/test/CodeGen/Mips/mips64instrs.ll | 8 +-
 llvm/test/CodeGen/Mips/mips64intldst.ll | 22 +-
 llvm/test/CodeGen/Mips/mips64sinttofpsf.ll | 2 +-
 llvm/test/CodeGen/Mips/mipslopat.ll | 4 +-
 llvm/test/CodeGen/Mips/misha.ll | 8 +-
 llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll | 4 +-
 llvm/test/CodeGen/Mips/msa/2r.ll | 24 +-
 llvm/test/CodeGen/Mips/msa/2r_vector_scalar.ll | 8 +-
 llvm/test/CodeGen/Mips/msa/2rf.ll | 32 +-
 llvm/test/CodeGen/Mips/msa/2rf_exup.ll | 8 +-
 llvm/test/CodeGen/Mips/msa/2rf_float_int.ll | 8 +-
 llvm/test/CodeGen/Mips/msa/2rf_fq.ll | 8 +-
 llvm/test/CodeGen/Mips/msa/2rf_int_float.ll | 20 +-
 llvm/test/CodeGen/Mips/msa/2rf_tq.ll | 8 +-
 llvm/test/CodeGen/Mips/msa/3r-a.ll | 192 ++----
 llvm/test/CodeGen/Mips/msa/3r-b.ll | 96 ++--
 llvm/test/CodeGen/Mips/msa/3r-c.ll | 80 ++--
 llvm/test/CodeGen/Mips/msa/3r-d.ll | 88 ++--
 llvm/test/CodeGen/Mips/msa/3r-i.ll | 64 +--
 llvm/test/CodeGen/Mips/msa/3r-m.ll | 160 +++----
 llvm/test/CodeGen/Mips/msa/3r-p.ll | 32 +-
 llvm/test/CodeGen/Mips/msa/3r-s.ll | 248 +++++-----
 llvm/test/CodeGen/Mips/msa/3r-v.ll | 24 +-
 llvm/test/CodeGen/Mips/msa/3r_4r.ll | 48 +-
 llvm/test/CodeGen/Mips/msa/3r_4r_widen.ll | 72 +--
 llvm/test/CodeGen/Mips/msa/3r_splat.ll | 8 +-
 llvm/test/CodeGen/Mips/msa/3rf.ll | 96 ++--
 llvm/test/CodeGen/Mips/msa/3rf_4rf.ll | 24 +-
 llvm/test/CodeGen/Mips/msa/3rf_4rf_q.ll | 48 +-
 llvm/test/CodeGen/Mips/msa/3rf_exdo.ll | 8 +-
 llvm/test/CodeGen/Mips/msa/3rf_float_int.ll | 8 +-
 llvm/test/CodeGen/Mips/msa/3rf_int_float.ll | 176 +++----
 llvm/test/CodeGen/Mips/msa/3rf_q.ll | 16 +-
 llvm/test/CodeGen/Mips/msa/arithmetic.ll | 176 +++----
 llvm/test/CodeGen/Mips/msa/arithmetic_float.ll | 88 ++--
 llvm/test/CodeGen/Mips/msa/basic_operations.ll | 72 +--
 .../CodeGen/Mips/msa/basic_operations_float.ll | 34 +-
 llvm/test/CodeGen/Mips/msa/bit.ll | 56 +--
 llvm/test/CodeGen/Mips/msa/bitcast.ll | 98 ++--
 llvm/test/CodeGen/Mips/msa/bitwise.ll | 310 ++++++------
 llvm/test/CodeGen/Mips/msa/compare.ll | 408 ++++++++--------
 llvm/test/CodeGen/Mips/msa/compare_float.ll | 156 +++---
 llvm/test/CodeGen/Mips/msa/elm_copy.ll | 16 +-
 llvm/test/CodeGen/Mips/msa/elm_insv.ll | 32 +-
 llvm/test/CodeGen/Mips/msa/elm_move.ll | 2 +-
 llvm/test/CodeGen/Mips/msa/elm_shift_slide.ll | 24 +-
 llvm/test/CodeGen/Mips/msa/frameindex.ll | 46 +-
 llvm/test/CodeGen/Mips/msa/i10.ll | 8 +-
 llvm/test/CodeGen/Mips/msa/i5-a.ll | 8 +-
 llvm/test/CodeGen/Mips/msa/i5-b.ll | 56 +--
 llvm/test/CodeGen/Mips/msa/i5-c.ll | 40 +-
 llvm/test/CodeGen/Mips/msa/i5-m.ll | 32 +-
 llvm/test/CodeGen/Mips/msa/i5-s.ll | 8 +-
 llvm/test/CodeGen/Mips/msa/i5_ld_st.ll | 8 +-
 llvm/test/CodeGen/Mips/msa/i8.ll | 26 +-
 llvm/test/CodeGen/Mips/msa/inline-asm.ll | 4 +-
 .../CodeGen/Mips/msa/llvm-stress-s1704963983.ll | 22 +-
 .../CodeGen/Mips/msa/llvm-stress-s1935737938.ll | 22 +-
 .../CodeGen/Mips/msa/llvm-stress-s2704903805.ll | 22 +-
 .../CodeGen/Mips/msa/llvm-stress-s3861334421.ll | 22 +-
 .../CodeGen/Mips/msa/llvm-stress-s3926023935.ll | 22 +-
 .../CodeGen/Mips/msa/llvm-stress-s3997499501.ll | 22 +-
 .../CodeGen/Mips/msa/llvm-stress-s525530439.ll | 22 +-
 .../CodeGen/Mips/msa/llvm-stress-s997348632.ll | 22 +-
 llvm/test/CodeGen/Mips/msa/shuffle.ll | 166 +++----
 llvm/test/CodeGen/Mips/msa/spill.ll | 272 +++++------
 llvm/test/CodeGen/Mips/msa/vec.ll | 184 +++----
 llvm/test/CodeGen/Mips/msa/vecs10.ll | 4 +-
 llvm/test/CodeGen/Mips/mul.ll | 4 +-
 llvm/test/CodeGen/Mips/mulll.ll | 4 +-
 llvm/test/CodeGen/Mips/mulull.ll | 4 +-
 llvm/test/CodeGen/Mips/nacl-align.ll | 2 +-
 llvm/test/CodeGen/Mips/nacl-branch-delay.ll | 2 +-
 llvm/test/CodeGen/Mips/nacl-reserved-regs.ll | 32 +-
 llvm/test/CodeGen/Mips/neg1.ll | 2 +-
 llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll | 8 +-
 llvm/test/CodeGen/Mips/nomips16.ll | 4 +-
 llvm/test/CodeGen/Mips/not1.ll | 2 +-
 llvm/test/CodeGen/Mips/o32_cc_byval.ll | 22 +-
 llvm/test/CodeGen/Mips/o32_cc_vararg.ll | 20 +-
 llvm/test/CodeGen/Mips/optimize-pic-o0.ll | 6 +-
 llvm/test/CodeGen/Mips/or1.ll | 4 +-
 llvm/test/CodeGen/Mips/prevent-hoisting.ll | 12 +-
 llvm/test/CodeGen/Mips/private.ll | 2 +-
 llvm/test/CodeGen/Mips/ra-allocatable.ll | 242 +++++-----
 llvm/test/CodeGen/Mips/rdhwr-directives.ll | 2 +-
 llvm/test/CodeGen/Mips/rem.ll | 4 +-
 llvm/test/CodeGen/Mips/remu.ll | 4 +-
 llvm/test/CodeGen/Mips/s2rem.ll | 4 +-
 llvm/test/CodeGen/Mips/sb1.ll | 6 +-
 llvm/test/CodeGen/Mips/sel1c.ll | 4 +-
 llvm/test/CodeGen/Mips/sel2c.ll | 4 +-
 llvm/test/CodeGen/Mips/selTBteqzCmpi.ll | 6 +-
 llvm/test/CodeGen/Mips/selTBtnezCmpi.ll | 6 +-
 llvm/test/CodeGen/Mips/selTBtnezSlti.ll | 6 +-
 llvm/test/CodeGen/Mips/select.ll | 12 +-
 llvm/test/CodeGen/Mips/seleq.ll | 32 +-
 llvm/test/CodeGen/Mips/seleqk.ll | 24 +-
 llvm/test/CodeGen/Mips/selgek.ll | 24 +-
 llvm/test/CodeGen/Mips/selgt.ll | 34 +-
 llvm/test/CodeGen/Mips/selle.ll | 32 +-
 llvm/test/CodeGen/Mips/selltk.ll | 24 +-
 llvm/test/CodeGen/Mips/selne.ll | 32 +-
 llvm/test/CodeGen/Mips/selnek.ll | 32 +-
 llvm/test/CodeGen/Mips/selpat.ll | 136 +++---
 llvm/test/CodeGen/Mips/seteq.ll | 4 +-
 llvm/test/CodeGen/Mips/seteqz.ll | 4 +-
 llvm/test/CodeGen/Mips/setge.ll | 6 +-
 llvm/test/CodeGen/Mips/setgek.ll | 2 +-
 llvm/test/CodeGen/Mips/setle.ll | 6 +-
 llvm/test/CodeGen/Mips/setlt.ll | 4 +-
 llvm/test/CodeGen/Mips/setltk.ll | 2 +-
 llvm/test/CodeGen/Mips/setne.ll | 4 +-
 llvm/test/CodeGen/Mips/setuge.ll | 6 +-
 llvm/test/CodeGen/Mips/setugt.ll | 4 +-
 llvm/test/CodeGen/Mips/setule.ll | 6 +-
 llvm/test/CodeGen/Mips/setult.ll | 4 +-
 llvm/test/CodeGen/Mips/setultk.ll | 2 +-
 llvm/test/CodeGen/Mips/sh1.ll | 6 +-
 llvm/test/CodeGen/Mips/simplebr.ll | 2 +-
 llvm/test/CodeGen/Mips/sitofp-selectcc-opt.ll | 2 +-
 llvm/test/CodeGen/Mips/sll1.ll | 4 +-
 llvm/test/CodeGen/Mips/sll2.ll | 6 +-
 llvm/test/CodeGen/Mips/small-section-reserve-gp.ll | 2 +-
 llvm/test/CodeGen/Mips/spill-copy-acreg.ll | 6 +-
 llvm/test/CodeGen/Mips/sra1.ll | 2 +-
 llvm/test/CodeGen/Mips/sra2.ll | 4 +-
 llvm/test/CodeGen/Mips/srl1.ll | 4 +-
 llvm/test/CodeGen/Mips/srl2.ll | 6 +-
 llvm/test/CodeGen/Mips/stackcoloring.ll | 4 +-
 llvm/test/CodeGen/Mips/stchar.ll | 28 +-
 llvm/test/CodeGen/Mips/stldst.ll | 16 +-
 llvm/test/CodeGen/Mips/sub1.ll | 2 +-
 llvm/test/CodeGen/Mips/sub2.ll | 4 +-
 llvm/test/CodeGen/Mips/tailcall.ll | 20 +-
 llvm/test/CodeGen/Mips/tls.ll | 6 +-
 llvm/test/CodeGen/Mips/tls16.ll | 2 +-
 llvm/test/CodeGen/Mips/tls16_2.ll | 2 +-
 llvm/test/CodeGen/Mips/uitofp.ll | 2 +-
 llvm/test/CodeGen/Mips/vector-load-store.ll | 4 +-
 llvm/test/CodeGen/Mips/vector-setcc.ll | 4 +-
 llvm/test/CodeGen/Mips/xor1.ll | 4 +-
 llvm/test/CodeGen/Mips/zeroreg.ll | 8 +-
 268 files changed, 3546 insertions(+), 3546 deletions(-)

(limited to 'llvm/test/CodeGen/Mips')

diff --git a/llvm/test/CodeGen/Mips/2008-07-15-SmallSection.ll b/llvm/test/CodeGen/Mips/2008-07-15-SmallSection.ll
index cbc3ecf5edc..3c6f3809064 100644
--- a/llvm/test/CodeGen/Mips/2008-07-15-SmallSection.ll
+++ b/llvm/test/CodeGen/Mips/2008-07-15-SmallSection.ll
@@ -27,8 +27,8 @@ entry:
 define i32 @A1() nounwind {
 entry:
- load i32* getelementptr (%struct.anon* @foo, i32 0, i32 0), align 8
- load i32* getelementptr (%struct.anon* @foo, i32 0, i32 1), align 4
+ load i32, i32* getelementptr (%struct.anon* @foo, i32 0, i32 0), align 8
+ load i32, i32* getelementptr (%struct.anon* @foo, i32 0, i32 1), align 4
 add i32 %1, %0
 ret i32 %2
 }
diff --git a/llvm/test/CodeGen/Mips/2008-08-01-AsmInline.ll b/llvm/test/CodeGen/Mips/2008-08-01-AsmInline.ll
index ae06ffeb63d..5edba029502 100644
--- a/llvm/test/CodeGen/Mips/2008-08-01-AsmInline.ll
+++ b/llvm/test/CodeGen/Mips/2008-08-01-AsmInline.ll
@@ -26,8 +26,8 @@ entry:
 define void @foo0() nounwind {
 entry:
 ; CHECK: addu
- %0 = load i32* @gi1, align 4
- %1 = load i32* @gi0, align 4
+ %0 = load i32, i32* @gi1, align 4
+ %1 = load i32, i32* @gi0, align 4
 %2 = tail call i32 asm "addu $0, $1, $2", "=r,r,r"(i32 %0, i32 %1) nounwind
 store i32 %2, i32* @gi2, align 4
 ret void
@@ -36,7 +36,7 @@ entry:
 define void @foo2() nounwind {
 entry:
 ; CHECK: neg.s
- %0 = load float* @gf1, align 4
+ %0 = load float, float* @gf1, align 4
 %1 = tail call float asm "neg.s $0, $1", "=f,f"(float %0) nounwind
 store float %1, float* @gf0, align 4
 ret void
@@ -45,7 +45,7 @@ entry:
 define void @foo3() nounwind {
 entry:
 ; CHECK: neg.d
- %0 = load double* @gd1, align 8
+ %0 = load double, double* @gd1, align 8
 %1 = tail call double asm "neg.d $0, $1", "=f,f"(double %0) nounwind
 store double %1, double* @gd0, align 8
 ret void
@@ -64,7 +64,7 @@ define void @foo4() {
 entry:
 %0 = tail call i32 asm sideeffect "ulh $0,16($$sp)\0A\09", "=r,~{$2}"()
 store i32 %0, i32* @gi2, align 4
- %1 = load float* @gf0, align 4
+ %1 = load float, float* @gf0, align 4
 %2 = tail call double asm sideeffect "cvt.d.s $0, $1\0A\09", "=f,f,~{$f0}"(float %1)
 store double %2, double* @gd0, align 8
 ret void
diff --git a/llvm/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll b/llvm/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll
index c41d5213c17..592e574a362 100644
--- a/llvm/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll
+++ b/llvm/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll
@@ -7,12 +7,12 @@ entry:
 %retval = alloca double ; <double*> [#uses=3]
 store double 0.000000e+00, double* %retval
 %r = alloca double ; <double*> [#uses=1]
- load double* %r ; <double>:0 [#uses=1]
+ load double, double* %r ; <double>:0 [#uses=1]
 store double %0, double* %retval
 br label %return

return: ; preds = %entry
- load double* %retval ; <double>:1 [#uses=1]
+ load double, double* %retval ; <double>:1 [#uses=1]
 ret double %1
}
diff --git a/llvm/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll b/llvm/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll
index 6e447477289..eaf6ddc911e 100644
--- a/llvm/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll
+++ b/llvm/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll
@@ -8,7 +8,7 @@ entry:
 continue.outer: ; preds = %case4, %entry
 %p.0.ph.rec = phi i32 [ 0, %entry ], [ %indvar.next, %case4 ] ; <i32> [#uses=2]
 %p.0.ph = getelementptr i8, i8* %0, i32 %p.0.ph.rec ; <i8*> [#uses=1]
- %1 = load i8* %p.0.ph ; <i8> [#uses=1]
+ %1 = load i8, i8* %p.0.ph ; <i8> [#uses=1]
 switch i8 %1, label %infloop [
 i8 0, label %return.split
 i8 76, label %case4
diff --git a/llvm/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll b/llvm/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll
index 32584e43c74..9cebfcd5012 100644
--- a/llvm/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll
+++ b/llvm/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll
@@ -13,16 +13,16 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f
 define double @_erand48_r(%struct._reent* %r, i16* %xseed) nounwind {
 entry:
 tail call void @__dorand48( %struct._reent* %r, i16* %xseed ) nounwind
- load i16* %xseed, align 2 ; <i16>:0 [#uses=1]
+ load i16, i16* %xseed, align 2 ; <i16>:0 [#uses=1]
 uitofp i16 %0 to double ; <double>:1 [#uses=1]
 tail call double @ldexp( double %1, i32 -48 ) nounwind ; <double>:2 [#uses=1]
 getelementptr i16, i16* %xseed, i32 1 ; <i16*>:3 [#uses=1]
- load i16* %3, align 2 ; <i16>:4 [#uses=1]
+ load i16, i16* %3, align 2 ; <i16>:4 [#uses=1]
 uitofp i16 %4 to double ; <double>:5 [#uses=1]
 tail call double @ldexp( double %5, i32 -32 ) nounwind ; <double>:6 [#uses=1]
 fadd double %2, %6 ; <double>:7 [#uses=1]
 getelementptr i16, i16* %xseed, i32 2 ; <i16*>:8 [#uses=1]
- load i16* %8, align 2 ; <i16>:9 [#uses=1]
+ load i16, i16* %8, align 2 ; <i16>:9 [#uses=1]
 uitofp i16 %9 to double ; <double>:10 [#uses=1]
 tail call double @ldexp( double %10, i32 -16 ) nounwind ; <double>:11 [#uses=1]
 fadd double %7, %11 ; <double>:12 [#uses=1]
@@ -35,18 +35,18 @@ declare double @ldexp(double, i32)
 define double @erand48(i16* %xseed) nounwind {
 entry:
- load %struct._reent** @_impure_ptr, align 4 ; <%struct._reent*>:0 [#uses=1]
+ load %struct._reent*, %struct._reent** @_impure_ptr, align 4 ; <%struct._reent*>:0 [#uses=1]
 tail call void @__dorand48( %struct._reent* %0, i16* %xseed ) nounwind
- load i16* %xseed, align 2 ; <i16>:1 [#uses=1]
+ load i16, i16* %xseed, align 2 ; <i16>:1 [#uses=1]
 uitofp i16 %1 to double ; <double>:2 [#uses=1]
 tail call double @ldexp( double %2, i32 -48 ) nounwind ; <double>:3 [#uses=1]
 getelementptr i16, i16* %xseed, i32 1 ; <i16*>:4 [#uses=1]
- load i16* %4, align 2 ; <i16>:5 [#uses=1]
+ load i16, i16* %4, align 2 ; <i16>:5 [#uses=1]
 uitofp i16 %5 to double ; <double>:6 [#uses=1]
 tail call double @ldexp( double %6, i32 -32 ) nounwind ; <double>:7 [#uses=1]
 fadd double %3, %7 ; <double>:8 [#uses=1]
 getelementptr i16, i16* %xseed, i32 2 ; <i16*>:9 [#uses=1]
- load i16* %9, align 2 ; <i16>:10 [#uses=1]
+ load i16, i16* %9, align 2 ; <i16>:10 [#uses=1]
 uitofp i16 %10 to double ; <double>:11 [#uses=1]
 tail call double @ldexp( double %11, i32 -16 ) nounwind ; <double>:12 [#uses=1]
 fadd double %8, %12 ; <double>:13 [#uses=1]
diff --git a/llvm/test/CodeGen/Mips/2010-07-20-Switch.ll b/llvm/test/CodeGen/Mips/2010-07-20-Switch.ll
index 5c840775cf9..fd0254e9f5e 100644
--- a/llvm/test/CodeGen/Mips/2010-07-20-Switch.ll
+++ b/llvm/test/CodeGen/Mips/2010-07-20-Switch.ll
@@ -15,7 +15,7 @@ define i32 @main() nounwind readnone {
 entry:
 %x = alloca i32, align 4 ; <i32*> [#uses=2]
 store volatile i32 2, i32* %x, align 4
- %0 = load volatile i32* %x, align 4 ; <i32> [#uses=1]
+ %0 = load volatile i32, i32* %x, align 4 ; <i32> [#uses=1]
 ; STATIC-O32: sll $[[R0:[0-9]+]], ${{[0-9]+}}, 2
 ; STATIC-O32: lui $[[R1:[0-9]+]], %hi($JTI0_0)
 ; STATIC-O32: addu $[[R2:[0-9]+]], $[[R0]], $[[R1]]
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/br1.ll b/llvm/test/CodeGen/Mips/Fast-ISel/br1.ll
index bc508c86dd0..11842ddc418 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/br1.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/br1.ll
@@ -10,7 +10,7 @@
 ; Function Attrs: nounwind
 define void @br() #0 {
 entry:
- %0 = load i32* @b, align 4
+ %0 = load i32, i32* @b, align 4
 %tobool = icmp eq i32 %0, 0
 br i1 %tobool, label %if.end, label %if.then
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/callabi.ll b/llvm/test/CodeGen/Mips/Fast-ISel/callabi.ll
index de5f758549d..f80cb82ca4d 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/callabi.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/callabi.ll
@@ -84,13 +84,13 @@ entry:
 ; CHECK-LABEL: cxiiiiconv
 ; mips32r2-LABEL: cxiiiiconv
 ; mips32-LABEL: cxiiiiconv
- %0 = load i8* @c1, align 1
+ %0 = load i8, i8* @c1, align 1
 %conv = sext i8 %0 to i32
- %1 = load i8* @uc1, align 1
+ %1 = load i8, i8* @uc1, align 1
 %conv1 = zext i8 %1 to i32
- %2 = load i16* @s1, align 2
+ %2 = load i16, i16* @s1, align 2
 %conv2 = sext i16 %2 to i32
- %3 = load i16* @us1, align 2
+ %3 = load i16, i16* @us1, align 2
 %conv3 = zext i16 %3 to i32
 call void @xiiii(i32 %conv, i32 %conv1, i32 %conv2, i32 %conv3)
 ; CHECK: addu $[[REG_GP:[0-9]+]], ${{[0-9]+}}, ${{[0-9+]}}
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll
index 4cbfe00a4e3..72de888b26e 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll
@@ -12,8 +12,8 @@
 ; Function Attrs: nounwind
 define void @feq1() {
 entry:
- %0 = load float* @f1, align 4
- %1 = load float* @f2, align 4
+ %0 = load float, float* @f1, align 4
+ %1 = load float, float* @f2, align 4
 %cmp = fcmp oeq float %0, %1
 ; CHECK-LABEL: feq1:
 ; CHECK-DAG: lw $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}})
@@ -33,8 +33,8 @@ entry:
 ; Function Attrs: nounwind
 define void @fne1() {
 entry:
- %0 = load float* @f1, align 4
- %1 = load float* @f2, align 4
+ %0 = load float, float* @f1, align 4
+ %1 = load float, float* @f2, align 4
 %cmp = fcmp une float %0, %1
 ; CHECK-LABEL: fne1:
 ; CHECK-DAG: lw $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}})
@@ -53,8 +53,8 @@ entry:
 ; Function Attrs: nounwind
 define void @flt1() {
 entry:
- %0 = load float* @f1, align 4
- %1 = load float* @f2, align 4
+ %0 = load float, float* @f1, align 4
+ %1 = load float, float* @f2, align 4
 %cmp = fcmp olt float %0, %1
 ; CHECK-LABEL: flt1:
 ; CHECK-DAG: lw $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}})
@@ -74,8 +74,8 @@ entry:
 ; Function Attrs: nounwind
 define void @fgt1() {
 entry:
- %0 = load float* @f1, align 4
- %1 = load float* @f2, align 4
+ %0 = load float, float* @f1, align 4
+ %1 = load float, float* @f2, align 4
 %cmp = fcmp ogt float %0, %1
 ; CHECK-LABEL: fgt1:
 ; CHECK-DAG: lw $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}})
@@ -94,8 +94,8 @@ entry:
 ; Function Attrs: nounwind
 define void @fle1() {
 entry:
- %0 = load float* @f1, align 4
- %1 = load float* @f2, align 4
+ %0 = load float, float* @f1, align 4
+ %1 = load float, float* @f2, align 4
 %cmp = fcmp ole float %0, %1
 ; CHECK-LABEL: fle1:
 ; CHECK-DAG: lw $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}})
@@ -114,8 +114,8 @@ entry:
 ; Function Attrs: nounwind
 define void @fge1() {
 entry:
- %0 = load float* @f1, align 4
- %1 = load float* @f2, align 4
+ %0 = load float, float* @f1, align 4
+ %1 = load float, float* @f2, align 4
 %cmp = fcmp oge float %0, %1
 ; CHECK-LABEL: fge1:
 ; CHECK-DAG: lw $[[REG_F2_GOT:[0-9]+]], %got(f2)(${{[0-9]+}})
@@ -134,8 +134,8 @@ entry:
 ; Function Attrs: nounwind
 define void @deq1() {
 entry:
- %0 = load double* @d1, align 8
- %1 = load double* @d2, align 8
+ %0 = load double, double* @d1, align 8
+ %1 = load double, double* @d2, align 8
 %cmp = fcmp oeq double %0, %1
 ; CHECK-LABEL: deq1:
 ; CHECK-DAG: lw $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}})
@@ -154,8 +154,8 @@ entry:
 ; Function Attrs: nounwind
 define void @dne1() {
 entry:
- %0 = load double* @d1, align 8
- %1 = load double* @d2, align 8
+ %0 = load double, double* @d1, align 8
+ %1 = load double, double* @d2, align 8
 %cmp = fcmp une double %0, %1
 ; CHECK-LABEL: dne1:
 ; CHECK-DAG: lw $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}})
@@ -174,8 +174,8 @@ entry:
 ; Function Attrs: nounwind
 define void @dlt1() {
 entry:
- %0 = load double* @d1, align 8
- %1 = load double* @d2, align 8
+ %0 = load double, double* @d1, align 8
+ %1 = load double, double* @d2, align 8
 %cmp = fcmp olt double %0, %1
 ; CHECK-LABEL: dlt1:
 ; CHECK-DAG: lw $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}})
@@ -194,8 +194,8 @@ entry:
 ; Function Attrs: nounwind
 define void @dgt1() {
 entry:
- %0 = load double* @d1, align 8
- %1 = load double* @d2, align 8
+ %0 = load double, double* @d1, align 8
+ %1 = load double, double* @d2, align 8
 %cmp = fcmp ogt double %0, %1
 ; CHECK-LABEL: dgt1:
 ; CHECK-DAG: lw $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}})
@@ -214,8 +214,8 @@ entry:
 ; Function Attrs: nounwind
 define void @dle1() {
 entry:
- %0 = load double* @d1, align 8
- %1 = load double* @d2, align 8
+ %0 = load double, double* @d1, align 8
+ %1 = load double, double* @d2, align 8
 %cmp = fcmp ole double %0, %1
 ; CHECK-LABEL: dle1:
 ; CHECK-DAG: lw $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}})
@@ -234,8 +234,8 @@ entry:
 ; Function Attrs: nounwind
 define void @dge1() {
 entry:
- %0 = load double* @d1, align 8
- %1 = load double* @d2, align 8
+ %0 = load double, double* @d1, align 8
+ %1 = load double, double* @d2, align 8
 %cmp = fcmp oge double %0, %1
 ; CHECK-LABEL: dge1:
 ; CHECK-DAG: lw $[[REG_D2_GOT:[0-9]+]], %got(d2)(${{[0-9]+}})
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fpext.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fpext.ll
index 8b2570ac546..5ac22490ff0 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/fpext.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/fpext.ll
@@ -10,7 +10,7 @@
 ; Function Attrs: nounwind
 define void @dv() #0 {
 entry:
- %0 = load float* @f, align 4
+ %0 = load float, float* @f, align 4
 %conv = fpext float %0 to double
 ; CHECK: cvt.d.s $f{{[0-9]+}}, $f{{[0-9]+}}
 store double %conv, double* @d_f, align 8
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fpintconv.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fpintconv.ll
index 5a2cd78f4c9..a94ef508153 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/fpintconv.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/fpintconv.ll
@@ -14,7 +14,7 @@
 define void @ifv() {
 entry:
 ; CHECK-LABEL: .ent ifv
- %0 = load float* @f, align 4
+ %0 = load float, float* @f, align 4
 %conv = fptosi float %0 to i32
 ; CHECK: trunc.w.s $f[[REG:[0-9]+]], $f{{[0-9]+}}
 ; CHECK: mfc1 ${{[0-9]+}}, $f[[REG]]
@@ -26,7 +26,7 @@ entry:
 define void @idv() {
 entry:
 ; CHECK-LABEL: .ent idv
- %0 = load double* @d, align 8
+ %0 = load double, double* @d, align 8
 %conv = fptosi double %0 to i32
 ; CHECK: trunc.w.d $f[[REG:[0-9]+]], $f{{[0-9]+}}
 ; CHECK: mfc1 ${{[0-9]+}}, $f[[REG]]
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/fptrunc.ll b/llvm/test/CodeGen/Mips/Fast-ISel/fptrunc.ll
index f9739e1bdbf..2eec4c3ef54 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/fptrunc.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/fptrunc.ll
@@ -10,7 +10,7 @@
 ; Function Attrs: nounwind
 define void @fv() #0 {
 entry:
- %0 = load double* @d, align 8
+ %0 = load double, double* @d, align 8
 %conv = fptrunc double %0 to float
 ; CHECK: cvt.s.d $f{{[0-9]+}}, $f{{[0-9]+}}
 store float %conv, float* @f, align 4
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/icmpa.ll b/llvm/test/CodeGen/Mips/Fast-ISel/icmpa.ll
index d2bca3aa217..670a8d5cfb4 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/icmpa.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/icmpa.ll
@@ -14,8 +14,8 @@ define void @eq() {
 entry:
 ; CHECK-LABEL: .ent eq
- %0 = load i32* @c, align 4
- %1 = load i32* @d, align 4
+ %0 = load i32, i32* @c, align 4
+ %1 = load i32, i32* @d, align 4
 %cmp = icmp eq i32 %0, %1
 %conv = zext i1 %cmp to i32
 ; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}})
@@ -35,8 +35,8 @@ entry:
 define void @ne() {
 entry:
 ; CHECK-LABEL: .ent ne
- %0 = load i32* @c, align 4
- %1 = load i32* @d, align 4
+ %0 = load i32, i32* @c, align 4
+ %1 = load i32, i32* @d, align 4
 %cmp = icmp ne i32 %0, %1
 %conv = zext i1 %cmp to i32
 ; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}})
@@ -56,8 +56,8 @@ entry:
 define void @ugt() {
 entry:
 ; CHECK-LABEL: .ent ugt
- %0 = load i32* @uc, align 4
- %1 = load i32* @ud, align 4
+ %0 = load i32, i32* @uc, align 4
+ %1 = load i32, i32* @ud, align 4
 %cmp = icmp ugt i32 %0, %1
 %conv = zext i1 %cmp to i32
 ; CHECK-DAG: lw $[[REG_UD_GOT:[0-9+]]], %got(ud)(${{[0-9]+}})
@@ -76,8 +76,8 @@ entry:
 define void @ult() {
 entry:
 ; CHECK-LABEL: .ent ult
- %0 = load i32* @uc, align 4
- %1 = load i32* @ud, align 4
+ %0 = load i32, i32* @uc, align 4
+ %1 = load i32, i32* @ud, align 4
 %cmp = icmp ult i32 %0, %1
 %conv = zext i1 %cmp to i32
 ; CHECK-DAG: lw $[[REG_UD_GOT:[0-9+]]], %got(ud)(${{[0-9]+}})
@@ -95,8 +95,8 @@ entry:
 define void @uge() {
 entry:
 ; CHECK-LABEL: .ent uge
- %0 = load i32* @uc, align 4
- %1 = load i32* @ud, align 4
+ %0 = load i32, i32* @uc, align 4
+ %1 = load i32, i32* @ud, align 4
 %cmp = icmp uge i32 %0, %1
 %conv = zext i1 %cmp to i32
 ; CHECK-DAG: lw $[[REG_UD_GOT:[0-9+]]], %got(ud)(${{[0-9]+}})
@@ -115,8 +115,8 @@ entry:
 define void @ule() {
 entry:
 ; CHECK-LABEL: .ent ule
- %0 = load i32* @uc, align 4
- %1 = load i32* @ud, align 4
+ %0 = load i32, i32* @uc, align 4
+ %1 = load i32, i32* @ud, align 4
 %cmp = icmp ule i32 %0, %1
 %conv = zext i1 %cmp to i32
 ; CHECK-DAG: lw $[[REG_UD_GOT:[0-9+]]], %got(ud)(${{[0-9]+}})
@@ -135,8 +135,8 @@ entry:
 define void @sgt() {
 entry:
 ; CHECK-LABEL: .ent sgt
- %0 = load i32* @c, align 4
- %1 = load i32* @d, align 4
+ %0 = load i32, i32* @c, align 4
+ %1 = load i32, i32* @d, align 4
 %cmp = icmp sgt i32 %0, %1
 %conv = zext i1 %cmp to i32
 ; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}})
@@ -154,8 +154,8 @@ entry:
 define void @slt() {
 entry:
 ; CHECK-LABEL: .ent slt
- %0 = load i32* @c, align 4
- %1 = load i32* @d, align 4
+ %0 = load i32, i32* @c, align 4
+ %1 = load i32, i32* @d, align 4
 %cmp = icmp slt i32 %0, %1
 %conv = zext i1 %cmp to i32
 ; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}})
@@ -173,8 +173,8 @@ entry:
 define void @sge() {
 entry:
 ; CHECK-LABEL: .ent sge
- %0 = load i32* @c, align 4
- %1 = load i32* @d, align 4
+ %0 = load i32, i32* @c, align 4
+ %1 = load i32, i32* @d, align 4
 %cmp = icmp sge i32 %0, %1
 %conv = zext i1 %cmp to i32
 store i32 %conv, i32* @b1, align 4
@@ -193,8 +193,8 @@ entry:
 define void @sle() {
 entry:
 ; CHECK-LABEL: .ent sle
- %0 = load i32* @c, align 4
- %1 = load i32* @d, align 4
+ %0 = load i32, i32* @c, align 4
+ %1 = load i32, i32* @d, align 4
 %cmp = icmp sle i32 %0, %1
 %conv = zext i1 %cmp to i32
 ; CHECK-DAG: lw $[[REG_D_GOT:[0-9+]]], %got(d)(${{[0-9]+}})
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/loadstore2.ll b/llvm/test/CodeGen/Mips/Fast-ISel/loadstore2.ll
index c649f61c136..3daf03d681c 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/loadstore2.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/loadstore2.ll
@@ -21,7 +21,7 @@ target triple = "mips--linux-gnu"
 ; Function Attrs: nounwind
 define void @cfoo() #0 {
 entry:
- %0 = load i8* @c2, align 1
+ %0 = load i8, i8* @c2, align 1
 store i8 %0, i8* @c1, align 1
 ; CHECK-LABEL: cfoo:
 ; CHECK: lbu $[[REGc:[0-9]+]], 0(${{[0-9]+}})
@@ -34,7 +34,7 @@ entry:
 ; Function Attrs: nounwind
 define void @sfoo() #0 {
 entry:
- %0 = load i16* @s2, align 2
+ %0 = load i16, i16* @s2, align 2
 store i16 %0, i16* @s1, align 2
 ; CHECK-LABEL: sfoo:
 ; CHECK: lhu $[[REGs:[0-9]+]], 0(${{[0-9]+}})
@@ -46,7 +46,7 @@ entry:
 ; Function Attrs: nounwind
 define void @ifoo() #0 {
 entry:
- %0 = load i32* @i2, align 4
+ %0 = load i32, i32* @i2, align 4
 store i32 %0, i32* @i1, align 4
 ; CHECK-LABEL: ifoo:
 ; CHECK: lw $[[REGi:[0-9]+]], 0(${{[0-9]+}})
@@ -58,7 +58,7 @@ entry:
 ; Function Attrs: nounwind
 define void @ffoo() #0 {
 entry:
- %0 = load float* @f2, align 4
+ %0 = load float, float* @f2, align 4
 store float %0, float* @f1, align 4
 ; CHECK-LABEL: ffoo:
 ; CHECK: lwc1 $f[[REGf:[0-9]+]], 0(${{[0-9]+}})
@@ -71,7 +71,7 @@ entry:
 ; Function Attrs: nounwind
 define void @dfoo() #0 {
 entry:
- %0 = load double* @d2, align 8
+ %0 = load double, double* @d2, align 8
 store double %0, double* @d1, align 8
 ; CHECK-LABEL: dfoo:
 ; CHECK: ldc1 $f[[REGd:[0-9]+]], 0(${{[0-9]+}})
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll b/llvm/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll
index ca56520fea6..acba132b28e 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll
@@ -28,11 +28,11 @@ define void @_Z3b_iv() {
 entry:
 ; CHECK-LABEL: .ent _Z3b_iv
- %0 = load i8* @b1, align 1
+ %0 = load i8, i8* @b1, align 1
 %tobool = trunc i8 %0 to i1
 %frombool = zext i1 %tobool to i8
 store i8 %frombool, i8* @b2, align 1
- %1 = load i8* @b2, align 1
+ %1 = load i8, i8* @b2, align 1
 %tobool1 = trunc i8 %1 to i1
 %conv = zext i1 %tobool1 to i32
 store i32 %conv, i32* @i, align 4
@@ -51,10 +51,10 @@ define void @_Z4uc_iv() {
 entry:
 ; CHECK-LABEL: .ent _Z4uc_iv
- %0 = load i8* @uc1, align 1
+ %0 = load i8, i8* @uc1, align 1
 %conv = zext i8 %0 to i32
 store i32 %conv, i32* @i, align 4
- %1 = load i8* @uc2, align 1
+ %1 = load i8, i8* @uc2, align 1
 %conv1 = zext i8 %1 to i32
 ; CHECK: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK: andi ${{[0-9]+}}, $[[REG1]], 255
@@ -71,10 +71,10 @@ entry:
 ; mips32r2-LABEL: .ent _Z4sc_iv
 ; mips32-LABEL: .ent _Z4sc_iv
- %0 = load i8* @sc1, align 1
+ %0 = load i8, i8* @sc1, align 1
 %conv = sext i8 %0 to i32
 store i32 %conv, i32* @i, align 4
- %1 = load i8* @sc2, align 1
+ %1 = load i8, i8* @sc2, align 1
 %conv1 = sext i8 %1 to i32
 store i32 %conv1, i32* @j, align 4
 ; mips32r2: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
@@ -91,10 +91,10 @@ entry:
 define void @_Z4us_iv() {
 entry:
 ; CHECK-LABEL: .ent _Z4us_iv
- %0 = load i16* @us1, align 2
+ %0 = load i16, i16* @us1, align 2
 %conv = zext i16 %0 to i32
 store i32 %conv, i32* @i, align 4
- %1 = load i16* @us2, align 2
+ %1 = load i16, i16* @us2, align 2
 %conv1 = zext i16 %1 to i32
 store i32 %conv1, i32* @j, align 4
 ret void
@@ -109,10 +109,10 @@ entry:
 ; mips32r2-LABEL: .ent _Z4ss_iv
 ; mips32=LABEL: .ent _Z4ss_iv
- %0 = load i16* @ss1, align 2
+ %0 = load i16, i16* @ss1, align 2
 %conv = sext i16 %0 to i32
 store i32 %conv, i32* @i, align 4
- %1 = load i16* @ss2, align 2
+ %1 = load i16, i16* @ss2, align 2
 %conv1 = sext i16 %1 to i32
 store i32 %conv1, i32* @j, align 4
 ; mips32r2: lhu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
@@ -129,7 +129,7 @@ entry:
 define void @_Z4b_ssv() {
 entry:
 ; CHECK-LABEL: .ent _Z4b_ssv
- %0 = load i8* @b2, align 1
+ %0 = load i8, i8* @b2, align 1
 %tobool = trunc i8 %0 to i1
 %conv = zext i1 %tobool to i16
 store i16 %conv, i16* @ssi, align 2
@@ -143,10 +143,10 @@ entry:
 define void @_Z5uc_ssv() {
 entry:
 ; CHECK-LABEL: .ent _Z5uc_ssv
- %0 = load i8* @uc1, align 1
+ %0 = load i8, i8* @uc1, align 1
 %conv = zext i8 %0 to i16
 store i16 %conv, i16* @ssi, align 2
- %1 = load i8* @uc2, align 1
+ %1 = load i8, i8* @uc2, align 1
 %conv1 = zext i8 %1 to i16
 ; CHECK: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
 ; CHECK: andi ${{[0-9]+}}, $[[REG1]], 255
@@ -161,10 +161,10 @@ define void @_Z5sc_ssv() {
 entry:
 ; mips32r2-LABEL: .ent _Z5sc_ssv
 ; mips32-LABEL: .ent _Z5sc_ssv
- %0 = load i8* @sc1, align 1
+ %0 = load i8, i8* @sc1, align 1
 %conv = sext i8 %0 to i16
 store i16 %conv, i16* @ssi, align 2
- %1 = load i8* @sc2, align 1
+ %1 = load i8, i8* @sc2, align 1
 %conv1 = sext i8 %1 to i16
 store i16 %conv1, i16* @ssj, align 2
 ; mips32r2: lbu $[[REG1:[0-9]+]], 0(${{[0-9]+}})
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/overflt.ll b/llvm/test/CodeGen/Mips/Fast-ISel/overflt.ll
index 3792510633d..db0136244fb 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/overflt.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/overflt.ll
@@ -12,7 +12,7 @@ define void @foo() {
 entry:
 ; CHECK-LABEL: .ent foo
- %0 = load float** @y, align 4
+ %0 = load float*, float** @y, align 4
 %arrayidx = getelementptr inbounds float, float* %0, i32 64000
 store float 5.500000e+00, float* %arrayidx, align 4
 ; CHECK: lui $[[REG_FPCONST_INT:[0-9]+]], 16560
@@ -31,9 +31,9 @@ entry:
 define void @goo() {
 entry:
 ; CHECK-LABEL: .ent goo
- %0 = load float** @y, align 4
+ %0 = load float*, float** @y, align 4
 %arrayidx = getelementptr inbounds float, float* %0, i32 64000
- %1 = load float* %arrayidx, align 4
+ %1 = load float, float* %arrayidx, align 4
 store float %1, float* @result, align 4
 ; CHECK-DAG: lw $[[REG_RESULT:[0-9]+]], %got(result)(${{[0-9]+}})
 ; CHECK-DAG: lw $[[REG_Y_GOT:[0-9]+]], %got(y)(${{[0-9]+}})
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/retabi.ll b/llvm/test/CodeGen/Mips/Fast-ISel/retabi.ll
index 109a7f6a839..ce0ca347ada 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/retabi.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/retabi.ll
@@ -11,7 +11,7 @@ define i32 @reti() {
 entry:
 ; CHECK-LABEL: reti:
- %0 = load i32* @i, align 4
+ %0 = load i32, i32* @i, align 4
 ret i32 %0
 ; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -25,7 +25,7 @@ entry:
 define signext i16 @rets() {
 entry:
 ; CHECK-LABEL: rets:
- %0 = load i16* @s, align 2
+ %0 = load i16, i16* @s, align 2
 ret i16 %0
 ; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -40,7 +40,7 @@ entry:
 define signext i8 @retc() {
 entry:
 ; CHECK-LABEL: retc:
- %0 = load i8* @c, align 1
+ %0 = load i8, i8* @c, align 1
 ret i8 %0
 ; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -55,7 +55,7 @@ entry:
 define float @retf() {
 entry:
 ; CHECK-LABEL: retf:
- %0 = load float* @f, align 4
+ %0 = load float, float* @f, align 4
 ret float %0
 ; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
@@ -69,7 +69,7 @@ entry:
 define double @retd() {
 entry:
 ; CHECK-LABEL: retd:
- %0 = load double* @d, align 8
+ %0 = load double, double* @d, align 8
 ret double %0
 ; CHECK: lui $[[REG_GPa:[0-9]+]], %hi(_gp_disp)
 ; CHECK: addiu $[[REG_GPb:[0-9]+]], $[[REG_GPa]], %lo(_gp_disp)
diff --git a/llvm/test/CodeGen/Mips/Fast-ISel/shift.ll b/llvm/test/CodeGen/Mips/Fast-ISel/shift.ll
index 18fd5ac32d2..df1c82700d5 100644
--- a/llvm/test/CodeGen/Mips/Fast-ISel/shift.ll
+++ b/llvm/test/CodeGen/Mips/Fast-ISel/shift.ll
@@ -9,7 +9,7 @@ define i32 @main() nounwind uwtable {
 entry:
 %foo = alloca %struct.s, align 4
 %0 = bitcast %struct.s* %foo to i32*
- %bf.load = load i32* %0, align 4
+ %bf.load = load i32, i32* %0, align 4
 %bf.lshr = lshr i32 %bf.load, 2
 %cmp = icmp ne i32 %bf.lshr, 2
 br i1 %cmp, label %if.then, label %if.end
diff --git a/llvm/test/CodeGen/Mips/addi.ll b/llvm/test/CodeGen/Mips/addi.ll
index 01d409e521d..b6af2ee4568 100644
--- a/llvm/test/CodeGen/Mips/addi.ll
+++ b/llvm/test/CodeGen/Mips/addi.ll
@@ -8,16 +8,16 @@ define void @foo() nounwind {
 entry:
- %0 = load i32* @i, align 4
+ %0 = load i32, i32* @i, align 4
 %add = add nsw i32 %0, 5
 store i32 %add, i32* @i, align 4
- %1 = load i32* @j, align 4
+ %1 = load i32, i32* @j, align 4
 %sub = sub nsw i32 %1, 5
 store i32 %sub, i32* @j, align 4
- %2 = load i32* @k, align 4
+ %2 = load i32, i32* @k, align 4
 %add1 = add nsw i32 %2, 10000
 store i32 %add1, i32* @k, align 4
- %3 = load i32* @l, align 4
+ %3 = load i32, i32* @l, align 4
 %sub2 = sub nsw i32 %3, 10000
 store i32 %sub2, i32* @l, align 4
 ; 16: addiu ${{[0-9]+}}, 5 # 16 bit inst
diff --git a/llvm/test/CodeGen/Mips/addressing-mode.ll b/llvm/test/CodeGen/Mips/addressing-mode.ll
index e4e3a278d64..81e062062ec 100644
--- a/llvm/test/CodeGen/Mips/addressing-mode.ll
+++ b/llvm/test/CodeGen/Mips/addressing-mode.ll
@@ -21,9 +21,9 @@ for.body3:
 %s.120 = phi i32 [ %s.022, %for.cond1.preheader ], [ %add7, %for.body3 ]
 %j.019 = phi i32 [ 0, %for.cond1.preheader ], [ %add8, %for.body3 ]
 %arrayidx4 = getelementptr inbounds [256 x i32], [256 x i32]* %a, i32 %i.021, i32 %j.019
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
 %arrayidx6 = getelementptr inbounds [256 x i32], [256 x i32]* %b, i32 %i.021, i32 %j.019
- %1 = load i32* %arrayidx6, align 4
+ %1 = load i32, i32* %arrayidx6, align 4
 %add = add i32 %0, %s.120
 %add7 = add i32 %add, %1
 %add8 = add nsw i32 %j.019, %m
diff --git a/llvm/test/CodeGen/Mips/align16.ll b/llvm/test/CodeGen/Mips/align16.ll
index 580a89c5372..f385adfaa04 100644
--- a/llvm/test/CodeGen/Mips/align16.ll
+++ b/llvm/test/CodeGen/Mips/align16.ll
@@ -15,10 +15,10 @@ entry:
 %x = alloca i32, align 8
 %zz = alloca i32, align 4
 %z = alloca i32, align 4
- %0 = load i32* @i, align 4
+ %0 = load i32, i32* @i, align 4
 %arrayidx = getelementptr inbounds [512 x i32], [512 x i32]* %y, i32 0, i32 10
 store i32 %0, i32* %arrayidx, align 4
- %1 = load i32* @i, align 4
+ %1 = load i32, i32* @i, align 4
 store i32 %1, i32* %x, align 8
 call void @p(i32* %x)
 %arrayidx1 = getelementptr inbounds [512 x i32], [512 x i32]* %y, i32 0, i32 10
diff --git a/llvm/test/CodeGen/Mips/alloca.ll b/llvm/test/CodeGen/Mips/alloca.ll
index 0700ea3ab0f..9f2cef1a3a7 100644
--- a/llvm/test/CodeGen/Mips/alloca.ll
+++ b/llvm/test/CodeGen/Mips/alloca.ll
@@ -59,23 +59,23 @@ if.end: ; preds = %if.else, %if.then
 ; CHECK: lw $25, %call16(printf)
 %.pre-phi = phi i32* [ %2, %if.else ], [ %.pre, %if.then ]
- %tmp7 = load i32* %0, align 4
+ %tmp7 = load i32, i32* %0, align 4
 %arrayidx9 = getelementptr inbounds i8, i8* %tmp1, i32 4
 %3 = bitcast i8* %arrayidx9 to i32*
- %tmp10 = load i32* %3, align 4
+ %tmp10 = load i32, i32* %3, align 4
 %arrayidx12 = getelementptr inbounds i8, i8* %tmp1, i32 8
 %4 = bitcast i8* %arrayidx12 to i32*
- %tmp13 = load i32* %4, align 4
- %tmp16 = load i32* %.pre-phi, align 4
+ %tmp13 = load i32, i32* %4, align 4
+ %tmp16 = load i32, i32* %.pre-phi, align 4
 %arrayidx18 = getelementptr inbounds i8, i8* %tmp1, i32 16
 %5 = bitcast i8* %arrayidx18 to i32*
- %tmp19 = load i32* %5, align 4
+ %tmp19 = load i32, i32* %5, align 4
 %arrayidx21 = getelementptr inbounds i8, i8* %tmp1, i32 20
 %6 = bitcast i8* %arrayidx21 to i32*
- %tmp22 = load i32* %6, align 4
+ %tmp22 = load i32, i32* %6, align 4
 %arrayidx24 = getelementptr inbounds i8, i8* %tmp1, i32 24
 %7 = bitcast i8* %arrayidx24 to i32*
- %tmp25 = load i32* %7, align 4
+ %tmp25 = load i32, i32* %7, align 4
 %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str, i32 0, i32 0), i32 %tmp7, i32 %tmp10, i32 %tmp13, i32 %tmp16, i32 %tmp19, i32 %tmp22, i32 %tmp25) nounwind
 ret i32 0
 }
diff --git a/llvm/test/CodeGen/Mips/alloca16.ll b/llvm/test/CodeGen/Mips/alloca16.ll
index 67ec2f9c83e..be8cc740310 100644
--- a/llvm/test/CodeGen/Mips/alloca16.ll
+++ b/llvm/test/CodeGen/Mips/alloca16.ll
@@ -12,7 +12,7 @@ define void @temp(i32 %foo) nounwind {
 entry:
 %foo.addr = alloca i32, align 4
 store i32 %foo, i32* %foo.addr, align 4
- %0 = load i32* %foo.addr, align 4
+ %0 = load i32, i32* %foo.addr, align 4
 store i32 %0, i32* @t, align 4
 ret void
 }
@@ -28,46 +28,46 @@ entry:
 %sssi = alloca i32, align 4
 %ip = alloca i32*, align 4
 %sssj = alloca i32, align 4
- %0 = load i32* @iiii, align 4
+ %0 = load i32, i32* @iiii, align 4
 store i32 %0, i32* %sssi, align 4
- %1 = load i32* @kkkk, align 4
+ %1 = load i32, i32* @kkkk, align 4
 %mul = mul nsw i32 %1, 100
 %2 = alloca i8, i32 %mul
 %3 = bitcast i8* %2 to i32*
 store i32* %3, i32** %ip, align 4
- %4 = load i32* @jjjj, align 4
+ %4 = load i32, i32* @jjjj, align 4
 store i32 %4, i32* %sssj, align 4
- %5 = load i32* @jjjj, align 4
- %6 = load i32* @iiii, align 4
- %7 = load i32** %ip, align 4
+ %5 = load i32, i32* @jjjj, align 4
+ %6 = load i32, i32* @iiii, align 4
+ %7 = load i32*, i32** %ip, align 4
 %arrayidx = getelementptr inbounds i32, i32* %7, i32 %6
 store i32 %5, i32* %arrayidx, align 4
- %8 = load i32* @kkkk, align 4
- %9 = load i32* @jjjj, align 4
- %10 = load i32** %ip, align 4
+ %8 = load i32, i32* @kkkk, align 4
+ %9 = load i32, i32* @jjjj, align 4
+ %10 = load i32*, i32** %ip, align 4
 %arrayidx1 = getelementptr inbounds i32, i32* %10, i32 %9
 store i32 %8, i32* %arrayidx1, align 4
- %11 = load i32* @iiii, align 4
- %12 = load i32* @kkkk, align 4
- %13 = load i32** %ip, align 4
+ %11 = load i32, i32* @iiii, align 4
+ %12 = load i32, i32* @kkkk, align 4
+ %13 = load i32*, i32** %ip, align 4
 %arrayidx2 = getelementptr inbounds i32, i32* %13, i32 %12
 store i32 %11, i32* %arrayidx2, align 4
- %14 = load i32** %ip, align 4
+ %14 = load i32*, i32** %ip, align 4
 %arrayidx3 = getelementptr inbounds i32, i32* %14, i32 25
- %15 = load i32* %arrayidx3, align 4
+ %15 = load i32, i32* %arrayidx3, align 4
 store i32 %15, i32* @riii, align 4
- %16 = load i32** %ip, align 4
+ %16 = load i32*, i32** %ip, align 4
 %arrayidx4 = getelementptr inbounds i32, i32* %16, i32 35
- %17 = load i32* %arrayidx4, align 4
+ %17 = load i32, i32* %arrayidx4, align 4
 store i32 %17, i32* @rjjj, align 4
- %18 = load i32** %ip, align 4
+ %18 = load i32*, i32** %ip, align 4
 %arrayidx5 = getelementptr inbounds i32, i32* %18, i32 100
- %19 = load i32* %arrayidx5, align 4
+ %19 = load i32, i32* %arrayidx5, align 4
 store i32 %19, i32* @rkkk, align 4
- %20 = load i32* @t, align 4
- %21 = load i32** %ip, align 4
+ %20 = load i32, i32* @t, align 4
+ %21 = load i32*, i32** %ip, align 4
 %arrayidx6 = getelementptr inbounds i32, i32* %21, i32 %20
- %22 = load i32* %arrayidx6, align 4
+ %22 = load i32, i32* %arrayidx6, align 4
 ; 16: addiu $sp, -16
 call void @temp(i32 %22)
 ; 16: addiu $sp, 16
diff --git a/llvm/test/CodeGen/Mips/and1.ll b/llvm/test/CodeGen/Mips/and1.ll
index 4ff1204fe7a..67aef6724ee 100644
--- a/llvm/test/CodeGen/Mips/and1.ll
+++ b/llvm/test/CodeGen/Mips/and1.ll
@@ -6,8 +6,8 @@ define i32 @main() nounwind {
 entry:
- %0 = load i32* @x, align 4
- %1 = load i32* @y, align 4
+ %0 = load i32, i32* @x, align 4
+ %1 = load i32, i32* @y, align 4
 %and = and i32 %0, %1
 ; 16: and ${{[0-9]+}}, ${{[0-9]+}}
 %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0), i32 %and)
diff --git a/llvm/test/CodeGen/Mips/atomic.ll b/llvm/test/CodeGen/Mips/atomic.ll
index ccfeb00967e..ccd9b264158 100644
--- a/llvm/test/CodeGen/Mips/atomic.ll
+++ b/llvm/test/CodeGen/Mips/atomic.ll
@@ -54,7 +54,7 @@ define i32 @AtomicSwap32(i32 signext %newval) nounwind {
 entry:
 %newval.addr = alloca i32, align 4
 store i32 %newval, i32* %newval.addr, align 4
- %tmp = load i32* %newval.addr, align 4
+ %tmp = load i32, i32* %newval.addr, align 4
 %0 = atomicrmw xchg i32* @x, i32 %tmp monotonic
 ret i32 %0
@@ -74,7 +74,7 @@ define i32 @AtomicCmpSwap32(i32 signext %oldval, i32 signext %newval) nounwind {
 entry:
 %newval.addr = alloca i32, align 4
 store i32 %newval, i32* %newval.addr, align 4
- %tmp = load i32* %newval.addr, align 4
+ %tmp = load i32, i32* %newval.addr, align 4
 %0 = cmpxchg i32* @x, i32 %oldval, i32 %tmp monotonic monotonic
 %1 = extractvalue { i32, i1 } %0, 0
 ret i32 %1
diff --git a/llvm/test/CodeGen/Mips/atomicops.ll b/llvm/test/CodeGen/Mips/atomicops.ll
index c26415233d0..c1093cf1a65 100644
--- a/llvm/test/CodeGen/Mips/atomicops.ll
+++ b/llvm/test/CodeGen/Mips/atomicops.ll
@@ -18,14 +18,14 @@ entry:
 store volatile i32 0, i32* %x, align 4
 %0 = atomicrmw add i32* %x, i32 1 seq_cst
 %add.i = add nsw i32 %0, 2
- %1 = load volatile i32* %x, align 4
+ %1 = load volatile i32, i32* %x, align 4
 %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %add.i, i32 %1) nounwind
 %pair = cmpxchg i32* %x, i32 1, i32 2 seq_cst seq_cst
 %2 = extractvalue { i32, i1 } %pair, 0
- %3 = load volatile i32* %x, align 4
+ %3 = load volatile i32, i32* %x, align 4
 %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %2, i32 %3) nounwind
 %4 = atomicrmw xchg i32* %x, i32 1 seq_cst
- %5 = load volatile i32* %x, align 4
+ %5 = load volatile i32, i32* %x, align 4
 %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), i32 %4, i32 %5) nounwind
 ; 16-LABEL: main:
 ; 16: lw ${{[0-9]+}}, %call16(__sync_synchronize)(${{[0-9]+}})
diff --git a/llvm/test/CodeGen/Mips/beqzc.ll b/llvm/test/CodeGen/Mips/beqzc.ll
index 4a294c2d817..afb66a915b3 100644
--- a/llvm/test/CodeGen/Mips/beqzc.ll
+++ b/llvm/test/CodeGen/Mips/beqzc.ll
@@ -6,7 +6,7 @@
 ; Function Attrs: nounwind optsize
 define i32 @main() #0 {
 entry:
- %0 = load i32* @i, align 4
+ %0 = load i32, i32* @i, align 4
 %cmp = icmp eq i32 %0, 0
 %. = select i1 %cmp, i32 10, i32 55
 store i32 %., i32* @j, align 4
diff --git a/llvm/test/CodeGen/Mips/beqzc1.ll b/llvm/test/CodeGen/Mips/beqzc1.ll
index 8f929a8e354..fe0dd2a3ce9 100644
--- a/llvm/test/CodeGen/Mips/beqzc1.ll
+++ b/llvm/test/CodeGen/Mips/beqzc1.ll
@@ -6,7 +6,7 @@
 ; Function Attrs: nounwind optsize
 define i32 @main() #0 {
 entry:
- %0 = load i32* @i, align 4
+ %0 = load i32, i32* @i, align 4
 %cmp = icmp eq i32 %0, 0
 br i1 %cmp, label %if.then, label %if.end
diff --git a/llvm/test/CodeGen/Mips/biggot.ll b/llvm/test/CodeGen/Mips/biggot.ll
index da287eea6fd..b56ce6ba87b 100644
--- a/llvm/test/CodeGen/Mips/biggot.ll
+++ b/llvm/test/CodeGen/Mips/biggot.ll
@@ -20,7 +20,7 @@ entry:
 ; N64: daddu $[[R3:[0-9]+]], $[[R2]], ${{[a-z0-9]+}}
 ; N64: ld ${{[0-9]+}}, %call_lo(foo0)($[[R3]])
- %0 = load i32* @v0, align 4
+ %0 = load i32, i32* @v0, align 4
 tail call void @foo0(i32 %0) nounwind
 ret void
 }
diff --git a/llvm/test/CodeGen/Mips/brconeq.ll b/llvm/test/CodeGen/Mips/brconeq.ll
index 613391557ef..f555528bbb6 100644
--- a/llvm/test/CodeGen/Mips/brconeq.ll
+++ b/llvm/test/CodeGen/Mips/brconeq.ll
@@ -6,8 +6,8 @@ define void @test() nounwind {
 entry:
- %0 = load i32* @i, align 4
- %1 = load i32* @j, align 4
+ %0 = load i32, i32* @i, align 4
+ %1 = load i32, i32* @j, align 4
 %cmp = icmp eq i32 %0, %1
 ; 16: cmp ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: bteqz $[[LABEL:[0-9A-Ba-b_]+]]
diff --git a/llvm/test/CodeGen/Mips/brconeqk.ll b/llvm/test/CodeGen/Mips/brconeqk.ll
index 2c0e72dabd2..59edae82e5a 100644
--- a/llvm/test/CodeGen/Mips/brconeqk.ll
+++ b/llvm/test/CodeGen/Mips/brconeqk.ll
@@ -5,7 +5,7 @@ define void @test() nounwind {
 entry:
- %0 = load i32* @i, align 4
+ %0 = load i32, i32* @i, align 4
 %cmp = icmp eq i32 %0, 10
 br i1 %cmp, label %if.end, label %if.then
 ; 16: cmpi ${{[0-9]+}}, {{[0-9]+}}
diff --git a/llvm/test/CodeGen/Mips/brconeqz.ll b/llvm/test/CodeGen/Mips/brconeqz.ll
index 5586e7b976d..22c56640752 100644
--- a/llvm/test/CodeGen/Mips/brconeqz.ll
+++ b/llvm/test/CodeGen/Mips/brconeqz.ll
@@ -5,7 +5,7 @@ define void @test() nounwind {
 entry:
- %0 = load i32* @i, align 4
+ %0 = load i32, i32* @i, align 4
 %cmp = icmp eq i32 %0, 0
 br i1 %cmp, label %if.end, label %if.then
 ; 16: beqz ${{[0-9]+}}, $[[LABEL:[0-9A-Ba-b_]+]]
diff --git a/llvm/test/CodeGen/Mips/brconge.ll b/llvm/test/CodeGen/Mips/brconge.ll
index 02f0a633b31..46d19847d9b 100644
--- a/llvm/test/CodeGen/Mips/brconge.ll
+++ b/llvm/test/CodeGen/Mips/brconge.ll
@@ -8,8 +8,8 @@ define void @test() nounwind {
 entry:
- %0 = load i32* @i, align 4
- %1 = load i32* @j, align 4
+ %0 = load i32, i32* @i, align 4
+ %1 = load i32, i32* @j, align 4
 %cmp = icmp slt i32 %0, %1
 br i1 %cmp, label %if.then, label %if.end
@@ -22,7 +22,7 @@ if.then: ; preds = %entry
 br label %if.end

if.end: ; preds = %if.then, %entry
- %2 = load i32* @k, align 4
+ %2 = load i32, i32* @k, align 4
 %cmp1 = icmp slt i32 %0, %2
 br i1 %cmp1, label %if.then2, label %if.end3
diff --git a/llvm/test/CodeGen/Mips/brcongt.ll b/llvm/test/CodeGen/Mips/brcongt.ll
index 767b51b21b9..cefacb8318b 100644
--- a/llvm/test/CodeGen/Mips/brcongt.ll
+++ b/llvm/test/CodeGen/Mips/brcongt.ll
@@ -7,8 +7,8 @@ define void @test() nounwind {
 entry:
- %0 = load i32* @i, align 4
- %1 = load i32* @j, align 4
+ %0 = load i32, i32* @i, align 4
+ %1 = load i32, i32* @j, align 4
 %cmp = icmp sgt i32 %0, %1
 br i1 %cmp, label %if.end, label %if.then
 ; 16: slt ${{[0-9]+}}, ${{[0-9]+}}
diff --git a/llvm/test/CodeGen/Mips/brconle.ll b/llvm/test/CodeGen/Mips/brconle.ll
index 854b2481c6e..e1f15ecb6b9 100644
--- a/llvm/test/CodeGen/Mips/brconle.ll
+++ b/llvm/test/CodeGen/Mips/brconle.ll
@@ -8,8 +8,8 @@ define void @test() nounwind {
 entry:
- %0 = load i32* @j, align 4
- %1 = load i32* @i, align 4
+ %0 = load i32, i32* @j, align 4
+ %1 = load i32, i32* @i, align 4
 %cmp = icmp sgt i32 %0, %1
 br i1 %cmp, label %if.then, label %if.end
@@ -22,7 +22,7 @@ if.then: ; preds = %entry
 br label %if.end

if.end: ; preds = %if.then, %entry
- %2 = load i32* @k, align 4
+ %2 = load i32, i32* @k, align 4
 %cmp1 = icmp sgt i32 %1, %2
 br i1 %cmp1, label %if.then2, label %if.end3
diff --git a/llvm/test/CodeGen/Mips/brconlt.ll b/llvm/test/CodeGen/Mips/brconlt.ll
index 931a3e8c7ba..049f35c393f 100644
--- a/llvm/test/CodeGen/Mips/brconlt.ll
+++ b/llvm/test/CodeGen/Mips/brconlt.ll
@@ -7,8 +7,8 @@ define void @test() nounwind {
 entry:
- %0 = load i32* @j, align 4
- %1 = load i32* @i, align 4
+ %0 = load i32, i32* @j, align 4
+ %1 = load i32, i32* @i, align 4
 %cmp = icmp slt i32 %0, %1
 br i1 %cmp, label %if.end, label %if.then
diff --git a/llvm/test/CodeGen/Mips/brconne.ll b/llvm/test/CodeGen/Mips/brconne.ll
index 5d5bde3fcf9..b260320b94e 100644
--- a/llvm/test/CodeGen/Mips/brconne.ll
+++ b/llvm/test/CodeGen/Mips/brconne.ll
@@ -6,8 +6,8 @@ define void @test() nounwind {
 entry:
- %0 = load i32* @j, align 4
- %1 = load i32* @i, align 4
+ %0 = load i32, i32* @j, align 4
+ %1 = load i32, i32* @i, align 4
 %cmp = icmp eq i32 %0, %1
 br i1 %cmp, label %if.then, label %if.end
 ; 16: cmp ${{[0-9]+}}, ${{[0-9]+}}
diff --git a/llvm/test/CodeGen/Mips/brconnek.ll b/llvm/test/CodeGen/Mips/brconnek.ll
index 6208d7c5a04..778a5cce72b 100644
--- a/llvm/test/CodeGen/Mips/brconnek.ll
+++ b/llvm/test/CodeGen/Mips/brconnek.ll
@@ -5,7 +5,7 @@ define void @test() nounwind {
 entry:
- %0 = load i32* @j, align 4
+ %0 = load i32, i32* @j, align 4
 %cmp = icmp eq i32 %0, 5
 br i1 %cmp, label %if.then, label %if.end
diff --git a/llvm/test/CodeGen/Mips/brconnez.ll b/llvm/test/CodeGen/Mips/brconnez.ll
index 47db7901b51..754714b21da 100644
--- a/llvm/test/CodeGen/Mips/brconnez.ll
+++ b/llvm/test/CodeGen/Mips/brconnez.ll
@@ -5,7 +5,7 @@ define void @test() nounwind {
 entry:
- %0 = load i32* @j, align 4
+ %0 = load i32, i32* @j, align 4
 %cmp = icmp eq i32 %0, 0
 br i1 %cmp, label %if.then, label %if.end
diff --git a/llvm/test/CodeGen/Mips/brdelayslot.ll b/llvm/test/CodeGen/Mips/brdelayslot.ll
index bcaba797ce4..0f46619b827 100644
--- a/llvm/test/CodeGen/Mips/brdelayslot.ll
+++ b/llvm/test/CodeGen/Mips/brdelayslot.ll
@@ -54,18 +54,18 @@ declare void @foo4(double)
 define void @foo5(i32 %a) nounwind {
 entry:
- %0 = load i32* @g2, align 4
+ %0 = load i32, i32* @g2, align 4
 %tobool = icmp eq i32 %a, 0
 br i1 %tobool, label %if.else, label %if.then

if.then:
- %1 = load i32* @g1, align 4
+ %1 = load i32, i32* @g1, align 4
 %add = add nsw i32 %1, %0
 store i32 %add, i32* @g1, align 4
 br label %if.end

if.else:
- %2 = load i32* @g3, align 4
+ %2 = load i32, i32* @g3, align 4
 %sub = sub nsw i32 %2, %0
 store i32 %sub, i32* @g3, align 4
 br label %if.end
@@ -99,9 +99,9 @@ declare void @foo7(double, float)
 define i32 @foo8(i32 %a) nounwind {
 entry:
 store i32 %a, i32* @g1, align 4
- %0 = load void ()** @foo9, align 4
+ %0 = load void ()*, void ()** @foo9, align 4
 tail call void %0() nounwind
- %1 = load i32* @g1, align 4
+ %1 = load i32, i32* @g1, align 4
 %add = add nsw i32 %1, %a
 ret i32 %add
 }
@@ -145,7 +145,7 @@ for.body: ; preds = %entry, %for.body
 %s.06 = phi i32 [ %add, %for.body ], [ 0, %entry ]
 %i.05 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
 %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.05
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
 %add = add nsw i32 %0, %s.06
 %inc = add nsw i32 %i.05, 1
 %exitcond = icmp eq i32 %inc, %n
diff --git a/llvm/test/CodeGen/Mips/brind.ll b/llvm/test/CodeGen/Mips/brind.ll
index 970dd991817..8aee61e1408 100644
--- a/llvm/test/CodeGen/Mips/brind.ll
+++ b/llvm/test/CodeGen/Mips/brind.ll
@@ -27,7 +27,7 @@ L3: ; preds = %L2, %L3
 %puts7 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str7, i32 0, i32 0))
 %inc = add i32 %i.2, 1
 %arrayidx = getelementptr inbounds [5 x i8*], [5 x i8*]* @main.L, i32 0, i32 %i.2
- %0 = load i8** %arrayidx, align 4
+ %0 = load i8*, i8** %arrayidx, align 4
 indirectbr i8* %0, [label %L1, label %L2, label %L3, label %L4]
 ; 16: jrc ${{[0-9]+}}
L4: ; preds = %L3
diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
index d1f07a6cf01..1087e53e767 100644
--- a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
+++ b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
@@ -140,10 +140,10 @@ define void @smallStruct_1b(%struct.SmallStruct_1b* %ss) #0 {
 entry:
 %ss.addr = alloca %struct.SmallStruct_1b*, align 8
 store %struct.SmallStruct_1b* %ss, %struct.SmallStruct_1b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_1b** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss.addr, align 8
 %1 = bitcast %struct.SmallStruct_1b* %0 to { i8 }*
 %2 = getelementptr { i8 }, { i8 }* %1, i32 0, i32 0
- %3 = load i8* %2, align 1
+ %3 = load i8, i8* %2, align 1
 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i8 inreg %3)
 ret void
 ; CHECK-LABEL: smallStruct_1b:
@@ -154,10 +154,10 @@ define void @smallStruct_2b(%struct.SmallStruct_2b* %ss) #0 {
 entry:
 %ss.addr = alloca %struct.SmallStruct_2b*, align 8
 store %struct.SmallStruct_2b* %ss, %struct.SmallStruct_2b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_2b** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_2b*, %struct.SmallStruct_2b** %ss.addr, align 8
 %1 = bitcast %struct.SmallStruct_2b* %0 to { i16 }*
 %2 = getelementptr { i16 }, { i16 }* %1, i32 0, i32 0
- %3 = load i16* %2, align 1
+ %3 = load i16, i16* %2, align 1
 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i16 inreg %3)
 ret void
 ; CHECK-LABEL: smallStruct_2b:
@@ -169,12 +169,12 @@ entry:
 %ss.addr = alloca %struct.SmallStruct_3b*, align 8
 %.coerce = alloca { i24 }
 store %struct.SmallStruct_3b* %ss, %struct.SmallStruct_3b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_3b** %ss.addr, align 8
+ %0 = load %struct.SmallStruct_3b*, %struct.SmallStruct_3b** %ss.addr, align 8
 %1 = bitcast { i24 }* %.coerce to i8*
 %2 = bitcast %struct.SmallStruct_3b* %0 to i8*
 call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 3, i32 0, i1 false)
 %3 = getelementptr { i24 }, { i24 }* %.coerce, i32 0, i32 0
- %4 = load i24* %3, align 1
+ %4 = load i24, i24* %3, align 1
 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i24 inreg %4)
 ret void
 ; CHECK-LABEL: smallStruct_3b:
@@ -187,10 +187,10 @@ define void @smallStruct_4b(%struct.SmallStruct_4b* %ss) #0 {
 entry:
 %ss.addr = alloca %struct.SmallStruct_4b*, align 8
 store %struct.SmallStruct_4b* %ss, %struct.SmallStruct_4b** %ss.addr, align 8
- %0 = load %struct.SmallStruct_4b** %ss.addr, align 8
%struct.SmallStruct_4b*, %struct.SmallStruct_4b** %ss.addr, align 8 %1 = bitcast %struct.SmallStruct_4b* %0 to { i32 }* %2 = getelementptr { i32 }, { i32 }* %1, i32 0, i32 0 - %3 = load i32* %2, align 1 + %3 = load i32, i32* %2, align 1 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 inreg %3) ret void ; CHECK-LABEL: smallStruct_4b: @@ -202,12 +202,12 @@ entry: %ss.addr = alloca %struct.SmallStruct_5b*, align 8 %.coerce = alloca { i40 } store %struct.SmallStruct_5b* %ss, %struct.SmallStruct_5b** %ss.addr, align 8 - %0 = load %struct.SmallStruct_5b** %ss.addr, align 8 + %0 = load %struct.SmallStruct_5b*, %struct.SmallStruct_5b** %ss.addr, align 8 %1 = bitcast { i40 }* %.coerce to i8* %2 = bitcast %struct.SmallStruct_5b* %0 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 5, i32 0, i1 false) %3 = getelementptr { i40 }, { i40 }* %.coerce, i32 0, i32 0 - %4 = load i40* %3, align 1 + %4 = load i40, i40* %3, align 1 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i40 inreg %4) ret void ; CHECK-LABEL: smallStruct_5b: @@ -219,12 +219,12 @@ entry: %ss.addr = alloca %struct.SmallStruct_6b*, align 8 %.coerce = alloca { i48 } store %struct.SmallStruct_6b* %ss, %struct.SmallStruct_6b** %ss.addr, align 8 - %0 = load %struct.SmallStruct_6b** %ss.addr, align 8 + %0 = load %struct.SmallStruct_6b*, %struct.SmallStruct_6b** %ss.addr, align 8 %1 = bitcast { i48 }* %.coerce to i8* %2 = bitcast %struct.SmallStruct_6b* %0 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false) %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0 - %4 = load i48* %3, align 1 + %4 = load i48, i48* %3, align 1 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4) ret void ; CHECK-LABEL: smallStruct_6b: @@ -236,12 +236,12 @@ entry: %ss.addr = alloca %struct.SmallStruct_7b*, align 8 %.coerce = alloca { i56 } store %struct.SmallStruct_7b* %ss, %struct.SmallStruct_7b** %ss.addr, align 8 - %0 = load %struct.SmallStruct_7b** %ss.addr, align 8 + %0 = load %struct.SmallStruct_7b*, %struct.SmallStruct_7b** %ss.addr, align 8 %1 = bitcast { i56 }* %.coerce to i8* %2 = bitcast %struct.SmallStruct_7b* %0 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 7, i32 0, i1 false) %3 = getelementptr { i56 }, { i56 }* %.coerce, i32 0, i32 0 - %4 = load i56* %3, align 1 + %4 = load i56, i56* %3, align 1 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i56 inreg %4) ret void ; CHECK-LABEL: smallStruct_7b: @@ -252,10 +252,10 @@ define void @smallStruct_8b(%struct.SmallStruct_8b* %ss) #0 { entry: %ss.addr = alloca %struct.SmallStruct_8b*, align 8 store %struct.SmallStruct_8b* %ss, %struct.SmallStruct_8b** %ss.addr, align 8 - %0 = load %struct.SmallStruct_8b** %ss.addr, align 8 + %0 = load %struct.SmallStruct_8b*, %struct.SmallStruct_8b** %ss.addr, align 8 %1 = bitcast %struct.SmallStruct_8b* %0 to { i64 }* %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0 - %3 = load i64* %2, align 1 + %3 = load i64, i64* %2, align 1 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3) ret void ; CHECK-LABEL: smallStruct_8b: @@ -267,14 +267,14 @@ entry: %ss.addr = alloca %struct.SmallStruct_9b*, align 8 %.coerce = alloca { i64, i8 } store %struct.SmallStruct_9b* %ss, %struct.SmallStruct_9b** %ss.addr, align 8 - %0 = 
load %struct.SmallStruct_9b** %ss.addr, align 8 + %0 = load %struct.SmallStruct_9b*, %struct.SmallStruct_9b** %ss.addr, align 8 %1 = bitcast { i64, i8 }* %.coerce to i8* %2 = bitcast %struct.SmallStruct_9b* %0 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 9, i32 0, i1 false) %3 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 0 - %4 = load i64* %3, align 1 + %4 = load i64, i64* %3, align 1 %5 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 1 - %6 = load i8* %5, align 1 + %6 = load i8, i8* %5, align 1 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %4, i8 inreg %6) ret void ; CHECK-LABEL: smallStruct_9b: diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll index c5e4e9307f0..674adcc0dba 100644 --- a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll +++ b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll @@ -74,10 +74,10 @@ define void @smallStruct_1b1s(%struct.SmallStruct_1b1s* %ss) #0 { entry: %ss.addr = alloca %struct.SmallStruct_1b1s*, align 8 store %struct.SmallStruct_1b1s* %ss, %struct.SmallStruct_1b1s** %ss.addr, align 8 - %0 = load %struct.SmallStruct_1b1s** %ss.addr, align 8 + %0 = load %struct.SmallStruct_1b1s*, %struct.SmallStruct_1b1s** %ss.addr, align 8 %1 = bitcast %struct.SmallStruct_1b1s* %0 to { i32 }* %2 = getelementptr { i32 }, { i32 }* %1, i32 0, i32 0 - %3 = load i32* %2, align 1 + %3 = load i32, i32* %2, align 1 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 inreg %3) ret void ; CHECK-LABEL: smallStruct_1b1s: @@ -88,10 +88,10 @@ define void @smallStruct_1b1i(%struct.SmallStruct_1b1i* %ss) #0 { entry: %ss.addr = alloca %struct.SmallStruct_1b1i*, align 8 store %struct.SmallStruct_1b1i* %ss, %struct.SmallStruct_1b1i** %ss.addr, align 8 - %0 = load %struct.SmallStruct_1b1i** %ss.addr, align 8 + %0 = load %struct.SmallStruct_1b1i*, %struct.SmallStruct_1b1i** %ss.addr, align 8 %1 = bitcast %struct.SmallStruct_1b1i* %0 to { i64 }* %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0 - %3 = load i64* %2, align 1 + %3 = load i64, i64* %2, align 1 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3) ret void ; CHECK-LABEL: smallStruct_1b1i: @@ -103,12 +103,12 @@ entry: %ss.addr = alloca %struct.SmallStruct_1b1s1b*, align 8 %.coerce = alloca { i48 } store %struct.SmallStruct_1b1s1b* %ss, %struct.SmallStruct_1b1s1b** %ss.addr, align 8 - %0 = load %struct.SmallStruct_1b1s1b** %ss.addr, align 8 + %0 = load %struct.SmallStruct_1b1s1b*, %struct.SmallStruct_1b1s1b** %ss.addr, align 8 %1 = bitcast { i48 }* %.coerce to i8* %2 = bitcast %struct.SmallStruct_1b1s1b* %0 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false) %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0 - %4 = load i48* %3, align 1 + %4 = load i48, i48* %3, align 1 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4) ret void ; CHECK-LABEL: smallStruct_1b1s1b: @@ -121,10 +121,10 @@ define void @smallStruct_1s1i(%struct.SmallStruct_1s1i* %ss) #0 { entry: %ss.addr = alloca %struct.SmallStruct_1s1i*, align 8 store %struct.SmallStruct_1s1i* %ss, %struct.SmallStruct_1s1i** %ss.addr, align 8 - %0 = load 
%struct.SmallStruct_1s1i** %ss.addr, align 8 + %0 = load %struct.SmallStruct_1s1i*, %struct.SmallStruct_1s1i** %ss.addr, align 8 %1 = bitcast %struct.SmallStruct_1s1i* %0 to { i64 }* %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0 - %3 = load i64* %2, align 1 + %3 = load i64, i64* %2, align 1 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3) ret void ; CHECK-LABEL: smallStruct_1s1i: @@ -136,12 +136,12 @@ entry: %ss.addr = alloca %struct.SmallStruct_3b1s*, align 8 %.coerce = alloca { i48 } store %struct.SmallStruct_3b1s* %ss, %struct.SmallStruct_3b1s** %ss.addr, align 8 - %0 = load %struct.SmallStruct_3b1s** %ss.addr, align 8 + %0 = load %struct.SmallStruct_3b1s*, %struct.SmallStruct_3b1s** %ss.addr, align 8 %1 = bitcast { i48 }* %.coerce to i8* %2 = bitcast %struct.SmallStruct_3b1s* %0 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false) %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0 - %4 = load i48* %3, align 1 + %4 = load i48, i48* %3, align 1 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4) ret void ; CHECK-LABEL: smallStruct_3b1s: diff --git a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll index a9e85632f1d..224235863f9 100644 --- a/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll +++ b/llvm/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll @@ -110,42 +110,42 @@ entry: store %struct.SmallStruct_1b* %ss7, %struct.SmallStruct_1b** %ss7.addr, align 8 store %struct.SmallStruct_1b* %ss8, %struct.SmallStruct_1b** %ss8.addr, align 8 store %struct.SmallStruct_1b* %ss9, %struct.SmallStruct_1b** %ss9.addr, align 8 - %0 = load %struct.SmallStruct_1b** %ss1.addr, align 8 - %1 = load %struct.SmallStruct_1b** %ss2.addr, align 8 - %2 = load %struct.SmallStruct_1b** %ss3.addr, align 8 - %3 = load %struct.SmallStruct_1b** %ss4.addr, align 8 - %4 = load %struct.SmallStruct_1b** %ss5.addr, align 8 - %5 = load %struct.SmallStruct_1b** %ss6.addr, align 8 - %6 = load %struct.SmallStruct_1b** %ss7.addr, align 8 - %7 = load %struct.SmallStruct_1b** %ss8.addr, align 8 - %8 = load %struct.SmallStruct_1b** %ss9.addr, align 8 + %0 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss1.addr, align 8 + %1 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss2.addr, align 8 + %2 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss3.addr, align 8 + %3 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss4.addr, align 8 + %4 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss5.addr, align 8 + %5 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss6.addr, align 8 + %6 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss7.addr, align 8 + %7 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss8.addr, align 8 + %8 = load %struct.SmallStruct_1b*, %struct.SmallStruct_1b** %ss9.addr, align 8 %9 = bitcast %struct.SmallStruct_1b* %0 to { i8 }* %10 = getelementptr { i8 }, { i8 }* %9, i32 0, i32 0 - %11 = load i8* %10, align 1 + %11 = load i8, i8* %10, align 1 %12 = bitcast %struct.SmallStruct_1b* %1 to { i8 }* %13 = getelementptr { i8 }, { i8 }* %12, i32 0, i32 0 - %14 = load i8* %13, align 1 + %14 = load i8, i8* %13, align 1 %15 = bitcast %struct.SmallStruct_1b* %2 to { i8 }* %16 = getelementptr { i8 }, { i8 }* %15, i32 0, 
i32 0 - %17 = load i8* %16, align 1 + %17 = load i8, i8* %16, align 1 %18 = bitcast %struct.SmallStruct_1b* %3 to { i8 }* %19 = getelementptr { i8 }, { i8 }* %18, i32 0, i32 0 - %20 = load i8* %19, align 1 + %20 = load i8, i8* %19, align 1 %21 = bitcast %struct.SmallStruct_1b* %4 to { i8 }* %22 = getelementptr { i8 }, { i8 }* %21, i32 0, i32 0 - %23 = load i8* %22, align 1 + %23 = load i8, i8* %22, align 1 %24 = bitcast %struct.SmallStruct_1b* %5 to { i8 }* %25 = getelementptr { i8 }, { i8 }* %24, i32 0, i32 0 - %26 = load i8* %25, align 1 + %26 = load i8, i8* %25, align 1 %27 = bitcast %struct.SmallStruct_1b* %6 to { i8 }* %28 = getelementptr { i8 }, { i8 }* %27, i32 0, i32 0 - %29 = load i8* %28, align 1 + %29 = load i8, i8* %28, align 1 %30 = bitcast %struct.SmallStruct_1b* %7 to { i8 }* %31 = getelementptr { i8 }, { i8 }* %30, i32 0, i32 0 - %32 = load i8* %31, align 1 + %32 = load i8, i8* %31, align 1 %33 = bitcast %struct.SmallStruct_1b* %8 to { i8 }* %34 = getelementptr { i8 }, { i8 }* %33, i32 0, i32 0 - %35 = load i8* %34, align 1 + %35 = load i8, i8* %34, align 1 call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i8 inreg %11, i8 inreg %14, i8 inreg %17, i8 inreg %20, i8 inreg %23, i8 inreg %26, i8 inreg %29, i8 inreg %32, i8 inreg %35) ret void ; CHECK-LABEL: smallStruct_1b_x9: diff --git a/llvm/test/CodeGen/Mips/cconv/return-float.ll b/llvm/test/CodeGen/Mips/cconv/return-float.ll index 8c4c31c97c1..4355a55232e 100644 --- a/llvm/test/CodeGen/Mips/cconv/return-float.ll +++ b/llvm/test/CodeGen/Mips/cconv/return-float.ll @@ -21,7 +21,7 @@ define float @retfloat() nounwind { entry: - %0 = load volatile float* @float + %0 = load volatile float, float* @float ret float %0 } @@ -35,7 +35,7 @@ entry: define double @retdouble() nounwind { entry: - %0 = load volatile double* @double + %0 = load volatile double, double* @double ret double %0 } diff --git a/llvm/test/CodeGen/Mips/cconv/return-hard-float.ll b/llvm/test/CodeGen/Mips/cconv/return-hard-float.ll index f0aeb1273cb..14853c8ca6f 100644 --- a/llvm/test/CodeGen/Mips/cconv/return-hard-float.ll +++ b/llvm/test/CodeGen/Mips/cconv/return-hard-float.ll @@ -24,7 +24,7 @@ define float @retfloat() nounwind { entry: - %0 = load volatile float* @float + %0 = load volatile float, float* @float ret float %0 } @@ -38,7 +38,7 @@ entry: define double @retdouble() nounwind { entry: - %0 = load volatile double* @double + %0 = load volatile double, double* @double ret double %0 } @@ -50,7 +50,7 @@ entry: define { double, double } @retComplexDouble() #0 { %retval = alloca { double, double }, align 8 - %1 = load { double, double }* %retval + %1 = load { double, double }, { double, double }* %retval ret { double, double } %1 } diff --git a/llvm/test/CodeGen/Mips/cconv/return-hard-fp128.ll b/llvm/test/CodeGen/Mips/cconv/return-hard-fp128.ll index 05dacfeba54..34e9647acdd 100644 --- a/llvm/test/CodeGen/Mips/cconv/return-hard-fp128.ll +++ b/llvm/test/CodeGen/Mips/cconv/return-hard-fp128.ll @@ -13,7 +13,7 @@ define fp128 @retldouble() nounwind { entry: - %0 = load volatile fp128* @fp128 + %0 = load volatile fp128, fp128* @fp128 ret fp128 %0 } diff --git a/llvm/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll b/llvm/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll index 4ce26b1e0fa..c4c8f10ca3b 100644 --- a/llvm/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll +++ b/llvm/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll @@ -10,7 +10,7 @@ define inreg {fp128} @ret_struct_fp128() nounwind { entry: - %0 
= load volatile {fp128}* @struct_fp128 + %0 = load volatile {fp128}, {fp128}* @struct_fp128 ret {fp128} %0 } diff --git a/llvm/test/CodeGen/Mips/cconv/return-struct.ll b/llvm/test/CodeGen/Mips/cconv/return-struct.ll index 3d591df605f..68af9e3f89d 100644 --- a/llvm/test/CodeGen/Mips/cconv/return-struct.ll +++ b/llvm/test/CodeGen/Mips/cconv/return-struct.ll @@ -22,7 +22,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i define inreg {i8} @ret_struct_i8() nounwind { entry: - %0 = load volatile {i8}* @struct_byte + %0 = load volatile {i8}, {i8}* @struct_byte ret {i8} %0 } @@ -54,7 +54,7 @@ entry: %0 = bitcast {i8,i8}* %retval to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* getelementptr inbounds ({i8,i8}* @struct_2byte, i32 0, i32 0), i64 2, i32 1, i1 false) %1 = bitcast {i8,i8}* %retval to {i16}* - %2 = load volatile {i16}* %1 + %2 = load volatile {i16}, {i16}* %1 ret {i16} %2 } @@ -91,7 +91,7 @@ entry: ; missed by the CCPromoteToType and the shift didn't happen. define inreg {i48} @ret_struct_3xi16() nounwind { entry: - %0 = load volatile i48* bitcast ({[3 x i16]}* @struct_3xi16 to i48*), align 2 + %0 = load volatile i48, i48* bitcast ({[3 x i16]}* @struct_3xi16 to i48*), align 2 %1 = insertvalue {i48} undef, i48 %0, 0 ret {i48} %1 } @@ -174,7 +174,7 @@ entry: ; This time we let the backend lower the sret argument. define {[6 x i32]} @ret_struct_6xi32() { entry: - %0 = load volatile {[6 x i32]}* @struct_6xi32, align 2 + %0 = load volatile {[6 x i32]}, {[6 x i32]}* @struct_6xi32, align 2 ret {[6 x i32]} %0 } diff --git a/llvm/test/CodeGen/Mips/cconv/return.ll b/llvm/test/CodeGen/Mips/cconv/return.ll index 516026d6cee..a5376727543 100644 --- a/llvm/test/CodeGen/Mips/cconv/return.ll +++ b/llvm/test/CodeGen/Mips/cconv/return.ll @@ -24,7 +24,7 @@ define i8 @reti8() nounwind { entry: - %0 = load volatile i8* @byte + %0 = load volatile i8, i8* @byte ret i8 %0 } @@ -38,7 +38,7 @@ entry: define i32 @reti32() nounwind { entry: - %0 = load volatile i32* @word + %0 = load volatile i32, i32* @word ret i32 %0 } @@ -52,7 +52,7 @@ entry: define i64 @reti64() nounwind { entry: - %0 = load volatile i64* @dword + %0 = load volatile i64, i64* @dword ret i64 %0 } diff --git a/llvm/test/CodeGen/Mips/cfi_offset.ll b/llvm/test/CodeGen/Mips/cfi_offset.ll index e23855bd65d..6e783447bb0 100644 --- a/llvm/test/CodeGen/Mips/cfi_offset.ll +++ b/llvm/test/CodeGen/Mips/cfi_offset.ll @@ -32,8 +32,8 @@ define void @bar() { ; CHECK: .cfi_offset 31, -20 ; CHECK: .cfi_offset 16, -24 - %val1 = load volatile double* @var - %val2 = load volatile double* @var + %val1 = load volatile double, double* @var + %val2 = load volatile double, double* @var call void (...)* @foo() nounwind store volatile double %val1, double* @var store volatile double %val2, double* @var diff --git a/llvm/test/CodeGen/Mips/ci2.ll b/llvm/test/CodeGen/Mips/ci2.ll index e2068fdf14e..63ed68387a2 100644 --- a/llvm/test/CodeGen/Mips/ci2.ll +++ b/llvm/test/CodeGen/Mips/ci2.ll @@ -8,7 +8,7 @@ define void @foo() #0 { entry: store i32 305419896, i32* @i, align 4 - %0 = load i32* @b, align 4 + %0 = load i32, i32* @b, align 4 %tobool = icmp ne i32 %0, 0 br i1 %tobool, label %if.then, label %if.else diff --git a/llvm/test/CodeGen/Mips/cmov.ll b/llvm/test/CodeGen/Mips/cmov.ll index b12c2df97c1..b018f28cd66 100644 --- a/llvm/test/CodeGen/Mips/cmov.ll +++ b/llvm/test/CodeGen/Mips/cmov.ll @@ -41,7 +41,7 @@ define i32* @cmov1(i32 signext %s) nounwind readonly { entry: %tobool = icmp ne i32 %s, 0 - %tmp1 = load i32** @i3, align 4 + 
%tmp1 = load i32*, i32** @i3, align 4 %cond = select i1 %tobool, i32* getelementptr inbounds ([3 x i32]* @i1, i32 0, i32 0), i32* %tmp1 ret i32* %cond } @@ -81,8 +81,8 @@ entry: define i32 @cmov2(i32 signext %s) nounwind readonly { entry: %tobool = icmp ne i32 %s, 0 - %tmp1 = load i32* @c, align 4 - %tmp2 = load i32* @d, align 4 + %tmp1 = load i32, i32* @c, align 4 + %tmp2 = load i32, i32* @d, align 4 %cond = select i1 %tobool, i32 %tmp1, i32 %tmp2 ret i32 %cond } diff --git a/llvm/test/CodeGen/Mips/cmplarge.ll b/llvm/test/CodeGen/Mips/cmplarge.ll index 43fc10dda68..79019065a90 100644 --- a/llvm/test/CodeGen/Mips/cmplarge.ll +++ b/llvm/test/CodeGen/Mips/cmplarge.ll @@ -10,7 +10,7 @@ target triple = "mipsel--linux-gnu" define void @getSubImagesLuma(%struct.StorablePicture* nocapture %s) #0 { entry: %size_y = getelementptr inbounds %struct.StorablePicture, %struct.StorablePicture* %s, i32 0, i32 1 - %0 = load i32* %size_y, align 4 + %0 = load i32, i32* %size_y, align 4 %sub = add nsw i32 %0, -1 %add5 = add nsw i32 %0, 20 %cmp6 = icmp sgt i32 %add5, -20 @@ -20,7 +20,7 @@ for.body: ; preds = %entry, %for.body %j.07 = phi i32 [ %inc, %for.body ], [ -20, %entry ] %call = tail call i32 bitcast (i32 (...)* @iClip3 to i32 (i32, i32, i32)*)(i32 0, i32 %sub, i32 %j.07) #2 %inc = add nsw i32 %j.07, 1 - %1 = load i32* %size_y, align 4 + %1 = load i32, i32* %size_y, align 4 %add = add nsw i32 %1, 20 %cmp = icmp slt i32 %inc, %add br i1 %cmp, label %for.body, label %for.end diff --git a/llvm/test/CodeGen/Mips/const4a.ll b/llvm/test/CodeGen/Mips/const4a.ll index ac6795b2c83..9022eb44a8a 100644 --- a/llvm/test/CodeGen/Mips/const4a.ll +++ b/llvm/test/CodeGen/Mips/const4a.ll @@ -14,7 +14,7 @@ target triple = "mips--linux-gnu" define void @t() #0 { entry: store i32 -559023410, i32* @i, align 4 - %0 = load i32* @b, align 4 + %0 = load i32, i32* @b, align 4 ; no-load-relax: lw ${{[0-9]+}}, $CPI0_1 # 16 bit inst %tobool = icmp ne i32 %0, 0 br i1 %tobool, label %if.then, label %if.else diff --git a/llvm/test/CodeGen/Mips/ctlz.ll b/llvm/test/CodeGen/Mips/ctlz.ll index 1f871664a6c..96af1973b19 100644 --- a/llvm/test/CodeGen/Mips/ctlz.ll +++ b/llvm/test/CodeGen/Mips/ctlz.ll @@ -9,7 +9,7 @@ define i32 @main() #0 { entry: %retval = alloca i32, align 4 store i32 0, i32* %retval - %0 = load i32* @x, align 4 + %0 = load i32, i32* @x, align 4 %1 = call i32 @llvm.ctlz.i32(i32 %0, i1 true) store i32 %1, i32* @y, align 4 ret i32 0 diff --git a/llvm/test/CodeGen/Mips/disable-tail-merge.ll b/llvm/test/CodeGen/Mips/disable-tail-merge.ll index b4c093aa852..9396db7be7f 100644 --- a/llvm/test/CodeGen/Mips/disable-tail-merge.ll +++ b/llvm/test/CodeGen/Mips/disable-tail-merge.ll @@ -9,20 +9,20 @@ define i32 @test1(i32 %a) { entry: %tobool = icmp eq i32 %a, 0 - %0 = load i32* @g0, align 4 + %0 = load i32, i32* @g0, align 4 br i1 %tobool, label %if.else, label %if.then if.then: %add = add nsw i32 %0, 1 store i32 %add, i32* @g0, align 4 - %1 = load i32* @g1, align 4 + %1 = load i32, i32* @g1, align 4 %add1 = add nsw i32 %1, 23 br label %if.end if.else: %add2 = add nsw i32 %0, 11 store i32 %add2, i32* @g0, align 4 - %2 = load i32* @g1, align 4 + %2 = load i32, i32* @g1, align 4 %add3 = add nsw i32 %2, 23 br label %if.end diff --git a/llvm/test/CodeGen/Mips/div.ll b/llvm/test/CodeGen/Mips/div.ll index 00e2c192745..731841c554f 100644 --- a/llvm/test/CodeGen/Mips/div.ll +++ b/llvm/test/CodeGen/Mips/div.ll @@ -6,8 +6,8 @@ define void @test() nounwind { entry: - %0 = load i32* @iiii, align 4 - %1 = load i32* @jjjj, align 4 + %0 = load i32, 
i32* @iiii, align 4 + %1 = load i32, i32* @jjjj, align 4 %div = sdiv i32 %0, %1 ; 16: div $zero, ${{[0-9]+}}, ${{[0-9]+}} ; 16: mflo ${{[0-9]+}} diff --git a/llvm/test/CodeGen/Mips/div_rem.ll b/llvm/test/CodeGen/Mips/div_rem.ll index 950192eee16..e64529cee84 100644 --- a/llvm/test/CodeGen/Mips/div_rem.ll +++ b/llvm/test/CodeGen/Mips/div_rem.ll @@ -7,8 +7,8 @@ define void @test() nounwind { entry: - %0 = load i32* @iiii, align 4 - %1 = load i32* @jjjj, align 4 + %0 = load i32, i32* @iiii, align 4 + %1 = load i32, i32* @jjjj, align 4 %div = sdiv i32 %0, %1 store i32 %div, i32* @kkkk, align 4 %rem = srem i32 %0, %1 diff --git a/llvm/test/CodeGen/Mips/divrem.ll b/llvm/test/CodeGen/Mips/divrem.ll index a9cfe0fa152..918db053f5b 100644 --- a/llvm/test/CodeGen/Mips/divrem.ll +++ b/llvm/test/CodeGen/Mips/divrem.ll @@ -220,8 +220,8 @@ entry: ; FIXME: It's not clear what this is supposed to test. define i32 @killFlags() { entry: - %0 = load i32* @g0, align 4 - %1 = load i32* @g1, align 4 + %0 = load i32, i32* @g0, align 4 + %1 = load i32, i32* @g1, align 4 %div = sdiv i32 %0, %1 ret i32 %div } diff --git a/llvm/test/CodeGen/Mips/divu.ll b/llvm/test/CodeGen/Mips/divu.ll index b96a439390c..5bc765a71eb 100644 --- a/llvm/test/CodeGen/Mips/divu.ll +++ b/llvm/test/CodeGen/Mips/divu.ll @@ -6,8 +6,8 @@ define void @test() nounwind { entry: - %0 = load i32* @iiii, align 4 - %1 = load i32* @jjjj, align 4 + %0 = load i32, i32* @iiii, align 4 + %1 = load i32, i32* @jjjj, align 4 %div = udiv i32 %0, %1 ; 16: divu $zero, ${{[0-9]+}}, ${{[0-9]+}} ; 16: mflo ${{[0-9]+}} diff --git a/llvm/test/CodeGen/Mips/divu_remu.ll b/llvm/test/CodeGen/Mips/divu_remu.ll index a6c1563ac19..a079440b913 100644 --- a/llvm/test/CodeGen/Mips/divu_remu.ll +++ b/llvm/test/CodeGen/Mips/divu_remu.ll @@ -8,8 +8,8 @@ define void @test() nounwind { entry: - %0 = load i32* @iiii, align 4 - %1 = load i32* @jjjj, align 4 + %0 = load i32, i32* @iiii, align 4 + %1 = load i32, i32* @jjjj, align 4 %div = udiv i32 %0, %1 store i32 %div, i32* @kkkk, align 4 %rem = urem i32 %0, %1 diff --git a/llvm/test/CodeGen/Mips/dsp-patterns.ll b/llvm/test/CodeGen/Mips/dsp-patterns.ll index 067003a8a8c..837c0d8bfc5 100644 --- a/llvm/test/CodeGen/Mips/dsp-patterns.ll +++ b/llvm/test/CodeGen/Mips/dsp-patterns.ll @@ -7,7 +7,7 @@ define zeroext i8 @test_lbux(i8* nocapture %b, i32 %i) { entry: %add.ptr = getelementptr inbounds i8, i8* %b, i32 %i - %0 = load i8* %add.ptr, align 1 + %0 = load i8, i8* %add.ptr, align 1 ret i8 %0 } @@ -17,7 +17,7 @@ entry: define signext i16 @test_lhx(i16* nocapture %b, i32 %i) { entry: %add.ptr = getelementptr inbounds i16, i16* %b, i32 %i - %0 = load i16* %add.ptr, align 2 + %0 = load i16, i16* %add.ptr, align 2 ret i16 %0 } @@ -27,7 +27,7 @@ entry: define i32 @test_lwx(i32* nocapture %b, i32 %i) { entry: %add.ptr = getelementptr inbounds i32, i32* %b, i32 %i - %0 = load i32* %add.ptr, align 4 + %0 = load i32, i32* %add.ptr, align 4 ret i32 %0 } diff --git a/llvm/test/CodeGen/Mips/dsp-vec-load-store.ll b/llvm/test/CodeGen/Mips/dsp-vec-load-store.ll index 7e4a8fedaa8..f9251807d00 100644 --- a/llvm/test/CodeGen/Mips/dsp-vec-load-store.ll +++ b/llvm/test/CodeGen/Mips/dsp-vec-load-store.ll @@ -5,7 +5,7 @@ define void @extend_load_trunc_store_v2i8() { entry: - %0 = load <2 x i8>* @g1, align 2 + %0 = load <2 x i8>, <2 x i8>* @g1, align 2 store <2 x i8> %0, <2 x i8>* @g0, align 2 ret void } diff --git a/llvm/test/CodeGen/Mips/eh.ll b/llvm/test/CodeGen/Mips/eh.ll index fc9e2ef21a8..03bc1993e70 100644 --- a/llvm/test/CodeGen/Mips/eh.ll +++ 
b/llvm/test/CodeGen/Mips/eh.ll @@ -37,7 +37,7 @@ lpad: ; preds = %entry catch: ; preds = %lpad %3 = tail call i8* @__cxa_begin_catch(i8* %exn) nounwind %4 = bitcast i8* %3 to double* - %exn.scalar = load double* %4, align 8 + %exn.scalar = load double, double* %4, align 8 %add = fadd double %exn.scalar, %i2 store double %add, double* @g1, align 8 tail call void @__cxa_end_catch() nounwind diff --git a/llvm/test/CodeGen/Mips/emit-big-cst.ll b/llvm/test/CodeGen/Mips/emit-big-cst.ll index a168743859a..9bc96c89307 100644 --- a/llvm/test/CodeGen/Mips/emit-big-cst.ll +++ b/llvm/test/CodeGen/Mips/emit-big-cst.ll @@ -10,7 +10,7 @@ define void @accessBig(i64* %storage) { %addr = bitcast i64* %storage to i82* - %bigLoadedCst = load volatile i82* @bigCst + %bigLoadedCst = load volatile i82, i82* @bigCst %tmp = add i82 %bigLoadedCst, 1 store i82 %tmp, i82* %addr ret void diff --git a/llvm/test/CodeGen/Mips/ex2.ll b/llvm/test/CodeGen/Mips/ex2.ll index 6d024c209c2..eb72a7a3a13 100644 --- a/llvm/test/CodeGen/Mips/ex2.ll +++ b/llvm/test/CodeGen/Mips/ex2.ll @@ -22,7 +22,7 @@ entry: unreachable return: ; No predecessors! - %1 = load i32* %retval + %1 = load i32, i32* %retval ret i32 %1 } diff --git a/llvm/test/CodeGen/Mips/extins.ll b/llvm/test/CodeGen/Mips/extins.ll index efaeeea96a5..6604f89b184 100644 --- a/llvm/test/CodeGen/Mips/extins.ll +++ b/llvm/test/CodeGen/Mips/extins.ll @@ -16,7 +16,7 @@ entry: ; 16-NOT: ins ${{[0-9]+}} %and = shl i32 %s, 5 %shl = and i32 %and, 16352 - %tmp3 = load i32* %d, align 4 + %tmp3 = load i32, i32* %d, align 4 %and5 = and i32 %tmp3, -16353 %or = or i32 %and5, %shl store i32 %or, i32* %d, align 4 diff --git a/llvm/test/CodeGen/Mips/f16abs.ll b/llvm/test/CodeGen/Mips/f16abs.ll index 0fba9c4fd08..838983274e9 100644 --- a/llvm/test/CodeGen/Mips/f16abs.ll +++ b/llvm/test/CodeGen/Mips/f16abs.ll @@ -11,12 +11,12 @@ ; Function Attrs: nounwind optsize define i32 @main() #0 { entry: - %0 = load double* @y, align 8 + %0 = load double, double* @y, align 8 %call = tail call double @fabs(double %0) #2 store double %call, double* @x, align 8 ; static-NOT: .ent __call_stub_fp_fabs ; static-NOT: jal fabs - %1 = load float* @y1, align 4 + %1 = load float, float* @y1, align 4 %call2 = tail call float @fabsf(float %1) #2 store float %call2, float* @x1, align 4 ; static-NOT: .ent __call_stub_fp_fabsf diff --git a/llvm/test/CodeGen/Mips/fastcc.ll b/llvm/test/CodeGen/Mips/fastcc.ll index 6b022c5e36d..a47a1f7e8d8 100644 --- a/llvm/test/CodeGen/Mips/fastcc.ll +++ b/llvm/test/CodeGen/Mips/fastcc.ll @@ -108,23 +108,23 @@ entry: ; CHECK-NACL-NOT: lw $15 ; CHECK-NACL-NOT: lw $24 - %0 = load i32* @gi0, align 4 - %1 = load i32* @gi1, align 4 - %2 = load i32* @gi2, align 4 - %3 = load i32* @gi3, align 4 - %4 = load i32* @gi4, align 4 - %5 = load i32* @gi5, align 4 - %6 = load i32* @gi6, align 4 - %7 = load i32* @gi7, align 4 - %8 = load i32* @gi8, align 4 - %9 = load i32* @gi9, align 4 - %10 = load i32* @gi10, align 4 - %11 = load i32* @gi11, align 4 - %12 = load i32* @gi12, align 4 - %13 = load i32* @gi13, align 4 - %14 = load i32* @gi14, align 4 - %15 = load i32* @gi15, align 4 - %16 = load i32* @gi16, align 4 + %0 = load i32, i32* @gi0, align 4 + %1 = load i32, i32* @gi1, align 4 + %2 = load i32, i32* @gi2, align 4 + %3 = load i32, i32* @gi3, align 4 + %4 = load i32, i32* @gi4, align 4 + %5 = load i32, i32* @gi5, align 4 + %6 = load i32, i32* @gi6, align 4 + %7 = load i32, i32* @gi7, align 4 + %8 = load i32, i32* @gi8, align 4 + %9 = load i32, i32* @gi9, align 4 + %10 = load i32, i32* @gi10, align 4 
+ %11 = load i32, i32* @gi11, align 4 + %12 = load i32, i32* @gi12, align 4 + %13 = load i32, i32* @gi13, align 4 + %14 = load i32, i32* @gi14, align 4 + %15 = load i32, i32* @gi15, align 4 + %16 = load i32, i32* @gi16, align 4 tail call fastcc void @callee0(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, i32 %14, i32 %15, i32 %16) ret void } @@ -196,27 +196,27 @@ entry: ; CHECK: lwc1 $f1 ; CHECK: lwc1 $f0 - %0 = load float* @gfa0, align 4 - %1 = load float* @gfa1, align 4 - %2 = load float* @gfa2, align 4 - %3 = load float* @gfa3, align 4 - %4 = load float* @gfa4, align 4 - %5 = load float* @gfa5, align 4 - %6 = load float* @gfa6, align 4 - %7 = load float* @gfa7, align 4 - %8 = load float* @gfa8, align 4 - %9 = load float* @gfa9, align 4 - %10 = load float* @gfa10, align 4 - %11 = load float* @gfa11, align 4 - %12 = load float* @gfa12, align 4 - %13 = load float* @gfa13, align 4 - %14 = load float* @gfa14, align 4 - %15 = load float* @gfa15, align 4 - %16 = load float* @gfa16, align 4 - %17 = load float* @gfa17, align 4 - %18 = load float* @gfa18, align 4 - %19 = load float* @gfa19, align 4 - %20 = load float* @gfa20, align 4 + %0 = load float, float* @gfa0, align 4 + %1 = load float, float* @gfa1, align 4 + %2 = load float, float* @gfa2, align 4 + %3 = load float, float* @gfa3, align 4 + %4 = load float, float* @gfa4, align 4 + %5 = load float, float* @gfa5, align 4 + %6 = load float, float* @gfa6, align 4 + %7 = load float, float* @gfa7, align 4 + %8 = load float, float* @gfa8, align 4 + %9 = load float, float* @gfa9, align 4 + %10 = load float, float* @gfa10, align 4 + %11 = load float, float* @gfa11, align 4 + %12 = load float, float* @gfa12, align 4 + %13 = load float, float* @gfa13, align 4 + %14 = load float, float* @gfa14, align 4 + %15 = load float, float* @gfa15, align 4 + %16 = load float, float* @gfa16, align 4 + %17 = load float, float* @gfa17, align 4 + %18 = load float, float* @gfa18, align 4 + %19 = load float, float* @gfa19, align 4 + %20 = load float, float* @gfa20, align 4 tail call fastcc void @callee1(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8, float %9, float %10, float %11, float %12, float %13, float %14, float %15, float %16, float %17, float %18, float %19, float %20) ret void } @@ -292,17 +292,17 @@ entry: ; NOODDSPREG-DAG: lwc1 $[[F0:f[0-9]*[02468]]], 40($[[R0]]) ; NOODDSPREG-DAG: swc1 $[[F0]], 0($sp) - %0 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 0), align 4 - %1 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 1), align 4 - %2 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 2), align 4 - %3 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 3), align 4 - %4 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 4), align 4 - %5 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 5), align 4 - %6 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 6), align 4 - %7 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 7), align 4 - %8 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 8), align 4 - %9 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 9), align 4 - %10 = load float* getelementptr ([11 x float]* @fa, i32 0, i32 10), align 4 + %0 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 0), align 4 + %1 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 1), align 4 + %2 = load float, float* getelementptr ([11 x float]* @fa, i32 
0, i32 2), align 4 + %3 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 3), align 4 + %4 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 4), align 4 + %5 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 5), align 4 + %6 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 6), align 4 + %7 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 7), align 4 + %8 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 8), align 4 + %9 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 9), align 4 + %10 = load float, float* getelementptr ([11 x float]* @fa, i32 0, i32 10), align 4 tail call fastcc void @callee2(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8, float %9, float %10) @@ -373,17 +373,17 @@ entry: ; FP64-NOODDSPREG-DAG: ldc1 $[[F0:f[0-9]*[02468]]], 80($[[R0]]) ; FP64-NOODDSPREG-DAG: sdc1 $[[F0]], 0($sp) - %0 = load double* getelementptr ([11 x double]* @da, i32 0, i32 0), align 8 - %1 = load double* getelementptr ([11 x double]* @da, i32 0, i32 1), align 8 - %2 = load double* getelementptr ([11 x double]* @da, i32 0, i32 2), align 8 - %3 = load double* getelementptr ([11 x double]* @da, i32 0, i32 3), align 8 - %4 = load double* getelementptr ([11 x double]* @da, i32 0, i32 4), align 8 - %5 = load double* getelementptr ([11 x double]* @da, i32 0, i32 5), align 8 - %6 = load double* getelementptr ([11 x double]* @da, i32 0, i32 6), align 8 - %7 = load double* getelementptr ([11 x double]* @da, i32 0, i32 7), align 8 - %8 = load double* getelementptr ([11 x double]* @da, i32 0, i32 8), align 8 - %9 = load double* getelementptr ([11 x double]* @da, i32 0, i32 9), align 8 - %10 = load double* getelementptr ([11 x double]* @da, i32 0, i32 10), align 8 + %0 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 0), align 8 + %1 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 1), align 8 + %2 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 2), align 8 + %3 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 3), align 8 + %4 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 4), align 8 + %5 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 5), align 8 + %6 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 6), align 8 + %7 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 7), align 8 + %8 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 8), align 8 + %9 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 9), align 8 + %10 = load double, double* getelementptr ([11 x double]* @da, i32 0, i32 10), align 8 tail call fastcc void @callee3(double %0, double %1, double %2, double %3, double %4, double %5, double %6, double %7, double %8, double %9, double %10) diff --git a/llvm/test/CodeGen/Mips/fixdfsf.ll b/llvm/test/CodeGen/Mips/fixdfsf.ll index 4271ac222ed..869579922d5 100644 --- a/llvm/test/CodeGen/Mips/fixdfsf.ll +++ b/llvm/test/CodeGen/Mips/fixdfsf.ll @@ -7,7 +7,7 @@ ; Function Attrs: nounwind optsize define void @foo() { entry: - %0 = load double* @x, align 8 + %0 = load double, double* @x, align 8 %conv = fptoui double %0 to i32 store i32 %conv, i32* @y, align 4 ; pic1: lw ${{[0-9]+}}, %call16(__fixunsdfsi)(${{[0-9]+}}) diff --git a/llvm/test/CodeGen/Mips/fp-indexed-ls.ll b/llvm/test/CodeGen/Mips/fp-indexed-ls.ll index 3ff9b373c8e..ee6a7ed2b08 100644 --- 
a/llvm/test/CodeGen/Mips/fp-indexed-ls.ll +++ b/llvm/test/CodeGen/Mips/fp-indexed-ls.ll @@ -46,7 +46,7 @@ entry: ; CHECK-NACL-NOT: lwxc1 %arrayidx = getelementptr inbounds float, float* %b, i32 %o - %0 = load float* %arrayidx, align 4 + %0 = load float, float* %arrayidx, align 4 ret float %0 } @@ -77,7 +77,7 @@ entry: ; CHECK-NACL-NOT: ldxc1 %arrayidx = getelementptr inbounds double, double* %b, i32 %o - %0 = load double* %arrayidx, align 8 + %0 = load double, double* %arrayidx, align 8 ret double %0 } @@ -101,7 +101,7 @@ entry: ; MIPS64R6-NOT: luxc1 %arrayidx1 = getelementptr inbounds [4 x %struct.S], [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c - %0 = load float* %arrayidx1, align 1 + %0 = load float, float* %arrayidx1, align 1 ret float %0 } @@ -129,7 +129,7 @@ entry: ; CHECK-NACL-NOT: swxc1 - %0 = load float* @gf, align 4 + %0 = load float, float* @gf, align 4 %arrayidx = getelementptr inbounds float, float* %b, i32 %o store float %0, float* %arrayidx, align 4 ret void @@ -159,7 +159,7 @@ entry: ; CHECK-NACL-NOT: sdxc1 - %0 = load double* @gd, align 8 + %0 = load double, double* @gd, align 8 %arrayidx = getelementptr inbounds double, double* %b, i32 %o store double %0, double* %arrayidx, align 8 ret void @@ -179,7 +179,7 @@ entry: ; MIPS64R6-NOT: suxc1 - %0 = load float* @gf, align 4 + %0 = load float, float* @gf, align 4 %arrayidx1 = getelementptr inbounds [4 x %struct.S], [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c store float %0, float* %arrayidx1, align 1 ret void @@ -200,7 +200,7 @@ entry: ; MIPS64R6-NOT: luxc1 %arrayidx1 = getelementptr inbounds [4 x %struct.S2], [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c - %0 = load double* %arrayidx1, align 1 + %0 = load double, double* %arrayidx1, align 1 ret double %0 } @@ -218,7 +218,7 @@ entry: ; MIPS64R6-NOT: suxc1 - %0 = load double* @gd, align 8 + %0 = load double, double* @gd, align 8 %arrayidx1 = getelementptr inbounds [4 x %struct.S2], [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c store double %0, double* %arrayidx1, align 1 ret void @@ -238,7 +238,7 @@ entry: ; MIPS64R6-NOT: luxc1 - %0 = load float* getelementptr inbounds (%struct.S3* @s3, i32 0, i32 1), align 1 + %0 = load float, float* getelementptr inbounds (%struct.S3* @s3, i32 0, i32 1), align 1 ret float %0 } diff --git a/llvm/test/CodeGen/Mips/fp-spill-reload.ll b/llvm/test/CodeGen/Mips/fp-spill-reload.ll index 418a74cd65a..4a53ad8c8e1 100644 --- a/llvm/test/CodeGen/Mips/fp-spill-reload.ll +++ b/llvm/test/CodeGen/Mips/fp-spill-reload.ll @@ -5,27 +5,27 @@ define void @foo0(i32* nocapture %b) nounwind { entry: ; CHECK: sw $fp ; CHECK: lw $fp - %0 = load i32* %b, align 4 + %0 = load i32, i32* %b, align 4 %arrayidx.1 = getelementptr inbounds i32, i32* %b, i32 1 - %1 = load i32* %arrayidx.1, align 4 + %1 = load i32, i32* %arrayidx.1, align 4 %add.1 = add nsw i32 %1, 1 %arrayidx.2 = getelementptr inbounds i32, i32* %b, i32 2 - %2 = load i32* %arrayidx.2, align 4 + %2 = load i32, i32* %arrayidx.2, align 4 %add.2 = add nsw i32 %2, 2 %arrayidx.3 = getelementptr inbounds i32, i32* %b, i32 3 - %3 = load i32* %arrayidx.3, align 4 + %3 = load i32, i32* %arrayidx.3, align 4 %add.3 = add nsw i32 %3, 3 %arrayidx.4 = getelementptr inbounds i32, i32* %b, i32 4 - %4 = load i32* %arrayidx.4, align 4 + %4 = load i32, i32* %arrayidx.4, align 4 %add.4 = add nsw i32 %4, 4 %arrayidx.5 = getelementptr inbounds i32, i32* %b, i32 5 - %5 = load i32* %arrayidx.5, align 4 + %5 = load i32, i32* %arrayidx.5, align 4 %add.5 = add nsw i32 %5, 5 %arrayidx.6 = getelementptr inbounds i32, 
i32* %b, i32 6 - %6 = load i32* %arrayidx.6, align 4 + %6 = load i32, i32* %arrayidx.6, align 4 %add.6 = add nsw i32 %6, 6 %arrayidx.7 = getelementptr inbounds i32, i32* %b, i32 7 - %7 = load i32* %arrayidx.7, align 4 + %7 = load i32, i32* %arrayidx.7, align 4 %add.7 = add nsw i32 %7, 7 call void @foo2(i32 %0, i32 %add.1, i32 %add.2, i32 %add.3, i32 %add.4, i32 %add.5, i32 %add.6, i32 %add.7) nounwind call void bitcast (void (...)* @foo1 to void ()*)() nounwind diff --git a/llvm/test/CodeGen/Mips/fp16instrinsmc.ll b/llvm/test/CodeGen/Mips/fp16instrinsmc.ll index 84d3814ee8b..797be2668d4 100644 --- a/llvm/test/CodeGen/Mips/fp16instrinsmc.ll +++ b/llvm/test/CodeGen/Mips/fp16instrinsmc.ll @@ -23,8 +23,8 @@ define void @foo1() #0 { ; fmask: .set reorder ; fmask: .end foo1 entry: - %0 = load float* @x, align 4 - %1 = load float* @one, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @one, align 4 %call = call float @copysignf(float %0, float %1) #2 store float %call, float* @y, align 4 ret void @@ -39,8 +39,8 @@ define void @foo2() #0 { ; fmask: save {{.*}} ; fmask: .end foo2 entry: - %0 = load float* @x, align 4 - %1 = load float* @negone, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @negone, align 4 %call = call float @copysignf(float %0, float %1) #2 store float %call, float* @y, align 4 ret void @@ -57,8 +57,8 @@ entry: ; fmask: .set macro ; fmask: .set reorder ; fmask: .end foo3 - %0 = load double* @xd, align 8 - %1 = load float* @oned, align 4 + %0 = load double, double* @xd, align 8 + %1 = load float, float* @oned, align 4 %conv = fpext float %1 to double %call = call double @copysign(double %0, double %conv) #2 store double %call, double* @yd, align 8 @@ -74,8 +74,8 @@ entry: ; fmask: .ent foo4 ; fmask: save {{.*}} ; fmask: .end foo4 - %0 = load double* @xd, align 8 - %1 = load double* @negoned, align 8 + %0 = load double, double* @xd, align 8 + %1 = load double, double* @negoned, align 8 %call = call double @copysign(double %0, double %1) #2 store double %call, double* @yd, align 8 ret void @@ -84,7 +84,7 @@ entry: ; Function Attrs: nounwind define void @foo5() #0 { entry: - %0 = load float* @xn, align 4 + %0 = load float, float* @xn, align 4 %call = call float @fabsf(float %0) #2 store float %call, float* @y, align 4 ret void @@ -96,7 +96,7 @@ declare float @fabsf(float) #1 ; Function Attrs: nounwind define void @foo6() #0 { entry: - %0 = load double* @xdn, align 8 + %0 = load double, double* @xdn, align 8 %call = call double @fabs(double %0) #2 store double %call, double* @yd, align 8 ret void @@ -108,7 +108,7 @@ declare double @fabs(double) #1 ; Function Attrs: nounwind define void @foo7() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @sinf(float %0) #3 ;pic: lw ${{[0-9]+}}, %call16(sinf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -122,7 +122,7 @@ declare float @sinf(float) #0 ; Function Attrs: nounwind define void @foo8() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @sin(double %0) #3 ;pic: lw ${{[0-9]+}}, %call16(sin)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -136,7 +136,7 @@ declare double @sin(double) #0 ; Function Attrs: nounwind define void @foo9() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @cosf(float %0) #3 ;pic: lw ${{[0-9]+}}, %call16(cosf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, 
%got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -150,7 +150,7 @@ declare float @cosf(float) #0 ; Function Attrs: nounwind define void @foo10() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @cos(double %0) #3 ;pic: lw ${{[0-9]+}}, %call16(cos)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -164,7 +164,7 @@ declare double @cos(double) #0 ; Function Attrs: nounwind define void @foo11() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @sqrtf(float %0) #3 ;pic: lw ${{[0-9]+}}, %call16(sqrtf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -178,7 +178,7 @@ declare float @sqrtf(float) #0 ; Function Attrs: nounwind define void @foo12() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @sqrt(double %0) #3 ;pic: lw ${{[0-9]+}}, %call16(sqrt)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -192,7 +192,7 @@ declare double @sqrt(double) #0 ; Function Attrs: nounwind define void @foo13() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @floorf(float %0) #2 ;pic: lw ${{[0-9]+}}, %call16(floorf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -206,7 +206,7 @@ declare float @floorf(float) #1 ; Function Attrs: nounwind define void @foo14() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @floor(double %0) #2 ;pic: lw ${{[0-9]+}}, %call16(floor)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -220,7 +220,7 @@ declare double @floor(double) #1 ; Function Attrs: nounwind define void @foo15() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @nearbyintf(float %0) #2 ;pic: lw ${{[0-9]+}}, %call16(nearbyintf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -234,7 +234,7 @@ declare float @nearbyintf(float) #1 ; Function Attrs: nounwind define void @foo16() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @nearbyint(double %0) #2 ;pic: lw ${{[0-9]+}}, %call16(nearbyint)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -248,7 +248,7 @@ declare double @nearbyint(double) #1 ; Function Attrs: nounwind define void @foo17() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @ceilf(float %0) #2 ;pic: lw ${{[0-9]+}}, %call16(ceilf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -262,7 +262,7 @@ declare float @ceilf(float) #1 ; Function Attrs: nounwind define void @foo18() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @ceil(double %0) #2 ;pic: lw ${{[0-9]+}}, %call16(ceil)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -276,7 +276,7 @@ declare double @ceil(double) #1 ; Function Attrs: nounwind define void @foo19() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @rintf(float %0) #2 ;pic: lw ${{[0-9]+}}, %call16(rintf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -290,7 +290,7 @@ declare float @rintf(float) #1 ; Function Attrs: nounwind define void @foo20() #0 { 
entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @rint(double %0) #2 ;pic: lw ${{[0-9]+}}, %call16(rint)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -304,7 +304,7 @@ declare double @rint(double) #1 ; Function Attrs: nounwind define void @foo21() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @truncf(float %0) #2 ;pic: lw ${{[0-9]+}}, %call16(truncf)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -318,7 +318,7 @@ declare float @truncf(float) #1 ; Function Attrs: nounwind define void @foo22() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @trunc(double %0) #2 ;pic: lw ${{[0-9]+}}, %call16(trunc)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -332,7 +332,7 @@ declare double @trunc(double) #1 ; Function Attrs: nounwind define void @foo23() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @log2f(float %0) #3 ;pic: lw ${{[0-9]+}}, %call16(log2f)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -346,7 +346,7 @@ declare float @log2f(float) #0 ; Function Attrs: nounwind define void @foo24() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @log2(double %0) #3 ;pic: lw ${{[0-9]+}}, %call16(log2)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) @@ -360,7 +360,7 @@ declare double @log2(double) #0 ; Function Attrs: nounwind define void @foo25() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %call = call float @exp2f(float %0) #3 ;pic: lw ${{[0-9]+}}, %call16(exp2f)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_sf_1)(${{[0-9]+}}) @@ -374,7 +374,7 @@ declare float @exp2f(float) #0 ; Function Attrs: nounwind define void @foo26() #0 { entry: - %0 = load double* @xd, align 8 + %0 = load double, double* @xd, align 8 %call = call double @exp2(double %0) #3 ;pic: lw ${{[0-9]+}}, %call16(exp2)(${{[0-9]+}}) ;pic: lw ${{[0-9]+}}, %got(__mips16_call_stub_df_2)(${{[0-9]+}}) diff --git a/llvm/test/CodeGen/Mips/fp16static.ll b/llvm/test/CodeGen/Mips/fp16static.ll index beb063db15c..4e5059ed39e 100644 --- a/llvm/test/CodeGen/Mips/fp16static.ll +++ b/llvm/test/CodeGen/Mips/fp16static.ll @@ -4,8 +4,8 @@ define void @foo() nounwind { entry: - %0 = load float* @x, align 4 - %1 = load float* @x, align 4 + %0 = load float, float* @x, align 4 + %1 = load float, float* @x, align 4 %mul = fmul float %0, %1 store float %mul, float* @x, align 4 ; CHECK-STATIC16: jal __mips16_mulsf3 diff --git a/llvm/test/CodeGen/Mips/fpneeded.ll b/llvm/test/CodeGen/Mips/fpneeded.ll index fdd8e8f707e..a89e2a593a4 100644 --- a/llvm/test/CodeGen/Mips/fpneeded.ll +++ b/llvm/test/CodeGen/Mips/fpneeded.ll @@ -76,8 +76,8 @@ entry: define void @foo1() #0 { entry: store float 1.000000e+00, float* @zz, align 4 - %0 = load float* @y, align 4 - %1 = load float* @x, align 4 + %0 = load float, float* @y, align 4 + %1 = load float, float* @x, align 4 %add = fadd float %0, %1 store float %add, float* @z, align 4 ret void @@ -96,7 +96,7 @@ entry: define void @foo2() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 call void @vf(float %0) ret void } diff --git a/llvm/test/CodeGen/Mips/fpnotneeded.ll b/llvm/test/CodeGen/Mips/fpnotneeded.ll index 
e12d7baacdb..02b8e8a345d 100644 --- a/llvm/test/CodeGen/Mips/fpnotneeded.ll +++ b/llvm/test/CodeGen/Mips/fpnotneeded.ll @@ -19,7 +19,7 @@ entry: define i32 @iv() #0 { entry: - %0 = load i32* @i, align 4 + %0 = load i32, i32* @i, align 4 ret i32 %0 } diff --git a/llvm/test/CodeGen/Mips/global-address.ll b/llvm/test/CodeGen/Mips/global-address.ll index ae6afeb1f4e..ecf5e563a57 100644 --- a/llvm/test/CodeGen/Mips/global-address.ll +++ b/llvm/test/CodeGen/Mips/global-address.ll @@ -33,9 +33,9 @@ entry: ; STATIC-N64: lw ${{[0-9]+}}, %got_ofst(s1)($[[R1]]) ; STATIC-N64: ld ${{[0-9]+}}, %got_disp(g1) - %0 = load i32* @s1, align 4 + %0 = load i32, i32* @s1, align 4 tail call void @foo1(i32 %0) nounwind - %1 = load i32* @g1, align 4 + %1 = load i32, i32* @g1, align 4 store i32 %1, i32* @s1, align 4 %add = add nsw i32 %1, 2 store i32 %add, i32* @g1, align 4 diff --git a/llvm/test/CodeGen/Mips/gpreg-lazy-binding.ll b/llvm/test/CodeGen/Mips/gpreg-lazy-binding.ll index 3a636d82533..800a74f5358 100644 --- a/llvm/test/CodeGen/Mips/gpreg-lazy-binding.ll +++ b/llvm/test/CodeGen/Mips/gpreg-lazy-binding.ll @@ -19,7 +19,7 @@ declare void @externalFunc() define internal fastcc void @internalFunc() nounwind noinline { entry: - %0 = load i32* @g, align 4 + %0 = load i32, i32* @g, align 4 %inc = add nsw i32 %0, 1 store i32 %inc, i32* @g, align 4 ret void diff --git a/llvm/test/CodeGen/Mips/gprestore.ll b/llvm/test/CodeGen/Mips/gprestore.ll index cbcf0c93491..0b005ab6871 100644 --- a/llvm/test/CodeGen/Mips/gprestore.ll +++ b/llvm/test/CodeGen/Mips/gprestore.ll @@ -18,10 +18,10 @@ entry: ; CHECK-NOT: got({{.*}})($gp) ; CHECK: lw $gp tail call void (...)* @f1() nounwind - %tmp = load i32* @p, align 4 + %tmp = load i32, i32* @p, align 4 tail call void @f2(i32 %tmp) nounwind - %tmp1 = load i32* @q, align 4 - %tmp2 = load i32* @r, align 4 + %tmp1 = load i32, i32* @q, align 4 + %tmp2 = load i32, i32* @r, align 4 tail call void @f3(i32 %tmp1, i32 %tmp2) nounwind ret void } diff --git a/llvm/test/CodeGen/Mips/hf16_1.ll b/llvm/test/CodeGen/Mips/hf16_1.ll index 9879cd523af..103fd2d7fd6 100644 --- a/llvm/test/CodeGen/Mips/hf16_1.ll +++ b/llvm/test/CodeGen/Mips/hf16_1.ll @@ -11,96 +11,96 @@ define void @foo() nounwind { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 call void @v_sf(float %0) - %1 = load double* @xd, align 8 + %1 = load double, double* @xd, align 8 call void @v_df(double %1) - %2 = load float* @x, align 4 - %3 = load float* @y, align 4 + %2 = load float, float* @x, align 4 + %3 = load float, float* @y, align 4 call void @v_sf_sf(float %2, float %3) - %4 = load double* @xd, align 8 - %5 = load float* @x, align 4 + %4 = load double, double* @xd, align 8 + %5 = load float, float* @x, align 4 call void @v_df_sf(double %4, float %5) - %6 = load double* @xd, align 8 - %7 = load double* @yd, align 8 + %6 = load double, double* @xd, align 8 + %7 = load double, double* @yd, align 8 call void @v_df_df(double %6, double %7) %call = call float @sf_v() - %8 = load float* @x, align 4 + %8 = load float, float* @x, align 4 %call1 = call float @sf_sf(float %8) - %9 = load double* @xd, align 8 + %9 = load double, double* @xd, align 8 %call2 = call float @sf_df(double %9) - %10 = load float* @x, align 4 - %11 = load float* @y, align 4 + %10 = load float, float* @x, align 4 + %11 = load float, float* @y, align 4 %call3 = call float @sf_sf_sf(float %10, float %11) - %12 = load double* @xd, align 8 - %13 = load float* @x, align 4 + %12 = load double, double* @xd, align 8 + %13 = load float, float* @x, 
align 4 %call4 = call float @sf_df_sf(double %12, float %13) - %14 = load double* @xd, align 8 - %15 = load double* @yd, align 8 + %14 = load double, double* @xd, align 8 + %15 = load double, double* @yd, align 8 %call5 = call float @sf_df_df(double %14, double %15) %call6 = call double @df_v() - %16 = load float* @x, align 4 + %16 = load float, float* @x, align 4 %call7 = call double @df_sf(float %16) - %17 = load double* @xd, align 8 + %17 = load double, double* @xd, align 8 %call8 = call double @df_df(double %17) - %18 = load float* @x, align 4 - %19 = load float* @y, align 4 + %18 = load float, float* @x, align 4 + %19 = load float, float* @y, align 4 %call9 = call double @df_sf_sf(float %18, float %19) - %20 = load double* @xd, align 8 - %21 = load float* @x, align 4 + %20 = load double, double* @xd, align 8 + %21 = load float, float* @x, align 4 %call10 = call double @df_df_sf(double %20, float %21) - %22 = load double* @xd, align 8 - %23 = load double* @yd, align 8 + %22 = load double, double* @xd, align 8 + %23 = load double, double* @yd, align 8 %call11 = call double @df_df_df(double %22, double %23) %call12 = call { float, float } @sc_v() %24 = extractvalue { float, float } %call12, 0 %25 = extractvalue { float, float } %call12, 1 - %26 = load float* @x, align 4 + %26 = load float, float* @x, align 4 %call13 = call { float, float } @sc_sf(float %26) %27 = extractvalue { float, float } %call13, 0 %28 = extractvalue { float, float } %call13, 1 - %29 = load double* @xd, align 8 + %29 = load double, double* @xd, align 8 %call14 = call { float, float } @sc_df(double %29) %30 = extractvalue { float, float } %call14, 0 %31 = extractvalue { float, float } %call14, 1 - %32 = load float* @x, align 4 - %33 = load float* @y, align 4 + %32 = load float, float* @x, align 4 + %33 = load float, float* @y, align 4 %call15 = call { float, float } @sc_sf_sf(float %32, float %33) %34 = extractvalue { float, float } %call15, 0 %35 = extractvalue { float, float } %call15, 1 - %36 = load double* @xd, align 8 - %37 = load float* @x, align 4 + %36 = load double, double* @xd, align 8 + %37 = load float, float* @x, align 4 %call16 = call { float, float } @sc_df_sf(double %36, float %37) %38 = extractvalue { float, float } %call16, 0 %39 = extractvalue { float, float } %call16, 1 - %40 = load double* @xd, align 8 - %41 = load double* @yd, align 8 + %40 = load double, double* @xd, align 8 + %41 = load double, double* @yd, align 8 %call17 = call { float, float } @sc_df_df(double %40, double %41) %42 = extractvalue { float, float } %call17, 0 %43 = extractvalue { float, float } %call17, 1 %call18 = call { double, double } @dc_v() %44 = extractvalue { double, double } %call18, 0 %45 = extractvalue { double, double } %call18, 1 - %46 = load float* @x, align 4 + %46 = load float, float* @x, align 4 %call19 = call { double, double } @dc_sf(float %46) %47 = extractvalue { double, double } %call19, 0 %48 = extractvalue { double, double } %call19, 1 - %49 = load double* @xd, align 8 + %49 = load double, double* @xd, align 8 %call20 = call { double, double } @dc_df(double %49) %50 = extractvalue { double, double } %call20, 0 %51 = extractvalue { double, double } %call20, 1 - %52 = load float* @x, align 4 - %53 = load float* @y, align 4 + %52 = load float, float* @x, align 4 + %53 = load float, float* @y, align 4 %call21 = call { double, double } @dc_sf_sf(float %52, float %53) %54 = extractvalue { double, double } %call21, 0 %55 = extractvalue { double, double } %call21, 1 - %56 = load double* @xd, align 8 - %57 = 
load float* @x, align 4 + %56 = load double, double* @xd, align 8 + %57 = load float, float* @x, align 4 %call22 = call { double, double } @dc_df_sf(double %56, float %57) %58 = extractvalue { double, double } %call22, 0 %59 = extractvalue { double, double } %call22, 1 - %60 = load double* @xd, align 8 - %61 = load double* @yd, align 8 + %60 = load double, double* @xd, align 8 + %61 = load double, double* @yd, align 8 %call23 = call { double, double } @dc_df_df(double %60, double %61) %62 = extractvalue { double, double } %call23, 0 %63 = extractvalue { double, double } %call23, 1 diff --git a/llvm/test/CodeGen/Mips/hf16call32.ll b/llvm/test/CodeGen/Mips/hf16call32.ll index aec9c71c485..035479c6b3d 100644 --- a/llvm/test/CodeGen/Mips/hf16call32.ll +++ b/llvm/test/CodeGen/Mips/hf16call32.ll @@ -67,50 +67,50 @@ entry: store i32 0, i32* %retval call void @clear() store float 1.500000e+00, float* @lx, align 4 - %0 = load float* @lx, align 4 + %0 = load float, float* @lx, align 4 call void @v_sf(float %0) - %1 = load float* @x, align 4 + %1 = load float, float* @x, align 4 %conv = fpext float %1 to double - %2 = load float* @lx, align 4 + %2 = load float, float* @lx, align 4 %conv1 = fpext float %2 to double - %3 = load float* @x, align 4 - %4 = load float* @lx, align 4 + %3 = load float, float* @x, align 4 + %4 = load float, float* @lx, align 4 %cmp = fcmp oeq float %3, %4 %conv2 = zext i1 %cmp to i32 %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), double %conv, double %conv1, i32 %conv2) call void @clear() store double 0x41678C29C0000000, double* @lxd, align 8 - %5 = load double* @lxd, align 8 + %5 = load double, double* @lxd, align 8 call void @v_df(double %5) - %6 = load double* @xd, align 8 - %7 = load double* @lxd, align 8 - %8 = load double* @xd, align 8 - %9 = load double* @lxd, align 8 + %6 = load double, double* @xd, align 8 + %7 = load double, double* @lxd, align 8 + %8 = load double, double* @xd, align 8 + %9 = load double, double* @lxd, align 8 %cmp3 = fcmp oeq double %8, %9 %conv4 = zext i1 %cmp3 to i32 %call5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), double %6, double %7, i32 %conv4) call void @clear() store float 9.000000e+00, float* @lx, align 4 store float 1.000000e+01, float* @ly, align 4 - %10 = load float* @lx, align 4 - %11 = load float* @ly, align 4 + %10 = load float, float* @lx, align 4 + %11 = load float, float* @ly, align 4 call void @v_sf_sf(float %10, float %11) - %12 = load float* @x, align 4 + %12 = load float, float* @x, align 4 %conv6 = fpext float %12 to double - %13 = load float* @lx, align 4 + %13 = load float, float* @lx, align 4 %conv7 = fpext float %13 to double - %14 = load float* @y, align 4 + %14 = load float, float* @y, align 4 %conv8 = fpext float %14 to double - %15 = load float* @ly, align 4 + %15 = load float, float* @ly, align 4 %conv9 = fpext float %15 to double - %16 = load float* @x, align 4 - %17 = load float* @lx, align 4 + %16 = load float, float* @x, align 4 + %17 = load float, float* @lx, align 4 %cmp10 = fcmp oeq float %16, %17 br i1 %cmp10, label %land.rhs, label %land.end land.rhs: ; preds = %entry - %18 = load float* @y, align 4 - %19 = load float* @ly, align 4 + %18 = load float, float* @y, align 4 + %19 = load float, float* @ly, align 4 %cmp12 = fcmp oeq float %18, %19 br label %land.end @@ -121,21 +121,21 @@ land.end: ; preds = %land.rhs, %entry call void @clear() store float 0x3FFE666660000000, float* @lx, align 4 store double 
0x4007E613249FF279, double* @lyd, align 8 - %21 = load float* @lx, align 4 - %22 = load double* @lyd, align 8 + %21 = load float, float* @lx, align 4 + %22 = load double, double* @lyd, align 8 call void @v_sf_df(float %21, double %22) - %23 = load float* @x, align 4 + %23 = load float, float* @x, align 4 %conv15 = fpext float %23 to double - %24 = load float* @lx, align 4 + %24 = load float, float* @lx, align 4 %conv16 = fpext float %24 to double - %25 = load double* @yd, align 8 - %26 = load double* @lyd, align 8 - %27 = load float* @x, align 4 - %28 = load float* @lx, align 4 + %25 = load double, double* @yd, align 8 + %26 = load double, double* @lyd, align 8 + %27 = load float, float* @x, align 4 + %28 = load float, float* @lx, align 4 %cmp17 = fcmp oeq float %27, %28 %conv18 = zext i1 %cmp17 to i32 - %29 = load double* @yd, align 8 - %30 = load double* @lyd, align 8 + %29 = load double, double* @yd, align 8 + %30 = load double, double* @lyd, align 8 %cmp19 = fcmp oeq double %29, %30 %conv20 = zext i1 %cmp19 to i32 %and = and i32 %conv18, %conv20 @@ -143,21 +143,21 @@ land.end: ; preds = %land.rhs, %entry call void @clear() store double 0x4194E54F94000000, double* @lxd, align 8 store float 7.600000e+01, float* @ly, align 4 - %31 = load double* @lxd, align 8 - %32 = load float* @ly, align 4 + %31 = load double, double* @lxd, align 8 + %32 = load float, float* @ly, align 4 call void @v_df_sf(double %31, float %32) - %33 = load double* @xd, align 8 - %34 = load double* @lxd, align 8 - %35 = load float* @y, align 4 + %33 = load double, double* @xd, align 8 + %34 = load double, double* @lxd, align 8 + %35 = load float, float* @y, align 4 %conv22 = fpext float %35 to double - %36 = load float* @ly, align 4 + %36 = load float, float* @ly, align 4 %conv23 = fpext float %36 to double - %37 = load double* @xd, align 8 - %38 = load double* @lxd, align 8 + %37 = load double, double* @xd, align 8 + %38 = load double, double* @lxd, align 8 %cmp24 = fcmp oeq double %37, %38 %conv25 = zext i1 %cmp24 to i32 - %39 = load float* @y, align 4 - %40 = load float* @ly, align 4 + %39 = load float, float* @y, align 4 + %40 = load float, float* @ly, align 4 %cmp26 = fcmp oeq float %39, %40 %conv27 = zext i1 %cmp26 to i32 %and28 = and i32 %conv25, %conv27 @@ -165,19 +165,19 @@ land.end: ; preds = %land.rhs, %entry call void @clear() store double 7.365198e+07, double* @lxd, align 8 store double 0x416536CD80000000, double* @lyd, align 8 - %41 = load double* @lxd, align 8 - %42 = load double* @lyd, align 8 + %41 = load double, double* @lxd, align 8 + %42 = load double, double* @lyd, align 8 call void @v_df_df(double %41, double %42) - %43 = load double* @xd, align 8 - %44 = load double* @lxd, align 8 - %45 = load double* @yd, align 8 - %46 = load double* @lyd, align 8 - %47 = load double* @xd, align 8 - %48 = load double* @lxd, align 8 + %43 = load double, double* @xd, align 8 + %44 = load double, double* @lxd, align 8 + %45 = load double, double* @yd, align 8 + %46 = load double, double* @lyd, align 8 + %47 = load double, double* @xd, align 8 + %48 = load double, double* @lxd, align 8 %cmp30 = fcmp oeq double %47, %48 %conv31 = zext i1 %cmp30 to i32 - %49 = load double* @yd, align 8 - %50 = load double* @lyd, align 8 + %49 = load double, double* @yd, align 8 + %50 = load double, double* @lyd, align 8 %cmp32 = fcmp oeq double %49, %50 %conv33 = zext i1 %cmp32 to i32 %and34 = and i32 %conv31, %conv33 @@ -186,35 +186,35 @@ land.end: ; preds = %land.rhs, %entry store float 0x4016666660000000, float* @ret_sf, align 4 
%call36 = call float @sf_v() store float %call36, float* @lret_sf, align 4 - %51 = load float* @ret_sf, align 4 + %51 = load float, float* @ret_sf, align 4 %conv37 = fpext float %51 to double - %52 = load float* @lret_sf, align 4 + %52 = load float, float* @lret_sf, align 4 %conv38 = fpext float %52 to double - %53 = load float* @ret_sf, align 4 - %54 = load float* @lret_sf, align 4 + %53 = load float, float* @ret_sf, align 4 + %54 = load float, float* @lret_sf, align 4 %cmp39 = fcmp oeq float %53, %54 %conv40 = zext i1 %cmp39 to i32 %call41 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), double %conv37, double %conv38, i32 %conv40) call void @clear() store float 4.587300e+06, float* @ret_sf, align 4 store float 3.420000e+02, float* @lx, align 4 - %55 = load float* @lx, align 4 + %55 = load float, float* @lx, align 4 %call42 = call float @sf_sf(float %55) store float %call42, float* @lret_sf, align 4 - %56 = load float* @ret_sf, align 4 + %56 = load float, float* @ret_sf, align 4 %conv43 = fpext float %56 to double - %57 = load float* @lret_sf, align 4 + %57 = load float, float* @lret_sf, align 4 %conv44 = fpext float %57 to double - %58 = load float* @x, align 4 + %58 = load float, float* @x, align 4 %conv45 = fpext float %58 to double - %59 = load float* @lx, align 4 + %59 = load float, float* @lx, align 4 %conv46 = fpext float %59 to double - %60 = load float* @ret_sf, align 4 - %61 = load float* @lret_sf, align 4 + %60 = load float, float* @ret_sf, align 4 + %61 = load float, float* @lret_sf, align 4 %cmp47 = fcmp oeq float %60, %61 %conv48 = zext i1 %cmp47 to i32 - %62 = load float* @x, align 4 - %63 = load float* @lx, align 4 + %62 = load float, float* @x, align 4 + %63 = load float, float* @lx, align 4 %cmp49 = fcmp oeq float %62, %63 %conv50 = zext i1 %cmp49 to i32 %and51 = and i32 %conv48, %conv50 @@ -222,21 +222,21 @@ land.end: ; preds = %land.rhs, %entry call void @clear() store float 4.445910e+06, float* @ret_sf, align 4 store double 0x419A7DB294000000, double* @lxd, align 8 - %64 = load double* @lxd, align 8 + %64 = load double, double* @lxd, align 8 %call53 = call float @sf_df(double %64) store float %call53, float* @lret_sf, align 4 - %65 = load float* @ret_sf, align 4 + %65 = load float, float* @ret_sf, align 4 %conv54 = fpext float %65 to double - %66 = load float* @lret_sf, align 4 + %66 = load float, float* @lret_sf, align 4 %conv55 = fpext float %66 to double - %67 = load double* @xd, align 8 - %68 = load double* @lxd, align 8 - %69 = load float* @ret_sf, align 4 - %70 = load float* @lret_sf, align 4 + %67 = load double, double* @xd, align 8 + %68 = load double, double* @lxd, align 8 + %69 = load float, float* @ret_sf, align 4 + %70 = load float, float* @lret_sf, align 4 %cmp56 = fcmp oeq float %69, %70 %conv57 = zext i1 %cmp56 to i32 - %71 = load double* @xd, align 8 - %72 = load double* @lxd, align 8 + %71 = load double, double* @xd, align 8 + %72 = load double, double* @lxd, align 8 %cmp58 = fcmp oeq double %71, %72 %conv59 = zext i1 %cmp58 to i32 %and60 = and i32 %conv57, %conv59 @@ -245,36 +245,36 @@ land.end: ; preds = %land.rhs, %entry store float 0x3FFF4BC6A0000000, float* @ret_sf, align 4 store float 4.445500e+03, float* @lx, align 4 store float 0x4068ACCCC0000000, float* @ly, align 4 - %73 = load float* @lx, align 4 - %74 = load float* @ly, align 4 + %73 = load float, float* @lx, align 4 + %74 = load float, float* @ly, align 4 %call62 = call float @sf_sf_sf(float %73, float %74) store float %call62, float* @lret_sf, 
align 4 - %75 = load float* @ret_sf, align 4 + %75 = load float, float* @ret_sf, align 4 %conv63 = fpext float %75 to double - %76 = load float* @lret_sf, align 4 + %76 = load float, float* @lret_sf, align 4 %conv64 = fpext float %76 to double - %77 = load float* @x, align 4 + %77 = load float, float* @x, align 4 %conv65 = fpext float %77 to double - %78 = load float* @lx, align 4 + %78 = load float, float* @lx, align 4 %conv66 = fpext float %78 to double - %79 = load float* @y, align 4 + %79 = load float, float* @y, align 4 %conv67 = fpext float %79 to double - %80 = load float* @ly, align 4 + %80 = load float, float* @ly, align 4 %conv68 = fpext float %80 to double - %81 = load float* @ret_sf, align 4 - %82 = load float* @lret_sf, align 4 + %81 = load float, float* @ret_sf, align 4 + %82 = load float, float* @lret_sf, align 4 %cmp69 = fcmp oeq float %81, %82 br i1 %cmp69, label %land.lhs.true, label %land.end76 land.lhs.true: ; preds = %land.end - %83 = load float* @x, align 4 - %84 = load float* @lx, align 4 + %83 = load float, float* @x, align 4 + %84 = load float, float* @lx, align 4 %cmp71 = fcmp oeq float %83, %84 br i1 %cmp71, label %land.rhs73, label %land.end76 land.rhs73: ; preds = %land.lhs.true - %85 = load float* @y, align 4 - %86 = load float* @ly, align 4 + %85 = load float, float* @y, align 4 + %86 = load float, float* @ly, align 4 %cmp74 = fcmp oeq float %85, %86 br label %land.end76 @@ -286,34 +286,34 @@ land.end76: ; preds = %land.rhs73, %land.l store float 9.991300e+04, float* @ret_sf, align 4 store float 1.114500e+04, float* @lx, align 4 store double 9.994445e+07, double* @lyd, align 8 - %88 = load float* @lx, align 4 - %89 = load double* @lyd, align 8 + %88 = load float, float* @lx, align 4 + %89 = load double, double* @lyd, align 8 %call79 = call float @sf_sf_df(float %88, double %89) store float %call79, float* @lret_sf, align 4 - %90 = load float* @ret_sf, align 4 + %90 = load float, float* @ret_sf, align 4 %conv80 = fpext float %90 to double - %91 = load float* @lret_sf, align 4 + %91 = load float, float* @lret_sf, align 4 %conv81 = fpext float %91 to double - %92 = load float* @x, align 4 + %92 = load float, float* @x, align 4 %conv82 = fpext float %92 to double - %93 = load float* @lx, align 4 + %93 = load float, float* @lx, align 4 %conv83 = fpext float %93 to double - %94 = load double* @yd, align 8 - %95 = load double* @lyd, align 8 - %96 = load float* @ret_sf, align 4 - %97 = load float* @lret_sf, align 4 + %94 = load double, double* @yd, align 8 + %95 = load double, double* @lyd, align 8 + %96 = load float, float* @ret_sf, align 4 + %97 = load float, float* @lret_sf, align 4 %cmp84 = fcmp oeq float %96, %97 br i1 %cmp84, label %land.lhs.true86, label %land.end92 land.lhs.true86: ; preds = %land.end76 - %98 = load float* @x, align 4 - %99 = load float* @lx, align 4 + %98 = load float, float* @x, align 4 + %99 = load float, float* @lx, align 4 %cmp87 = fcmp oeq float %98, %99 br i1 %cmp87, label %land.rhs89, label %land.end92 land.rhs89: ; preds = %land.lhs.true86 - %100 = load double* @yd, align 8 - %101 = load double* @lyd, align 8 + %100 = load double, double* @yd, align 8 + %101 = load double, double* @lyd, align 8 %cmp90 = fcmp oeq double %100, %101 br label %land.end92 @@ -325,34 +325,34 @@ land.end92: ; preds = %land.rhs89, %land.l store float 0x417CCC7A00000000, float* @ret_sf, align 4 store double 0x4172034530000000, double* @lxd, align 8 store float 4.456200e+04, float* @ly, align 4 - %103 = load double* @lxd, align 8 - %104 = load float* @ly, 
align 4 + %103 = load double, double* @lxd, align 8 + %104 = load float, float* @ly, align 4 %call95 = call float @sf_df_sf(double %103, float %104) store float %call95, float* @lret_sf, align 4 - %105 = load float* @ret_sf, align 4 + %105 = load float, float* @ret_sf, align 4 %conv96 = fpext float %105 to double - %106 = load float* @lret_sf, align 4 + %106 = load float, float* @lret_sf, align 4 %conv97 = fpext float %106 to double - %107 = load double* @xd, align 8 - %108 = load double* @lxd, align 8 - %109 = load float* @y, align 4 + %107 = load double, double* @xd, align 8 + %108 = load double, double* @lxd, align 8 + %109 = load float, float* @y, align 4 %conv98 = fpext float %109 to double - %110 = load float* @ly, align 4 + %110 = load float, float* @ly, align 4 %conv99 = fpext float %110 to double - %111 = load float* @ret_sf, align 4 - %112 = load float* @lret_sf, align 4 + %111 = load float, float* @ret_sf, align 4 + %112 = load float, float* @lret_sf, align 4 %cmp100 = fcmp oeq float %111, %112 br i1 %cmp100, label %land.lhs.true102, label %land.end108 land.lhs.true102: ; preds = %land.end92 - %113 = load double* @xd, align 8 - %114 = load double* @lxd, align 8 + %113 = load double, double* @xd, align 8 + %114 = load double, double* @lxd, align 8 %cmp103 = fcmp oeq double %113, %114 br i1 %cmp103, label %land.rhs105, label %land.end108 land.rhs105: ; preds = %land.lhs.true102 - %115 = load float* @y, align 4 - %116 = load float* @ly, align 4 + %115 = load float, float* @y, align 4 + %116 = load float, float* @ly, align 4 %cmp106 = fcmp oeq float %115, %116 br label %land.end108 @@ -364,32 +364,32 @@ land.end108: ; preds = %land.rhs105, %land. store float 3.987721e+06, float* @ret_sf, align 4 store double 0x3FF1F49F6DDDC2D8, double* @lxd, align 8 store double 0x409129F306A2B170, double* @lyd, align 8 - %118 = load double* @lxd, align 8 - %119 = load double* @lyd, align 8 + %118 = load double, double* @lxd, align 8 + %119 = load double, double* @lyd, align 8 %call111 = call float @sf_df_df(double %118, double %119) store float %call111, float* @lret_sf, align 4 - %120 = load float* @ret_sf, align 4 + %120 = load float, float* @ret_sf, align 4 %conv112 = fpext float %120 to double - %121 = load float* @lret_sf, align 4 + %121 = load float, float* @lret_sf, align 4 %conv113 = fpext float %121 to double - %122 = load double* @xd, align 8 - %123 = load double* @lxd, align 8 - %124 = load double* @yd, align 8 - %125 = load double* @lyd, align 8 - %126 = load float* @ret_sf, align 4 - %127 = load float* @lret_sf, align 4 + %122 = load double, double* @xd, align 8 + %123 = load double, double* @lxd, align 8 + %124 = load double, double* @yd, align 8 + %125 = load double, double* @lyd, align 8 + %126 = load float, float* @ret_sf, align 4 + %127 = load float, float* @lret_sf, align 4 %cmp114 = fcmp oeq float %126, %127 br i1 %cmp114, label %land.lhs.true116, label %land.end122 land.lhs.true116: ; preds = %land.end108 - %128 = load double* @xd, align 8 - %129 = load double* @lxd, align 8 + %128 = load double, double* @xd, align 8 + %129 = load double, double* @lxd, align 8 %cmp117 = fcmp oeq double %128, %129 br i1 %cmp117, label %land.rhs119, label %land.end122 land.rhs119: ; preds = %land.lhs.true116 - %130 = load double* @yd, align 8 - %131 = load double* @lyd, align 8 + %130 = load double, double* @yd, align 8 + %131 = load double, double* @lyd, align 8 %cmp120 = fcmp oeq double %130, %131 br label %land.end122 @@ -401,31 +401,31 @@ land.end122: ; preds = %land.rhs119, %land. 
store double 1.561234e+01, double* @ret_df, align 8 %call125 = call double @df_v() store double %call125, double* @lret_df, align 8 - %133 = load double* @ret_df, align 8 - %134 = load double* @lret_df, align 8 - %135 = load double* @ret_df, align 8 - %136 = load double* @lret_df, align 8 + %133 = load double, double* @ret_df, align 8 + %134 = load double, double* @lret_df, align 8 + %135 = load double, double* @ret_df, align 8 + %136 = load double, double* @lret_df, align 8 %cmp126 = fcmp oeq double %135, %136 %conv127 = zext i1 %cmp126 to i32 %call128 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i32 0, i32 0), double %133, double %134, i32 %conv127) call void @clear() store double 1.345873e+01, double* @ret_df, align 8 store float 3.434520e+05, float* @lx, align 4 - %137 = load float* @lx, align 4 + %137 = load float, float* @lx, align 4 %call129 = call double @df_sf(float %137) store double %call129, double* @lret_df, align 8 - %138 = load double* @ret_df, align 8 - %139 = load double* @lret_df, align 8 - %140 = load float* @x, align 4 + %138 = load double, double* @ret_df, align 8 + %139 = load double, double* @lret_df, align 8 + %140 = load float, float* @x, align 4 %conv130 = fpext float %140 to double - %141 = load float* @lx, align 4 + %141 = load float, float* @lx, align 4 %conv131 = fpext float %141 to double - %142 = load double* @ret_df, align 8 - %143 = load double* @lret_df, align 8 + %142 = load double, double* @ret_df, align 8 + %143 = load double, double* @lret_df, align 8 %cmp132 = fcmp oeq double %142, %143 %conv133 = zext i1 %cmp132 to i32 - %144 = load float* @x, align 4 - %145 = load float* @lx, align 4 + %144 = load float, float* @x, align 4 + %145 = load float, float* @lx, align 4 %cmp134 = fcmp oeq float %144, %145 %conv135 = zext i1 %cmp134 to i32 %and136 = and i32 %conv133, %conv135 @@ -433,19 +433,19 @@ land.end122: ; preds = %land.rhs119, %land. call void @clear() store double 0x4084F3AB7AA25D8D, double* @ret_df, align 8 store double 0x4114F671D2F1A9FC, double* @lxd, align 8 - %146 = load double* @lxd, align 8 + %146 = load double, double* @lxd, align 8 %call138 = call double @df_df(double %146) store double %call138, double* @lret_df, align 8 - %147 = load double* @ret_df, align 8 - %148 = load double* @lret_df, align 8 - %149 = load double* @xd, align 8 - %150 = load double* @lxd, align 8 - %151 = load double* @ret_df, align 8 - %152 = load double* @lret_df, align 8 + %147 = load double, double* @ret_df, align 8 + %148 = load double, double* @lret_df, align 8 + %149 = load double, double* @xd, align 8 + %150 = load double, double* @lxd, align 8 + %151 = load double, double* @ret_df, align 8 + %152 = load double, double* @lret_df, align 8 %cmp139 = fcmp oeq double %151, %152 %conv140 = zext i1 %cmp139 to i32 - %153 = load double* @xd, align 8 - %154 = load double* @lxd, align 8 + %153 = load double, double* @xd, align 8 + %154 = load double, double* @lxd, align 8 %cmp141 = fcmp oeq double %153, %154 %conv142 = zext i1 %cmp141 to i32 %and143 = and i32 %conv140, %conv142 @@ -454,34 +454,34 @@ land.end122: ; preds = %land.rhs119, %land. 
store double 6.781956e+03, double* @ret_df, align 8 store float 4.445500e+03, float* @lx, align 4 store float 0x4068ACCCC0000000, float* @ly, align 4 - %155 = load float* @lx, align 4 - %156 = load float* @ly, align 4 + %155 = load float, float* @lx, align 4 + %156 = load float, float* @ly, align 4 %call145 = call double @df_sf_sf(float %155, float %156) store double %call145, double* @lret_df, align 8 - %157 = load double* @ret_df, align 8 - %158 = load double* @lret_df, align 8 - %159 = load float* @x, align 4 + %157 = load double, double* @ret_df, align 8 + %158 = load double, double* @lret_df, align 8 + %159 = load float, float* @x, align 4 %conv146 = fpext float %159 to double - %160 = load float* @lx, align 4 + %160 = load float, float* @lx, align 4 %conv147 = fpext float %160 to double - %161 = load float* @y, align 4 + %161 = load float, float* @y, align 4 %conv148 = fpext float %161 to double - %162 = load float* @ly, align 4 + %162 = load float, float* @ly, align 4 %conv149 = fpext float %162 to double - %163 = load double* @ret_df, align 8 - %164 = load double* @lret_df, align 8 + %163 = load double, double* @ret_df, align 8 + %164 = load double, double* @lret_df, align 8 %cmp150 = fcmp oeq double %163, %164 br i1 %cmp150, label %land.lhs.true152, label %land.end158 land.lhs.true152: ; preds = %land.end122 - %165 = load float* @x, align 4 - %166 = load float* @lx, align 4 + %165 = load float, float* @x, align 4 + %166 = load float, float* @lx, align 4 %cmp153 = fcmp oeq float %165, %166 br i1 %cmp153, label %land.rhs155, label %land.end158 land.rhs155: ; preds = %land.lhs.true152 - %167 = load float* @y, align 4 - %168 = load float* @ly, align 4 + %167 = load float, float* @y, align 4 + %168 = load float, float* @ly, align 4 %cmp156 = fcmp oeq float %167, %168 br label %land.end158 @@ -493,32 +493,32 @@ land.end158: ; preds = %land.rhs155, %land. 
store double 1.889130e+05, double* @ret_df, align 8 store float 9.111450e+05, float* @lx, align 4 store double 0x4185320A58000000, double* @lyd, align 8 - %170 = load float* @lx, align 4 - %171 = load double* @lyd, align 8 + %170 = load float, float* @lx, align 4 + %171 = load double, double* @lyd, align 8 %call161 = call double @df_sf_df(float %170, double %171) store double %call161, double* @lret_df, align 8 - %172 = load double* @ret_df, align 8 - %173 = load double* @lret_df, align 8 - %174 = load float* @x, align 4 + %172 = load double, double* @ret_df, align 8 + %173 = load double, double* @lret_df, align 8 + %174 = load float, float* @x, align 4 %conv162 = fpext float %174 to double - %175 = load float* @lx, align 4 + %175 = load float, float* @lx, align 4 %conv163 = fpext float %175 to double - %176 = load double* @yd, align 8 - %177 = load double* @lyd, align 8 - %178 = load double* @ret_df, align 8 - %179 = load double* @lret_df, align 8 + %176 = load double, double* @yd, align 8 + %177 = load double, double* @lyd, align 8 + %178 = load double, double* @ret_df, align 8 + %179 = load double, double* @lret_df, align 8 %cmp164 = fcmp oeq double %178, %179 br i1 %cmp164, label %land.lhs.true166, label %land.end172 land.lhs.true166: ; preds = %land.end158 - %180 = load float* @x, align 4 - %181 = load float* @lx, align 4 + %180 = load float, float* @x, align 4 + %181 = load float, float* @lx, align 4 %cmp167 = fcmp oeq float %180, %181 br i1 %cmp167, label %land.rhs169, label %land.end172 land.rhs169: ; preds = %land.lhs.true166 - %182 = load double* @yd, align 8 - %183 = load double* @lyd, align 8 + %182 = load double, double* @yd, align 8 + %183 = load double, double* @lyd, align 8 %cmp170 = fcmp oeq double %182, %183 br label %land.end172 @@ -530,32 +530,32 @@ land.end172: ; preds = %land.rhs169, %land. 
store double 0x418B2DB900000000, double* @ret_df, align 8 store double 0x41B1EF2ED3000000, double* @lxd, align 8 store float 1.244562e+06, float* @ly, align 4 - %185 = load double* @lxd, align 8 - %186 = load float* @ly, align 4 + %185 = load double, double* @lxd, align 8 + %186 = load float, float* @ly, align 4 %call175 = call double @df_df_sf(double %185, float %186) store double %call175, double* @lret_df, align 8 - %187 = load double* @ret_df, align 8 - %188 = load double* @lret_df, align 8 - %189 = load double* @xd, align 8 - %190 = load double* @lxd, align 8 - %191 = load float* @y, align 4 + %187 = load double, double* @ret_df, align 8 + %188 = load double, double* @lret_df, align 8 + %189 = load double, double* @xd, align 8 + %190 = load double, double* @lxd, align 8 + %191 = load float, float* @y, align 4 %conv176 = fpext float %191 to double - %192 = load float* @ly, align 4 + %192 = load float, float* @ly, align 4 %conv177 = fpext float %192 to double - %193 = load double* @ret_df, align 8 - %194 = load double* @lret_df, align 8 + %193 = load double, double* @ret_df, align 8 + %194 = load double, double* @lret_df, align 8 %cmp178 = fcmp oeq double %193, %194 br i1 %cmp178, label %land.lhs.true180, label %land.end186 land.lhs.true180: ; preds = %land.end172 - %195 = load double* @xd, align 8 - %196 = load double* @lxd, align 8 + %195 = load double, double* @xd, align 8 + %196 = load double, double* @lxd, align 8 %cmp181 = fcmp oeq double %195, %196 br i1 %cmp181, label %land.rhs183, label %land.end186 land.rhs183: ; preds = %land.lhs.true180 - %197 = load float* @y, align 4 - %198 = load float* @ly, align 4 + %197 = load float, float* @y, align 4 + %198 = load float, float* @ly, align 4 %cmp184 = fcmp oeq float %197, %198 br label %land.end186 @@ -567,30 +567,30 @@ land.end186: ; preds = %land.rhs183, %land. 
store double 3.987721e+06, double* @ret_df, align 8 store double 5.223560e+00, double* @lxd, align 8 store double 0x40B7D37CC1A8AC5C, double* @lyd, align 8 - %200 = load double* @lxd, align 8 - %201 = load double* @lyd, align 8 + %200 = load double, double* @lxd, align 8 + %201 = load double, double* @lyd, align 8 %call189 = call double @df_df_df(double %200, double %201) store double %call189, double* @lret_df, align 8 - %202 = load double* @ret_df, align 8 - %203 = load double* @lret_df, align 8 - %204 = load double* @xd, align 8 - %205 = load double* @lxd, align 8 - %206 = load double* @yd, align 8 - %207 = load double* @lyd, align 8 - %208 = load double* @ret_df, align 8 - %209 = load double* @lret_df, align 8 + %202 = load double, double* @ret_df, align 8 + %203 = load double, double* @lret_df, align 8 + %204 = load double, double* @xd, align 8 + %205 = load double, double* @lxd, align 8 + %206 = load double, double* @yd, align 8 + %207 = load double, double* @lyd, align 8 + %208 = load double, double* @ret_df, align 8 + %209 = load double, double* @lret_df, align 8 %cmp190 = fcmp oeq double %208, %209 br i1 %cmp190, label %land.lhs.true192, label %land.end198 land.lhs.true192: ; preds = %land.end186 - %210 = load double* @xd, align 8 - %211 = load double* @lxd, align 8 + %210 = load double, double* @xd, align 8 + %211 = load double, double* @lxd, align 8 %cmp193 = fcmp oeq double %210, %211 br i1 %cmp193, label %land.rhs195, label %land.end198 land.rhs195: ; preds = %land.lhs.true192 - %212 = load double* @yd, align 8 - %213 = load double* @lyd, align 8 + %212 = load double, double* @yd, align 8 + %213 = load double, double* @lyd, align 8 %cmp196 = fcmp oeq double %212, %213 br label %land.end198 @@ -606,26 +606,26 @@ land.end198: ; preds = %land.rhs195, %land. 
%216 = extractvalue { float, float } %call201, 1 store float %215, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) store float %216, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) - %ret_sc.real = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0) - %ret_sc.imag = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) + %ret_sc.real = load float, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0) + %ret_sc.imag = load float, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) %conv202 = fpext float %ret_sc.real to double %conv203 = fpext float %ret_sc.imag to double - %ret_sc.real204 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0) - %ret_sc.imag205 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) + %ret_sc.real204 = load float, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0) + %ret_sc.imag205 = load float, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) %conv206 = fpext float %ret_sc.real204 to double %conv207 = fpext float %ret_sc.imag205 to double - %lret_sc.real = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) - %lret_sc.imag = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) + %lret_sc.real = load float, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) + %lret_sc.imag = load float, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) %conv208 = fpext float %lret_sc.real to double %conv209 = fpext float %lret_sc.imag to double - %lret_sc.real210 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) - %lret_sc.imag211 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) + %lret_sc.real210 = load float, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) + %lret_sc.imag211 = load float, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) %conv212 = fpext float %lret_sc.real210 to double %conv213 = fpext float %lret_sc.imag211 to double - %ret_sc.real214 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0) - %ret_sc.imag215 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) - %lret_sc.real216 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) - %lret_sc.imag217 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) + %ret_sc.real214 = load float, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0) + %ret_sc.imag215 = load float, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) + %lret_sc.real216 = load float, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) + %lret_sc.imag217 = load float, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) %cmp.r = fcmp oeq float %ret_sc.real214, %lret_sc.real216 %cmp.i = fcmp oeq float %ret_sc.imag215, %lret_sc.imag217 %and.ri = and i1 %cmp.r, %cmp.i @@ -635,44 +635,44 @@ land.end198: ; preds = %land.rhs195, %land. 
store float 0x3FF7A99300000000, float* @lx, align 4 store float 4.500000e+00, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0) store float 7.000000e+00, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) - %217 = load float* @lx, align 4 + %217 = load float, float* @lx, align 4 %call220 = call { float, float } @sc_sf(float %217) %218 = extractvalue { float, float } %call220, 0 %219 = extractvalue { float, float } %call220, 1 store float %218, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) store float %219, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) - %ret_sc.real221 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0) - %ret_sc.imag222 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) + %ret_sc.real221 = load float, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0) + %ret_sc.imag222 = load float, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) %conv223 = fpext float %ret_sc.real221 to double %conv224 = fpext float %ret_sc.imag222 to double - %ret_sc.real225 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0) - %ret_sc.imag226 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) + %ret_sc.real225 = load float, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0) + %ret_sc.imag226 = load float, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) %conv227 = fpext float %ret_sc.real225 to double %conv228 = fpext float %ret_sc.imag226 to double - %lret_sc.real229 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) - %lret_sc.imag230 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) + %lret_sc.real229 = load float, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) + %lret_sc.imag230 = load float, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) %conv231 = fpext float %lret_sc.real229 to double %conv232 = fpext float %lret_sc.imag230 to double - %lret_sc.real233 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) - %lret_sc.imag234 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) + %lret_sc.real233 = load float, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) + %lret_sc.imag234 = load float, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) %conv235 = fpext float %lret_sc.real233 to double %conv236 = fpext float %lret_sc.imag234 to double - %220 = load float* @x, align 4 + %220 = load float, float* @x, align 4 %conv237 = fpext float %220 to double - %221 = load float* @lx, align 4 + %221 = load float, float* @lx, align 4 %conv238 = fpext float %221 to double - %ret_sc.real239 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0) - %ret_sc.imag240 = load float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) - %lret_sc.real241 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 0) - %lret_sc.imag242 = load float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) + %ret_sc.real239 = load float, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 0) + %ret_sc.imag240 = load float, float* getelementptr inbounds ({ float, float }* @ret_sc, i32 0, i32 1) + %lret_sc.real241 = load float, float* getelementptr 
inbounds ({ float, float }* @lret_sc, i32 0, i32 0) + %lret_sc.imag242 = load float, float* getelementptr inbounds ({ float, float }* @lret_sc, i32 0, i32 1) %cmp.r243 = fcmp oeq float %ret_sc.real239, %lret_sc.real241 %cmp.i244 = fcmp oeq float %ret_sc.imag240, %lret_sc.imag242 %and.ri245 = and i1 %cmp.r243, %cmp.i244 br i1 %and.ri245, label %land.rhs247, label %land.end250 land.rhs247: ; preds = %land.end198 - %222 = load float* @x, align 4 - %223 = load float* @lx, align 4 + %222 = load float, float* @x, align 4 + %223 = load float, float* @lx, align 4 %cmp248 = fcmp oeq float %222, %223 br label %land.end250 @@ -688,18 +688,18 @@ land.end250: ; preds = %land.rhs247, %land. %226 = extractvalue { double, double } %call253, 1 store double %225, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) store double %226, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) - %ret_dc.real = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) - %ret_dc.imag = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) - %ret_dc.real254 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) - %ret_dc.imag255 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) - %lret_dc.real = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) - %lret_dc.imag = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) - %lret_dc.real256 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) - %lret_dc.imag257 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) - %ret_dc.real258 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) - %ret_dc.imag259 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) - %lret_dc.real260 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) - %lret_dc.imag261 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) + %ret_dc.real = load double, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) + %ret_dc.imag = load double, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) + %ret_dc.real254 = load double, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) + %ret_dc.imag255 = load double, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) + %lret_dc.real = load double, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) + %lret_dc.imag = load double, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) + %lret_dc.real256 = load double, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) + %lret_dc.imag257 = load double, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) + %ret_dc.real258 = load double, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) + %ret_dc.imag259 = load double, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) + %lret_dc.real260 = load double, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) + %lret_dc.imag261 = load double, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) %cmp.r262 = fcmp oeq double %ret_dc.real258, %lret_dc.real260 %cmp.i263 = fcmp oeq double %ret_dc.imag259, %lret_dc.imag261 
%and.ri264 = and i1 %cmp.r262, %cmp.i263 @@ -709,36 +709,36 @@ land.end250: ; preds = %land.rhs247, %land. store double 0x40AAF6F532617C1C, double* @lxd, align 8 store double 4.444500e+03, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) store double 7.888000e+03, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) - %227 = load float* @lx, align 4 + %227 = load float, float* @lx, align 4 %call267 = call { double, double } @dc_sf(float %227) %228 = extractvalue { double, double } %call267, 0 %229 = extractvalue { double, double } %call267, 1 store double %228, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) store double %229, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) - %ret_dc.real268 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) - %ret_dc.imag269 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) - %ret_dc.real270 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) - %ret_dc.imag271 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) - %lret_dc.real272 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) - %lret_dc.imag273 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) - %lret_dc.real274 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) - %lret_dc.imag275 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) - %230 = load float* @x, align 4 + %ret_dc.real268 = load double, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) + %ret_dc.imag269 = load double, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) + %ret_dc.real270 = load double, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) + %ret_dc.imag271 = load double, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) + %lret_dc.real272 = load double, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) + %lret_dc.imag273 = load double, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) + %lret_dc.real274 = load double, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) + %lret_dc.imag275 = load double, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) + %230 = load float, float* @x, align 4 %conv276 = fpext float %230 to double - %231 = load float* @lx, align 4 + %231 = load float, float* @lx, align 4 %conv277 = fpext float %231 to double - %ret_dc.real278 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) - %ret_dc.imag279 = load double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) - %lret_dc.real280 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) - %lret_dc.imag281 = load double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) + %ret_dc.real278 = load double, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 0) + %ret_dc.imag279 = load double, double* getelementptr inbounds ({ double, double }* @ret_dc, i32 0, i32 1) + %lret_dc.real280 = load double, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 0) + %lret_dc.imag281 = load double, double* getelementptr inbounds ({ double, double }* @lret_dc, i32 0, i32 1) %cmp.r282 = fcmp oeq 
double %ret_dc.real278, %lret_dc.real280 %cmp.i283 = fcmp oeq double %ret_dc.imag279, %lret_dc.imag281 %and.ri284 = and i1 %cmp.r282, %cmp.i283 br i1 %and.ri284, label %land.rhs286, label %land.end289 land.rhs286: ; preds = %land.end250 - %232 = load float* @x, align 4 - %233 = load float* @lx, align 4 + %232 = load float, float* @x, align 4 + %233 = load float, float* @lx, align 4 %cmp287 = fcmp oeq float %232, %233 br label %land.end289 @@ -746,7 +746,7 @@ land.end289: ; preds = %land.rhs286, %land. %234 = phi i1 [ false, %land.end250 ], [ %cmp287, %land.rhs286 ] %land.ext290 = zext i1 %234 to i32 %call291 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([24 x i8]* @.str4, i32 0, i32 0), double %ret_dc.real268, double %ret_dc.imag271, double %lret_dc.real272, double %lret_dc.imag275, double %conv276, double %conv277, i32 %land.ext290) - %235 = load i32* %retval + %235 = load i32, i32* %retval ret i32 %235 } diff --git a/llvm/test/CodeGen/Mips/hf16call32_body.ll b/llvm/test/CodeGen/Mips/hf16call32_body.ll index adac31460c4..d06256cc564 100644 --- a/llvm/test/CodeGen/Mips/hf16call32_body.ll +++ b/llvm/test/CodeGen/Mips/hf16call32_body.ll @@ -14,7 +14,7 @@ define void @v_sf(float %p) #0 { entry: %p.addr = alloca float, align 4 store float %p, float* %p.addr, align 4 - %0 = load float* %p.addr, align 4 + %0 = load float, float* %p.addr, align 4 store float %0, float* @x, align 4 ret void } @@ -33,7 +33,7 @@ define void @v_df(double %p) #0 { entry: %p.addr = alloca double, align 8 store double %p, double* %p.addr, align 8 - %0 = load double* %p.addr, align 8 + %0 = load double, double* %p.addr, align 8 store double %0, double* @xd, align 8 ret void } @@ -54,9 +54,9 @@ entry: %p2.addr = alloca float, align 4 store float %p1, float* %p1.addr, align 4 store float %p2, float* %p2.addr, align 4 - %0 = load float* %p1.addr, align 4 + %0 = load float, float* %p1.addr, align 4 store float %0, float* @x, align 4 - %1 = load float* %p2.addr, align 4 + %1 = load float, float* %p2.addr, align 4 store float %1, float* @y, align 4 ret void } @@ -77,9 +77,9 @@ entry: %p2.addr = alloca double, align 8 store float %p1, float* %p1.addr, align 4 store double %p2, double* %p2.addr, align 8 - %0 = load float* %p1.addr, align 4 + %0 = load float, float* %p1.addr, align 4 store float %0, float* @x, align 4 - %1 = load double* %p2.addr, align 8 + %1 = load double, double* %p2.addr, align 8 store double %1, double* @yd, align 8 ret void } @@ -101,9 +101,9 @@ entry: %p2.addr = alloca float, align 4 store double %p1, double* %p1.addr, align 8 store float %p2, float* %p2.addr, align 4 - %0 = load double* %p1.addr, align 8 + %0 = load double, double* %p1.addr, align 8 store double %0, double* @xd, align 8 - %1 = load float* %p2.addr, align 4 + %1 = load float, float* %p2.addr, align 4 store float %1, float* @y, align 4 ret void } @@ -125,9 +125,9 @@ entry: %p2.addr = alloca double, align 8 store double %p1, double* %p1.addr, align 8 store double %p2, double* %p2.addr, align 8 - %0 = load double* %p1.addr, align 8 + %0 = load double, double* %p1.addr, align 8 store double %0, double* @xd, align 8 - %1 = load double* %p2.addr, align 8 + %1 = load double, double* %p2.addr, align 8 store double %1, double* @yd, align 8 ret void } @@ -146,7 +146,7 @@ entry: ; Function Attrs: nounwind define float @sf_v() #0 { entry: - %0 = load float* @ret_sf, align 4 + %0 = load float, float* @ret_sf, align 4 ret float %0 } @@ -155,9 +155,9 @@ define float @sf_sf(float %p) #0 { entry: %p.addr = alloca float, align 4 store float %p, 
float* %p.addr, align 4 - %0 = load float* %p.addr, align 4 + %0 = load float, float* %p.addr, align 4 store float %0, float* @x, align 4 - %1 = load float* @ret_sf, align 4 + %1 = load float, float* @ret_sf, align 4 ret float %1 } @@ -176,9 +176,9 @@ define float @sf_df(double %p) #0 { entry: %p.addr = alloca double, align 8 store double %p, double* %p.addr, align 8 - %0 = load double* %p.addr, align 8 + %0 = load double, double* %p.addr, align 8 store double %0, double* @xd, align 8 - %1 = load float* @ret_sf, align 4 + %1 = load float, float* @ret_sf, align 4 ret float %1 } @@ -198,11 +198,11 @@ entry: %p2.addr = alloca float, align 4 store float %p1, float* %p1.addr, align 4 store float %p2, float* %p2.addr, align 4 - %0 = load float* %p1.addr, align 4 + %0 = load float, float* %p1.addr, align 4 store float %0, float* @x, align 4 - %1 = load float* %p2.addr, align 4 + %1 = load float, float* %p2.addr, align 4 store float %1, float* @y, align 4 - %2 = load float* @ret_sf, align 4 + %2 = load float, float* @ret_sf, align 4 ret float %2 } @@ -222,11 +222,11 @@ entry: %p2.addr = alloca double, align 8 store float %p1, float* %p1.addr, align 4 store double %p2, double* %p2.addr, align 8 - %0 = load float* %p1.addr, align 4 + %0 = load float, float* %p1.addr, align 4 store float %0, float* @x, align 4 - %1 = load double* %p2.addr, align 8 + %1 = load double, double* %p2.addr, align 8 store double %1, double* @yd, align 8 - %2 = load float* @ret_sf, align 4 + %2 = load float, float* @ret_sf, align 4 ret float %2 } @@ -247,11 +247,11 @@ entry: %p2.addr = alloca float, align 4 store double %p1, double* %p1.addr, align 8 store float %p2, float* %p2.addr, align 4 - %0 = load double* %p1.addr, align 8 + %0 = load double, double* %p1.addr, align 8 store double %0, double* @xd, align 8 - %1 = load float* %p2.addr, align 4 + %1 = load float, float* %p2.addr, align 4 store float %1, float* @y, align 4 - %2 = load float* @ret_sf, align 4 + %2 = load float, float* @ret_sf, align 4 ret float %2 } @@ -272,11 +272,11 @@ entry: %p2.addr = alloca double, align 8 store double %p1, double* %p1.addr, align 8 store double %p2, double* %p2.addr, align 8 - %0 = load double* %p1.addr, align 8 + %0 = load double, double* %p1.addr, align 8 store double %0, double* @xd, align 8 - %1 = load double* %p2.addr, align 8 + %1 = load double, double* %p2.addr, align 8 store double %1, double* @yd, align 8 - %2 = load float* @ret_sf, align 4 + %2 = load float, float* @ret_sf, align 4 ret float %2 } diff --git a/llvm/test/CodeGen/Mips/hf1_body.ll b/llvm/test/CodeGen/Mips/hf1_body.ll index 5acfe86373d..71a1b960c5b 100644 --- a/llvm/test/CodeGen/Mips/hf1_body.ll +++ b/llvm/test/CodeGen/Mips/hf1_body.ll @@ -7,7 +7,7 @@ define void @v_sf(float %p) #0 { entry: %p.addr = alloca float, align 4 store float %p, float* %p.addr, align 4 - %0 = load float* %p.addr, align 4 + %0 = load float, float* %p.addr, align 4 store float %0, float* @x, align 4 ret void } diff --git a/llvm/test/CodeGen/Mips/hfptrcall.ll b/llvm/test/CodeGen/Mips/hfptrcall.ll index fd0e3593073..1df58a3add9 100644 --- a/llvm/test/CodeGen/Mips/hfptrcall.ll +++ b/llvm/test/CodeGen/Mips/hfptrcall.ll @@ -38,7 +38,7 @@ entry: %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1 store float 5.000000e+00, float* %real store float 9.900000e+01, float* %imag - %0 = load { float, float }* %retval + %0 = load { float, float }, { float, float }* %retval ret { float, float } %0 } @@ -54,7 +54,7 @@ entry: %imag = getelementptr inbounds { double, 
double }, { double, double }* %retval, i32 0, i32 1 store double 0x416BC8B0A0000000, double* %real store double 0x41CDCCB763800000, double* %imag - %0 = load { double, double }* %retval + %0 = load { double, double }, { double, double }* %retval ret { double, double } %0 } @@ -65,42 +65,42 @@ entry: ; Function Attrs: nounwind define i32 @main() #0 { entry: - %0 = load float ()** @ptrsv, align 4 + %0 = load float ()*, float ()** @ptrsv, align 4 %call = call float %0() store float %call, float* @x, align 4 - %1 = load float* @x, align 4 + %1 = load float, float* @x, align 4 %conv = fpext float %1 to double %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), double %conv) - %2 = load double ()** @ptrdv, align 4 + %2 = load double ()*, double ()** @ptrdv, align 4 %call2 = call double %2() store double %call2, double* @xd, align 8 - %3 = load double* @xd, align 8 + %3 = load double, double* @xd, align 8 %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), double %3) - %4 = load { float, float } ()** @ptrscv, align 4 + %4 = load { float, float } ()*, { float, float } ()** @ptrscv, align 4 %call4 = call { float, float } %4() %5 = extractvalue { float, float } %call4, 0 %6 = extractvalue { float, float } %call4, 1 store float %5, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 0) store float %6, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 1) - %xy.real = load float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 0) - %xy.imag = load float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 1) + %xy.real = load float, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 0) + %xy.imag = load float, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 1) %conv5 = fpext float %xy.real to double %conv6 = fpext float %xy.imag to double - %xy.real7 = load float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 0) - %xy.imag8 = load float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 1) + %xy.real7 = load float, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 0) + %xy.imag8 = load float, float* getelementptr inbounds ({ float, float }* @xy, i32 0, i32 1) %conv9 = fpext float %xy.real7 to double %conv10 = fpext float %xy.imag8 to double %call11 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str1, i32 0, i32 0), double %conv5, double %conv10) - %7 = load { double, double } ()** @ptrdcv, align 4 + %7 = load { double, double } ()*, { double, double } ()** @ptrdcv, align 4 %call12 = call { double, double } %7() %8 = extractvalue { double, double } %call12, 0 %9 = extractvalue { double, double } %call12, 1 store double %8, double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 0) store double %9, double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 1) - %xyd.real = load double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 0) - %xyd.imag = load double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 1) - %xyd.real13 = load double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 0) - %xyd.imag14 = load double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 1) + %xyd.real = load double, double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 0) + %xyd.imag = load double, double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 1) + %xyd.real13 = load double, double* 
getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 0) + %xyd.imag14 = load double, double* getelementptr inbounds ({ double, double }* @xyd, i32 0, i32 1) %call15 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str1, i32 0, i32 0), double %xyd.real, double %xyd.imag14) ret i32 0 } diff --git a/llvm/test/CodeGen/Mips/inlineasm-assembler-directives.ll b/llvm/test/CodeGen/Mips/inlineasm-assembler-directives.ll index e4a6d1e26c6..88ceed4114c 100644 --- a/llvm/test/CodeGen/Mips/inlineasm-assembler-directives.ll +++ b/llvm/test/CodeGen/Mips/inlineasm-assembler-directives.ll @@ -16,7 +16,7 @@ entry: %a = alloca i32, align 4 %b = alloca i32, align 4 store i32 20, i32* %a, align 4 - %0 = load i32* %a, align 4 + %0 = load i32, i32* %a, align 4 %1 = call i32 asm sideeffect "addi $$9, $1, 8\0A\09subi $0, $$9, 6", "=r,r,~{$1}"(i32 %0) store i32 %1, i32* %b, align 4 ret void diff --git a/llvm/test/CodeGen/Mips/inlineasm-operand-code.ll b/llvm/test/CodeGen/Mips/inlineasm-operand-code.ll index 3d9dec76fb3..fd726fc689c 100644 --- a/llvm/test/CodeGen/Mips/inlineasm-operand-code.ll +++ b/llvm/test/CodeGen/Mips/inlineasm-operand-code.ll @@ -125,7 +125,7 @@ entry: ;CHECK_BIG_32: #APP ;CHECK_BIG_32: or ${{[0-9]+}},$[[SECOND]],${{[0-9]+}} ;CHECK_BIG_32: #NO_APP - %bosco = load i64* getelementptr inbounds (%union.u_tag* @uval, i32 0, i32 0), align 8 + %bosco = load i64, i64* getelementptr inbounds (%union.u_tag* @uval, i32 0, i32 0), align 8 %trunc1 = trunc i64 %bosco to i32 tail call i32 asm sideeffect "or $0,${1:D},$2", "=r,r,r"(i64 %bosco, i32 %trunc1) nounwind ret i32 0 @@ -149,7 +149,7 @@ entry: ;CHECK_BIG_32: #APP ;CHECK_BIG_32: or ${{[0-9]+}},$[[SECOND]],${{[0-9]+}} ;CHECK_BIG_32: #NO_APP - %bosco = load i64* getelementptr inbounds (%union.u_tag* @uval, i32 0, i32 0), align 8 + %bosco = load i64, i64* getelementptr inbounds (%union.u_tag* @uval, i32 0, i32 0), align 8 %trunc1 = trunc i64 %bosco to i32 tail call i32 asm sideeffect "or $0,${1:L},$2", "=r,r,r"(i64 %bosco, i32 %trunc1) nounwind ret i32 0 @@ -173,7 +173,7 @@ entry: ;CHECK_BIG_32: #APP ;CHECK_BIG_32: or ${{[0-9]+}},$[[FIRST]],${{[0-9]+}} ;CHECK_BIG_32: #NO_APP - %bosco = load i64* getelementptr inbounds (%union.u_tag* @uval, i32 0, i32 0), align 8 + %bosco = load i64, i64* getelementptr inbounds (%union.u_tag* @uval, i32 0, i32 0), align 8 %trunc1 = trunc i64 %bosco to i32 tail call i32 asm sideeffect "or $0,${1:M},$2", "=r,r,r"(i64 %bosco, i32 %trunc1) nounwind ret i32 0 diff --git a/llvm/test/CodeGen/Mips/inlineasm64.ll b/llvm/test/CodeGen/Mips/inlineasm64.ll index a8e949b50c2..82abdf82a3e 100644 --- a/llvm/test/CodeGen/Mips/inlineasm64.ll +++ b/llvm/test/CodeGen/Mips/inlineasm64.ll @@ -8,8 +8,8 @@ define void @foo1() nounwind { entry: ; CHECK: foo1 ; CHECK: daddu - %0 = load i64* @gl1, align 8 - %1 = load i64* @gl0, align 8 + %0 = load i64, i64* @gl1, align 8 + %1 = load i64, i64* @gl0, align 8 %2 = tail call i64 asm "daddu $0, $1, $2", "=r,r,r"(i64 %0, i64 %1) nounwind store i64 %2, i64* @gl2, align 8 ret void diff --git a/llvm/test/CodeGen/Mips/internalfunc.ll b/llvm/test/CodeGen/Mips/internalfunc.ll index 863375ad4d4..0320e28a448 100644 --- a/llvm/test/CodeGen/Mips/internalfunc.ll +++ b/llvm/test/CodeGen/Mips/internalfunc.ll @@ -20,7 +20,7 @@ entry: br i1 %tobool, label %if.end, label %if.then if.then: ; preds = %entry - %tmp1 = load void (...)** @caller.sf1, align 4 + %tmp1 = load void (...)*, void (...)** @caller.sf1, align 4 tail call void (...)* %tmp1() nounwind br label %if.end @@ -30,7 +30,7 @@ 
if.end: ; preds = %entry, %if.then ; CHECK: lw $[[R3:[0-9]+]], %got(caller.sf1) ; CHECK: sw ${{[0-9]+}}, %lo(caller.sf1)($[[R3]]) %tobool3 = icmp ne i32 %a0, 0 - %tmp4 = load void (...)** @gf1, align 4 + %tmp4 = load void (...)*, void (...)** @gf1, align 4 %cond = select i1 %tobool3, void (...)* %tmp4, void (...)* bitcast (void ()* @sf2 to void (...)*) store void (...)* %cond, void (...)** @caller.sf1, align 4 ret void diff --git a/llvm/test/CodeGen/Mips/jtstat.ll b/llvm/test/CodeGen/Mips/jtstat.ll index 01afc080c2e..35f71cf2dc8 100644 --- a/llvm/test/CodeGen/Mips/jtstat.ll +++ b/llvm/test/CodeGen/Mips/jtstat.ll @@ -8,7 +8,7 @@ define void @test(i32 %i) nounwind { entry: %i.addr = alloca i32, align 4 store i32 %i, i32* %i.addr, align 4 - %0 = load i32* %i.addr, align 4 + %0 = load i32, i32* %i.addr, align 4 switch i32 %0, label %sw.epilog [ i32 115, label %sw.bb i32 105, label %sw.bb1 diff --git a/llvm/test/CodeGen/Mips/l3mc.ll b/llvm/test/CodeGen/Mips/l3mc.ll index 3bfb389ba05..6aeed04f779 100644 --- a/llvm/test/CodeGen/Mips/l3mc.ll +++ b/llvm/test/CodeGen/Mips/l3mc.ll @@ -42,28 +42,28 @@ ; Function Attrs: nounwind define void @_Z3foov() #0 { entry: - %0 = load double* @d1, align 8 + %0 = load double, double* @d1, align 8 %conv = fptosi double %0 to i64 store i64 %conv, i64* @ll1, align 8 - %1 = load double* @d2, align 8 + %1 = load double, double* @d2, align 8 %conv1 = fptoui double %1 to i64 store i64 %conv1, i64* @ull1, align 8 - %2 = load float* @f1, align 4 + %2 = load float, float* @f1, align 4 %conv2 = fptosi float %2 to i64 store i64 %conv2, i64* @ll2, align 8 - %3 = load float* @f2, align 4 + %3 = load float, float* @f2, align 4 %conv3 = fptoui float %3 to i64 store i64 %conv3, i64* @ull2, align 8 - %4 = load double* @d3, align 8 + %4 = load double, double* @d3, align 8 %conv4 = fptosi double %4 to i32 store i32 %conv4, i32* @l1, align 4 - %5 = load double* @d4, align 8 + %5 = load double, double* @d4, align 8 %conv5 = fptoui double %5 to i32 store i32 %conv5, i32* @ul1, align 4 - %6 = load float* @f3, align 4 + %6 = load float, float* @f3, align 4 %conv6 = fptosi float %6 to i32 store i32 %conv6, i32* @l2, align 4 - %7 = load float* @f4, align 4 + %7 = load float, float* @f4, align 4 %conv7 = fptoui float %7 to i32 store i32 %conv7, i32* @ul2, align 4 ret void @@ -72,28 +72,28 @@ entry: ; Function Attrs: nounwind define void @_Z3goov() #0 { entry: - %0 = load i64* @ll1, align 8 + %0 = load i64, i64* @ll1, align 8 %conv = sitofp i64 %0 to double store double %conv, double* @d1, align 8 - %1 = load i64* @ull1, align 8 + %1 = load i64, i64* @ull1, align 8 %conv1 = uitofp i64 %1 to double store double %conv1, double* @d2, align 8 - %2 = load i64* @ll2, align 8 + %2 = load i64, i64* @ll2, align 8 %conv2 = sitofp i64 %2 to float store float %conv2, float* @f1, align 4 - %3 = load i64* @ull2, align 8 + %3 = load i64, i64* @ull2, align 8 %conv3 = uitofp i64 %3 to float store float %conv3, float* @f2, align 4 - %4 = load i32* @l1, align 4 + %4 = load i32, i32* @l1, align 4 %conv4 = sitofp i32 %4 to double store double %conv4, double* @d3, align 8 - %5 = load i32* @ul1, align 4 + %5 = load i32, i32* @ul1, align 4 %conv5 = uitofp i32 %5 to double store double %conv5, double* @d4, align 8 - %6 = load i32* @l2, align 4 + %6 = load i32, i32* @l2, align 4 %conv6 = sitofp i32 %6 to float store float %conv6, float* @f3, align 4 - %7 = load i32* @ul2, align 4 + %7 = load i32, i32* @ul2, align 4 %conv7 = uitofp i32 %7 to float store float %conv7, float* @f4, align 4 ret void diff --git 
a/llvm/test/CodeGen/Mips/lb1.ll b/llvm/test/CodeGen/Mips/lb1.ll
index aac2767a4e4..2db28a6a324 100644
--- a/llvm/test/CodeGen/Mips/lb1.ll
+++ b/llvm/test/CodeGen/Mips/lb1.ll
@@ -6,11 +6,11 @@ define i32 @main() nounwind {
 entry:
   %i = alloca i32, align 4
-  %0 = load i8* @c, align 1
+  %0 = load i8, i8* @c, align 1
 ; 16: lb ${{[0-9]+}}, 0(${{[0-9]+}})
   %conv = sext i8 %0 to i32
   store i32 %conv, i32* %i, align 4
-  %1 = load i32* %i, align 4
+  %1 = load i32, i32* %i, align 4
   %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %1)
   ret i32 0
 }
diff --git a/llvm/test/CodeGen/Mips/lbu1.ll b/llvm/test/CodeGen/Mips/lbu1.ll
index 63e0cca1684..369babf0573 100644
--- a/llvm/test/CodeGen/Mips/lbu1.ll
+++ b/llvm/test/CodeGen/Mips/lbu1.ll
@@ -6,11 +6,11 @@ define i32 @main() nounwind {
 entry:
   %i = alloca i32, align 4
-  %0 = load i8* @c, align 1
+  %0 = load i8, i8* @c, align 1
   %conv = zext i8 %0 to i32
 ; 16: lbu ${{[0-9]+}}, 0(${{[0-9]+}})
   store i32 %conv, i32* %i, align 4
-  %1 = load i8* @c, align 1
+  %1 = load i8, i8* @c, align 1
   %conv1 = zext i8 %1 to i32
   %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %conv1)
   ret i32 0
diff --git a/llvm/test/CodeGen/Mips/lcb2.ll b/llvm/test/CodeGen/Mips/lcb2.ll
index 59b96e64e95..716a6bbce8f 100644
--- a/llvm/test/CodeGen/Mips/lcb2.ll
+++ b/llvm/test/CodeGen/Mips/lcb2.ll
@@ -9,7 +9,7 @@
 ; Function Attrs: nounwind optsize
 define i32 @bnez() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.end
 
@@ -31,7 +31,7 @@ if.end: ; preds = %if.then, %entry
 ; Function Attrs: nounwind optsize
 define i32 @beqz() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
@@ -60,8 +60,8 @@ if.end: ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define void @bteqz() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
-  %1 = load i32* @j, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %1 = load i32, i32* @j, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
@@ -90,15 +90,15 @@ if.end: ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define void @btz() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
-  %1 = load i32* @j, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %1 = load i32, i32* @j, align 4, !tbaa !1
   %cmp1 = icmp sgt i32 %0, %1
   br i1 %cmp1, label %if.then, label %if.end
 
 if.then: ; preds = %entry, %if.then
   tail call void asm sideeffect ".space 60000", ""() #1, !srcloc !10
-  %2 = load i32* @i, align 4, !tbaa !1
-  %3 = load i32* @j, align 4, !tbaa !1
+  %2 = load i32, i32* @i, align 4, !tbaa !1
+  %3 = load i32, i32* @j, align 4, !tbaa !1
   %cmp = icmp sgt i32 %2, %3
   br i1 %cmp, label %if.then, label %if.end
diff --git a/llvm/test/CodeGen/Mips/lcb3c.ll b/llvm/test/CodeGen/Mips/lcb3c.ll
index eb832914542..d6e259c7260 100644
--- a/llvm/test/CodeGen/Mips/lcb3c.ll
+++ b/llvm/test/CodeGen/Mips/lcb3c.ll
@@ -7,7 +7,7 @@
 ; Function Attrs: nounwind
 define i32 @s() #0 {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
@@ -30,7 +30,7 @@ if.end: ; preds = %if.else, %if.then
 ; Function Attrs: nounwind
 define i32 @b() #0 {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
diff --git a/llvm/test/CodeGen/Mips/lcb4a.ll b/llvm/test/CodeGen/Mips/lcb4a.ll
index fbcadd2552f..0285ae17e1d 100644
--- a/llvm/test/CodeGen/Mips/lcb4a.ll
+++ b/llvm/test/CodeGen/Mips/lcb4a.ll
@@ -7,7 +7,7 @@
 ; Function Attrs: nounwind optsize
 define i32 @foo() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
@@ -32,7 +32,7 @@ if.end: ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define i32 @goo() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
diff --git a/llvm/test/CodeGen/Mips/lcb5.ll b/llvm/test/CodeGen/Mips/lcb5.ll
index b2a8d1d33ef..172ecb3d4da 100644
--- a/llvm/test/CodeGen/Mips/lcb5.ll
+++ b/llvm/test/CodeGen/Mips/lcb5.ll
@@ -7,7 +7,7 @@
 ; Function Attrs: nounwind optsize
 define i32 @x0() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
@@ -33,7 +33,7 @@ if.end: ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define i32 @x1() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
@@ -61,7 +61,7 @@ if.end: ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define i32 @y0() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
@@ -86,7 +86,7 @@ if.end: ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define i32 @y1() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.else
 
@@ -114,8 +114,8 @@ if.end: ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define void @z0() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
-  %1 = load i32* @j, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %1 = load i32, i32* @j, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
@@ -140,8 +140,8 @@ if.end: ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define void @z1() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
-  %1 = load i32* @j, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %1 = load i32, i32* @j, align 4, !tbaa !1
   %cmp = icmp eq i32 %0, %1
   br i1 %cmp, label %if.then, label %if.else
 
@@ -169,15 +169,15 @@ if.end: ; preds = %if.else, %if.then
 ; Function Attrs: nounwind optsize
 define void @z3() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
-  %1 = load i32* @j, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %1 = load i32, i32* @j, align 4, !tbaa !1
   %cmp1 = icmp sgt i32 %0, %1
   br i1 %cmp1, label %if.then, label %if.end
 
 if.then: ; preds = %entry, %if.then
   tail call void asm sideeffect ".space 10000", ""() #1, !srcloc !17
-  %2 = load i32* @i, align 4, !tbaa !1
-  %3 = load i32* @j, align 4, !tbaa !1
+  %2 = load i32, i32* @i, align 4, !tbaa !1
+  %3 = load i32, i32* @j, align 4, !tbaa !1
   %cmp = icmp sgt i32 %2, %3
   br i1 %cmp, label %if.then, label %if.end
 
@@ -192,15 +192,15 @@ if.end: ; preds = %if.then, %entry
 ; Function Attrs: nounwind optsize
 define void @z4() #0 {
 entry:
-  %0 = load i32* @i, align 4, !tbaa !1
-  %1 = load i32* @j, align 4, !tbaa !1
+  %0 = load i32, i32* @i, align 4, !tbaa !1
+  %1 = load i32, i32* @j, align 4, !tbaa !1
   %cmp1 = icmp sgt i32 %0, %1
   br i1 %cmp1, label %if.then, label %if.end
 
 if.then: ; preds = %entry, %if.then
   tail call void asm sideeffect ".space 10000000", ""() #1, !srcloc !18
-  %2 = load i32* @i, align 4, !tbaa !1
-  %3 = load i32* @j, align 4, !tbaa !1
+  %2 = load i32, i32* @i, align 4, !tbaa !1
+  %3 = load i32, i32* @j, align 4, !tbaa !1
   %cmp = icmp sgt i32 %2, %3
   br i1 %cmp, label %if.then, label %if.end
diff --git a/llvm/test/CodeGen/Mips/lh1.ll b/llvm/test/CodeGen/Mips/lh1.ll
index 1f95b090346..4e2fb985510 100644
--- a/llvm/test/CodeGen/Mips/lh1.ll
+++ b/llvm/test/CodeGen/Mips/lh1.ll
@@ -6,11 +6,11 @@ define i32 @main() nounwind {
 entry:
   %i = alloca i32, align 4
-  %0 = load i16* @s, align 2
+  %0 = load i16, i16* @s, align 2
   %conv = sext i16 %0 to i32
 ; 16: lh ${{[0-9]+}}, 0(${{[0-9]+}})
   store i32 %conv, i32* %i, align 4
-  %1 = load i32* %i, align 4
+  %1 = load i32, i32* %i, align 4
   %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %1)
   ret i32 0
 }
diff --git a/llvm/test/CodeGen/Mips/lhu1.ll b/llvm/test/CodeGen/Mips/lhu1.ll
index 0cfcede669e..bd6d0c0d3c2 100644
--- a/llvm/test/CodeGen/Mips/lhu1.ll
+++ b/llvm/test/CodeGen/Mips/lhu1.ll
@@ -7,11 +7,11 @@ define i32 @main() nounwind {
 entry:
   %i = alloca i32, align 4
-  %0 = load i16* @s, align 2
+  %0 = load i16, i16* @s, align 2
   %conv = zext i16 %0 to i32
 ; 16: lhu ${{[0-9]+}}, 0(${{[0-9]+}})
   store i32 %conv, i32* %i, align 4
-  %1 = load i32* %i, align 4
+  %1 = load i32, i32* %i, align 4
   %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %1)
   ret i32 0
 }
diff --git a/llvm/test/CodeGen/Mips/llcarry.ll b/llvm/test/CodeGen/Mips/llcarry.ll
index 7763daec3b3..f4120ecec17 100644
--- a/llvm/test/CodeGen/Mips/llcarry.ll
+++ b/llvm/test/CodeGen/Mips/llcarry.ll
@@ -9,8 +9,8 @@
 define void @test1() nounwind {
 entry:
-  %0 = load i64* @i, align 8
-  %1 = load i64* @j, align 8
+  %0 = load i64, i64* @i, align 8
+  %1 = load i64, i64* @j, align 8
   %add = add nsw i64 %1, %0
   store i64 %add, i64* @k, align 8
 ; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
@@ -23,8 +23,8 @@ entry:
 
 define void @test2() nounwind {
 entry:
-  %0 = load i64* @i, align 8
-  %1 = load i64* @j, align 8
+  %0 = load i64, i64* @i, align 8
+  %1 = load i64, i64* @j, align 8
   %sub = sub nsw i64 %0, %1
 ; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
 ; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
@@ -37,7 +37,7 @@ entry:
 
 define void @test3() nounwind {
 entry:
-  %0 = load i64* @ii, align 8
+  %0 = load i64, i64* @ii, align 8
   %add = add nsw i64 %0, 15
 ; 16: addiu ${{[0-9]+}}, 15
 ; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
diff --git a/llvm/test/CodeGen/Mips/load-store-left-right.ll b/llvm/test/CodeGen/Mips/load-store-left-right.ll
index b8e6e8338d9..ade0d984ebe 100644
--- a/llvm/test/CodeGen/Mips/load-store-left-right.ll
+++ b/llvm/test/CodeGen/Mips/load-store-left-right.ll
@@ -43,7 +43,7 @@ entry:
 ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)(
 ; MIPS64R6: lw $2, 0($[[PTR]])
 
-  %0 = load i32* getelementptr inbounds (%struct.SI* @si, i32 0, i32 0), align 1
+  %0 = load i32, i32* getelementptr inbounds (%struct.SI* @si, i32 0, i32 0), align 1
   ret i32 %0
 }
 
@@ -100,7 +100,7 @@ entry:
 ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(sll)(
 ; MIPS64R6: ld $2, 0($[[PTR]])
 
-  %0 = load i64* getelementptr inbounds (%struct.SLL* @sll, i64 0, i32 0), align 1
+  %0 = load i64, i64* getelementptr inbounds (%struct.SLL* @sll, i64 0, i32 0), align 1
   ret i64 %0
 }
 
@@ -129,7 +129,7 @@ entry:
 ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(si)(
 ; MIPS64R6: lw $2, 0($[[PTR]])
 
-  %0 = load i32* getelementptr inbounds (%struct.SI* @si, i64 0, i32 0), align 1
+  %0 = load i32, i32* getelementptr inbounds (%struct.SI* @si, i64 0, i32 0), align 1
   %conv = sext i32 %0 to i64
   ret i64 %conv
 }
 
@@ -165,7 +165,7 @@ entry:
 ; MIPS64R6: ld $[[PTR:[0-9]+]], %got_disp(sui)(
 ; MIPS64R6: lwu $2, 0($[[PTR]])
 
-  %0 = load i32* getelementptr inbounds (%struct.SUI* @sui, i64 0, i32 0), align 1
+  %0 = load i32, i32* getelementptr inbounds (%struct.SUI* @sui, i64 0, i32 0), align 1
   %conv = zext i32 %0 to i64
   ret i64 %conv
 }
 
@@ -257,7 +257,7 @@ entry:
 ; ALL-DAG: lbu $[[R1:[0-9]+]], 1($[[PTR]])
 ; ALL-DAG: sb $[[R1]], 3($[[PTR]])
 
-  %0 = load %struct.S0* getelementptr inbounds (%struct.S0* @struct_s0, i32 0), align 1
+  %0 = load %struct.S0, %struct.S0* getelementptr inbounds (%struct.S0* @struct_s0, i32 0), align 1
   store %struct.S0 %0, %struct.S0* getelementptr inbounds (%struct.S0* @struct_s0, i32 1), align 1
   ret void
 }
 
@@ -300,7 +300,7 @@ entry:
 ; MIPS64R6-DAG: lhu $[[R1:[0-9]+]], 2($[[PTR]])
 ; MIPS64R6-DAG: sh $[[R1]], 6($[[PTR]])
 
-  %0 = load %struct.S1* getelementptr inbounds (%struct.S1* @struct_s1, i32 0), align 1
+  %0 = load %struct.S1, %struct.S1* getelementptr inbounds (%struct.S1* @struct_s1, i32 0), align 1
   store %struct.S1 %0, %struct.S1* getelementptr inbounds (%struct.S1* @struct_s1, i32 1), align 1
   ret void
 }
 
@@ -361,7 +361,7 @@ entry:
 ; MIPS64R6-DAG: lw $[[R1:[0-9]+]], 4($[[PTR]])
 ; MIPS64R6-DAG: sw $[[R1]], 12($[[PTR]])
 
-  %0 = load %struct.S2* getelementptr inbounds (%struct.S2* @struct_s2, i32 0), align 1
+  %0 = load %struct.S2, %struct.S2* getelementptr inbounds (%struct.S2* @struct_s2, i32 0), align 1
   store %struct.S2 %0, %struct.S2* getelementptr inbounds (%struct.S2* @struct_s2, i32 1), align 1
   ret void
 }
diff --git a/llvm/test/CodeGen/Mips/machineverifier.ll b/llvm/test/CodeGen/Mips/machineverifier.ll
index c673fe557e6..d496b833a6c 100644
--- a/llvm/test/CodeGen/Mips/machineverifier.ll
+++ b/llvm/test/CodeGen/Mips/machineverifier.ll
@@ -6,7 +6,7 @@
 define void @foo() nounwind {
 entry:
-  %0 = load i32* @g, align 4
+  %0 = load i32, i32* @g, align 4
   %tobool = icmp eq i32 %0, 0
   br i1 %tobool, label %if.end, label %if.then
diff --git a/llvm/test/CodeGen/Mips/mbrsize4a.ll b/llvm/test/CodeGen/Mips/mbrsize4a.ll
index 15e1f47ce29..ad8eb641995 100644
--- a/llvm/test/CodeGen/Mips/mbrsize4a.ll
+++ b/llvm/test/CodeGen/Mips/mbrsize4a.ll
@@ -21,7 +21,7 @@ y: ; preds = %z
   br label %z
 
 return: ; No predecessors!
-  %0 = load i32* %retval
+  %0 = load i32, i32* %retval
   ret i32 %0
 ; jal16: jal $BB{{[0-9]+}}_{{[0-9]+}}
 }
diff --git a/llvm/test/CodeGen/Mips/micromips-addiu.ll b/llvm/test/CodeGen/Mips/micromips-addiu.ll
index c5bee34028c..66550f40056 100644
--- a/llvm/test/CodeGen/Mips/micromips-addiu.ll
+++ b/llvm/test/CodeGen/Mips/micromips-addiu.ll
@@ -8,17 +8,17 @@
 define i32 @main() nounwind {
 entry:
-  %0 = load i32* @x, align 4
+  %0 = load i32, i32* @x, align 4
   %addiu1 = add i32 %0, -7
   %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0), i32 %addiu1)
 
-  %1 = load i32* @y, align 4
+  %1 = load i32, i32* @y, align 4
   %addiu2 = add i32 %1, 55
   %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0), i32 %addiu2)
 
-  %2 = load i32* @z, align 4
+  %2 = load i32, i32* @z, align 4
   %addiu3 = add i32 %2, 24
   %call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0), i32 %addiu3)
diff --git a/llvm/test/CodeGen/Mips/micromips-and16.ll b/llvm/test/CodeGen/Mips/micromips-and16.ll
index 4eacf18867e..d0a16ac28a0 100644
--- a/llvm/test/CodeGen/Mips/micromips-and16.ll
+++ b/llvm/test/CodeGen/Mips/micromips-and16.ll
@@ -8,8 +8,8 @@ entry:
   %b = alloca i32, align 4
   %c = alloca i32, align 4
   store i32 0, i32* %retval
-  %0 = load i32* %b, align 4
-  %1 = load i32* %c, align 4
+  %0 = load i32, i32* %b, align 4
+  %1 = load i32, i32* %c, align 4
   %and = and i32 %0, %1
   store i32 %and, i32* %a, align 4
   ret i32 0
diff --git a/llvm/test/CodeGen/Mips/micromips-andi.ll b/llvm/test/CodeGen/Mips/micromips-andi.ll
index b82d2b09eae..1507c751f34 100644
--- a/llvm/test/CodeGen/Mips/micromips-andi.ll
+++ b/llvm/test/CodeGen/Mips/micromips-andi.ll
@@ -7,12 +7,12 @@
 define i32 @main() nounwind {
 entry:
-  %0 = load i32* @x, align 4
+  %0 = load i32, i32* @x, align 4
   %and1 = and i32 %0, 4
   %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0), i32 %and1)
 
-  %1 = load i32* @y, align 4
+  %1 = load i32, i32* @y, align 4
   %and2 = and i32 %1, 5
   %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0), i32 %and2)
diff --git a/llvm/test/CodeGen/Mips/micromips-compact-branches.ll b/llvm/test/CodeGen/Mips/micromips-compact-branches.ll
index 670f9a05064..c689944d386 100644
--- a/llvm/test/CodeGen/Mips/micromips-compact-branches.ll
+++ b/llvm/test/CodeGen/Mips/micromips-compact-branches.ll
@@ -4,7 +4,7 @@ define void @main() nounwind uwtable {
 entry:
   %x = alloca i32, align 4
-  %0 = load i32* %x, align 4
+  %0 = load i32, i32* %x, align 4
   %cmp = icmp eq i32 %0, 0
   br i1 %cmp, label %if.then, label %if.end
diff --git a/llvm/test/CodeGen/Mips/micromips-delay-slot-jr.ll b/llvm/test/CodeGen/Mips/micromips-delay-slot-jr.ll
index c01e6704f35..fa121f83a75 100644
--- a/llvm/test/CodeGen/Mips/micromips-delay-slot-jr.ll
+++ b/llvm/test/CodeGen/Mips/micromips-delay-slot-jr.ll
@@ -14,7 +14,7 @@ L1: ; preds = %entry, %L1
   %puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str, i32 0, i32 0))
   %inc = add i32 %i.0, 1
   %arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @main.L, i32 0, i32 %i.0
-  %0 = load i8** %arrayidx, align 4, !tbaa !1
+  %0 = load i8*, i8** %arrayidx, align 4, !tbaa !1
   indirectbr i8* %0, [label %L1, label %L2]
 
 L2: ; preds = %L1
diff --git a/llvm/test/CodeGen/Mips/micromips-delay-slot.ll b/llvm/test/CodeGen/Mips/micromips-delay-slot.ll
index b5f6c56235b..ef654623283 100644
--- a/llvm/test/CodeGen/Mips/micromips-delay-slot.ll
+++ b/llvm/test/CodeGen/Mips/micromips-delay-slot.ll
@@ -6,7 +6,7 @@ define i32 @foo(i32 signext %a) #0 {
 entry:
   %a.addr = alloca i32, align 4
   store i32 %a, i32* %a.addr, align 4
-  %0 = load i32* %a.addr, align 4
+  %0 = load i32, i32* %a.addr, align 4
   %shl = shl i32 %0, 2
   %call = call i32 @bar(i32 signext %shl)
   ret i32 %call
diff --git a/llvm/test/CodeGen/Mips/micromips-gp-rc.ll b/llvm/test/CodeGen/Mips/micromips-gp-rc.ll
index 945917a3c89..f139f7a8486 100644
--- a/llvm/test/CodeGen/Mips/micromips-gp-rc.ll
+++ b/llvm/test/CodeGen/Mips/micromips-gp-rc.ll
@@ -6,7 +6,7 @@
 ; Function Attrs: noreturn nounwind
 define void @foo() #0 {
 entry:
-  %0 = load i32* @g, align 4
+  %0 = load i32, i32* @g, align 4
   tail call void @exit(i32 signext %0)
   unreachable
 }
diff --git a/llvm/test/CodeGen/Mips/micromips-jal.ll b/llvm/test/CodeGen/Mips/micromips-jal.ll
index fccc2291972..51832fe333d 100644
--- a/llvm/test/CodeGen/Mips/micromips-jal.ll
+++ b/llvm/test/CodeGen/Mips/micromips-jal.ll
@@ -7,8 +7,8 @@ entry:
   %b.addr = alloca i32, align 4
   store i32 %a, i32* %a.addr, align 4
   store i32 %b, i32* %b.addr, align 4
-  %0 = load i32* %a.addr, align 4
-  %1 = load i32* %b.addr, align 4
+  %0 = load i32, i32* %a.addr, align 4
+  %1 = load i32, i32* %b.addr, align 4
   %add = add nsw i32 %0, %1
   ret i32 %add
 }
@@ -20,11 +20,11 @@ entry:
   %y = alloca i32, align 4
   %z = alloca i32, align 4
   store i32 0, i32* %retval
-  %0 = load i32* %y, align 4
-  %1 = load i32* %z, align 4
+  %0 = load i32, i32* %y, align 4
+  %1 = load i32, i32* %z, align 4
   %call = call i32 @sum(i32 %0, i32 %1)
   store i32 %call, i32* %x, align 4
-  %2 = load i32* %x, align 4
+  %2 = load i32, i32* %x, align 4
   ret i32 %2
 }
diff --git a/llvm/test/CodeGen/Mips/micromips-load-effective-address.ll b/llvm/test/CodeGen/Mips/micromips-load-effective-address.ll
index afba760f0e6..47045809821 100644
--- a/llvm/test/CodeGen/Mips/micromips-load-effective-address.ll
+++ b/llvm/test/CodeGen/Mips/micromips-load-effective-address.ll
@@ -7,10 +7,10 @@ entry:
   %y.addr = alloca i32*, align 8
   store i32* %x, i32** %x.addr, align 8
   store i32* %y, i32** %y.addr, align 8
-  %0 = load i32** %x.addr, align 8
-  %1 = load i32* %0, align 4
-  %2 = load i32** %y.addr, align 8
-  %3 = load i32* %2, align 4
+  %0 = load i32*, i32** %x.addr, align 8
+  %1 = load i32, i32* %0, align 4
+  %2 = load i32*, i32** %y.addr, align 8
+  %3 = load i32, i32* %2, align 4
   %add = add nsw i32 %1, %3
   ret i32 %add
 }
diff --git a/llvm/test/CodeGen/Mips/micromips-or16.ll b/llvm/test/CodeGen/Mips/micromips-or16.ll
index ab7e79abab6..82ea9c687df 100644
--- a/llvm/test/CodeGen/Mips/micromips-or16.ll
+++ b/llvm/test/CodeGen/Mips/micromips-or16.ll
@@ -8,8 +8,8 @@ entry:
   %b = alloca i32, align 4
   %c = alloca i32, align 4
   store i32 0, i32* %retval
-  %0 = load i32* %b, align 4
-  %1 = load i32* %c, align 4
+  %0 = load i32, i32* %b, align 4
+  %1 = load i32, i32* %c, align 4
   %or = or i32 %0, %1
   store i32 %or, i32* %a, align 4
   ret i32 0
diff --git a/llvm/test/CodeGen/Mips/micromips-rdhwr-directives.ll b/llvm/test/CodeGen/Mips/micromips-rdhwr-directives.ll
index af40a879682..ebe4dddd012 100644
--- a/llvm/test/CodeGen/Mips/micromips-rdhwr-directives.ll
+++ b/llvm/test/CodeGen/Mips/micromips-rdhwr-directives.ll
@@ -10,6 +10,6 @@ entry:
 ; CHECK: rdhwr
 ; CHECK: .set pop
 
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
   ret i32 %0
 }
diff --git a/llvm/test/CodeGen/Mips/micromips-shift.ll b/llvm/test/CodeGen/Mips/micromips-shift.ll
index 8215010bfc7..ed1bcbbf083 100644
--- a/llvm/test/CodeGen/Mips/micromips-shift.ll
+++ b/llvm/test/CodeGen/Mips/micromips-shift.ll
@@ -8,11 +8,11 @@
 define i32 @shift_left() nounwind {
 entry:
-  %0 = load i32* @a, align 4
+  %0 = load i32, i32* @a, align 4
   %shl = shl i32 %0, 4
   store i32 %shl, i32* @b, align 4
 
-  %1 = load i32* @c, align 4
+  %1 = load i32, i32* @c, align 4
   %shl1 = shl i32 %1, 10
   store i32 %shl1, i32* @d, align 4
 
@@ -29,11 +29,11 @@ entry:
 define i32 @shift_right() nounwind {
 entry:
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %shr = lshr i32 %0, 4
   store i32 %shr, i32* @j, align 4
 
-  %1 = load i32* @m, align 4
+  %1 = load i32, i32* @m, align 4
   %shr1 = lshr i32 %1, 10
   store i32 %shr1, i32* @n, align 4
 
diff --git a/llvm/test/CodeGen/Mips/micromips-sw-lw-16.ll b/llvm/test/CodeGen/Mips/micromips-sw-lw-16.ll
index 7ea4413afa1..358372649b5 100644
--- a/llvm/test/CodeGen/Mips/micromips-sw-lw-16.ll
+++ b/llvm/test/CodeGen/Mips/micromips-sw-lw-16.ll
@@ -6,16 +6,16 @@ define void @bar(i32* %p) #0 {
 entry:
   %p.addr = alloca i32*, align 4
   store i32* %p, i32** %p.addr, align 4
-  %0 = load i32** %p.addr, align 4
-  %1 = load i32* %0, align 4
+  %0 = load i32*, i32** %p.addr, align 4
+  %1 = load i32, i32* %0, align 4
   %add = add nsw i32 7, %1
-  %2 = load i32** %p.addr, align 4
+  %2 = load i32*, i32** %p.addr, align 4
   store i32 %add, i32* %2, align 4
-  %3 = load i32** %p.addr, align 4
+  %3 = load i32*, i32** %p.addr, align 4
   %add.ptr = getelementptr inbounds i32, i32* %3, i32 1
-  %4 = load i32* %add.ptr, align 4
+  %4 = load i32, i32* %add.ptr, align 4
   %add1 = add nsw i32 7, %4
-  %5 = load i32** %p.addr, align 4
+  %5 = load i32*, i32** %p.addr, align 4
   %add.ptr2 = getelementptr inbounds i32, i32* %5, i32 1
   store i32 %add1, i32* %add.ptr2, align 4
   ret void
diff --git a/llvm/test/CodeGen/Mips/micromips-xor16.ll b/llvm/test/CodeGen/Mips/micromips-xor16.ll
index 991511275af..53c75acd4d3 100644
--- a/llvm/test/CodeGen/Mips/micromips-xor16.ll
+++ b/llvm/test/CodeGen/Mips/micromips-xor16.ll
@@ -8,8 +8,8 @@ entry:
   %b = alloca i32, align 4
   %c = alloca i32, align 4
   store i32 0, i32* %retval
-  %0 = load i32* %b, align 4
-  %1 = load i32* %c, align 4
+  %0 = load i32, i32* %b, align 4
+  %1 = load i32, i32* %c, align 4
   %xor = xor i32 %0, %1
   store i32 %xor, i32* %a, align 4
   ret i32 0
diff --git a/llvm/test/CodeGen/Mips/mips16_32_8.ll b/llvm/test/CodeGen/Mips/mips16_32_8.ll
index 2f5bc219cf3..5f03bf3a89c 100644
--- a/llvm/test/CodeGen/Mips/mips16_32_8.ll
+++ b/llvm/test/CodeGen/Mips/mips16_32_8.ll
@@ -22,11 +22,11 @@ entry:
 define void @nofoo() #1 {
 entry:
   store i32 20, i32* @i, align 4
-  %0 = load float* @x, align 4
-  %1 = load float* @y, align 4
+  %0 = load float, float* @x, align 4
+  %1 = load float, float* @y, align 4
   %add = fadd float %0, %1
   store float %add, float* @f, align 4
-  %2 = load float* @f, align 4
+  %2 = load float, float* @f, align 4
   %conv = fpext float %2 to double
   %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str, i32 0, i32 0), double %conv)
   ret void
@@ -48,10 +48,10 @@ declare i32 @printf(i8*, ...) #2
 define i32 @main() #3 {
 entry:
   call void @foo()
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str1, i32 0, i32 0), i32 %0)
   call void @nofoo()
-  %1 = load i32* @i, align 4
+  %1 = load i32, i32* @i, align 4
   %call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([13 x i8]* @.str2, i32 0, i32 0), i32 %1)
   ret i32 0
 }
diff --git a/llvm/test/CodeGen/Mips/mips16_fpret.ll b/llvm/test/CodeGen/Mips/mips16_fpret.ll
index 635b28d81a7..bf232c98442 100644
--- a/llvm/test/CodeGen/Mips/mips16_fpret.ll
+++ b/llvm/test/CodeGen/Mips/mips16_fpret.ll
@@ -11,7 +11,7 @@
 define float @foox() {
 entry:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   ret float %0
 ; 1: .ent foox
 ; 1: lw $2, %lo(x)(${{[0-9]+}})
 
@@ -20,7 +20,7 @@ entry:
 define double @foodx() {
 entry:
-  %0 = load double* @dx, align 8
+  %0 = load double, double* @dx, align 8
   ret double %0
 ; 1: .ent foodx
 ; 1: lw $2, %lo(dx)(${{[0-9]+}})
 
@@ -34,13 +34,13 @@ entry:
 define { float, float } @foocx() {
 entry:
   %retval = alloca { float, float }, align 4
-  %cx.real = load float* getelementptr inbounds ({ float, float }* @cx, i32 0, i32 0)
-  %cx.imag = load float* getelementptr inbounds ({ float, float }* @cx, i32 0, i32 1)
+  %cx.real = load float, float* getelementptr inbounds ({ float, float }* @cx, i32 0, i32 0)
+  %cx.imag = load float, float* getelementptr inbounds ({ float, float }* @cx, i32 0, i32 1)
   %real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
   %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
   store float %cx.real, float* %real
   store float %cx.imag, float* %imag
-  %0 = load { float, float }* %retval
+  %0 = load { float, float }, { float, float }* %retval
   ret { float, float } %0
 ; 1: .ent foocx
 ; 1: lw $2, %lo(cx)(${{[0-9]+}})
@@ -53,13 +53,13 @@ entry:
 define { double, double } @foodcx() {
 entry:
   %retval = alloca { double, double }, align 8
-  %dcx.real = load double* getelementptr inbounds ({ double, double }* @dcx, i32 0, i32 0)
-  %dcx.imag = load double* getelementptr inbounds ({ double, double }* @dcx, i32 0, i32 1)
+  %dcx.real = load double, double* getelementptr inbounds ({ double, double }* @dcx, i32 0, i32 0)
+  %dcx.imag = load double, double* getelementptr inbounds ({ double, double }* @dcx, i32 0, i32 1)
   %real = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 0
   %imag = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 1
   store double %dcx.real, double* %real
   store double %dcx.imag, double* %imag
-  %0 = load { double, double }* %retval
+  %0 = load { double, double }, { double, double }* %retval
   ret { double, double } %0
 ; 1: .ent foodcx
 ; 1: lw ${{[0-9]}}, %lo(dcx)(${{[0-9]+}})
diff --git a/llvm/test/CodeGen/Mips/mips16ex.ll b/llvm/test/CodeGen/Mips/mips16ex.ll
index 3f70c72dbba..983d4dac94e 100644
--- a/llvm/test/CodeGen/Mips/mips16ex.ll
+++ b/llvm/test/CodeGen/Mips/mips16ex.ll
@@ -33,18 +33,18 @@ lpad: ; preds = %entry
   br label %catch.dispatch
 
 catch.dispatch: ; preds = %lpad
-  %sel = load i32* %ehselector.slot
+  %sel = load i32, i32* %ehselector.slot
   %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) nounwind
   %matches = icmp eq i32 %sel, %4
   br i1 %matches, label %catch, label %eh.resume
 
 catch: ; preds = %catch.dispatch
-  %exn = load i8** %exn.slot
+  %exn = load i8*, i8** %exn.slot
   %5 = call i8* @__cxa_begin_catch(i8* %exn) nounwind
   %6 = bitcast i8* %5 to i32*
-  %exn.scalar = load i32* %6
+  %exn.scalar = load i32, i32* %6
   store i32 %exn.scalar, i32* %e, align 4
-  %7 = load i32* %e, align 4
+  %7 = load i32, i32* %e, align 4
   %call2 = invoke i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([15 x i8]* @.str1, i32 0, i32 0), i32 %7)
           to label %invoke.cont unwind label %lpad1
 
@@ -66,8 +66,8 @@ lpad1: ; preds = %catch
   br label %eh.resume
 
 eh.resume: ; preds = %lpad1, %catch.dispatch
-  %exn3 = load i8** %exn.slot
-  %sel4 = load i32* %ehselector.slot
+  %exn3 = load i8*, i8** %exn.slot
+  %sel4 = load i32, i32* %ehselector.slot
   %lpad.val = insertvalue { i8*, i32 } undef, i8* %exn3, 0
   %lpad.val5 = insertvalue { i8*, i32 } %lpad.val, i32 %sel4, 1
   resume { i8*, i32 } %lpad.val5
diff --git a/llvm/test/CodeGen/Mips/mips16fpe.ll b/llvm/test/CodeGen/Mips/mips16fpe.ll
index 987980e080f..f8b916da3a4 100644
--- a/llvm/test/CodeGen/Mips/mips16fpe.ll
+++ b/llvm/test/CodeGen/Mips/mips16fpe.ll
@@ -42,8 +42,8 @@ define void @test_addsf3() nounwind {
 entry:
 ;16hf-LABEL: test_addsf3:
-  %0 = load float* @x, align 4
-  %1 = load float* @y, align 4
+  %0 = load float, float* @x, align 4
+  %1 = load float, float* @y, align 4
   %add = fadd float %0, %1
   store float %add, float* @addsf3_result, align 4
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_addsf3)(${{[0-9]+}})
@@ -53,8 +53,8 @@ entry:
 define void @test_adddf3() nounwind {
 entry:
 ;16hf-LABEL: test_adddf3:
-  %0 = load double* @xd, align 8
-  %1 = load double* @yd, align 8
+  %0 = load double, double* @xd, align 8
+  %1 = load double, double* @yd, align 8
   %add = fadd double %0, %1
   store double %add, double* @adddf3_result, align 8
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_adddf3)(${{[0-9]+}})
@@ -64,8 +64,8 @@ entry:
 define void @test_subsf3() nounwind {
 entry:
 ;16hf-LABEL: test_subsf3:
-  %0 = load float* @x, align 4
-  %1 = load float* @y, align 4
+  %0 = load float, float* @x, align 4
+  %1 = load float, float* @y, align 4
   %sub = fsub float %0, %1
   store float %sub, float* @subsf3_result, align 4
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_subsf3)(${{[0-9]+}})
@@ -75,8 +75,8 @@ entry:
 define void @test_subdf3() nounwind {
 entry:
 ;16hf-LABEL: test_subdf3:
-  %0 = load double* @xd, align 8
-  %1 = load double* @yd, align 8
+  %0 = load double, double* @xd, align 8
+  %1 = load double, double* @yd, align 8
   %sub = fsub double %0, %1
   store double %sub, double* @subdf3_result, align 8
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_subdf3)(${{[0-9]+}})
@@ -86,8 +86,8 @@ entry:
 define void @test_mulsf3() nounwind {
 entry:
 ;16hf-LABEL: test_mulsf3:
-  %0 = load float* @x, align 4
-  %1 = load float* @y, align 4
+  %0 = load float, float* @x, align 4
+  %1 = load float, float* @y, align 4
   %mul = fmul float %0, %1
   store float %mul, float* @mulsf3_result, align 4
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_mulsf3)(${{[0-9]+}})
@@ -97,8 +97,8 @@ entry:
 define void @test_muldf3() nounwind {
 entry:
 ;16hf-LABEL: test_muldf3:
-  %0 = load double* @xd, align 8
-  %1 = load double* @yd, align 8
+  %0 = load double, double* @xd, align 8
+  %1 = load double, double* @yd, align 8
   %mul = fmul double %0, %1
   store double %mul, double* @muldf3_result, align 8
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_muldf3)(${{[0-9]+}})
@@ -108,8 +108,8 @@ entry:
 define void @test_divsf3() nounwind {
 entry:
 ;16hf-LABEL: test_divsf3:
-  %0 = load float* @y, align 4
-  %1 = load float* @x, align 4
+  %0 = load float, float* @y, align 4
+  %1 = load float, float* @x, align 4
   %div = fdiv float %0, %1
   store float %div, float* @divsf3_result, align 4
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_divsf3)(${{[0-9]+}})
@@ -119,9 +119,9 @@ entry:
 define void @test_divdf3() nounwind {
 entry:
 ;16hf-LABEL: test_divdf3:
-  %0 = load double* @yd, align 8
+  %0 = load double, double* @yd, align 8
   %mul = fmul double %0, 2.000000e+00
-  %1 = load double* @xd, align 8
+  %1 = load double, double* @xd, align 8
   %div = fdiv double %mul, %1
   store double %div, double* @divdf3_result, align 8
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_divdf3)(${{[0-9]+}})
@@ -131,7 +131,7 @@ entry:
 define void @test_extendsfdf2() nounwind {
 entry:
 ;16hf-LABEL: test_extendsfdf2:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   %conv = fpext float %0 to double
   store double %conv, double* @extendsfdf2_result, align 8
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_extendsfdf2)(${{[0-9]+}})
@@ -141,7 +141,7 @@ entry:
 define void @test_truncdfsf2() nounwind {
 entry:
 ;16hf-LABEL: test_truncdfsf2:
-  %0 = load double* @xd2, align 8
+  %0 = load double, double* @xd2, align 8
   %conv = fptrunc double %0 to float
   store float %conv, float* @truncdfsf2_result, align 4
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_truncdfsf2)(${{[0-9]+}})
@@ -151,7 +151,7 @@ entry:
 define void @test_fix_truncsfsi() nounwind {
 entry:
 ;16hf-LABEL: test_fix_truncsfsi:
-  %0 = load float* @x, align 4
+  %0 = load float, float* @x, align 4
   %conv = fptosi float %0 to i32
   store i32 %conv, i32* @fix_truncsfsi_result, align 4
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_fix_truncsfsi)(${{[0-9]+}})
@@ -161,7 +161,7 @@ entry:
 define void @test_fix_truncdfsi() nounwind {
 entry:
 ;16hf-LABEL: test_fix_truncdfsi:
-  %0 = load double* @xd, align 8
+  %0 = load double, double* @xd, align 8
   %conv = fptosi double %0 to i32
   store i32 %conv, i32* @fix_truncdfsi_result, align 4
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_fix_truncdfsi)(${{[0-9]+}})
@@ -171,7 +171,7 @@ entry:
 define void @test_floatsisf() nounwind {
 entry:
 ;16hf-LABEL: test_floatsisf:
-  %0 = load i32* @si, align 4
+  %0 = load i32, i32* @si, align 4
   %conv = sitofp i32 %0 to float
   store float %conv, float* @floatsisf_result, align 4
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatsisf)(${{[0-9]+}})
@@ -181,7 +181,7 @@ entry:
 define void @test_floatsidf() nounwind {
 entry:
 ;16hf-LABEL: test_floatsidf:
-  %0 = load i32* @si, align 4
+  %0 = load i32, i32* @si, align 4
   %conv = sitofp i32 %0 to double
   store double %conv, double* @floatsidf_result, align 8
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatsidf)(${{[0-9]+}})
@@ -191,7 +191,7 @@ entry:
 define void @test_floatunsisf() nounwind {
 entry:
 ;16hf-LABEL: test_floatunsisf:
-  %0 = load i32* @ui, align 4
+  %0 = load i32, i32* @ui, align 4
   %conv = uitofp i32 %0 to float
   store float %conv, float* @floatunsisf_result, align 4
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatunsisf)(${{[0-9]+}})
@@ -201,7 +201,7 @@ entry:
 define void @test_floatunsidf() nounwind {
 entry:
 ;16hf-LABEL: test_floatunsidf:
-  %0 = load i32* @ui, align 4
+  %0 = load i32, i32* @ui, align 4
   %conv = uitofp i32 %0 to double
   store double %conv, double* @floatunsidf_result, align 8
 ;16hf: lw ${{[0-9]+}}, %call16(__mips16_floatunsidf)(${{[0-9]+}})
@@ -211,8 +211,8 @@ entry:
 define void @test_eqsf2() nounwind {
 entry:
 ;16hf-LABEL: test_eqsf2:
-  %0 = load float* @x, align 4
-  %1 = load float* @xx, align 4
+  %0 = load float, float* @x, align 4
+  %1 = load float, float* @xx, align 4
   %cmp = fcmp oeq float %0, %1
   %conv = zext i1 %cmp to i32
   store i32 %conv, i32* @eqsf2_result, align 4
@@ -223,8 +223,8 @@ entry:
 define void @test_eqdf2() nounwind {
 entry:
 ;16hf-LABEL: test_eqdf2:
-  %0 = load double* @xd, align 8
-  %1 = load double* @xxd, align 8
+  %0 = load double, double* @xd, align 8
+  %1 = load double, double* @xxd, align 8
   %cmp = fcmp oeq double %0, %1
   %conv = zext i1 %cmp to i32
   store i32 %conv, i32* @eqdf2_result, align 4
@@ -235,8 +235,8 @@ entry:
 define void @test_nesf2() nounwind {
 entry:
 ;16hf-LABEL: test_nesf2:
-  %0 = load float* @x, align 4
-  %1 = load float* @y, align 4
+  %0 = load float, float* @x, align 4
+  %1 = load float, float* @y, align 4
   %cmp = fcmp une float %0, %1
   %conv = zext i1 %cmp to i32
   store i32 %conv, i32* @nesf2_result, align 4
@@ -247,8 +247,8 @@ entry:
 define void @test_nedf2() nounwind {
 entry:
 ;16hf-LABEL: test_nedf2:
-  %0 = load double* @xd, align 8
-  %1 = load double* @yd, align 8
+  %0 = load double, double* @xd, align 8
+  %1 = load double, double* @yd, align 8
   %cmp = fcmp une double %0, %1
   %conv = zext i1 %cmp to i32
   store i32 %conv, i32* @nedf2_result, align 4
@@ -259,10 +259,10 @@ entry:
 define void @test_gesf2() nounwind {
 entry:
 ;16hf-LABEL: test_gesf2:
-  %0 = load float* @x, align 4
-  %1 = load float* @xx, align 4
+  %0 = load float, float* @x, align 4
+  %1 = load float, float* @xx, align 4
   %cmp = fcmp oge float %0, %1
-  %2 = load float* @y, align 4
+  %2 = load float, float* @y, align 4
   %cmp1 = fcmp oge float %2, %0
   %and3 = and i1 %cmp, %cmp1
   %and = zext i1 %and3 to i32
@@ -274,10 +274,10 @@ entry:
 define void @test_gedf2() nounwind {
 entry:
 ;16hf-LABEL: test_gedf2:
-  %0 = load double* @xd, align 8
-  %1 = load double* @xxd, align 8
+  %0 = load double, double* @xd, align 8
+  %1 = load double, double* @xxd, align 8
   %cmp = fcmp oge double %0, %1
-  %2 = load double* @yd, align 8
+  %2 = load double, double* @yd, align 8
   %cmp1 = fcmp oge double %2, %0
   %and3 = and i1 %cmp, %cmp1
   %and = zext i1 %and3 to i32
@@ -289,10 +289,10 @@ entry:
 define void @test_ltsf2() nounwind {
 entry:
 ;16hf-LABEL: test_ltsf2:
-  %0 = load float* @x, align 4
-  %1 = load float* @xx, align 4
+  %0 = load float, float* @x, align 4
+  %1 = load float, float* @xx, align 4
   %lnot = fcmp uge float %0, %1
-  %2 = load float* @y, align 4
+  %2 = load float, float* @y, align 4
   %cmp1 = fcmp olt float %0, %2
   %and2 = and i1 %lnot, %cmp1
   %and = zext i1 %and2 to i32
@@ -305,10 +305,10 @@ entry:
 define void @test_ltdf2() nounwind {
 entry:
 ;16hf-LABEL: test_ltdf2:
-  %0 = load double* @xd, align 8
-  %1 = load double* @xxd, align 8
+  %0 = load double, double* @xd, align 8
+  %1 = load double, double* @xxd, align 8
   %lnot = fcmp uge double %0, %1
-  %2 = load double* @yd, align 8
+  %2 = load double, double* @yd, align 8
   %cmp1 = fcmp olt double %0, %2
   %and2 = and i1 %lnot, %cmp1
   %and = zext i1 %and2 to i32
@@ -321,10 +321,10 @@ entry:
 define void @test_lesf2() nounwind {
 entry:
 ;16hf-LABEL: test_lesf2:
-  %0 = load float* @x, align 4
-  %1 = load float* @xx, align 4
+  %0 = load float, float* @x, align 4
+  %1 = load float, float* @xx, align 4
   %cmp = fcmp ole float %0, %1
-  %2 = load float* @y, align 4
+  %2 = load float, float* @y, align 4
   %cmp1 = fcmp ole float %0, %2
   %and3 = and i1 %cmp, %cmp1
   %and = zext i1 %and3 to i32
@@ -336,10 +336,10 @@ entry:
 define void @test_ledf2() nounwind {
 entry:
 ;16hf-LABEL: test_ledf2:
-  %0 = load double* @xd, align 8
-  %1 = load double* @xxd, align 8
+  %0 = load double, double* @xd, align 8
+  %1 = load double, double* @xxd, align 8
   %cmp = fcmp ole double %0, %1
-  %2 = load double* @yd, align 8
+  %2 = load double, double* @yd, align 8
   %cmp1 = fcmp ole double %0, %2
   %and3 = and i1 %cmp, %cmp1
   %and = zext i1 %and3 to i32
@@ -351,10 +351,10 @@ entry:
 define void @test_gtsf2() nounwind {
 entry:
 ;16hf-LABEL: test_gtsf2:
-  %0 = load float* @x, align 4
-  %1 = load float* @xx, align 4
+  %0 = load float, float* @x, align 4
+  %1 = load float, float* @xx, align 4
   %lnot = fcmp ule float %0, %1
-  %2 = load float* @y, align 4
+  %2 = load float, float* @y, align 4
   %cmp1 = fcmp ogt float %2, %0
   %and2 = and i1 %lnot, %cmp1
   %and = zext i1 %and2 to i32
@@ -366,10 +366,10 @@ entry:
 define void @test_gtdf2() nounwind {
 entry:
 ;16hf-LABEL: test_gtdf2:
-  %0 = load double* @xd, align 8
-  %1 = load double* @xxd, align 8
+  %0 = load double, double* @xd, align 8
+  %1 = load double, double* @xxd, align 8
   %lnot = fcmp ule double %0, %1
-  %2 = load double* @yd, align 8
+  %2 = load double, double* @yd, align 8
   %cmp1 = fcmp ogt double %2, %0
   %and2 = and i1 %lnot, %cmp1
   %and = zext i1 %and2 to i32
diff --git a/llvm/test/CodeGen/Mips/mips64-f128-call.ll b/llvm/test/CodeGen/Mips/mips64-f128-call.ll
index 455e540e5df..9a093e6f982 100644
--- a/llvm/test/CodeGen/Mips/mips64-f128-call.ll
+++ b/llvm/test/CodeGen/Mips/mips64-f128-call.ll
@@ -19,7 +19,7 @@ entry:
 define void @foo1() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   tail call void @foo2(fp128 %0)
   ret void
 }
@@ -38,7 +38,7 @@ define fp128 @foo3() {
 entry:
   %call = tail call fp128 @foo4()
   store fp128 %call, fp128* @gld0, align 16
-  %0 = load fp128* @gld1, align 16
+  %0 = load fp128, fp128* @gld1, align 16
   ret fp128 %0
 }
diff --git a/llvm/test/CodeGen/Mips/mips64-f128.ll b/llvm/test/CodeGen/Mips/mips64-f128.ll
index 6987d4ab073..9dd41e389d1 100644
--- a/llvm/test/CodeGen/Mips/mips64-f128.ll
+++ b/llvm/test/CodeGen/Mips/mips64-f128.ll
@@ -18,8 +18,8 @@
 define fp128 @addLD() {
 entry:
-  %0 = load fp128* @gld0, align 16
-  %1 = load fp128* @gld1, align 16
+  %0 = load fp128, fp128* @gld0, align 16
+  %1 = load fp128, fp128* @gld1, align 16
   %add = fadd fp128 %0, %1
   ret fp128 %add
 }
@@ -29,8 +29,8 @@ entry:
 define fp128 @subLD() {
 entry:
-  %0 = load fp128* @gld0, align 16
-  %1 = load fp128* @gld1, align 16
+  %0 = load fp128, fp128* @gld0, align 16
+  %1 = load fp128, fp128* @gld1, align 16
   %sub = fsub fp128 %0, %1
   ret fp128 %sub
 }
@@ -40,8 +40,8 @@ entry:
 define fp128 @mulLD() {
 entry:
-  %0 = load fp128* @gld0, align 16
-  %1 = load fp128* @gld1, align 16
+  %0 = load fp128, fp128* @gld0, align 16
+  %1 = load fp128, fp128* @gld1, align 16
   %mul = fmul fp128 %0, %1
   ret fp128 %mul
 }
@@ -51,8 +51,8 @@ entry:
 define fp128 @divLD() {
 entry:
-  %0 = load fp128* @gld0, align 16
-  %1 = load fp128* @gld1, align 16
+  %0 = load fp128, fp128* @gld0, align 16
+  %1 = load fp128, fp128* @gld1, align 16
   %div = fdiv fp128 %0, %1
   ret fp128 %div
 }
@@ -247,7 +247,7 @@ entry:
 define fp128 @libcall1_fabsl() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   %call = tail call fp128 @fabsl(fp128 %0) nounwind readnone
   ret fp128 %call
 }
@@ -259,7 +259,7 @@ declare fp128 @fabsl(fp128) #1
 define fp128 @libcall1_ceill() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   %call = tail call fp128 @ceill(fp128 %0) nounwind readnone
   ret fp128 %call
 }
@@ -271,7 +271,7 @@ declare fp128 @ceill(fp128) #1
 define fp128 @libcall1_sinl() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   %call = tail call fp128 @sinl(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -283,7 +283,7 @@ declare fp128 @sinl(fp128) #2
 define fp128 @libcall1_cosl() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   %call = tail call fp128 @cosl(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -295,7 +295,7 @@ declare fp128 @cosl(fp128) #2
 define fp128 @libcall1_expl() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   %call = tail call fp128 @expl(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -307,7 +307,7 @@ declare fp128 @expl(fp128) #2
 define fp128 @libcall1_exp2l() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   %call = tail call fp128 @exp2l(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -319,7 +319,7 @@ declare fp128 @exp2l(fp128) #2
 define fp128 @libcall1_logl() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   %call = tail call fp128 @logl(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -331,7 +331,7 @@ declare fp128 @logl(fp128) #2
 define fp128 @libcall1_log2l() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   %call = tail call fp128 @log2l(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -343,7 +343,7 @@ declare fp128 @log2l(fp128) #2
 define fp128 @libcall1_log10l() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   %call = tail call fp128 @log10l(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -355,7 +355,7 @@ declare fp128 @log10l(fp128) #2
 define fp128 @libcall1_nearbyintl() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   %call = tail call fp128 @nearbyintl(fp128 %0) nounwind readnone
   ret fp128 %call
 }
@@ -367,7 +367,7 @@ declare fp128 @nearbyintl(fp128) #1
 define fp128 @libcall1_floorl() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   %call = tail call fp128 @floorl(fp128 %0) nounwind readnone
   ret fp128 %call
 }
@@ -379,7 +379,7 @@ declare fp128 @floorl(fp128) #1
 define fp128 @libcall1_sqrtl() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   %call = tail call fp128 @sqrtl(fp128 %0) nounwind
   ret fp128 %call
 }
@@ -391,7 +391,7 @@ declare fp128 @sqrtl(fp128) #2
 define fp128 @libcall1_rintl() {
 entry:
-  %0 = load fp128* @gld0, align 16
+  %0 = load fp128, fp128* @gld0, align 16
   %call = tail call fp128 @rintl(fp128 %0) nounwind readnone
   ret fp128 %call
 }
@@ -424,8 +424,8 @@ declare fp128 @llvm.powi.f128(fp128, i32) #3
 define fp128 @libcall2_copysignl() {
 entry:
-  %0 = load fp128* @gld0, align 16
-  %1 = load fp128* @gld1, align 16
+  %0 = load fp128, fp128* @gld0, align 16
+  %1 = load fp128, fp128* @gld1, align 16
   %call = tail call fp128 @copysignl(fp128 %0, fp128 %1) nounwind readnone
   ret fp128 %call
 }
@@ -437,8 +437,8 @@ declare fp128 @copysignl(fp128, fp128) #1
 define fp128 @libcall2_powl() {
 entry:
-  %0 = load fp128* @gld0, align 16
-  %1 = load fp128* @gld1, align 16
+  %0 = load fp128, fp128* @gld0, align 16
+  %1 = load fp128, fp128* @gld1, align 16
   %call = tail call fp128 @powl(fp128 %0, fp128 %1) nounwind
   ret fp128 %call
 }
@@ -450,8 +450,8 @@ declare fp128 @powl(fp128, fp128) #2
 define fp128 @libcall2_fmodl() {
 entry:
-  %0 = load fp128* @gld0, align 16
-  %1 = load fp128* @gld1, align 16
+  %0 = load fp128, fp128* @gld0, align 16
+  %1 = load fp128, fp128* @gld1, align 16
   %call = tail call fp128 @fmodl(fp128 %0, fp128 %1) nounwind
   ret fp128 %call
 }
@@ -463,9 +463,9 @@ declare fp128 @fmodl(fp128, fp128) #2
 define fp128 @libcall3_fmal() {
 entry:
-  %0 = load fp128* @gld0, align 16
-  %1 = load fp128* @gld2, align 16
-  %2 = load fp128* @gld1, align 16
+  %0 = load fp128, fp128* @gld0, align 16
+  %1 = load fp128, fp128* @gld2, align 16
+  %2 = load fp128, fp128* @gld1, align 16
   %3 = tail call fp128 @llvm.fma.f128(fp128 %0, fp128 %2, fp128 %1)
   ret fp128 %3
 }
@@ -539,7 +539,7 @@ entry:
 define fp128 @load_LD_LD() {
 entry:
-  %0 = load fp128* @gld1, align 16
+  %0 = load fp128, fp128* @gld1, align 16
   ret fp128 %0
 }
 
@@ -551,7 +551,7 @@ entry:
 define fp128 @load_LD_float() {
 entry:
-  %0 = load float* @gf1, align 4
+  %0 = load float, float* @gf1, align 4
   %conv = fpext float %0 to fp128
   ret fp128 %conv
 }
@@ -564,7 +564,7 @@ entry:
 define fp128 @load_LD_double() {
 entry:
-  %0 = load double* @gd1, align 8
+  %0 = load double, double* @gd1, align 8
   %conv = fpext double %0 to fp128
   ret fp128 %conv
 }
@@ -579,7 +579,7 @@ entry:
 define void @store_LD_LD() {
 entry:
-  %0 = load fp128* @gld1, align 16
+  %0 = load fp128, fp128* @gld1, align 16
   store fp128 %0, fp128* @gld0, align 16
   ret void
 }
@@ -595,7 +595,7 @@ entry:
 define void @store_LD_float() {
 entry:
-  %0 = load fp128* @gld1, align 16
+  %0 = load fp128, fp128* @gld1, align 16
   %conv = fptrunc fp128 %0 to float
   store float %conv, float* @gf1, align 4
   ret void
@@ -612,7 +612,7 @@ entry:
 define void @store_LD_double() {
 entry:
-  %0 = load fp128* @gld1, align 16
+  %0 = load fp128, fp128* @gld1, align 16
   %conv = fptrunc fp128 %0 to double
   store double %conv, double* @gd1, align 8
   ret void
diff --git a/llvm/test/CodeGen/Mips/mips64directive.ll b/llvm/test/CodeGen/Mips/mips64directive.ll
index c4ba5340e39..b1052f77f5a 100644
--- a/llvm/test/CodeGen/Mips/mips64directive.ll
+++ b/llvm/test/CodeGen/Mips/mips64directive.ll
@@ -6,7 +6,7 @@
 ; CHECK: 8byte
 define i64 @foo1() nounwind readonly {
 entry:
-  %0 = load i64* @gl, align 8
+  %0 = load i64, i64* @gl, align 8
   ret i64 %0
 }
diff --git a/llvm/test/CodeGen/Mips/mips64fpldst.ll b/llvm/test/CodeGen/Mips/mips64fpldst.ll
index 5d62156e91b..55d5c775cbb 100644
--- a/llvm/test/CodeGen/Mips/mips64fpldst.ll
+++ b/llvm/test/CodeGen/Mips/mips64fpldst.ll
@@ -16,7 +16,7 @@ entry:
 ; CHECK-N32: funcfl1
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(f0)
 ; CHECK-N32: lwc1 $f{{[0-9]+}}, 0($[[R0]])
-  %0 = load float* @f0, align 4
+  %0 = load float, float* @f0, align 4
   ret float %0
 }
 
@@ -28,7 +28,7 @@ entry:
 ; CHECK-N32: funcfl2
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(d0)
 ; CHECK-N32: ldc1 $f{{[0-9]+}}, 0($[[R0]])
-  %0 = load double* @d0, align 8
+  %0 = load double, double* @d0, align 8
   ret double %0
 }
 
@@ -40,7 +40,7 @@ entry:
 ; CHECK-N32: funcfs1
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(f0)
 ; CHECK-N32: swc1 $f{{[0-9]+}}, 0($[[R0]])
-  %0 = load float* @f1, align 4
+  %0 = load float, float* @f1, align 4
   store float %0, float* @f0, align 4
   ret void
 }
 
@@ -53,7 +53,7 @@ entry:
 ; CHECK-N32: funcfs2
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(d0)
 ; CHECK-N32: sdc1 $f{{[0-9]+}}, 0($[[R0]])
-  %0 = load double* @d1, align 8
+  %0 = load double, double* @d1, align 8
   store double %0, double* @d0, align 8
   ret void
 }
diff --git a/llvm/test/CodeGen/Mips/mips64instrs.ll b/llvm/test/CodeGen/Mips/mips64instrs.ll
index ed617be6532..d64cdceb6b8 100644
--- a/llvm/test/CodeGen/Mips/mips64instrs.ll
+++ b/llvm/test/CodeGen/Mips/mips64instrs.ll
@@ -123,8 +123,8 @@ entry:
 ; GPRMULDIV: ddiv $2, $[[T0]], $[[T1]]
 ; GPRMULDIV: teq $[[T1]], $zero, 7
 
-  %0 = load i64* @gll0, align 8
-  %1 = load i64* @gll1, align 8
+  %0 = load i64, i64* @gll0, align 8
+  %1 = load i64, i64* @gll1, align 8
   %div = sdiv i64 %0, %1
   ret i64 %div
 }
@@ -144,8 +144,8 @@ entry:
 ; GPRMULDIV: ddivu $2, $[[T0]], $[[T1]]
 ; GPRMULDIV: teq $[[T1]], $zero, 7
 
-  %0 = load i64* @gll0, align 8
-  %1 = load i64* @gll1, align 8
+  %0 = load i64, i64* @gll0, align 8
+  %1 = load i64, i64* @gll1, align 8
   %div = udiv i64 %0, %1
   ret i64 %div
 }
diff --git a/llvm/test/CodeGen/Mips/mips64intldst.ll b/llvm/test/CodeGen/Mips/mips64intldst.ll
index 1ceafc1f5e0..658ab88481c 100644
--- a/llvm/test/CodeGen/Mips/mips64intldst.ll
+++ b/llvm/test/CodeGen/Mips/mips64intldst.ll
@@ -20,7 +20,7 @@ entry:
 ; CHECK-N32: func1
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(c)
 ; CHECK-N32: lb ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i8* @c, align 4
+  %0 = load i8, i8* @c, align 4
   %conv = sext i8 %0 to i64
   ret i64 %conv
 }
 
@@ -33,7 +33,7 @@ entry:
 ; CHECK-N32: func2
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(s)
 ; CHECK-N32: lh ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i16* @s, align 4
+  %0 = load i16, i16* @s, align 4
   %conv = sext i16 %0 to i64
   ret i64 %conv
 }
 
@@ -46,7 +46,7 @@ entry:
 ; CHECK-N32: func3
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(i)
 ; CHECK-N32: lw ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i32* @i, align 4
+  %0 = load i32, i32* @i, align 4
   %conv = sext i32 %0 to i64
   ret i64 %conv
 }
 
@@ -59,7 +59,7 @@ entry:
 ; CHECK-N32: func4
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(l)
 ; CHECK-N32: ld ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i64* @l, align 8
+  %0 = load i64, i64* @l, align 8
   ret i64 %0
 }
 
@@ -71,7 +71,7 @@ entry:
 ; CHECK-N32: ufunc1
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(uc)
 ; CHECK-N32: lbu ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i8* @uc, align 4
+  %0 = load i8, i8* @uc, align 4
   %conv = zext i8 %0 to i64
   ret i64 %conv
 }
 
@@ -84,7 +84,7 @@ entry:
 ; CHECK-N32: ufunc2
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(us)
 ; CHECK-N32: lhu ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i16* @us, align 4
+  %0 = load i16, i16* @us, align 4
   %conv = zext i16 %0 to i64
   ret i64 %conv
 }
 
@@ -97,7 +97,7 @@ entry:
 ; CHECK-N32: ufunc3
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(ui)
 ; CHECK-N32: lwu ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i32* @ui, align 4
+  %0 = load i32, i32* @ui, align 4
   %conv = zext i32 %0 to i64
   ret i64 %conv
 }
 
@@ -110,7 +110,7 @@ entry:
 ; CHECK-N32: sfunc1
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(c)
 ; CHECK-N32: sb ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i64* @l1, align 8
+  %0 = load i64, i64* @l1, align 8
   %conv = trunc i64 %0 to i8
   store i8 %conv, i8* @c, align 4
   ret void
@@ -124,7 +124,7 @@ entry:
 ; CHECK-N32: sfunc2
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(s)
 ; CHECK-N32: sh ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i64* @l1, align 8
+  %0 = load i64, i64* @l1, align 8
   %conv = trunc i64 %0 to i16
   store i16 %conv, i16* @s, align 4
   ret void
@@ -138,7 +138,7 @@ entry:
 ; CHECK-N32: sfunc3
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(i)
 ; CHECK-N32: sw ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i64* @l1, align 8
+  %0 = load i64, i64* @l1, align 8
   %conv = trunc i64 %0 to i32
   store i32 %conv, i32* @i, align 4
   ret void
@@ -152,7 +152,7 @@ entry:
 ; CHECK-N32: sfunc4
 ; CHECK-N32: lw $[[R0:[0-9]+]], %got_disp(l)
 ; CHECK-N32: sd ${{[0-9]+}}, 0($[[R0]])
-  %0 = load i64* @l1, align 8
+  %0 = load i64, i64* @l1, align 8
   store i64 %0, i64* @l, align 8
   ret void
 }
diff --git a/llvm/test/CodeGen/Mips/mips64sinttofpsf.ll b/llvm/test/CodeGen/Mips/mips64sinttofpsf.ll
index d3d46036f7d..7bd75bbe15b 100644
--- a/llvm/test/CodeGen/Mips/mips64sinttofpsf.ll
+++ b/llvm/test/CodeGen/Mips/mips64sinttofpsf.ll
@@ -5,7 +5,7 @@ define double @foo() #0 {
 entry:
   %x = alloca i32, align 4
   store volatile i32 -32, i32* %x, align 4
-  %0 = load volatile i32* %x, align 4
+  %0 = load volatile i32, i32* %x, align 4
   %conv = sitofp i32 %0 to double
   ret double %conv
 
diff --git a/llvm/test/CodeGen/Mips/mipslopat.ll b/llvm/test/CodeGen/Mips/mipslopat.ll
index 1f433b9870c..63b68c1762b 100644
--- a/llvm/test/CodeGen/Mips/mipslopat.ll
+++ b/llvm/test/CodeGen/Mips/mipslopat.ll
@@ -6,10 +6,10 @@
 define void @simple_vol_file() nounwind {
 entry:
-  %tmp = load volatile i32** @stat_vol_ptr_int, align 4
+  %tmp = load volatile i32*, i32** @stat_vol_ptr_int, align 4
   %0 = bitcast i32* %tmp to i8*
   call void @llvm.prefetch(i8* %0, i32 0, i32 0, i32 1)
-  %tmp1 = load i32** @stat_ptr_vol_int, align 4
+  %tmp1 = load i32*, i32** @stat_ptr_vol_int, align 4
   %1 = bitcast i32* %tmp1 to i8*
   call void @llvm.prefetch(i8* %1, i32 0, i32 0, i32 1)
   ret void
diff --git a/llvm/test/CodeGen/Mips/misha.ll b/llvm/test/CodeGen/Mips/misha.ll
index 3000b5c6a79..23ad7f6057a 100644
--- a/llvm/test/CodeGen/Mips/misha.ll
+++ b/llvm/test/CodeGen/Mips/misha.ll
@@ -8,7 +8,7 @@ entry:
   br i1 %cmp8, label %for.end, label %for.body.lr.ph
 
 for.body.lr.ph: ; preds = %entry
-  %.pre = load i8* %to, align 1
+  %.pre = load i8, i8* %to, align 1
   br label %for.body
 
 for.body: ; preds = %for.body.lr.ph, %for.body
@@ -16,7 +16,7 @@ for.body: ; preds = %for.body.lr.ph, %fo
   %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
   %from.addr.09 = phi i8* [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
   %incdec.ptr = getelementptr inbounds i8, i8* %from.addr.09, i32 1
-  %2 = load i8* %from.addr.09, align 1
+  %2 = load i8, i8* %from.addr.09, align 1
   %conv27 = zext i8 %2 to i32
   %conv36 = zext i8 %1 to i32
   %add = add nsw i32 %conv36, %conv27
@@ -44,7 +44,7 @@ entry:
   br i1 %cmp8, label %for.end, label %for.body.lr.ph
 
 for.body.lr.ph: ; preds = %entry
-  %.pre = load i16* %to, align 2
+  %.pre = load i16, i16* %to, align 2
   br label %for.body
 
 for.body: ; preds = %for.body.lr.ph, %for.body
@@ -52,7 +52,7 @@ for.body: ; preds = %for.body.lr.ph, %fo
   %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
   %from.addr.09 = phi i16* [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
   %incdec.ptr = getelementptr inbounds i16, i16* %from.addr.09, i32 1
-  %2 = load i16* %from.addr.09, align 2
+  %2 = load i16, i16* %from.addr.09, align 2
   %conv27 = zext i16 %2 to i32
   %conv36 = zext i16 %1 to i32
   %add = add nsw i32 %conv36, %conv27
diff --git a/llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll b/llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll
index f42850fb8ad..c7eda3320bc 100644
--- a/llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll
+++ b/llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll
@@ -111,7 +111,7 @@
 define double @test_ldc1() {
 entry:
-  %0 = load double* @g0, align 8
+  %0 = load double, double* @g0, align 8
   ret double %0
 }
 
@@ -213,7 +213,7 @@ entry:
 define double @test_ldxc1(double* nocapture readonly %a, i32 %i) {
 entry:
   %arrayidx = getelementptr inbounds double, double* %a, i32 %i
-  %0 = load double* %arrayidx, align 8
+  %0 = load double, double* %arrayidx, align 8
   ret double %0
 }
diff --git a/llvm/test/CodeGen/Mips/msa/2r.ll b/llvm/test/CodeGen/Mips/msa/2r.ll
index da35ad82cad..501936c76e7 100644
--- a/llvm/test/CodeGen/Mips/msa/2r.ll
+++ b/llvm/test/CodeGen/Mips/msa/2r.ll
@@ -8,7 +8,7 @@
 define void @llvm_mips_nloc_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_nloc_b_ARG1
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nloc_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.nloc.b(<16 x i8> %0)
   store <16 x i8> %1, <16 x i8>* @llvm_mips_nloc_b_RES
   ret void
@@ -29,7 +29,7 @@ declare <16 x i8> @llvm.mips.nloc.b(<16 x i8>) nounwind
 define void @llvm_mips_nloc_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_nloc_h_ARG1
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nloc_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.nloc.h(<8 x i16> %0)
   store <8 x i16> %1, <8 x i16>* @llvm_mips_nloc_h_RES
   ret void
@@ -50,7 +50,7 @@ declare <8 x i16> @llvm.mips.nloc.h(<8 x i16>) nounwind
 define void @llvm_mips_nloc_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_nloc_w_ARG1
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nloc_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.nloc.w(<4 x i32> %0)
   store <4 x i32> %1, <4 x i32>* @llvm_mips_nloc_w_RES
   ret void
@@ -71,7 +71,7 @@ declare <4 x i32> @llvm.mips.nloc.w(<4 x i32>) nounwind
 define void @llvm_mips_nloc_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_nloc_d_ARG1
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nloc_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.nloc.d(<2 x i64> %0)
   store <2 x i64> %1, <2 x i64>* @llvm_mips_nloc_d_RES
   ret void
@@ -92,7 +92,7 @@ declare <2 x i64> @llvm.mips.nloc.d(<2 x i64>) nounwind
 define void @llvm_mips_nlzc_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_nlzc_b_ARG1
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nlzc_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.nlzc.b(<16 x i8> %0)
   store <16 x i8> %1, <16 x i8>* @llvm_mips_nlzc_b_RES
   ret void
@@ -113,7 +113,7 @@ declare <16 x i8> @llvm.mips.nlzc.b(<16 x i8>) nounwind
 define void @llvm_mips_nlzc_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_nlzc_h_ARG1
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nlzc_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.nlzc.h(<8 x i16> %0)
   store <8 x i16> %1, <8 x i16>* @llvm_mips_nlzc_h_RES
   ret void
@@ -134,7 +134,7 @@ declare <8 x i16> @llvm.mips.nlzc.h(<8 x i16>) nounwind
 define void @llvm_mips_nlzc_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_nlzc_w_ARG1
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nlzc_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.nlzc.w(<4 x i32> %0)
   store <4 x i32> %1, <4 x i32>* @llvm_mips_nlzc_w_RES
   ret void
@@ -155,7 +155,7 @@ declare <4 x i32> @llvm.mips.nlzc.w(<4 x i32>) nounwind
 define void @llvm_mips_nlzc_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_nlzc_d_ARG1
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nlzc_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.nlzc.d(<2 x i64> %0)
   store <2 x i64> %1, <2 x i64>* @llvm_mips_nlzc_d_RES
   ret void
@@ -176,7 +176,7 @@ declare <2 x i64> @llvm.mips.nlzc.d(<2 x i64>) nounwind
 define void @llvm_mips_pcnt_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_pcnt_b_ARG1
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pcnt_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.pcnt.b(<16 x i8> %0)
   store <16 x i8> %1, <16 x i8>* @llvm_mips_pcnt_b_RES
   ret void
@@ -197,7 +197,7 @@ declare <16 x i8> @llvm.mips.pcnt.b(<16 x i8>) nounwind
 define void @llvm_mips_pcnt_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_pcnt_h_ARG1
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pcnt_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.pcnt.h(<8 x i16> %0)
   store <8 x i16> %1, <8 x i16>* @llvm_mips_pcnt_h_RES
   ret void
@@ -218,7 +218,7 @@ declare <8 x i16> @llvm.mips.pcnt.h(<8 x i16>) nounwind
 define void @llvm_mips_pcnt_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_pcnt_w_ARG1
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pcnt_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.pcnt.w(<4 x i32> %0)
   store <4 x i32> %1, <4 x i32>* @llvm_mips_pcnt_w_RES
   ret void
@@ -239,7 +239,7 @@ declare <4 x i32> @llvm.mips.pcnt.w(<4 x i32>) nounwind
 define void @llvm_mips_pcnt_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_pcnt_d_ARG1
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pcnt_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.pcnt.d(<2 x i64> %0)
   store <2 x i64> %1, <2 x i64>* @llvm_mips_pcnt_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/2r_vector_scalar.ll b/llvm/test/CodeGen/Mips/msa/2r_vector_scalar.ll
index 64e459e4d9a..ddcd3cf757d 100644
--- a/llvm/test/CodeGen/Mips/msa/2r_vector_scalar.ll
+++ b/llvm/test/CodeGen/Mips/msa/2r_vector_scalar.ll
@@ -15,7 +15,7 @@
 define void @llvm_mips_fill_b_test() nounwind {
 entry:
-  %0 = load i32* @llvm_mips_fill_b_ARG1
+  %0 = load i32, i32* @llvm_mips_fill_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.fill.b(i32 %0)
   store <16 x i8> %1, <16 x i8>* @llvm_mips_fill_b_RES
   ret void
@@ -35,7 +35,7 @@ declare <16 x i8> @llvm.mips.fill.b(i32) nounwind
 define void @llvm_mips_fill_h_test() nounwind {
 entry:
-  %0 = load i32* @llvm_mips_fill_h_ARG1
+  %0 = load i32, i32* @llvm_mips_fill_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.fill.h(i32 %0)
   store <8 x i16> %1, <8 x i16>* @llvm_mips_fill_h_RES
   ret void
@@ -55,7 +55,7 @@ declare <8 x i16> @llvm.mips.fill.h(i32) nounwind
 define void @llvm_mips_fill_w_test() nounwind {
 entry:
-  %0 = load i32* @llvm_mips_fill_w_ARG1
+  %0 = load i32, i32* @llvm_mips_fill_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.fill.w(i32 %0)
   store <4 x i32> %1, <4 x i32>* @llvm_mips_fill_w_RES
   ret void
@@ -75,7 +75,7 @@ declare <4 x i32> @llvm.mips.fill.w(i32) nounwind
 define void @llvm_mips_fill_d_test() nounwind {
 entry:
-  %0 = load i64* @llvm_mips_fill_d_ARG1
+  %0 = load i64, i64* @llvm_mips_fill_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.fill.d(i64 %0)
   store <2 x i64> %1, <2 x i64>* @llvm_mips_fill_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/2rf.ll b/llvm/test/CodeGen/Mips/msa/2rf.ll
index b361ef5eae2..1dbfbda1b61 100644
--- a/llvm/test/CodeGen/Mips/msa/2rf.ll
+++ b/llvm/test/CodeGen/Mips/msa/2rf.ll
@@ -8,7 +8,7 @@
 define void @llvm_mips_flog2_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_flog2_w_ARG1
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_flog2_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.flog2.w(<4 x float> %0)
   store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES
   ret void
@@ -29,7 +29,7 @@ declare <4 x float> @llvm.mips.flog2.w(<4 x float>) nounwind
 define void @llvm_mips_flog2_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_flog2_d_ARG1
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_flog2_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.flog2.d(<2 x double> %0)
   store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES
   ret void
@@ -47,7 +47,7 @@ declare <2 x double> @llvm.mips.flog2.d(<2 x double>) nounwind
 define void @flog2_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_flog2_w_ARG1
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_flog2_w_ARG1
   %1 = tail call <4 x float> @llvm.log2.v4f32(<4 x float> %0)
   store <4 x float> %1, <4 x float>* @llvm_mips_flog2_w_RES
   ret void
@@ -65,7 +65,7 @@ declare <4 x float> @llvm.log2.v4f32(<4 x float> %val)
 define void @flog2_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_flog2_d_ARG1
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_flog2_d_ARG1
   %1 = tail call <2 x double> @llvm.log2.v2f64(<2 x double> %0)
   store <2 x double> %1, <2 x double>* @llvm_mips_flog2_d_RES
   ret void
@@ -86,7 +86,7 @@ declare <2 x double> @llvm.log2.v2f64(<2 x double> %val)
 define void @llvm_mips_frint_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_frint_w_ARG1
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_frint_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.frint.w(<4 x float> %0)
   store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES
   ret void
@@ -107,7 +107,7 @@ declare <4 x float> @llvm.mips.frint.w(<4 x float>) nounwind
 define void @llvm_mips_frint_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_frint_d_ARG1
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_frint_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.frint.d(<2 x double> %0)
   store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES
   ret void
@@ -125,7 +125,7 @@ declare <2 x double> @llvm.mips.frint.d(<2 x double>) nounwind
 define void @frint_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_frint_w_ARG1
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_frint_w_ARG1
   %1 = tail call <4 x float> @llvm.rint.v4f32(<4 x float> %0)
   store <4 x float> %1, <4 x float>* @llvm_mips_frint_w_RES
   ret void
@@ -143,7 +143,7 @@ declare <4 x float> @llvm.rint.v4f32(<4 x float>) nounwind
 define void @frint_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_frint_d_ARG1
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_frint_d_ARG1
   %1 = tail call <2 x double> @llvm.rint.v2f64(<2 x double> %0)
   store <2 x double> %1, <2 x double>* @llvm_mips_frint_d_RES
   ret void
@@ -164,7 +164,7 @@ declare <2 x double> @llvm.rint.v2f64(<2 x double>) nounwind
 define void @llvm_mips_frcp_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_frcp_w_ARG1
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_frcp_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.frcp.w(<4 x float> %0)
   store <4 x float> %1, <4 x float>* @llvm_mips_frcp_w_RES
   ret void
@@ -185,7 +185,7 @@ declare <4 x float> @llvm.mips.frcp.w(<4 x float>) nounwind
 define void @llvm_mips_frcp_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_frcp_d_ARG1
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_frcp_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.frcp.d(<2 x double> %0)
   store <2 x double> %1, <2 x double>* @llvm_mips_frcp_d_RES
   ret void
@@ -206,7 +206,7 @@ declare <2 x double> @llvm.mips.frcp.d(<2 x double>) nounwind
 define void @llvm_mips_frsqrt_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_frsqrt_w_ARG1
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_frsqrt_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.frsqrt.w(<4 x float> %0)
   store <4 x float> %1, <4 x float>* @llvm_mips_frsqrt_w_RES
   ret void
@@ -227,7 +227,7 @@ declare <4 x float> @llvm.mips.frsqrt.w(<4 x float>) nounwind
 define void @llvm_mips_frsqrt_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_frsqrt_d_ARG1
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_frsqrt_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.frsqrt.d(<2 x double> %0)
   store <2 x double> %1, <2 x double>* @llvm_mips_frsqrt_d_RES
   ret void
@@ -248,7 +248,7 @@ declare <2 x double> @llvm.mips.frsqrt.d(<2 x double>) nounwind
 define void @llvm_mips_fsqrt_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fsqrt_w_ARG1
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsqrt_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.fsqrt.w(<4 x float> %0)
   store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES
   ret void
@@ -269,7 +269,7 @@ declare <4 x float> @llvm.mips.fsqrt.w(<4 x float>) nounwind
 define void @llvm_mips_fsqrt_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fsqrt_d_ARG1
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsqrt_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.fsqrt.d(<2 x double> %0)
   store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES
   ret void
@@ -287,7 +287,7 @@ declare <2 x double> @llvm.mips.fsqrt.d(<2 x double>) nounwind
 define void @fsqrt_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fsqrt_w_ARG1
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsqrt_w_ARG1
   %1 = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %0)
   store <4 x float> %1, <4 x float>* @llvm_mips_fsqrt_w_RES
   ret void
@@ -305,7 +305,7 @@ declare <4 x float> @llvm.sqrt.v4f32(<4 x float>) nounwind
 define void @fsqrt_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fsqrt_d_ARG1
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsqrt_d_ARG1
   %1 = tail call <2 x double> @llvm.sqrt.v2f64(<2 x double> %0)
   store <2 x double> %1, <2 x double>* @llvm_mips_fsqrt_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/2rf_exup.ll b/llvm/test/CodeGen/Mips/msa/2rf_exup.ll
index 8d7cc367040..fd81ff6d112 100644
--- a/llvm/test/CodeGen/Mips/msa/2rf_exup.ll
+++ b/llvm/test/CodeGen/Mips/msa/2rf_exup.ll
@@ -9,7 +9,7 @@
 define void @llvm_mips_fexupl_w_test() nounwind {
 entry:
-  %0 = load <8 x half>* @llvm_mips_fexupl_w_ARG1
+  %0 = load <8 x half>, <8 x half>* @llvm_mips_fexupl_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.fexupl.w(<8 x half> %0)
   store <4 x float> %1, <4 x float>* @llvm_mips_fexupl_w_RES
   ret void
@@ -28,7 +28,7 @@ declare <4 x float> @llvm.mips.fexupl.w(<8 x half>) nounwind
 define void @llvm_mips_fexupl_d_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fexupl_d_ARG1
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fexupl_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.fexupl.d(<4 x float> %0)
   store <2 x double> %1, <2 x double>* @llvm_mips_fexupl_d_RES
   ret void
@@ -47,7 +47,7 @@ declare <2 x double> @llvm.mips.fexupl.d(<4 x float>) nounwind
 define void @llvm_mips_fexupr_w_test() nounwind {
 entry:
-  %0 = load <8 x half>* @llvm_mips_fexupr_w_ARG1
+  %0 = load <8 x half>, <8 x half>* @llvm_mips_fexupr_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.fexupr.w(<8 x half> %0)
   store <4 x float> %1, <4 x float>* @llvm_mips_fexupr_w_RES
   ret void
@@ -66,7 +66,7 @@ declare <4 x float> @llvm.mips.fexupr.w(<8 x half>) nounwind
 define void @llvm_mips_fexupr_d_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fexupr_d_ARG1
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fexupr_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.fexupr.d(<4 x float> %0)
   store <2 x double> %1, <2 x double>* @llvm_mips_fexupr_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/2rf_float_int.ll b/llvm/test/CodeGen/Mips/msa/2rf_float_int.ll
index 3b5dfda2d1e..369015814b0 100644
--- a/llvm/test/CodeGen/Mips/msa/2rf_float_int.ll
+++ b/llvm/test/CodeGen/Mips/msa/2rf_float_int.ll
@@ -9,7 +9,7 @@
 define void @llvm_mips_ffint_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_ffint_s_w_ARG1
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffint_s_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.ffint.s.w(<4 x i32> %0)
   store <4 x float> %1, <4 x float>* @llvm_mips_ffint_s_w_RES
   ret void
@@ -30,7 +30,7 @@ declare <4 x float> @llvm.mips.ffint.s.w(<4 x i32>) nounwind
 define void @llvm_mips_ffint_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_ffint_s_d_ARG1
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ffint_s_d_ARG1
   %1 = tail call <2 x double> @llvm.mips.ffint.s.d(<2 x i64> %0)
   store <2 x double> %1, <2 x double>* @llvm_mips_ffint_s_d_RES
   ret void
@@ -51,7 +51,7 @@ declare <2 x double> @llvm.mips.ffint.s.d(<2 x i64>) nounwind
 define void @llvm_mips_ffint_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_ffint_u_w_ARG1
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffint_u_w_ARG1
   %1 = tail call <4 x float> @llvm.mips.ffint.u.w(<4 x i32> %0)
   store <4 x float> %1, <4 x float>* @llvm_mips_ffint_u_w_RES
   ret void
@@ -72,7 +72,7 @@ declare <4 x float>
@llvm.mips.ffint.u.w(<4 x i32>) nounwind define void @llvm_mips_ffint_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_ffint_u_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ffint_u_d_ARG1 %1 = tail call <2 x double> @llvm.mips.ffint.u.d(<2 x i64> %0) store <2 x double> %1, <2 x double>* @llvm_mips_ffint_u_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/2rf_fq.ll b/llvm/test/CodeGen/Mips/msa/2rf_fq.ll index 021dd937fad..05c649ee918 100644 --- a/llvm/test/CodeGen/Mips/msa/2rf_fq.ll +++ b/llvm/test/CodeGen/Mips/msa/2rf_fq.ll @@ -9,7 +9,7 @@ define void @llvm_mips_ffql_w_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_ffql_w_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ffql_w_ARG1 %1 = tail call <4 x float> @llvm.mips.ffql.w(<8 x i16> %0) store <4 x float> %1, <4 x float>* @llvm_mips_ffql_w_RES ret void @@ -28,7 +28,7 @@ declare <4 x float> @llvm.mips.ffql.w(<8 x i16>) nounwind define void @llvm_mips_ffql_d_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_ffql_d_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffql_d_ARG1 %1 = tail call <2 x double> @llvm.mips.ffql.d(<4 x i32> %0) store <2 x double> %1, <2 x double>* @llvm_mips_ffql_d_RES ret void @@ -47,7 +47,7 @@ declare <2 x double> @llvm.mips.ffql.d(<4 x i32>) nounwind define void @llvm_mips_ffqr_w_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_ffqr_w_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ffqr_w_ARG1 %1 = tail call <4 x float> @llvm.mips.ffqr.w(<8 x i16> %0) store <4 x float> %1, <4 x float>* @llvm_mips_ffqr_w_RES ret void @@ -66,7 +66,7 @@ declare <4 x float> @llvm.mips.ffqr.w(<8 x i16>) nounwind define void @llvm_mips_ffqr_d_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_ffqr_d_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ffqr_d_ARG1 %1 = tail call <2 x double> @llvm.mips.ffqr.d(<4 x i32> %0) store <2 x double> %1, <2 x double>* @llvm_mips_ffqr_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/2rf_int_float.ll b/llvm/test/CodeGen/Mips/msa/2rf_int_float.ll index 4665ae066a4..77d1404f9cf 100644 --- a/llvm/test/CodeGen/Mips/msa/2rf_int_float.ll +++ b/llvm/test/CodeGen/Mips/msa/2rf_int_float.ll @@ -10,7 +10,7 @@ define void @llvm_mips_fclass_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fclass_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fclass_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.fclass.w(<4 x float> %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_fclass_w_RES ret void @@ -31,7 +31,7 @@ declare <4 x i32> @llvm.mips.fclass.w(<4 x float>) nounwind define void @llvm_mips_fclass_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fclass_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fclass_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.fclass.d(<2 x double> %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_fclass_d_RES ret void @@ -52,7 +52,7 @@ declare <2 x i64> @llvm.mips.fclass.d(<2 x double>) nounwind define void @llvm_mips_ftrunc_s_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_ftrunc_s_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_ftrunc_s_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float> %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_s_w_RES ret void @@ -73,7 +73,7 @@ declare <4 x i32> @llvm.mips.ftrunc.s.w(<4 x float>) nounwind define void @llvm_mips_ftrunc_s_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_ftrunc_s_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_ftrunc_s_d_ARG1 %1 = tail call <2 x i64> 
@llvm.mips.ftrunc.s.d(<2 x double> %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_s_d_RES ret void @@ -94,7 +94,7 @@ declare <2 x i64> @llvm.mips.ftrunc.s.d(<2 x double>) nounwind define void @llvm_mips_ftrunc_u_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_ftrunc_u_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_ftrunc_u_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float> %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_ftrunc_u_w_RES ret void @@ -115,7 +115,7 @@ declare <4 x i32> @llvm.mips.ftrunc.u.w(<4 x float>) nounwind define void @llvm_mips_ftrunc_u_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_ftrunc_u_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_ftrunc_u_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double> %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_ftrunc_u_d_RES ret void @@ -136,7 +136,7 @@ declare <2 x i64> @llvm.mips.ftrunc.u.d(<2 x double>) nounwind define void @llvm_mips_ftint_s_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_ftint_s_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_ftint_s_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.ftint.s.w(<4 x float> %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_s_w_RES ret void @@ -157,7 +157,7 @@ declare <4 x i32> @llvm.mips.ftint.s.w(<4 x float>) nounwind define void @llvm_mips_ftint_s_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_ftint_s_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_ftint_s_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.ftint.s.d(<2 x double> %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_s_d_RES ret void @@ -178,7 +178,7 @@ declare <2 x i64> @llvm.mips.ftint.s.d(<2 x double>) nounwind define void @llvm_mips_ftint_u_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_ftint_u_w_ARG1 + %0 = load <4 x float>, <4 x float>* @llvm_mips_ftint_u_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.ftint.u.w(<4 x float> %0) store <4 x i32> %1, <4 x i32>* @llvm_mips_ftint_u_w_RES ret void @@ -199,7 +199,7 @@ declare <4 x i32> @llvm.mips.ftint.u.w(<4 x float>) nounwind define void @llvm_mips_ftint_u_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_ftint_u_d_ARG1 + %0 = load <2 x double>, <2 x double>* @llvm_mips_ftint_u_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.ftint.u.d(<2 x double> %0) store <2 x i64> %1, <2 x i64>* @llvm_mips_ftint_u_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/2rf_tq.ll b/llvm/test/CodeGen/Mips/msa/2rf_tq.ll index 6f3c508f5b8..9b7f02a5ef3 100644 --- a/llvm/test/CodeGen/Mips/msa/2rf_tq.ll +++ b/llvm/test/CodeGen/Mips/msa/2rf_tq.ll @@ -10,8 +10,8 @@ define void @llvm_mips_ftq_h_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_ftq_h_ARG1 - %1 = load <4 x float>* @llvm_mips_ftq_h_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_ftq_h_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_ftq_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.ftq.h(<4 x float> %0, <4 x float> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_ftq_h_RES ret void @@ -32,8 +32,8 @@ declare <8 x i16> @llvm.mips.ftq.h(<4 x float>, <4 x float>) nounwind define void @llvm_mips_ftq_w_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_ftq_w_ARG1 - %1 = load <2 x double>* @llvm_mips_ftq_w_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_ftq_w_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_ftq_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.ftq.w(<2 x double> %0, <2 x double> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_ftq_w_RES ret void diff 
--git a/llvm/test/CodeGen/Mips/msa/3r-a.ll b/llvm/test/CodeGen/Mips/msa/3r-a.ll index dab15b66b7c..db772f91861 100644 --- a/llvm/test/CodeGen/Mips/msa/3r-a.ll +++ b/llvm/test/CodeGen/Mips/msa/3r-a.ll @@ -15,8 +15,8 @@ define void @llvm_mips_add_a_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_add_a_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_add_a_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_add_a_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_add_a_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.add.a.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_add_a_b_RES ret void @@ -40,8 +40,8 @@ declare <16 x i8> @llvm.mips.add.a.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_add_a_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_add_a_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_add_a_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_add_a_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_add_a_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.add.a.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_add_a_h_RES ret void @@ -65,8 +65,8 @@ declare <8 x i16> @llvm.mips.add.a.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_add_a_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_add_a_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_add_a_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_add_a_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_add_a_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.add.a.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_add_a_w_RES ret void @@ -90,8 +90,8 @@ declare <4 x i32> @llvm.mips.add.a.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_add_a_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_add_a_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_add_a_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_add_a_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_add_a_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.add.a.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_add_a_d_RES ret void @@ -115,8 +115,8 @@ declare <2 x i64> @llvm.mips.add.a.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_adds_a_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_adds_a_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_adds_a_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_a_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_a_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.adds.a.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_a_b_RES ret void @@ -140,8 +140,8 @@ declare <16 x i8> @llvm.mips.adds.a.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_adds_a_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_adds_a_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_adds_a_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_a_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_a_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.adds.a.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_a_h_RES ret void @@ -165,8 +165,8 @@ declare <8 x i16> @llvm.mips.adds.a.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_adds_a_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_adds_a_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_adds_a_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_a_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_a_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.adds.a.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* 
@llvm_mips_adds_a_w_RES ret void @@ -190,8 +190,8 @@ declare <4 x i32> @llvm.mips.adds.a.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_adds_a_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_adds_a_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_adds_a_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_a_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_a_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.adds.a.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_a_d_RES ret void @@ -215,8 +215,8 @@ declare <2 x i64> @llvm.mips.adds.a.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_adds_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_adds_s_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_adds_s_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_s_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_s_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.adds.s.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_s_b_RES ret void @@ -240,8 +240,8 @@ declare <16 x i8> @llvm.mips.adds.s.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_adds_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_adds_s_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_adds_s_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_s_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_s_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.adds.s.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_s_h_RES ret void @@ -265,8 +265,8 @@ declare <8 x i16> @llvm.mips.adds.s.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_adds_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_adds_s_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_adds_s_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_s_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_s_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.adds.s.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_s_w_RES ret void @@ -290,8 +290,8 @@ declare <4 x i32> @llvm.mips.adds.s.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_adds_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_adds_s_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_adds_s_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_s_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_s_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.adds.s.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_s_d_RES ret void @@ -315,8 +315,8 @@ declare <2 x i64> @llvm.mips.adds.s.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_adds_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_adds_u_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_adds_u_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_u_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_adds_u_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.adds.u.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_adds_u_b_RES ret void @@ -340,8 +340,8 @@ declare <16 x i8> @llvm.mips.adds.u.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_adds_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_adds_u_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_adds_u_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_u_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_adds_u_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.adds.u.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_adds_u_h_RES ret void @@ -365,8 +365,8 @@ declare <8 x i16> 
@llvm.mips.adds.u.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_adds_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_adds_u_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_adds_u_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_u_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_adds_u_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.adds.u.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_adds_u_w_RES ret void @@ -390,8 +390,8 @@ declare <4 x i32> @llvm.mips.adds.u.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_adds_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_adds_u_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_adds_u_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_u_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_adds_u_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.adds.u.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_adds_u_d_RES ret void @@ -415,8 +415,8 @@ declare <2 x i64> @llvm.mips.adds.u.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_addv_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES ret void @@ -440,8 +440,8 @@ declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_addv_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES ret void @@ -465,8 +465,8 @@ declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_addv_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES ret void @@ -490,8 +490,8 @@ declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_addv_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES ret void @@ -512,8 +512,8 @@ declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>) nounwind define void @addv_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_addv_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_addv_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_addv_b_ARG2 %2 = add <16 x i8> %0, %1 store <16 x i8> %2, <16 x i8>* @llvm_mips_addv_b_RES ret void @@ -532,8 +532,8 @@ entry: define void @addv_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_addv_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_addv_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addv_h_ARG1 + %1 = load <8 x i16>, <8 
x i16>* @llvm_mips_addv_h_ARG2 %2 = add <8 x i16> %0, %1 store <8 x i16> %2, <8 x i16>* @llvm_mips_addv_h_RES ret void @@ -552,8 +552,8 @@ entry: define void @addv_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_addv_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_addv_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_addv_w_ARG2 %2 = add <4 x i32> %0, %1 store <4 x i32> %2, <4 x i32>* @llvm_mips_addv_w_RES ret void @@ -572,8 +572,8 @@ entry: define void @addv_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_addv_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_addv_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_addv_d_ARG2 %2 = add <2 x i64> %0, %1 store <2 x i64> %2, <2 x i64>* @llvm_mips_addv_d_RES ret void @@ -595,8 +595,8 @@ entry: define void @llvm_mips_asub_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_asub_s_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_asub_s_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_s_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_s_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.asub.s.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_s_b_RES ret void @@ -620,8 +620,8 @@ declare <16 x i8> @llvm.mips.asub.s.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_asub_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_asub_s_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_asub_s_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_s_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_s_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.asub.s.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_s_h_RES ret void @@ -645,8 +645,8 @@ declare <8 x i16> @llvm.mips.asub.s.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_asub_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_asub_s_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_asub_s_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_s_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_s_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.asub.s.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_s_w_RES ret void @@ -670,8 +670,8 @@ declare <4 x i32> @llvm.mips.asub.s.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_asub_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_asub_s_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_asub_s_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_s_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_s_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.asub.s.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_s_d_RES ret void @@ -695,8 +695,8 @@ declare <2 x i64> @llvm.mips.asub.s.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_asub_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_asub_u_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_asub_u_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_u_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_asub_u_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.asub.u.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_asub_u_b_RES ret void @@ -720,8 +720,8 @@ declare <16 x i8> @llvm.mips.asub.u.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_asub_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_asub_u_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_asub_u_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* 
@llvm_mips_asub_u_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_asub_u_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.asub.u.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_asub_u_h_RES ret void @@ -745,8 +745,8 @@ declare <8 x i16> @llvm.mips.asub.u.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_asub_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_asub_u_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_asub_u_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_u_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_asub_u_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.asub.u.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_asub_u_w_RES ret void @@ -770,8 +770,8 @@ declare <4 x i32> @llvm.mips.asub.u.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_asub_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_asub_u_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_asub_u_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_u_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_asub_u_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.asub.u.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_asub_u_d_RES ret void @@ -795,8 +795,8 @@ declare <2 x i64> @llvm.mips.asub.u.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_ave_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_ave_s_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_ave_s_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_s_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_s_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.ave.s.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_s_b_RES ret void @@ -820,8 +820,8 @@ declare <16 x i8> @llvm.mips.ave.s.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_ave_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_ave_s_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_ave_s_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_s_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_s_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.ave.s.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_s_h_RES ret void @@ -845,8 +845,8 @@ declare <8 x i16> @llvm.mips.ave.s.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_ave_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_ave_s_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_ave_s_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_s_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_s_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.ave.s.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_s_w_RES ret void @@ -870,8 +870,8 @@ declare <4 x i32> @llvm.mips.ave.s.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_ave_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_ave_s_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_ave_s_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_s_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_s_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.ave.s.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_s_d_RES ret void @@ -895,8 +895,8 @@ declare <2 x i64> @llvm.mips.ave.s.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_ave_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_ave_u_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_ave_u_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_u_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ave_u_b_ARG2 %2 = tail call <16 x i8> 
@llvm.mips.ave.u.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_ave_u_b_RES ret void @@ -920,8 +920,8 @@ declare <16 x i8> @llvm.mips.ave.u.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_ave_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_ave_u_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_ave_u_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_u_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ave_u_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.ave.u.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_ave_u_h_RES ret void @@ -945,8 +945,8 @@ declare <8 x i16> @llvm.mips.ave.u.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_ave_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_ave_u_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_ave_u_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_u_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ave_u_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.ave.u.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_ave_u_w_RES ret void @@ -970,8 +970,8 @@ declare <4 x i32> @llvm.mips.ave.u.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_ave_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_ave_u_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_ave_u_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_u_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ave_u_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.ave.u.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_ave_u_d_RES ret void @@ -995,8 +995,8 @@ declare <2 x i64> @llvm.mips.ave.u.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_aver_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_aver_s_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_aver_s_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_s_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_s_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.aver.s.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_s_b_RES ret void @@ -1020,8 +1020,8 @@ declare <16 x i8> @llvm.mips.aver.s.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_aver_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_aver_s_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_aver_s_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_s_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_s_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.aver.s.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_s_h_RES ret void @@ -1045,8 +1045,8 @@ declare <8 x i16> @llvm.mips.aver.s.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_aver_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_aver_s_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_aver_s_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_s_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_s_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.aver.s.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_s_w_RES ret void @@ -1070,8 +1070,8 @@ declare <4 x i32> @llvm.mips.aver.s.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_aver_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_aver_s_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_aver_s_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_s_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_s_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.aver.s.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* 
@llvm_mips_aver_s_d_RES ret void @@ -1095,8 +1095,8 @@ declare <2 x i64> @llvm.mips.aver.s.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_aver_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_aver_u_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_aver_u_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_u_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_aver_u_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.aver.u.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_aver_u_b_RES ret void @@ -1120,8 +1120,8 @@ declare <16 x i8> @llvm.mips.aver.u.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_aver_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_aver_u_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_aver_u_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_u_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_aver_u_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.aver.u.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_aver_u_h_RES ret void @@ -1145,8 +1145,8 @@ declare <8 x i16> @llvm.mips.aver.u.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_aver_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_aver_u_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_aver_u_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_u_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_aver_u_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.aver.u.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_aver_u_w_RES ret void @@ -1170,8 +1170,8 @@ declare <4 x i32> @llvm.mips.aver.u.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_aver_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_aver_u_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_aver_u_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_u_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_aver_u_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.aver.u.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_aver_u_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/3r-b.ll b/llvm/test/CodeGen/Mips/msa/3r-b.ll index a05d19b4d49..2ecdc429006 100644 --- a/llvm/test/CodeGen/Mips/msa/3r-b.ll +++ b/llvm/test/CodeGen/Mips/msa/3r-b.ll @@ -10,8 +10,8 @@ define void @llvm_mips_bclr_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bclr_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_bclr_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bclr_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.bclr.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_bclr_b_RES ret void @@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.bclr.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_bclr_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bclr_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_bclr_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bclr_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.bclr.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_bclr_h_RES ret void @@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.bclr.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_bclr_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bclr_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_bclr_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bclr_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.bclr.w(<4 x i32> %0, <4 x i32> %1) store <4 x 
i32> %2, <4 x i32>* @llvm_mips_bclr_w_RES ret void @@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.bclr.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_bclr_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_bclr_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_bclr_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bclr_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.bclr.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_bclr_d_RES ret void @@ -99,9 +99,9 @@ declare <2 x i64> @llvm.mips.bclr.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_binsl_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_binsl_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_binsl_b_ARG2 - %2 = load <16 x i8>* @llvm_mips_binsl_b_ARG3 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG2 + %2 = load <16 x i8>, <16 x i8>* @llvm_mips_binsl_b_ARG3 %3 = tail call <16 x i8> @llvm.mips.binsl.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2) store <16 x i8> %3, <16 x i8>* @llvm_mips_binsl_b_RES ret void @@ -127,9 +127,9 @@ declare <16 x i8> @llvm.mips.binsl.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind define void @llvm_mips_binsl_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_binsl_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_binsl_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_binsl_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_binsl_h_ARG3 %3 = tail call <8 x i16> @llvm.mips.binsl.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) store <8 x i16> %3, <8 x i16>* @llvm_mips_binsl_h_RES ret void @@ -155,9 +155,9 @@ declare <8 x i16> @llvm.mips.binsl.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind define void @llvm_mips_binsl_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_binsl_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_binsl_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_binsl_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_binsl_w_ARG3 %3 = tail call <4 x i32> @llvm.mips.binsl.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2) store <4 x i32> %3, <4 x i32>* @llvm_mips_binsl_w_RES ret void @@ -183,9 +183,9 @@ declare <4 x i32> @llvm.mips.binsl.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind define void @llvm_mips_binsl_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_binsl_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_binsl_d_ARG2 - %2 = load <2 x i64>* @llvm_mips_binsl_d_ARG3 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG2 + %2 = load <2 x i64>, <2 x i64>* @llvm_mips_binsl_d_ARG3 %3 = tail call <2 x i64> @llvm.mips.binsl.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2) store <2 x i64> %3, <2 x i64>* @llvm_mips_binsl_d_RES ret void @@ -211,9 +211,9 @@ declare <2 x i64> @llvm.mips.binsl.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind define void @llvm_mips_binsr_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_binsr_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_binsr_b_ARG2 - %2 = load <16 x i8>* @llvm_mips_binsr_b_ARG3 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG2 + %2 = load <16 x i8>, <16 x i8>* @llvm_mips_binsr_b_ARG3 %3 = tail call <16 x i8> @llvm.mips.binsr.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2) store 
<16 x i8> %3, <16 x i8>* @llvm_mips_binsr_b_RES ret void @@ -239,9 +239,9 @@ declare <16 x i8> @llvm.mips.binsr.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind define void @llvm_mips_binsr_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_binsr_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_binsr_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_binsr_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_binsr_h_ARG3 %3 = tail call <8 x i16> @llvm.mips.binsr.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) store <8 x i16> %3, <8 x i16>* @llvm_mips_binsr_h_RES ret void @@ -267,9 +267,9 @@ declare <8 x i16> @llvm.mips.binsr.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind define void @llvm_mips_binsr_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_binsr_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_binsr_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_binsr_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_binsr_w_ARG3 %3 = tail call <4 x i32> @llvm.mips.binsr.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2) store <4 x i32> %3, <4 x i32>* @llvm_mips_binsr_w_RES ret void @@ -295,9 +295,9 @@ declare <4 x i32> @llvm.mips.binsr.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind define void @llvm_mips_binsr_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_binsr_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_binsr_d_ARG2 - %2 = load <2 x i64>* @llvm_mips_binsr_d_ARG3 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG2 + %2 = load <2 x i64>, <2 x i64>* @llvm_mips_binsr_d_ARG3 %3 = tail call <2 x i64> @llvm.mips.binsr.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2) store <2 x i64> %3, <2 x i64>* @llvm_mips_binsr_d_RES ret void @@ -322,8 +322,8 @@ declare <2 x i64> @llvm.mips.binsr.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind define void @llvm_mips_bneg_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bneg_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_bneg_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bneg_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.bneg.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_bneg_b_RES ret void @@ -344,8 +344,8 @@ declare <16 x i8> @llvm.mips.bneg.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_bneg_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bneg_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_bneg_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bneg_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.bneg.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_bneg_h_RES ret void @@ -366,8 +366,8 @@ declare <8 x i16> @llvm.mips.bneg.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_bneg_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bneg_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_bneg_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bneg_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.bneg.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_bneg_w_RES ret void @@ -388,8 +388,8 @@ declare <4 x i32> @llvm.mips.bneg.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_bneg_d_test() nounwind { entry: - %0 = load <2 x i64>* 
@llvm_mips_bneg_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_bneg_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bneg_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.bneg.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_bneg_d_RES ret void @@ -410,8 +410,8 @@ declare <2 x i64> @llvm.mips.bneg.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_bset_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bset_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_bset_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bset_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.bset.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_bset_b_RES ret void @@ -432,8 +432,8 @@ declare <16 x i8> @llvm.mips.bset.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_bset_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bset_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_bset_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bset_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.bset.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_bset_h_RES ret void @@ -454,8 +454,8 @@ declare <8 x i16> @llvm.mips.bset.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_bset_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bset_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_bset_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bset_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.bset.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_bset_w_RES ret void @@ -476,8 +476,8 @@ declare <4 x i32> @llvm.mips.bset.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_bset_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_bset_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_bset_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bset_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.bset.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_bset_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/3r-c.ll b/llvm/test/CodeGen/Mips/msa/3r-c.ll index 6ec92c284fe..a3913e0a27f 100644 --- a/llvm/test/CodeGen/Mips/msa/3r-c.ll +++ b/llvm/test/CodeGen/Mips/msa/3r-c.ll @@ -10,8 +10,8 @@ define void @llvm_mips_ceq_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_ceq_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_ceq_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ceq_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ceq_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.ceq.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_ceq_b_RES ret void @@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.ceq.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_ceq_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_ceq_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_ceq_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ceq_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ceq_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.ceq.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_ceq_h_RES ret void @@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.ceq.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_ceq_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_ceq_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_ceq_w_ARG2 + %0 
= load <4 x i32>, <4 x i32>* @llvm_mips_ceq_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ceq_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.ceq.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_ceq_w_RES ret void @@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.ceq.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_ceq_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_ceq_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_ceq_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ceq_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ceq_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.ceq.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_ceq_d_RES ret void @@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.ceq.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_cle_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_cle_s_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_cle_s_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_s_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_s_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.cle.s.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_s_b_RES ret void @@ -120,8 +120,8 @@ declare <16 x i8> @llvm.mips.cle.s.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_cle_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_cle_s_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_cle_s_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_s_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_s_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.cle.s.h(<8 x i16> %0, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_s_h_RES ret void @@ -142,8 +142,8 @@ declare <8 x i16> @llvm.mips.cle.s.h(<8 x i16>, <8 x i16>) nounwind define void @llvm_mips_cle_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_cle_s_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_cle_s_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_s_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_s_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.cle.s.w(<4 x i32> %0, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_s_w_RES ret void @@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.cle.s.w(<4 x i32>, <4 x i32>) nounwind define void @llvm_mips_cle_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_cle_s_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_cle_s_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_s_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_s_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.cle.s.d(<2 x i64> %0, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_s_d_RES ret void @@ -186,8 +186,8 @@ declare <2 x i64> @llvm.mips.cle.s.d(<2 x i64>, <2 x i64>) nounwind define void @llvm_mips_cle_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_cle_u_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_cle_u_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_u_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_cle_u_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.cle.u.b(<16 x i8> %0, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_cle_u_b_RES ret void @@ -208,8 +208,8 @@ declare <16 x i8> @llvm.mips.cle.u.b(<16 x i8>, <16 x i8>) nounwind define void @llvm_mips_cle_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_cle_u_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_cle_u_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_u_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_cle_u_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.cle.u.h(<8 x 
i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_cle_u_h_RES
   ret void
@@ -230,8 +230,8 @@ declare <8 x i16> @llvm.mips.cle.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_cle_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_cle_u_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_cle_u_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_u_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_cle_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.cle.u.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_cle_u_w_RES
   ret void
@@ -252,8 +252,8 @@ declare <4 x i32> @llvm.mips.cle.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_cle_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_cle_u_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_cle_u_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_u_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_cle_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.cle.u.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_cle_u_d_RES
   ret void
@@ -274,8 +274,8 @@ declare <2 x i64> @llvm.mips.cle.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_clt_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_clt_s_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_clt_s_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_s_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.clt.s.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_s_b_RES
   ret void
@@ -296,8 +296,8 @@ declare <16 x i8> @llvm.mips.clt.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_clt_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_clt_s_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_clt_s_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_s_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.clt.s.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_s_h_RES
   ret void
@@ -318,8 +318,8 @@ declare <8 x i16> @llvm.mips.clt.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_clt_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_clt_s_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_clt_s_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_s_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.clt.s.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_s_w_RES
   ret void
@@ -340,8 +340,8 @@ declare <4 x i32> @llvm.mips.clt.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_clt_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_clt_s_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_clt_s_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_s_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.clt.s.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_s_d_RES
   ret void
@@ -362,8 +362,8 @@ declare <2 x i64> @llvm.mips.clt.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_clt_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_clt_u_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_clt_u_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_u_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_clt_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.clt.u.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_clt_u_b_RES
   ret void
@@ -384,8 +384,8 @@ declare <16 x i8> @llvm.mips.clt.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_clt_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_clt_u_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_clt_u_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_u_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_clt_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.clt.u.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_clt_u_h_RES
   ret void
@@ -406,8 +406,8 @@ declare <8 x i16> @llvm.mips.clt.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_clt_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_clt_u_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_clt_u_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_u_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_clt_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.clt.u.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_clt_u_w_RES
   ret void
@@ -428,8 +428,8 @@ declare <4 x i32> @llvm.mips.clt.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_clt_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_clt_u_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_clt_u_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_u_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_clt_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.clt.u.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_clt_u_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/3r-d.ll b/llvm/test/CodeGen/Mips/msa/3r-d.ll
index 0099554a8ee..4fc32b76a7b 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-d.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-d.ll
@@ -10,8 +10,8 @@
 
 define void @llvm_mips_div_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_div_s_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_div_s_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.div.s.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
   ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.div.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_div_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_div_s_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_div_s_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.div.s.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
   ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.div.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_div_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_div_s_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_div_s_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.div.s.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
   ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.div.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_div_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_div_s_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_div_s_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.div.s.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
   ret void
@@ -95,8 +95,8 @@ declare <2 x i64> @llvm.mips.div.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @div_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_div_s_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_div_s_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG2
   %2 = sdiv <16 x i8> %0, %1
   store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES
   ret void
@@ -111,8 +111,8 @@ entry:
 
 define void @div_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_div_s_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_div_s_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG2
   %2 = sdiv <8 x i16> %0, %1
   store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES
   ret void
@@ -127,8 +127,8 @@ entry:
 
 define void @div_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_div_s_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_div_s_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG2
   %2 = sdiv <4 x i32> %0, %1
   store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES
   ret void
@@ -143,8 +143,8 @@ entry:
 
 define void @div_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_div_s_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_div_s_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG2
   %2 = sdiv <2 x i64> %0, %1
   store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES
   ret void
@@ -163,8 +163,8 @@ entry:
 
 define void @llvm_mips_div_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_div_u_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_div_u_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.div.u.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
   ret void
@@ -185,8 +185,8 @@ declare <16 x i8> @llvm.mips.div.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_div_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_div_u_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_div_u_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.div.u.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
   ret void
@@ -207,8 +207,8 @@ declare <8 x i16> @llvm.mips.div.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_div_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_div_u_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_div_u_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.div.u.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
   ret void
@@ -229,8 +229,8 @@ declare <4 x i32> @llvm.mips.div.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_div_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_div_u_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_div_u_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.div.u.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
   ret void
@@ -248,8 +248,8 @@ declare <2 x i64> @llvm.mips.div.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @div_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_div_u_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_div_u_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG2
   %2 = udiv <16 x i8> %0, %1
   store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES
   ret void
@@ -264,8 +264,8 @@ entry:
 
 define void @div_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_div_u_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_div_u_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG2
   %2 = udiv <8 x i16> %0, %1
   store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES
   ret void
@@ -280,8 +280,8 @@ entry:
 
 define void @div_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_div_u_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_div_u_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG2
   %2 = udiv <4 x i32> %0, %1
   store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES
   ret void
@@ -296,8 +296,8 @@ entry:
 
 define void @div_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_div_u_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_div_u_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG2
   %2 = udiv <2 x i64> %0, %1
   store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES
   ret void
@@ -326,8 +326,8 @@ entry:
 
 define void @llvm_mips_dotp_s_h_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_dotp_s_h_ARG1
-  %1 = load <16 x i8>* @llvm_mips_dotp_s_h_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_s_h_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.dotp.s.h(<16 x i8> %0, <16 x i8> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_s_h_RES
   ret void
@@ -353,8 +353,8 @@ declare <8 x i16> @llvm.mips.dotp.s.h(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dotp_s_w_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_dotp_s_w_ARG1
-  %1 = load <8 x i16>* @llvm_mips_dotp_s_w_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_s_w_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.dotp.s.w(<8 x i16> %0, <8 x i16> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_s_w_RES
   ret void
@@ -377,8 +377,8 @@ declare <4 x i32> @llvm.mips.dotp.s.w(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dotp_s_d_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_dotp_s_d_ARG1
-  %1 = load <4 x i32>* @llvm_mips_dotp_s_d_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_s_d_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.dotp.s.d(<4 x i32> %0, <4 x i32> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_s_d_RES
   ret void
@@ -409,8 +409,8 @@ declare <2 x i64> @llvm.mips.dotp.s.d(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_dotp_u_h_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_dotp_u_h_ARG1
-  %1 = load <16 x i8>* @llvm_mips_dotp_u_h_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_u_h_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dotp_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.dotp.u.h(<16 x i8> %0, <16 x i8> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_dotp_u_h_RES
   ret void
@@ -436,8 +436,8 @@ declare <8 x i16> @llvm.mips.dotp.u.h(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dotp_u_w_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_dotp_u_w_ARG1
-  %1 = load <8 x i16>* @llvm_mips_dotp_u_w_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_u_w_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dotp_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.dotp.u.w(<8 x i16> %0, <8 x i16> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_dotp_u_w_RES
   ret void
@@ -460,8 +460,8 @@ declare <4 x i32> @llvm.mips.dotp.u.w(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dotp_u_d_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_dotp_u_d_ARG1
-  %1 = load <4 x i32>* @llvm_mips_dotp_u_d_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_u_d_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dotp_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.dotp.u.d(<4 x i32> %0, <4 x i32> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_dotp_u_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/3r-i.ll b/llvm/test/CodeGen/Mips/msa/3r-i.ll
index 2ef30471b02..7147b756b15 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-i.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-i.ll
@@ -10,8 +10,8 @@
 
 define void @llvm_mips_ilvev_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_ilvev_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_ilvev_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvev_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvev_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.ilvev.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvev_b_RES
   ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.ilvev.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_ilvev_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_ilvev_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_ilvev_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvev_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvev_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.ilvev.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvev_h_RES
   ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.ilvev.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_ilvev_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_ilvev_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_ilvev_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvev_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvev_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.ilvev.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvev_w_RES
   ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.ilvev.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_ilvev_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_ilvev_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_ilvev_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvev_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvev_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.ilvev.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvev_d_RES
   ret void
@@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.ilvev.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_ilvl_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_ilvl_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_ilvl_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvl_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvl_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.ilvl.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvl_b_RES
   ret void
@@ -120,8 +120,8 @@ declare <16 x i8> @llvm.mips.ilvl.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_ilvl_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_ilvl_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_ilvl_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvl_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvl_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.ilvl.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvl_h_RES
   ret void
@@ -142,8 +142,8 @@ declare <8 x i16> @llvm.mips.ilvl.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_ilvl_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_ilvl_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_ilvl_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvl_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvl_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.ilvl.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvl_w_RES
   ret void
@@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.ilvl.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_ilvl_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_ilvl_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_ilvl_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvl_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvl_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.ilvl.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvl_d_RES
   ret void
@@ -186,8 +186,8 @@ declare <2 x i64> @llvm.mips.ilvl.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_ilvod_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_ilvod_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_ilvod_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvod_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvod_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.ilvod.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvod_b_RES
   ret void
@@ -208,8 +208,8 @@ declare <16 x i8> @llvm.mips.ilvod.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_ilvod_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_ilvod_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_ilvod_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvod_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvod_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.ilvod.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvod_h_RES
   ret void
@@ -230,8 +230,8 @@ declare <8 x i16> @llvm.mips.ilvod.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_ilvod_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_ilvod_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_ilvod_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvod_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvod_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.ilvod.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvod_w_RES
   ret void
@@ -252,8 +252,8 @@ declare <4 x i32> @llvm.mips.ilvod.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_ilvod_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_ilvod_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_ilvod_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvod_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvod_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.ilvod.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvod_d_RES
   ret void
@@ -274,8 +274,8 @@ declare <2 x i64> @llvm.mips.ilvod.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_ilvr_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_ilvr_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_ilvr_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvr_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_ilvr_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.ilvr.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_ilvr_b_RES
   ret void
@@ -296,8 +296,8 @@ declare <16 x i8> @llvm.mips.ilvr.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_ilvr_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_ilvr_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_ilvr_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvr_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_ilvr_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.ilvr.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_ilvr_h_RES
   ret void
@@ -318,8 +318,8 @@ declare <8 x i16> @llvm.mips.ilvr.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_ilvr_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_ilvr_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_ilvr_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvr_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_ilvr_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.ilvr.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_ilvr_w_RES
   ret void
@@ -340,8 +340,8 @@ declare <4 x i32> @llvm.mips.ilvr.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_ilvr_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_ilvr_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_ilvr_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvr_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_ilvr_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.ilvr.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_ilvr_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/3r-m.ll b/llvm/test/CodeGen/Mips/msa/3r-m.ll
index ddfd720a2f8..39b4f7db3a4 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-m.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-m.ll
@@ -10,8 +10,8 @@
 
 define void @llvm_mips_max_a_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_max_a_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_max_a_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_a_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_a_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.max.a.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_max_a_b_RES
   ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.max.a.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_max_a_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_max_a_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_max_a_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_a_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_a_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.max.a.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_max_a_h_RES
   ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.max.a.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_max_a_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_max_a_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_max_a_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_a_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_a_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.max.a.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_max_a_w_RES
   ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.max.a.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_max_a_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_max_a_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_max_a_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_a_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_a_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.max.a.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_max_a_d_RES
   ret void
@@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.max.a.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_max_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_max_s_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_max_s_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_s_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.max.s.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_max_s_b_RES
   ret void
@@ -120,8 +120,8 @@ declare <16 x i8> @llvm.mips.max.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_max_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_max_s_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_max_s_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_s_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.max.s.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_max_s_h_RES
   ret void
@@ -142,8 +142,8 @@ declare <8 x i16> @llvm.mips.max.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_max_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_max_s_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_max_s_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_s_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.max.s.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_max_s_w_RES
   ret void
@@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.max.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_max_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_max_s_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_max_s_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_s_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.max.s.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_max_s_d_RES
   ret void
@@ -186,8 +186,8 @@ declare <2 x i64> @llvm.mips.max.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_max_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_max_u_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_max_u_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_max_u_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_max_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.max.u.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_max_u_b_RES
   ret void
@@ -208,8 +208,8 @@ declare <16 x i8> @llvm.mips.max.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_max_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_max_u_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_max_u_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_max_u_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_max_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.max.u.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_max_u_h_RES
   ret void
@@ -230,8 +230,8 @@ declare <8 x i16> @llvm.mips.max.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_max_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_max_u_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_max_u_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_max_u_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_max_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.max.u.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_max_u_w_RES
   ret void
@@ -252,8 +252,8 @@ declare <4 x i32> @llvm.mips.max.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_max_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_max_u_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_max_u_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_max_u_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_max_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.max.u.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_max_u_d_RES
   ret void
@@ -274,8 +274,8 @@ declare <2 x i64> @llvm.mips.max.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_min_a_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_min_a_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_min_a_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_a_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_a_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.min.a.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_min_a_b_RES
   ret void
@@ -296,8 +296,8 @@ declare <16 x i8> @llvm.mips.min.a.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_min_a_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_min_a_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_min_a_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_a_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_a_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.min.a.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_min_a_h_RES
   ret void
@@ -318,8 +318,8 @@ declare <8 x i16> @llvm.mips.min.a.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_min_a_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_min_a_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_min_a_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_a_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_a_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.min.a.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_min_a_w_RES
   ret void
@@ -340,8 +340,8 @@ declare <4 x i32> @llvm.mips.min.a.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_min_a_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_min_a_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_min_a_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_a_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_a_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.min.a.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_min_a_d_RES
   ret void
@@ -362,8 +362,8 @@ declare <2 x i64> @llvm.mips.min.a.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_min_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_min_s_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_min_s_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_s_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.min.s.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_min_s_b_RES
   ret void
@@ -384,8 +384,8 @@ declare <16 x i8> @llvm.mips.min.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_min_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_min_s_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_min_s_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_s_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.min.s.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_min_s_h_RES
   ret void
@@ -406,8 +406,8 @@ declare <8 x i16> @llvm.mips.min.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_min_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_min_s_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_min_s_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_s_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.min.s.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_min_s_w_RES
   ret void
@@ -428,8 +428,8 @@ declare <4 x i32> @llvm.mips.min.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_min_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_min_s_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_min_s_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_s_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.min.s.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_min_s_d_RES
   ret void
@@ -450,8 +450,8 @@ declare <2 x i64> @llvm.mips.min.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_min_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_min_u_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_min_u_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_min_u_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_min_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.min.u.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_min_u_b_RES
   ret void
@@ -472,8 +472,8 @@ declare <16 x i8> @llvm.mips.min.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_min_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_min_u_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_min_u_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_min_u_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_min_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.min.u.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_min_u_h_RES
   ret void
@@ -494,8 +494,8 @@ declare <8 x i16> @llvm.mips.min.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_min_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_min_u_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_min_u_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_min_u_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_min_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.min.u.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_min_u_w_RES
   ret void
@@ -516,8 +516,8 @@ declare <4 x i32> @llvm.mips.min.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_min_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_min_u_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_min_u_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_min_u_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_min_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.min.u.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_min_u_d_RES
   ret void
@@ -538,8 +538,8 @@ declare <2 x i64> @llvm.mips.min.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_mod_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_mod_s_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_mod_s_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_s_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.mod.s.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_s_b_RES
   ret void
@@ -560,8 +560,8 @@ declare <16 x i8> @llvm.mips.mod.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_mod_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_mod_s_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_mod_s_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_s_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.mod.s.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_s_h_RES
   ret void
@@ -582,8 +582,8 @@ declare <8 x i16> @llvm.mips.mod.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_mod_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_mod_s_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_mod_s_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_s_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.mod.s.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_s_w_RES
   ret void
@@ -604,8 +604,8 @@ declare <4 x i32> @llvm.mips.mod.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_mod_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_mod_s_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_mod_s_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_s_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.mod.s.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_s_d_RES
   ret void
@@ -626,8 +626,8 @@ declare <2 x i64> @llvm.mips.mod.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_mod_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_mod_u_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_mod_u_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_u_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mod_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.mod.u.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_mod_u_b_RES
   ret void
@@ -648,8 +648,8 @@ declare <16 x i8> @llvm.mips.mod.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_mod_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_mod_u_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_mod_u_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_u_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mod_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.mod.u.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_mod_u_h_RES
   ret void
@@ -670,8 +670,8 @@ declare <8 x i16> @llvm.mips.mod.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_mod_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_mod_u_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_mod_u_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_u_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mod_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.mod.u.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_mod_u_w_RES
   ret void
@@ -692,8 +692,8 @@ declare <4 x i32> @llvm.mips.mod.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_mod_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_mod_u_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_mod_u_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_u_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mod_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.mod.u.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_mod_u_d_RES
   ret void
@@ -714,8 +714,8 @@ declare <2 x i64> @llvm.mips.mod.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_mulv_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_mulv_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_mulv_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.mulv.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
   ret void
@@ -736,8 +736,8 @@ declare <16 x i8> @llvm.mips.mulv.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_mulv_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_mulv_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_mulv_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.mulv.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
   ret void
@@ -758,8 +758,8 @@ declare <8 x i16> @llvm.mips.mulv.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_mulv_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_mulv_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_mulv_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.mulv.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
   ret void
@@ -780,8 +780,8 @@ declare <4 x i32> @llvm.mips.mulv.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_mulv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_mulv_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_mulv_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.mulv.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
   ret void
@@ -798,8 +798,8 @@ declare <2 x i64> @llvm.mips.mulv.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @mulv_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_mulv_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_mulv_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_mulv_b_ARG2
   %2 = mul <16 x i8> %0, %1
   store <16 x i8> %2, <16 x i8>* @llvm_mips_mulv_b_RES
   ret void
@@ -814,8 +814,8 @@ entry:
 
 define void @mulv_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_mulv_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_mulv_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulv_h_ARG2
   %2 = mul <8 x i16> %0, %1
   store <8 x i16> %2, <8 x i16>* @llvm_mips_mulv_h_RES
   ret void
@@ -830,8 +830,8 @@ entry:
 
 define void @mulv_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_mulv_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_mulv_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulv_w_ARG2
   %2 = mul <4 x i32> %0, %1
   store <4 x i32> %2, <4 x i32>* @llvm_mips_mulv_w_RES
   ret void
@@ -846,8 +846,8 @@ entry:
 
 define void @mulv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_mulv_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_mulv_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_mulv_d_ARG2
   %2 = mul <2 x i64> %0, %1
   store <2 x i64> %2, <2 x i64>* @llvm_mips_mulv_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/3r-p.ll b/llvm/test/CodeGen/Mips/msa/3r-p.ll
index 852023b0824..70b98aa8f9a 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-p.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-p.ll
@@ -10,8 +10,8 @@
 
 define void @llvm_mips_pckev_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_pckev_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_pckev_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pckev_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_pckev_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.pckev.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_pckev_b_RES
   ret void
@@ -32,8 +32,8 @@ declare <16 x i8> @llvm.mips.pckev.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_pckev_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_pckev_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_pckev_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pckev_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_pckev_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.pckev.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_pckev_h_RES
   ret void
@@ -54,8 +54,8 @@ declare <8 x i16> @llvm.mips.pckev.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_pckev_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_pckev_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_pckev_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pckev_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_pckev_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.pckev.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_pckev_w_RES
   ret void
@@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.pckev.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_pckev_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_pckev_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_pckev_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pckev_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_pckev_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.pckev.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_pckev_d_RES
   ret void
@@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.pckev.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_pckod_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_pckod_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_pckod_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_pckod_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_pckod_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.pckod.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_pckod_b_RES
   ret void
@@ -120,8 +120,8 @@ declare <16 x i8> @llvm.mips.pckod.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_pckod_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_pckod_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_pckod_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_pckod_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_pckod_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.pckod.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_pckod_h_RES
   ret void
@@ -142,8 +142,8 @@ declare <8 x i16> @llvm.mips.pckod.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_pckod_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_pckod_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_pckod_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_pckod_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_pckod_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.pckod.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_pckod_w_RES
   ret void
@@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.pckod.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_pckod_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_pckod_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_pckod_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_pckod_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_pckod_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.pckod.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_pckod_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/3r-s.ll b/llvm/test/CodeGen/Mips/msa/3r-s.ll
index 581c3bfd78a..d04c5ff165f 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-s.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-s.ll
@@ -11,9 +11,9 @@
 
 define void @llvm_mips_sld_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_sld_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_sld_b_ARG2
-  %2 = load i32* @llvm_mips_sld_b_ARG3
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sld_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sld_b_ARG2
+  %2 = load i32, i32* @llvm_mips_sld_b_ARG3
   %3 = tail call <16 x i8> @llvm.mips.sld.b(<16 x i8> %0, <16 x i8> %1, i32 %2)
   store <16 x i8> %3, <16 x i8>* @llvm_mips_sld_b_RES
   ret void
@@ -39,9 +39,9 @@ declare <16 x i8> @llvm.mips.sld.b(<16 x i8>, <16 x i8>, i32) nounwind
 
 define void @llvm_mips_sld_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_sld_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_sld_h_ARG2
-  %2 = load i32* @llvm_mips_sld_h_ARG3
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sld_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sld_h_ARG2
+  %2 = load i32, i32* @llvm_mips_sld_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.sld.h(<8 x i16> %0, <8 x i16> %1, i32 %2)
   store <8 x i16> %3, <8 x i16>* @llvm_mips_sld_h_RES
   ret void
@@ -67,9 +67,9 @@ declare <8 x i16> @llvm.mips.sld.h(<8 x i16>, <8 x i16>, i32) nounwind
 
 define void @llvm_mips_sld_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_sld_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_sld_w_ARG2
-  %2 = load i32* @llvm_mips_sld_w_ARG3
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sld_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sld_w_ARG2
+  %2 = load i32, i32* @llvm_mips_sld_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.sld.w(<4 x i32> %0, <4 x i32> %1, i32 %2)
   store <4 x i32> %3, <4 x i32>* @llvm_mips_sld_w_RES
   ret void
@@ -95,9 +95,9 @@ declare <4 x i32> @llvm.mips.sld.w(<4 x i32>, <4 x i32>, i32) nounwind
 
 define void @llvm_mips_sld_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_sld_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_sld_d_ARG2
-  %2 = load i32* @llvm_mips_sld_d_ARG3
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sld_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sld_d_ARG2
+  %2 = load i32, i32* @llvm_mips_sld_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.sld.d(<2 x i64> %0, <2 x i64> %1, i32 %2)
   store <2 x i64> %3, <2 x i64>* @llvm_mips_sld_d_RES
   ret void
@@ -122,8 +122,8 @@ declare <2 x i64> @llvm.mips.sld.d(<2 x i64>, <2 x i64>, i32) nounwind
 
 define void @llvm_mips_sll_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_sll_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_sll_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.sll.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
   ret void
@@ -146,8 +146,8 @@ declare <16 x i8> @llvm.mips.sll.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_sll_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_sll_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_sll_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.sll.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
   ret void
@@ -170,8 +170,8 @@ declare <8 x i16> @llvm.mips.sll.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_sll_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_sll_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_sll_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.sll.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
   ret void
@@ -194,8 +194,8 @@ declare <4 x i32> @llvm.mips.sll.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_sll_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_sll_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_sll_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.sll.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
   ret void
@@ -214,8 +214,8 @@ declare <2 x i64> @llvm.mips.sll.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @sll_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_sll_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_sll_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sll_b_ARG2
   %2 = shl <16 x i8> %0, %1
   store <16 x i8> %2, <16 x i8>* @llvm_mips_sll_b_RES
   ret void
@@ -232,8 +232,8 @@ entry:
 
 define void @sll_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_sll_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_sll_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sll_h_ARG2
   %2 = shl <8 x i16> %0, %1
   store <8 x i16> %2, <8 x i16>* @llvm_mips_sll_h_RES
   ret void
@@ -250,8 +250,8 @@ entry:
 
 define void @sll_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_sll_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_sll_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sll_w_ARG2
   %2 = shl <4 x i32> %0, %1
   store <4 x i32> %2, <4 x i32>* @llvm_mips_sll_w_RES
   ret void
@@ -268,8 +268,8 @@ entry:
 
 define void @sll_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_sll_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_sll_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sll_d_ARG2
   %2 = shl <2 x i64> %0, %1
   store <2 x i64> %2, <2 x i64>* @llvm_mips_sll_d_RES
   ret void
@@ -290,8 +290,8 @@ entry:
 
 define void @llvm_mips_sra_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_sra_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_sra_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.sra.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
   ret void
@@ -314,8 +314,8 @@ declare <16 x i8> @llvm.mips.sra.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_sra_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_sra_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_sra_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.sra.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
   ret void
@@ -338,8 +338,8 @@ declare <8 x i16> @llvm.mips.sra.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_sra_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_sra_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_sra_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.sra.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
   ret void
@@ -362,8 +362,8 @@ declare <4 x i32> @llvm.mips.sra.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_sra_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_sra_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_sra_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.sra.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
   ret void
@@ -383,8 +383,8 @@ declare <2 x i64> @llvm.mips.sra.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @sra_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_sra_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_sra_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sra_b_ARG2
   %2 = ashr <16 x i8> %0, %1
   store <16 x i8> %2, <16 x i8>* @llvm_mips_sra_b_RES
   ret void
@@ -401,8 +401,8 @@ entry:
 
 define void @sra_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_sra_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_sra_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sra_h_ARG2
   %2 = ashr <8 x i16> %0, %1
   store <8 x i16> %2, <8 x i16>* @llvm_mips_sra_h_RES
   ret void
@@ -419,8 +419,8 @@ entry:
 
 define void @sra_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_sra_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_sra_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sra_w_ARG2
   %2 = ashr <4 x i32> %0, %1
   store <4 x i32> %2, <4 x i32>* @llvm_mips_sra_w_RES
   ret void
@@ -437,8 +437,8 @@ entry:
 
 define void @sra_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_sra_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_sra_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sra_d_ARG2
   %2 = ashr <2 x i64> %0, %1
   store <2 x i64> %2, <2 x i64>* @llvm_mips_sra_d_RES
   ret void
@@ -459,8 +459,8 @@ entry:
 
 define void @llvm_mips_srar_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_srar_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_srar_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srar_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srar_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.srar.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_srar_b_RES
   ret void
@@ -483,8 +483,8 @@ declare <16 x i8> @llvm.mips.srar.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_srar_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_srar_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_srar_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srar_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srar_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.srar.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_srar_h_RES
   ret void
@@ -507,8 +507,8 @@ declare <8 x i16> @llvm.mips.srar.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_srar_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_srar_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_srar_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srar_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srar_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.srar.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_srar_w_RES
   ret void
@@ -531,8 +531,8 @@ declare <4 x i32> @llvm.mips.srar.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_srar_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_srar_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_srar_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srar_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srar_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.srar.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_srar_d_RES
   ret void
@@ -555,8 +555,8 @@ declare <2 x i64> @llvm.mips.srar.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_srl_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_srl_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_srl_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.srl.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
   ret void
@@ -579,8 +579,8 @@ declare <16 x i8> @llvm.mips.srl.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_srl_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_srl_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_srl_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.srl.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
   ret void
@@ -603,8 +603,8 @@ declare <8 x i16> @llvm.mips.srl.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_srl_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_srl_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_srl_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.srl.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
   ret void
@@ -627,8 +627,8 @@ declare <4 x i32> @llvm.mips.srl.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_srl_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_srl_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_srl_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.srl.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
   ret void
@@ -651,8 +651,8 @@ declare <2 x i64> @llvm.mips.srl.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_srlr_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_srlr_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_srlr_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srlr_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srlr_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.srlr.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_srlr_b_RES
   ret void
@@ -675,8 +675,8 @@ declare <16 x i8> @llvm.mips.srlr.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_srlr_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_srlr_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_srlr_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srlr_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srlr_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.srlr.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_srlr_h_RES
   ret void
@@ -699,8 +699,8 @@ declare <8 x i16> @llvm.mips.srlr.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_srlr_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_srlr_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_srlr_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srlr_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srlr_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.srlr.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_srlr_w_RES
   ret void
@@ -723,8 +723,8 @@ declare <4 x i32> @llvm.mips.srlr.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_srlr_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_srlr_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_srlr_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srlr_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srlr_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.srlr.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_srlr_d_RES
   ret void
@@ -744,8 +744,8 @@ declare <2 x i64> @llvm.mips.srlr.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @srl_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_srl_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_srl_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_srl_b_ARG2
   %2 = lshr <16 x i8> %0, %1
   store <16 x i8> %2, <16 x i8>* @llvm_mips_srl_b_RES
   ret void
@@ -762,8 +762,8 @@ entry:
 
 define void @srl_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_srl_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_srl_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_srl_h_ARG2
   %2 = lshr <8 x i16> %0, %1
   store <8 x i16> %2, <8 x i16>* @llvm_mips_srl_h_RES
   ret void
@@ -780,8 +780,8 @@ entry:
 
 define void @srl_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_srl_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_srl_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_srl_w_ARG2
   %2 = lshr <4 x i32> %0, %1
   store <4 x i32> %2, <4 x i32>* @llvm_mips_srl_w_RES
   ret void
@@ -798,8 +798,8 @@ entry:
 
 define void @srl_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_srl_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_srl_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_srl_d_ARG2
   %2 = lshr <2 x i64> %0, %1
   store <2 x i64> %2, <2 x i64>* @llvm_mips_srl_d_RES
   ret void
@@ -820,8 +820,8 @@ entry:
 
 define void @llvm_mips_subs_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_subs_s_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_subs_s_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_s_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.subs.s.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_s_b_RES
   ret void
@@ -844,8 +844,8 @@ declare <16 x i8> @llvm.mips.subs.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_subs_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_subs_s_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_subs_s_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_s_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.subs.s.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_s_h_RES
   ret void
@@ -868,8 +868,8 @@ declare <8 x i16> @llvm.mips.subs.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_subs_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_subs_s_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_subs_s_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_s_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.subs.s.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_s_w_RES
   ret void
@@ -892,8 +892,8 @@ declare <4 x i32> @llvm.mips.subs.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_subs_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_subs_s_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_subs_s_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_s_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.subs.s.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_s_d_RES
   ret void
@@ -916,8 +916,8 @@ declare <2 x i64> @llvm.mips.subs.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_subs_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_subs_u_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_subs_u_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_u_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subs_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.subs.u.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_subs_u_b_RES
   ret void
@@ -940,8 +940,8 @@ declare <16 x i8> @llvm.mips.subs.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_subs_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_subs_u_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_subs_u_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_u_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subs_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.subs.u.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_subs_u_h_RES
   ret void
@@ -964,8 +964,8 @@ declare <8 x i16> @llvm.mips.subs.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_subs_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_subs_u_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_subs_u_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_u_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subs_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.subs.u.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_subs_u_w_RES
   ret void
@@ -988,8 +988,8 @@ declare <4 x i32> @llvm.mips.subs.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_subs_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_subs_u_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_subs_u_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_u_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subs_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.subs.u.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_subs_u_d_RES
   ret void
@@ -1012,8 +1012,8 @@ declare <2 x i64> @llvm.mips.subs.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_subsus_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_subsus_u_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_subsus_u_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subsus_u_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subsus_u_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.subsus.u.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_subsus_u_b_RES
   ret void
@@ -1036,8 +1036,8 @@ declare <16 x i8> @llvm.mips.subsus.u.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_subsus_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_subsus_u_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_subsus_u_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subsus_u_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subsus_u_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.subsus.u.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_subsus_u_h_RES
   ret void
@@ -1060,8 +1060,8 @@ declare <8 x i16> @llvm.mips.subsus.u.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_subsus_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_subsus_u_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_subsus_u_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subsus_u_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subsus_u_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.subsus.u.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_subsus_u_w_RES
   ret void
@@ -1084,8 +1084,8 @@ declare <4 x i32> @llvm.mips.subsus.u.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_subsus_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_subsus_u_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_subsus_u_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subsus_u_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subsus_u_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.subsus.u.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_subsus_u_d_RES
   ret void
@@ -1108,8 +1108,8 @@ declare <2 x i64> @llvm.mips.subsus.u.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_subsuu_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_subsuu_s_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_subsuu_s_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subsuu_s_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subsuu_s_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_subsuu_s_b_RES
   ret void
@@ -1132,8 +1132,8 @@ declare <16 x i8> @llvm.mips.subsuu.s.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_subsuu_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_subsuu_s_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_subsuu_s_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subsuu_s_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subsuu_s_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_subsuu_s_h_RES
   ret void
@@ -1156,8 +1156,8 @@ declare <8 x i16> @llvm.mips.subsuu.s.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_subsuu_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_subsuu_s_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_subsuu_s_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subsuu_s_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subsuu_s_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_subsuu_s_w_RES
   ret void
@@ -1180,8 +1180,8 @@ declare <4 x i32> @llvm.mips.subsuu.s.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_subsuu_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_subsuu_s_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_subsuu_s_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subsuu_s_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subsuu_s_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_subsuu_s_d_RES
   ret void
@@ -1204,8 +1204,8 @@ declare <2 x i64> @llvm.mips.subsuu.s.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_subv_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_subv_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_subv_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.subv.b(<16 x i8> %0, <16 x i8> %1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
   ret void
@@ -1228,8 +1228,8 @@ declare <16 x i8> @llvm.mips.subv.b(<16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_subv_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_subv_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_subv_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.subv.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
   ret void
@@ -1252,8 +1252,8 @@ declare <8 x i16> @llvm.mips.subv.h(<8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_subv_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_subv_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_subv_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.subv.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
   ret void
@@ -1276,8 +1276,8 @@ declare <4 x i32> @llvm.mips.subv.w(<4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_subv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_subv_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_subv_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.subv.d(<2 x i64> %0, <2 x i64> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
   ret void
@@ -1297,8 +1297,8 @@ declare <2 x i64> @llvm.mips.subv.d(<2 x i64>, <2 x i64>) nounwind
 
 define void @subv_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_subv_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_subv_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_subv_b_ARG2
   %2 = sub <16 x i8> %0, %1
   store <16 x i8> %2, <16 x i8>* @llvm_mips_subv_b_RES
   ret void
@@ -1315,8 +1315,8 @@ entry:
 
 define void @subv_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_subv_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_subv_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_subv_h_ARG2
   %2 = sub <8 x i16> %0, %1
   store <8 x i16> %2, <8 x i16>* @llvm_mips_subv_h_RES
   ret void
@@ -1333,8 +1333,8 @@ entry:
 
 define void @subv_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_subv_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_subv_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_subv_w_ARG2
   %2 = sub <4 x i32> %0, %1
   store <4 x i32> %2, <4 x i32>* @llvm_mips_subv_w_RES
   ret void
@@ -1351,8 +1351,8 @@ entry:
 
 define void @subv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_subv_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_subv_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_subv_d_ARG2
   %2 = sub <2 x i64> %0, %1
   store <2 x i64> %2, <2 x i64>* @llvm_mips_subv_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/3r-v.ll b/llvm/test/CodeGen/Mips/msa/3r-v.ll
index c9693f90d55..2d36da40d2b 100644
--- a/llvm/test/CodeGen/Mips/msa/3r-v.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r-v.ll
@@ -11,9 +11,9 @@
 
 define void @llvm_mips_vshf_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_vshf_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_vshf_b_ARG2
-  %2 = load <16 x i8>* @llvm_mips_vshf_b_ARG3
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG2
+  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_vshf_b_ARG3
   %3 = tail call <16 x i8>
@llvm.mips.vshf.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2) store <16 x i8> %3, <16 x i8>* @llvm_mips_vshf_b_RES ret void @@ -36,9 +36,9 @@ declare <16 x i8> @llvm.mips.vshf.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind define void @llvm_mips_vshf_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_vshf_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_vshf_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_vshf_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_vshf_h_ARG3 %3 = tail call <8 x i16> @llvm.mips.vshf.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) store <8 x i16> %3, <8 x i16>* @llvm_mips_vshf_h_RES ret void @@ -61,9 +61,9 @@ declare <8 x i16> @llvm.mips.vshf.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind define void @llvm_mips_vshf_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_vshf_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_vshf_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_vshf_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_vshf_w_ARG3 %3 = tail call <4 x i32> @llvm.mips.vshf.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2) store <4 x i32> %3, <4 x i32>* @llvm_mips_vshf_w_RES ret void @@ -86,9 +86,9 @@ declare <4 x i32> @llvm.mips.vshf.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind define void @llvm_mips_vshf_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_vshf_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_vshf_d_ARG2 - %2 = load <2 x i64>* @llvm_mips_vshf_d_ARG3 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG2 + %2 = load <2 x i64>, <2 x i64>* @llvm_mips_vshf_d_ARG3 %3 = tail call <2 x i64> @llvm.mips.vshf.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2) store <2 x i64> %3, <2 x i64>* @llvm_mips_vshf_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/3r_4r.ll b/llvm/test/CodeGen/Mips/msa/3r_4r.ll index b7fd7283788..73d104c68c5 100644 --- a/llvm/test/CodeGen/Mips/msa/3r_4r.ll +++ b/llvm/test/CodeGen/Mips/msa/3r_4r.ll @@ -11,9 +11,9 @@ define void @llvm_mips_maddv_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_maddv_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_maddv_b_ARG2 - %2 = load <16 x i8>* @llvm_mips_maddv_b_ARG3 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG2 + %2 = load <16 x i8>, <16 x i8>* @llvm_mips_maddv_b_ARG3 %3 = tail call <16 x i8> @llvm.mips.maddv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2) store <16 x i8> %3, <16 x i8>* @llvm_mips_maddv_b_RES ret void @@ -36,9 +36,9 @@ declare <16 x i8> @llvm.mips.maddv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind define void @llvm_mips_maddv_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_maddv_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_maddv_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_maddv_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_maddv_h_ARG3 %3 = tail call <8 x i16> @llvm.mips.maddv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) store <8 x i16> %3, <8 x i16>* @llvm_mips_maddv_h_RES ret void @@ -61,9 +61,9 @@ declare <8 x i16> @llvm.mips.maddv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind define void @llvm_mips_maddv_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_maddv_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_maddv_w_ARG2 
-  %2 = load <4 x i32>* @llvm_mips_maddv_w_ARG3
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG2
+  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_maddv_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.maddv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
   store <4 x i32> %3, <4 x i32>* @llvm_mips_maddv_w_RES
   ret void
@@ -86,9 +86,9 @@ declare <4 x i32> @llvm.mips.maddv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_maddv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_maddv_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_maddv_d_ARG2
-  %2 = load <2 x i64>* @llvm_mips_maddv_d_ARG3
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG2
+  %2 = load <2 x i64>, <2 x i64>* @llvm_mips_maddv_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.maddv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
   store <2 x i64> %3, <2 x i64>* @llvm_mips_maddv_d_RES
   ret void
@@ -111,9 +111,9 @@ declare <2 x i64> @llvm.mips.maddv.d(<2 x i64>, <2 x i64>, <2 x i64>) nounwind
 
 define void @llvm_mips_msubv_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_msubv_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_msubv_b_ARG2
-  %2 = load <16 x i8>* @llvm_mips_msubv_b_ARG3
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG2
+  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_msubv_b_ARG3
   %3 = tail call <16 x i8> @llvm.mips.msubv.b(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
   store <16 x i8> %3, <16 x i8>* @llvm_mips_msubv_b_RES
   ret void
@@ -136,9 +136,9 @@ declare <16 x i8> @llvm.mips.msubv.b(<16 x i8>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_msubv_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_msubv_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_msubv_h_ARG2
-  %2 = load <8 x i16>* @llvm_mips_msubv_h_ARG3
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG2
+  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msubv_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.msubv.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2)
   store <8 x i16> %3, <8 x i16>* @llvm_mips_msubv_h_RES
   ret void
@@ -161,9 +161,9 @@ declare <8 x i16> @llvm.mips.msubv.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_msubv_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_msubv_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_msubv_w_ARG2
-  %2 = load <4 x i32>* @llvm_mips_msubv_w_ARG3
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG2
+  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msubv_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.msubv.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2)
   store <4 x i32> %3, <4 x i32>* @llvm_mips_msubv_w_RES
   ret void
@@ -186,9 +186,9 @@ declare <4 x i32> @llvm.mips.msubv.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_msubv_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_msubv_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_msubv_d_ARG2
-  %2 = load <2 x i64>* @llvm_mips_msubv_d_ARG3
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG2
+  %2 = load <2 x i64>, <2 x i64>* @llvm_mips_msubv_d_ARG3
  %3 = tail call <2 x i64> @llvm.mips.msubv.d(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2)
   store <2 x i64> %3, <2 x i64>* @llvm_mips_msubv_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/3r_4r_widen.ll b/llvm/test/CodeGen/Mips/msa/3r_4r_widen.ll
index 7063e4566a7..fe248eeb566 100644
--- a/llvm/test/CodeGen/Mips/msa/3r_4r_widen.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r_4r_widen.ll
@@ -12,9 +12,9 @@
 
 define void @llvm_mips_dpadd_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_dpadd_s_h_ARG1
-  %1 = load <16 x i8>* @llvm_mips_dpadd_s_h_ARG2
-  %2 = load <16 x i8>* @llvm_mips_dpadd_s_h_ARG3
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_h_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG2
+  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_s_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
   store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_s_h_RES
   ret void
@@ -37,9 +37,9 @@ declare <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dpadd_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_dpadd_s_w_ARG1
-  %1 = load <8 x i16>* @llvm_mips_dpadd_s_w_ARG2
-  %2 = load <8 x i16>* @llvm_mips_dpadd_s_w_ARG3
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_w_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG2
+  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_s_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
   store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_s_w_RES
   ret void
@@ -62,9 +62,9 @@ declare <4 x i32> @llvm.mips.dpadd.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dpadd_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_dpadd_s_d_ARG1
-  %1 = load <4 x i32>* @llvm_mips_dpadd_s_d_ARG2
-  %2 = load <4 x i32>* @llvm_mips_dpadd_s_d_ARG3
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpadd_s_d_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG2
+  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_s_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
   store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_s_d_RES
   ret void
@@ -87,9 +87,9 @@ declare <2 x i64> @llvm.mips.dpadd.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_dpadd_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_dpadd_u_h_ARG1
-  %1 = load <16 x i8>* @llvm_mips_dpadd_u_h_ARG2
-  %2 = load <16 x i8>* @llvm_mips_dpadd_u_h_ARG3
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_h_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG2
+  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpadd_u_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
   store <8 x i16> %3, <8 x i16>* @llvm_mips_dpadd_u_h_RES
   ret void
@@ -112,9 +112,9 @@ declare <8 x i16> @llvm.mips.dpadd.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dpadd_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_dpadd_u_w_ARG1
-  %1 = load <8 x i16>* @llvm_mips_dpadd_u_w_ARG2
-  %2 = load <8 x i16>* @llvm_mips_dpadd_u_w_ARG3
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_w_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG2
+  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpadd_u_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
   store <4 x i32> %3, <4 x i32>* @llvm_mips_dpadd_u_w_RES
   ret void
@@ -137,9 +137,9 @@ declare <4 x i32> @llvm.mips.dpadd.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dpadd_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_dpadd_u_d_ARG1
-  %1 = load <4 x i32>* @llvm_mips_dpadd_u_d_ARG2
-  %2 = load <4 x i32>* @llvm_mips_dpadd_u_d_ARG3
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpadd_u_d_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG2
+  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpadd_u_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
   store <2 x i64> %3, <2 x i64>* @llvm_mips_dpadd_u_d_RES
   ret void
@@ -162,9 +162,9 @@ declare <2 x i64> @llvm.mips.dpadd.u.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_dpsub_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_dpsub_s_h_ARG1
-  %1 = load <16 x i8>* @llvm_mips_dpsub_s_h_ARG2
-  %2 = load <16 x i8>* @llvm_mips_dpsub_s_h_ARG3
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_h_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_s_h_ARG2
+  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_s_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
   store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_s_h_RES
   ret void
@@ -187,9 +187,9 @@ declare <8 x i16> @llvm.mips.dpsub.s.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dpsub_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_dpsub_s_w_ARG1
-  %1 = load <8 x i16>* @llvm_mips_dpsub_s_w_ARG2
-  %2 = load <8 x i16>* @llvm_mips_dpsub_s_w_ARG3
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_w_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_w_ARG2
+  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_s_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
   store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_s_w_RES
   ret void
@@ -212,9 +212,9 @@ declare <4 x i32> @llvm.mips.dpsub.s.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dpsub_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_dpsub_s_d_ARG1
-  %1 = load <4 x i32>* @llvm_mips_dpsub_s_d_ARG2
-  %2 = load <4 x i32>* @llvm_mips_dpsub_s_d_ARG3
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpsub_s_d_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_d_ARG2
+  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_s_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
   store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_s_d_RES
   ret void
@@ -237,9 +237,9 @@ declare <2 x i64> @llvm.mips.dpsub.s.d(<2 x i64>, <4 x i32>, <4 x i32>) nounwind
 
 define void @llvm_mips_dpsub_u_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_dpsub_u_h_ARG1
-  %1 = load <16 x i8>* @llvm_mips_dpsub_u_h_ARG2
-  %2 = load <16 x i8>* @llvm_mips_dpsub_u_h_ARG3
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_h_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_u_h_ARG2
+  %2 = load <16 x i8>, <16 x i8>* @llvm_mips_dpsub_u_h_ARG3
   %3 = tail call <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16> %0, <16 x i8> %1, <16 x i8> %2)
   store <8 x i16> %3, <8 x i16>* @llvm_mips_dpsub_u_h_RES
   ret void
@@ -262,9 +262,9 @@ declare <8 x i16> @llvm.mips.dpsub.u.h(<8 x i16>, <16 x i8>, <16 x i8>) nounwind
 
 define void @llvm_mips_dpsub_u_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_dpsub_u_w_ARG1
-  %1 = load <8 x i16>* @llvm_mips_dpsub_u_w_ARG2
-  %2 = load <8 x i16>* @llvm_mips_dpsub_u_w_ARG3
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_w_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_w_ARG2
+  %2 = load <8 x i16>, <8 x i16>* @llvm_mips_dpsub_u_w_ARG3
   %3 = tail call <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32> %0, <8 x i16> %1, <8 x i16> %2)
   store <4 x i32> %3, <4 x i32>* @llvm_mips_dpsub_u_w_RES
   ret void
@@ -287,9 +287,9 @@ declare <4 x i32> @llvm.mips.dpsub.u.w(<4 x i32>, <8 x i16>, <8 x i16>) nounwind
 
 define void @llvm_mips_dpsub_u_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_dpsub_u_d_ARG1
-  %1 = load <4 x i32>* @llvm_mips_dpsub_u_d_ARG2
-  %2 = load <4 x i32>* @llvm_mips_dpsub_u_d_ARG3
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_dpsub_u_d_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_d_ARG2
+  %2 = load <4 x i32>, <4 x i32>* @llvm_mips_dpsub_u_d_ARG3
   %3 = tail call <2 x i64> @llvm.mips.dpsub.u.d(<2 x i64> %0, <4 x i32> %1, <4 x i32> %2)
   store <2 x i64> %3, <2 x i64>* @llvm_mips_dpsub_u_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/3r_splat.ll b/llvm/test/CodeGen/Mips/msa/3r_splat.ll
index 6b0cb26f8c8..56d26b030de 100644
--- a/llvm/test/CodeGen/Mips/msa/3r_splat.ll
+++ b/llvm/test/CodeGen/Mips/msa/3r_splat.ll
@@ -11,7 +11,7 @@
 
 define void @llvm_mips_splat_b_test(i32 %a) nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_splat_b_ARG1
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_splat_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.splat.b(<16 x i8> %0, i32 %a)
   store <16 x i8> %1, <16 x i8>* @llvm_mips_splat_b_RES
   ret void
@@ -32,7 +32,7 @@ declare <16 x i8> @llvm.mips.splat.b(<16 x i8>, i32) nounwind
 
 define void @llvm_mips_splat_h_test(i32 %a) nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_splat_h_ARG1
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_splat_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.splat.h(<8 x i16> %0, i32 %a)
   store <8 x i16> %1, <8 x i16>* @llvm_mips_splat_h_RES
   ret void
@@ -53,7 +53,7 @@ declare <8 x i16> @llvm.mips.splat.h(<8 x i16>, i32) nounwind
 
 define void @llvm_mips_splat_w_test(i32 %a) nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_splat_w_ARG1
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_splat_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.splat.w(<4 x i32> %0, i32 %a)
   store <4 x i32> %1, <4 x i32>* @llvm_mips_splat_w_RES
   ret void
@@ -74,7 +74,7 @@ declare <4 x i32> @llvm.mips.splat.w(<4 x i32>, i32) nounwind
 
 define void @llvm_mips_splat_d_test(i32 %a) nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_splat_d_ARG1
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_splat_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.splat.d(<2 x i64> %0, i32 %a)
   store <2 x i64> %1, <2 x i64>* @llvm_mips_splat_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/3rf.ll b/llvm/test/CodeGen/Mips/msa/3rf.ll
index ae665afcc95..dce0c275e8d 100644
--- a/llvm/test/CodeGen/Mips/msa/3rf.ll
+++ b/llvm/test/CodeGen/Mips/msa/3rf.ll
@@ -9,8 +9,8 @@
 
 define void @llvm_mips_fadd_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fadd_w_ARG1
-  %1 = load <4 x float>* @llvm_mips_fadd_w_ARG2
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG1
+  %1 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG2
   %2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %1)
   store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
   ret void
@@ -31,8 +31,8 @@ declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fadd_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fadd_d_ARG1
-  %1 = load <2 x double>* @llvm_mips_fadd_d_ARG2
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG1
+  %1 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG2
   %2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %1)
   store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
   ret void
@@ -49,8 +49,8 @@ declare <2 x double> @llvm.mips.fadd.d(<2 x double>, <2 x double>) nounwind
 
 define void @fadd_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fadd_w_ARG1
-  %1 = load <4 x float>* @llvm_mips_fadd_w_ARG2
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG1
+  %1 = load <4 x float>, <4 x float>* @llvm_mips_fadd_w_ARG2
   %2 = fadd <4 x float> %0, %1
   store <4 x float> %2, <4 x float>* @llvm_mips_fadd_w_RES
   ret void
@@ -65,8 +65,8 @@ entry:
 
 define void @fadd_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fadd_d_ARG1
-  %1 = load <2 x double>* @llvm_mips_fadd_d_ARG2
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG1
+  %1 = load <2 x double>, <2 x double>* @llvm_mips_fadd_d_ARG2
   %2 = fadd <2 x double> %0, %1
   store <2 x double> %2, <2 x double>* @llvm_mips_fadd_d_RES
   ret void
@@ -85,8 +85,8 @@ entry:
 
 define void @llvm_mips_fdiv_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fdiv_w_ARG1
-  %1 = load <4 x float>* @llvm_mips_fdiv_w_ARG2
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG1
+  %1 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG2
   %2 = tail call <4 x float> @llvm.mips.fdiv.w(<4 x float> %0, <4 x float> %1)
   store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
   ret void
@@ -107,8 +107,8 @@ declare <4 x float> @llvm.mips.fdiv.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fdiv_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fdiv_d_ARG1
-  %1 = load <2 x double>* @llvm_mips_fdiv_d_ARG2
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG1
+  %1 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG2
   %2 = tail call <2 x double> @llvm.mips.fdiv.d(<2 x double> %0, <2 x double> %1)
   store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
   ret void
@@ -125,8 +125,8 @@ declare <2 x double> @llvm.mips.fdiv.d(<2 x double>, <2 x double>) nounwind
 
 define void @fdiv_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fdiv_w_ARG1
-  %1 = load <4 x float>* @llvm_mips_fdiv_w_ARG2
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG1
+  %1 = load <4 x float>, <4 x float>* @llvm_mips_fdiv_w_ARG2
   %2 = fdiv <4 x float> %0, %1
   store <4 x float> %2, <4 x float>* @llvm_mips_fdiv_w_RES
   ret void
@@ -141,8 +141,8 @@ entry:
 
 define void @fdiv_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fdiv_d_ARG1
-  %1 = load <2 x double>* @llvm_mips_fdiv_d_ARG2
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG1
+  %1 = load <2 x double>, <2 x double>* @llvm_mips_fdiv_d_ARG2
  %2 = fdiv <2 x double> %0, %1
   store <2 x double> %2, <2 x double>* @llvm_mips_fdiv_d_RES
   ret void
@@ -161,8 +161,8 @@ entry:
 
 define void @llvm_mips_fmin_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fmin_w_ARG1
-  %1 = load <4 x float>* @llvm_mips_fmin_w_ARG2
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fmin_w_ARG1
+  %1 = load <4 x float>, <4 x float>* @llvm_mips_fmin_w_ARG2
   %2 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %0, <4 x float> %1)
   store <4 x float> %2, <4 x float>* @llvm_mips_fmin_w_RES
   ret void
@@ -183,8 +183,8 @@ declare <4 x float> @llvm.mips.fmin.w(<4 x float>, <4 x float>) nounwind
 
 define void @llvm_mips_fmin_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fmin_d_ARG1
-  %1 = load <2 x double>* @llvm_mips_fmin_d_ARG2
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fmin_d_ARG1
+  %1 = load <2 x double>, <2 x double>* @llvm_mips_fmin_d_ARG2
   %2 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %0, <2 x double> %1)
   store <2 x double> %2, <2 x double>* @llvm_mips_fmin_d_RES
   ret void
@@ -205,8 +205,8 @@ declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fmin_a_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmin_a_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmin_a_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmin_a_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmin_a_w_ARG2 %2 = tail call <4 x float> @llvm.mips.fmin.a.w(<4 x float> %0, <4 x float> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fmin_a_w_RES ret void @@ -227,8 +227,8 @@ declare <4 x float> @llvm.mips.fmin.a.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fmin_a_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmin_a_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmin_a_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmin_a_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmin_a_d_ARG2 %2 = tail call <2 x double> @llvm.mips.fmin.a.d(<2 x double> %0, <2 x double> %1) store <2 x double> %2, <2 x double>* @llvm_mips_fmin_a_d_RES ret void @@ -249,8 +249,8 @@ declare <2 x double> @llvm.mips.fmin.a.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fmax_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmax_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmax_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmax_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmax_w_ARG2 %2 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %0, <4 x float> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fmax_w_RES ret void @@ -271,8 +271,8 @@ declare <4 x float> @llvm.mips.fmax.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fmax_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmax_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmax_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmax_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmax_d_ARG2 %2 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %0, <2 x double> %1) store <2 x double> %2, <2 x double>* @llvm_mips_fmax_d_RES ret void @@ -293,8 +293,8 @@ declare <2 x double> @llvm.mips.fmax.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fmax_a_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmax_a_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmax_a_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmax_a_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmax_a_w_ARG2 %2 = tail call <4 x float> @llvm.mips.fmax.a.w(<4 x float> %0, <4 x float> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fmax_a_w_RES ret void @@ -315,8 +315,8 @@ declare <4 x float> @llvm.mips.fmax.a.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fmax_a_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmax_a_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmax_a_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmax_a_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmax_a_d_ARG2 %2 = tail call <2 x double> @llvm.mips.fmax.a.d(<2 x double> %0, <2 x double> %1) store <2 x double> %2, <2 x double>* @llvm_mips_fmax_a_d_RES ret void @@ -337,8 +337,8 @@ declare <2 x double> @llvm.mips.fmax.a.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fmul_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmul_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmul_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG2 %2 = tail call <4 x 
float> @llvm.mips.fmul.w(<4 x float> %0, <4 x float> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fmul_w_RES ret void @@ -359,8 +359,8 @@ declare <4 x float> @llvm.mips.fmul.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fmul_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmul_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmul_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG2 %2 = tail call <2 x double> @llvm.mips.fmul.d(<2 x double> %0, <2 x double> %1) store <2 x double> %2, <2 x double>* @llvm_mips_fmul_d_RES ret void @@ -377,8 +377,8 @@ declare <2 x double> @llvm.mips.fmul.d(<2 x double>, <2 x double>) nounwind define void @fmul_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmul_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmul_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmul_w_ARG2 %2 = fmul <4 x float> %0, %1 store <4 x float> %2, <4 x float>* @llvm_mips_fmul_w_RES ret void @@ -393,8 +393,8 @@ entry: define void @fmul_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmul_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmul_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmul_d_ARG2 %2 = fmul <2 x double> %0, %1 store <2 x double> %2, <2 x double>* @llvm_mips_fmul_d_RES ret void @@ -413,8 +413,8 @@ entry: define void @llvm_mips_fsub_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fsub_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fsub_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG2 %2 = tail call <4 x float> @llvm.mips.fsub.w(<4 x float> %0, <4 x float> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fsub_w_RES ret void @@ -435,8 +435,8 @@ declare <4 x float> @llvm.mips.fsub.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fsub_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fsub_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fsub_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG2 %2 = tail call <2 x double> @llvm.mips.fsub.d(<2 x double> %0, <2 x double> %1) store <2 x double> %2, <2 x double>* @llvm_mips_fsub_d_RES ret void @@ -454,8 +454,8 @@ declare <2 x double> @llvm.mips.fsub.d(<2 x double>, <2 x double>) nounwind define void @fsub_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fsub_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fsub_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fsub_w_ARG2 %2 = fsub <4 x float> %0, %1 store <4 x float> %2, <4 x float>* @llvm_mips_fsub_w_RES ret void @@ -470,8 +470,8 @@ entry: define void @fsub_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fsub_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fsub_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fsub_d_ARG2 %2 = fsub <2 x double> %0, %1 store <2 x double> %2, <2 x double>* @llvm_mips_fsub_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/3rf_4rf.ll b/llvm/test/CodeGen/Mips/msa/3rf_4rf.ll index 67ef7fd2bae..f1a3002e817 100644 --- a/llvm/test/CodeGen/Mips/msa/3rf_4rf.ll +++ b/llvm/test/CodeGen/Mips/msa/3rf_4rf.ll @@ -11,9 +11,9 @@ define void 
@llvm_mips_fmadd_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmadd_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmadd_w_ARG2 - %2 = load <4 x float>* @llvm_mips_fmadd_w_ARG3 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmadd_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmadd_w_ARG2 + %2 = load <4 x float>, <4 x float>* @llvm_mips_fmadd_w_ARG3 %3 = tail call <4 x float> @llvm.mips.fmadd.w(<4 x float> %0, <4 x float> %1, <4 x float> %2) store <4 x float> %3, <4 x float>* @llvm_mips_fmadd_w_RES ret void @@ -36,9 +36,9 @@ declare <4 x float> @llvm.mips.fmadd.w(<4 x float>, <4 x float>, <4 x float>) no define void @llvm_mips_fmadd_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmadd_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmadd_d_ARG2 - %2 = load <2 x double>* @llvm_mips_fmadd_d_ARG3 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmadd_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmadd_d_ARG2 + %2 = load <2 x double>, <2 x double>* @llvm_mips_fmadd_d_ARG3 %3 = tail call <2 x double> @llvm.mips.fmadd.d(<2 x double> %0, <2 x double> %1, <2 x double> %2) store <2 x double> %3, <2 x double>* @llvm_mips_fmadd_d_RES ret void @@ -61,9 +61,9 @@ declare <2 x double> @llvm.mips.fmadd.d(<2 x double>, <2 x double>, <2 x double> define void @llvm_mips_fmsub_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fmsub_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fmsub_w_ARG2 - %2 = load <4 x float>* @llvm_mips_fmsub_w_ARG3 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fmsub_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fmsub_w_ARG2 + %2 = load <4 x float>, <4 x float>* @llvm_mips_fmsub_w_ARG3 %3 = tail call <4 x float> @llvm.mips.fmsub.w(<4 x float> %0, <4 x float> %1, <4 x float> %2) store <4 x float> %3, <4 x float>* @llvm_mips_fmsub_w_RES ret void @@ -86,9 +86,9 @@ declare <4 x float> @llvm.mips.fmsub.w(<4 x float>, <4 x float>, <4 x float>) no define void @llvm_mips_fmsub_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fmsub_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fmsub_d_ARG2 - %2 = load <2 x double>* @llvm_mips_fmsub_d_ARG3 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fmsub_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fmsub_d_ARG2 + %2 = load <2 x double>, <2 x double>* @llvm_mips_fmsub_d_ARG3 %3 = tail call <2 x double> @llvm.mips.fmsub.d(<2 x double> %0, <2 x double> %1, <2 x double> %2) store <2 x double> %3, <2 x double>* @llvm_mips_fmsub_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/3rf_4rf_q.ll b/llvm/test/CodeGen/Mips/msa/3rf_4rf_q.ll index de28be0b1c2..704c4b7e7cb 100644 --- a/llvm/test/CodeGen/Mips/msa/3rf_4rf_q.ll +++ b/llvm/test/CodeGen/Mips/msa/3rf_4rf_q.ll @@ -11,9 +11,9 @@ define void @llvm_mips_madd_q_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_madd_q_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_madd_q_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_madd_q_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_madd_q_h_ARG3 %3 = tail call <8 x i16> @llvm.mips.madd.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) store <8 x i16> %3, <8 x i16>* @llvm_mips_madd_q_h_RES ret void @@ -36,9 +36,9 @@ declare <8 x i16> @llvm.mips.madd.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind define void @llvm_mips_madd_q_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_madd_q_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_madd_q_w_ARG2 - %2 = load <4 x 
i32>* @llvm_mips_madd_q_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_madd_q_w_ARG3 %3 = tail call <4 x i32> @llvm.mips.madd.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2) store <4 x i32> %3, <4 x i32>* @llvm_mips_madd_q_w_RES ret void @@ -61,9 +61,9 @@ declare <4 x i32> @llvm.mips.madd.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind define void @llvm_mips_maddr_q_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_maddr_q_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_maddr_q_h_ARG3 %3 = tail call <8 x i16> @llvm.mips.maddr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) store <8 x i16> %3, <8 x i16>* @llvm_mips_maddr_q_h_RES ret void @@ -86,9 +86,9 @@ declare <8 x i16> @llvm.mips.maddr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind define void @llvm_mips_maddr_q_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_maddr_q_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_maddr_q_w_ARG3 %3 = tail call <4 x i32> @llvm.mips.maddr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2) store <4 x i32> %3, <4 x i32>* @llvm_mips_maddr_q_w_RES ret void @@ -111,9 +111,9 @@ declare <4 x i32> @llvm.mips.maddr.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind define void @llvm_mips_msub_q_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_msub_q_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_msub_q_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_msub_q_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msub_q_h_ARG3 %3 = tail call <8 x i16> @llvm.mips.msub.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) store <8 x i16> %3, <8 x i16>* @llvm_mips_msub_q_h_RES ret void @@ -136,9 +136,9 @@ declare <8 x i16> @llvm.mips.msub.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind define void @llvm_mips_msub_q_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_msub_q_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_msub_q_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_msub_q_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msub_q_w_ARG3 %3 = tail call <4 x i32> @llvm.mips.msub.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2) store <4 x i32> %3, <4 x i32>* @llvm_mips_msub_q_w_RES ret void @@ -161,9 +161,9 @@ declare <4 x i32> @llvm.mips.msub.q.w(<4 x i32>, <4 x i32>, <4 x i32>) nounwind define void @llvm_mips_msubr_q_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_msubr_q_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_msubr_q_h_ARG3 %3 = tail call <8 x i16> @llvm.mips.msubr.q.h(<8 x i16> %0, <8 x i16> %1, <8 x i16> %2) store <8 x i16> %3, <8 x i16>* @llvm_mips_msubr_q_h_RES ret void @@ -186,9 +186,9 @@ 
declare <8 x i16> @llvm.mips.msubr.q.h(<8 x i16>, <8 x i16>, <8 x i16>) nounwind define void @llvm_mips_msubr_q_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_msubr_q_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_msubr_q_w_ARG3 %3 = tail call <4 x i32> @llvm.mips.msubr.q.w(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2) store <4 x i32> %3, <4 x i32>* @llvm_mips_msubr_q_w_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/3rf_exdo.ll b/llvm/test/CodeGen/Mips/msa/3rf_exdo.ll index 8a7f268a506..1b1b2e9243e 100644 --- a/llvm/test/CodeGen/Mips/msa/3rf_exdo.ll +++ b/llvm/test/CodeGen/Mips/msa/3rf_exdo.ll @@ -10,8 +10,8 @@ define void @llvm_mips_fexdo_h_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fexdo_h_ARG1 - %1 = load <4 x float>* @llvm_mips_fexdo_h_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fexdo_h_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fexdo_h_ARG2 %2 = tail call <8 x half> @llvm.mips.fexdo.h(<4 x float> %0, <4 x float> %1) store <8 x half> %2, <8 x half>* @llvm_mips_fexdo_h_RES ret void @@ -32,8 +32,8 @@ declare <8 x half> @llvm.mips.fexdo.h(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fexdo_w_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fexdo_w_ARG1 - %1 = load <2 x double>* @llvm_mips_fexdo_w_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fexdo_w_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fexdo_w_ARG2 %2 = tail call <4 x float> @llvm.mips.fexdo.w(<2 x double> %0, <2 x double> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fexdo_w_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/3rf_float_int.ll b/llvm/test/CodeGen/Mips/msa/3rf_float_int.ll index 7b01e1721db..2bd056d3cc8 100644 --- a/llvm/test/CodeGen/Mips/msa/3rf_float_int.ll +++ b/llvm/test/CodeGen/Mips/msa/3rf_float_int.ll @@ -10,8 +10,8 @@ define void @llvm_mips_fexp2_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fexp2_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_fexp2_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fexp2_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_fexp2_w_ARG2 %2 = tail call <4 x float> @llvm.mips.fexp2.w(<4 x float> %0, <4 x i32> %1) store <4 x float> %2, <4 x float>* @llvm_mips_fexp2_w_RES ret void @@ -32,8 +32,8 @@ declare <4 x float> @llvm.mips.fexp2.w(<4 x float>, <4 x i32>) nounwind define void @llvm_mips_fexp2_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fexp2_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_fexp2_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fexp2_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_fexp2_d_ARG2 %2 = tail call <2 x double> @llvm.mips.fexp2.d(<2 x double> %0, <2 x i64> %1) store <2 x double> %2, <2 x double>* @llvm_mips_fexp2_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/3rf_int_float.ll b/llvm/test/CodeGen/Mips/msa/3rf_int_float.ll index 5624771b835..545e5435d64 100644 --- a/llvm/test/CodeGen/Mips/msa/3rf_int_float.ll +++ b/llvm/test/CodeGen/Mips/msa/3rf_int_float.ll @@ -10,8 +10,8 @@ define void @llvm_mips_fcaf_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fcaf_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fcaf_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fcaf_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fcaf_w_ARG2 %2 = tail call <4 x i32> 
@llvm.mips.fcaf.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fcaf_w_RES ret void @@ -32,8 +32,8 @@ declare <4 x i32> @llvm.mips.fcaf.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fcaf_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fcaf_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fcaf_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fcaf_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fcaf_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.fcaf.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fcaf_d_RES ret void @@ -54,8 +54,8 @@ declare <2 x i64> @llvm.mips.fcaf.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fceq_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fceq_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fceq_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fceq_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fceq_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.fceq.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fceq_w_RES ret void @@ -76,8 +76,8 @@ declare <4 x i32> @llvm.mips.fceq.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fceq_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fceq_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fceq_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fceq_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fceq_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.fceq.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fceq_d_RES ret void @@ -98,8 +98,8 @@ declare <2 x i64> @llvm.mips.fceq.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fcle_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fcle_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fcle_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fcle_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fcle_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.fcle.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fcle_w_RES ret void @@ -120,8 +120,8 @@ declare <4 x i32> @llvm.mips.fcle.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fcle_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fcle_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fcle_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fcle_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fcle_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.fcle.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fcle_d_RES ret void @@ -142,8 +142,8 @@ declare <2 x i64> @llvm.mips.fcle.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fclt_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fclt_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fclt_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fclt_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fclt_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.fclt.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fclt_w_RES ret void @@ -164,8 +164,8 @@ declare <4 x i32> @llvm.mips.fclt.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fclt_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fclt_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fclt_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fclt_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fclt_d_ARG2 %2 = tail call <2 x i64> 
@llvm.mips.fclt.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fclt_d_RES ret void @@ -186,8 +186,8 @@ declare <2 x i64> @llvm.mips.fclt.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fcor_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fcor_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fcor_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fcor_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fcor_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.fcor.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fcor_w_RES ret void @@ -208,8 +208,8 @@ declare <4 x i32> @llvm.mips.fcor.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fcor_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fcor_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fcor_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fcor_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fcor_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.fcor.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fcor_d_RES ret void @@ -230,8 +230,8 @@ declare <2 x i64> @llvm.mips.fcor.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fcne_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fcne_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fcne_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fcne_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fcne_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.fcne.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fcne_w_RES ret void @@ -252,8 +252,8 @@ declare <4 x i32> @llvm.mips.fcne.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fcne_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fcne_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fcne_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fcne_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fcne_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.fcne.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fcne_d_RES ret void @@ -274,8 +274,8 @@ declare <2 x i64> @llvm.mips.fcne.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fcueq_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fcueq_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fcueq_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fcueq_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fcueq_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.fcueq.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fcueq_w_RES ret void @@ -296,8 +296,8 @@ declare <4 x i32> @llvm.mips.fcueq.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fcueq_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fcueq_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fcueq_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fcueq_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fcueq_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.fcueq.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fcueq_d_RES ret void @@ -318,8 +318,8 @@ declare <2 x i64> @llvm.mips.fcueq.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fcult_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fcult_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fcult_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fcult_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fcult_w_ARG2 %2 = tail call <4 
x i32> @llvm.mips.fcult.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fcult_w_RES ret void @@ -340,8 +340,8 @@ declare <4 x i32> @llvm.mips.fcult.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fcult_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fcult_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fcult_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fcult_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fcult_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.fcult.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fcult_d_RES ret void @@ -362,8 +362,8 @@ declare <2 x i64> @llvm.mips.fcult.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fcule_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fcule_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fcule_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fcule_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fcule_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.fcule.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fcule_w_RES ret void @@ -384,8 +384,8 @@ declare <4 x i32> @llvm.mips.fcule.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fcule_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fcule_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fcule_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fcule_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fcule_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.fcule.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fcule_d_RES ret void @@ -406,8 +406,8 @@ declare <2 x i64> @llvm.mips.fcule.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fcun_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fcun_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fcun_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fcun_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fcun_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.fcun.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fcun_w_RES ret void @@ -428,8 +428,8 @@ declare <4 x i32> @llvm.mips.fcun.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fcun_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fcun_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fcun_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fcun_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fcun_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.fcun.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fcun_d_RES ret void @@ -450,8 +450,8 @@ declare <2 x i64> @llvm.mips.fcun.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fcune_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fcune_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fcune_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fcune_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fcune_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.fcune.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fcune_w_RES ret void @@ -472,8 +472,8 @@ declare <4 x i32> @llvm.mips.fcune.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fcune_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fcune_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fcune_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fcune_d_ARG1 + %1 = load <2 x double>, <2 x double>* 
@llvm_mips_fcune_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.fcune.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fcune_d_RES ret void @@ -494,8 +494,8 @@ declare <2 x i64> @llvm.mips.fcune.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fsaf_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fsaf_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fsaf_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fsaf_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fsaf_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.fsaf.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fsaf_w_RES ret void @@ -516,8 +516,8 @@ declare <4 x i32> @llvm.mips.fsaf.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fsaf_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fsaf_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fsaf_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fsaf_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fsaf_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.fsaf.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fsaf_d_RES ret void @@ -538,8 +538,8 @@ declare <2 x i64> @llvm.mips.fsaf.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fseq_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fseq_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fseq_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fseq_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fseq_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.fseq.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fseq_w_RES ret void @@ -560,8 +560,8 @@ declare <4 x i32> @llvm.mips.fseq.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fseq_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fseq_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fseq_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fseq_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fseq_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.fseq.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fseq_d_RES ret void @@ -582,8 +582,8 @@ declare <2 x i64> @llvm.mips.fseq.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fsle_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fsle_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fsle_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fsle_w_ARG1 + %1 = load <4 x float>, <4 x float>* @llvm_mips_fsle_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.fsle.w(<4 x float> %0, <4 x float> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_fsle_w_RES ret void @@ -604,8 +604,8 @@ declare <4 x i32> @llvm.mips.fsle.w(<4 x float>, <4 x float>) nounwind define void @llvm_mips_fsle_d_test() nounwind { entry: - %0 = load <2 x double>* @llvm_mips_fsle_d_ARG1 - %1 = load <2 x double>* @llvm_mips_fsle_d_ARG2 + %0 = load <2 x double>, <2 x double>* @llvm_mips_fsle_d_ARG1 + %1 = load <2 x double>, <2 x double>* @llvm_mips_fsle_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.fsle.d(<2 x double> %0, <2 x double> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_fsle_d_RES ret void @@ -626,8 +626,8 @@ declare <2 x i64> @llvm.mips.fsle.d(<2 x double>, <2 x double>) nounwind define void @llvm_mips_fslt_w_test() nounwind { entry: - %0 = load <4 x float>* @llvm_mips_fslt_w_ARG1 - %1 = load <4 x float>* @llvm_mips_fslt_w_ARG2 + %0 = load <4 x float>, <4 x float>* @llvm_mips_fslt_w_ARG1 + %1 = load <4 x float>, <4 x float>* 
   %2 = tail call <4 x i32> @llvm.mips.fslt.w(<4 x float> %0, <4 x float> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_fslt_w_RES
   ret void
@@ -648,8 +648,8 @@ declare <4 x i32> @llvm.mips.fslt.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fslt_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fslt_d_ARG1
-  %1 = load <2 x double>* @llvm_mips_fslt_d_ARG2
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fslt_d_ARG1
+  %1 = load <2 x double>, <2 x double>* @llvm_mips_fslt_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fslt.d(<2 x double> %0, <2 x double> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_fslt_d_RES
   ret void
@@ -670,8 +670,8 @@ declare <2 x i64> @llvm.mips.fslt.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsor_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fsor_w_ARG1
-  %1 = load <4 x float>* @llvm_mips_fsor_w_ARG2
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsor_w_ARG1
+  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsor_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsor.w(<4 x float> %0, <4 x float> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_fsor_w_RES
   ret void
@@ -692,8 +692,8 @@ declare <4 x i32> @llvm.mips.fsor.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsor_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fsor_d_ARG1
-  %1 = load <2 x double>* @llvm_mips_fsor_d_ARG2
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsor_d_ARG1
+  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsor_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsor.d(<2 x double> %0, <2 x double> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_fsor_d_RES
   ret void
@@ -714,8 +714,8 @@ declare <2 x i64> @llvm.mips.fsor.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsne_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fsne_w_ARG1
-  %1 = load <4 x float>* @llvm_mips_fsne_w_ARG2
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsne_w_ARG1
+  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsne_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsne.w(<4 x float> %0, <4 x float> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_fsne_w_RES
   ret void
@@ -736,8 +736,8 @@ declare <4 x i32> @llvm.mips.fsne.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsne_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fsne_d_ARG1
-  %1 = load <2 x double>* @llvm_mips_fsne_d_ARG2
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsne_d_ARG1
+  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsne_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsne.d(<2 x double> %0, <2 x double> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_fsne_d_RES
   ret void
@@ -758,8 +758,8 @@ declare <2 x i64> @llvm.mips.fsne.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsueq_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fsueq_w_ARG1
-  %1 = load <4 x float>* @llvm_mips_fsueq_w_ARG2
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsueq_w_ARG1
+  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsueq_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsueq.w(<4 x float> %0, <4 x float> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_fsueq_w_RES
   ret void
@@ -780,8 +780,8 @@ declare <4 x i32> @llvm.mips.fsueq.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsueq_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fsueq_d_ARG1
-  %1 = load <2 x double>* @llvm_mips_fsueq_d_ARG2
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsueq_d_ARG1
+  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsueq_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsueq.d(<2 x double> %0, <2 x double> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_fsueq_d_RES
   ret void
@@ -802,8 +802,8 @@ declare <2 x i64> @llvm.mips.fsueq.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsult_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fsult_w_ARG1
-  %1 = load <4 x float>* @llvm_mips_fsult_w_ARG2
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsult_w_ARG1
+  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsult_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsult.w(<4 x float> %0, <4 x float> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_fsult_w_RES
   ret void
@@ -824,8 +824,8 @@ declare <4 x i32> @llvm.mips.fsult.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsult_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fsult_d_ARG1
-  %1 = load <2 x double>* @llvm_mips_fsult_d_ARG2
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsult_d_ARG1
+  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsult_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsult.d(<2 x double> %0, <2 x double> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_fsult_d_RES
   ret void
@@ -846,8 +846,8 @@ declare <2 x i64> @llvm.mips.fsult.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsule_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fsule_w_ARG1
-  %1 = load <4 x float>* @llvm_mips_fsule_w_ARG2
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsule_w_ARG1
+  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsule_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsule.w(<4 x float> %0, <4 x float> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_fsule_w_RES
   ret void
@@ -868,8 +868,8 @@ declare <4 x i32> @llvm.mips.fsule.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsule_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fsule_d_ARG1
-  %1 = load <2 x double>* @llvm_mips_fsule_d_ARG2
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsule_d_ARG1
+  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsule_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsule.d(<2 x double> %0, <2 x double> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_fsule_d_RES
   ret void
@@ -890,8 +890,8 @@ declare <2 x i64> @llvm.mips.fsule.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsun_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fsun_w_ARG1
-  %1 = load <4 x float>* @llvm_mips_fsun_w_ARG2
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsun_w_ARG1
+  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsun_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsun.w(<4 x float> %0, <4 x float> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_fsun_w_RES
   ret void
@@ -912,8 +912,8 @@ declare <4 x i32> @llvm.mips.fsun.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsun_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fsun_d_ARG1
-  %1 = load <2 x double>* @llvm_mips_fsun_d_ARG2
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsun_d_ARG1
+  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsun_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsun.d(<2 x double> %0, <2 x double> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_fsun_d_RES
   ret void
@@ -934,8 +934,8 @@ declare <2 x i64> @llvm.mips.fsun.d(<2 x double>, <2 x double>) nounwind
 define void @llvm_mips_fsune_w_test() nounwind {
 entry:
-  %0 = load <4 x float>* @llvm_mips_fsune_w_ARG1
-  %1 = load <4 x float>* @llvm_mips_fsune_w_ARG2
+  %0 = load <4 x float>, <4 x float>* @llvm_mips_fsune_w_ARG1
+  %1 = load <4 x float>, <4 x float>* @llvm_mips_fsune_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.fsune.w(<4 x float> %0, <4 x float> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_fsune_w_RES
   ret void
@@ -956,8 +956,8 @@ declare <4 x i32> @llvm.mips.fsune.w(<4 x float>, <4 x float>) nounwind
 define void @llvm_mips_fsune_d_test() nounwind {
 entry:
-  %0 = load <2 x double>* @llvm_mips_fsune_d_ARG1
-  %1 = load <2 x double>* @llvm_mips_fsune_d_ARG2
+  %0 = load <2 x double>, <2 x double>* @llvm_mips_fsune_d_ARG1
+  %1 = load <2 x double>, <2 x double>* @llvm_mips_fsune_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.fsune.d(<2 x double> %0, <2 x double> %1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_fsune_d_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/3rf_q.ll b/llvm/test/CodeGen/Mips/msa/3rf_q.ll
index f7000ee913a..c8b0a500002 100644
--- a/llvm/test/CodeGen/Mips/msa/3rf_q.ll
+++ b/llvm/test/CodeGen/Mips/msa/3rf_q.ll
@@ -10,8 +10,8 @@
 define void @llvm_mips_mul_q_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_mul_q_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_mul_q_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mul_q_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mul_q_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.mul.q.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_mul_q_h_RES
   ret void
@@ -32,8 +32,8 @@ declare <8 x i16> @llvm.mips.mul.q.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_mul_q_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_mul_q_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_mul_q_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mul_q_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mul_q_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.mul.q.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_mul_q_w_RES
   ret void
@@ -54,8 +54,8 @@ declare <4 x i32> @llvm.mips.mul.q.w(<4 x i32>, <4 x i32>) nounwind
 define void @llvm_mips_mulr_q_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_mulr_q_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_mulr_q_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mulr_q_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_mulr_q_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.mulr.q.h(<8 x i16> %0, <8 x i16> %1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_mulr_q_h_RES
   ret void
@@ -76,8 +76,8 @@ declare <8 x i16> @llvm.mips.mulr.q.h(<8 x i16>, <8 x i16>) nounwind
 define void @llvm_mips_mulr_q_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_mulr_q_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_mulr_q_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mulr_q_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_mulr_q_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.mulr.q.w(<4 x i32> %0, <4 x i32> %1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_mulr_q_w_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/arithmetic.ll b/llvm/test/CodeGen/Mips/msa/arithmetic.ll
index 09ee5023c7b..3ecd0e43589 100644
--- a/llvm/test/CodeGen/Mips/msa/arithmetic.ll
+++ b/llvm/test/CodeGen/Mips/msa/arithmetic.ll
@@ -4,9 +4,9 @@
 define void @add_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
   ; CHECK: add_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>* %b
+  %2 = load <16 x i8>, <16 x i8>* %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = add <16 x i8> %1, %2
   ; CHECK-DAG: addv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -20,9 +20,9 @@ define void @add_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @add_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
   ; CHECK: add_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>* %b
+  %2 = load <8 x i16>, <8 x i16>* %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = add <8 x i16> %1, %2
   ; CHECK-DAG: addv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -36,9 +36,9 @@ define void @add_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @add_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
   ; CHECK: add_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>* %b
+  %2 = load <4 x i32>, <4 x i32>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = add <4 x i32> %1, %2
   ; CHECK-DAG: addv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -52,9 +52,9 @@ define void @add_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @add_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
   ; CHECK: add_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>* %b
+  %2 = load <2 x i64>, <2 x i64>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = add <2 x i64> %1, %2
   ; CHECK-DAG: addv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -68,7 +68,7 @@ define void @add_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @add_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
   ; CHECK: add_v16i8_i:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = add <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -83,7 +83,7 @@ define void @add_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @add_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
   ; CHECK: add_v8i16_i:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = add <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -98,7 +98,7 @@ define void @add_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @add_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
   ; CHECK: add_v4i32_i:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = add <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   ; CHECK-DAG: addvi.w [[R3:\$w[0-9]+]], [[R1]], 1
@@ -112,7 +112,7 @@ define void @add_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @add_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
   ; CHECK: add_v2i64_i:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = add <2 x i64> %1, <i64 1, i64 1>
   ; CHECK-DAG: addvi.d [[R3:\$w[0-9]+]], [[R1]], 1
@@ -126,9 +126,9 @@ define void @add_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @sub_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
   ; CHECK: sub_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>* %b
+  %2 = load <16 x i8>, <16 x i8>* %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = sub <16 x i8> %1, %2
   ; CHECK-DAG: subv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -142,9 +142,9 @@ define void @sub_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @sub_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
   ; CHECK: sub_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>* %b
+  %2 = load <8 x i16>, <8 x i16>* %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = sub <8 x i16> %1, %2
   ; CHECK-DAG: subv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -158,9 +158,9 @@ define void @sub_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @sub_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
   ; CHECK: sub_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>* %b
+  %2 = load <4 x i32>, <4 x i32>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = sub <4 x i32> %1, %2
   ; CHECK-DAG: subv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -174,9 +174,9 @@ define void @sub_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @sub_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
   ; CHECK: sub_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>* %b
+  %2 = load <2 x i64>, <2 x i64>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = sub <2 x i64> %1, %2
   ; CHECK-DAG: subv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -190,7 +190,7 @@ define void @sub_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @sub_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
   ; CHECK: sub_v16i8_i:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = sub <16 x i8> %1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -205,7 +205,7 @@ define void @sub_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @sub_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
   ; CHECK: sub_v8i16_i:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = sub <8 x i16> %1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -220,7 +220,7 @@ define void @sub_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @sub_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
   ; CHECK: sub_v4i32_i:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = sub <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
   ; CHECK-DAG: subvi.w [[R3:\$w[0-9]+]], [[R1]], 1
@@ -234,7 +234,7 @@ define void @sub_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @sub_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
   ; CHECK: sub_v2i64_i:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = sub <2 x i64> %1, <i64 1, i64 1>
   ; CHECK-DAG: subvi.d [[R3:\$w[0-9]+]], [[R1]], 1
@@ -248,9 +248,9 @@ define void @sub_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @mul_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
   ; CHECK: mul_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>* %b
+  %2 = load <16 x i8>, <16 x i8>* %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = mul <16 x i8> %1, %2
   ; CHECK-DAG: mulv.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -264,9 +264,9 @@ define void @mul_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @mul_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
   ; CHECK: mul_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>* %b
+  %2 = load <8 x i16>, <8 x i16>* %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = mul <8 x i16> %1, %2
   ; CHECK-DAG: mulv.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -280,9 +280,9 @@ define void @mul_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @mul_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
   ; CHECK: mul_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>* %b
+  %2 = load <4 x i32>, <4 x i32>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = mul <4 x i32> %1, %2
   ; CHECK-DAG: mulv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -296,9 +296,9 @@ define void @mul_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @mul_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
   ; CHECK: mul_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>* %b
+  %2 = load <2 x i64>, <2 x i64>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = mul <2 x i64> %1, %2
   ; CHECK-DAG: mulv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -313,11 +313,11 @@ define void @maddv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
                          <16 x i8>* %c) nounwind {
   ; CHECK: maddv_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>* %b
+  %2 = load <16 x i8>, <16 x i8>* %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <16 x i8>* %c
+  %3 = load <16 x i8>, <16 x i8>* %c
   ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
   %4 = mul <16 x i8> %2, %3
   %5 = add <16 x i8> %4, %1
@@ -333,11 +333,11 @@ define void @maddv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
                          <8 x i16>* %c) nounwind {
   ; CHECK: maddv_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>* %b
+  %2 = load <8 x i16>, <8 x i16>* %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <8 x i16>* %c
+  %3 = load <8 x i16>, <8 x i16>* %c
   ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
   %4 = mul <8 x i16> %2, %3
   %5 = add <8 x i16> %4, %1
@@ -353,11 +353,11 @@ define void @maddv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
                          <4 x i32>* %c) nounwind {
   ; CHECK: maddv_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>* %b
+  %2 = load <4 x i32>, <4 x i32>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <4 x i32>* %c
+  %3 = load <4 x i32>, <4 x i32>* %c
   ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
   %4 = mul <4 x i32> %2, %3
   %5 = add <4 x i32> %4, %1
@@ -373,11 +373,11 @@ define void @maddv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
                          <2 x i64>* %c) nounwind {
   ; CHECK: maddv_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>* %b
+  %2 = load <2 x i64>, <2 x i64>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <2 x i64>* %c
+  %3 = load <2 x i64>, <2 x i64>* %c
   ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
   %4 = mul <2 x i64> %2, %3
   %5 = add <2 x i64> %4, %1
@@ -393,11 +393,11 @@ define void @msubv_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b,
                          <16 x i8>* %c) nounwind {
   ; CHECK: msubv_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>* %b
+  %2 = load <16 x i8>, <16 x i8>* %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <16 x i8>* %c
+  %3 = load <16 x i8>, <16 x i8>* %c
   ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
   %4 = mul <16 x i8> %2, %3
   %5 = sub <16 x i8> %1, %4
@@ -413,11 +413,11 @@ define void @msubv_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b,
                          <8 x i16>* %c) nounwind {
   ; CHECK: msubv_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>* %b
+  %2 = load <8 x i16>, <8 x i16>* %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <8 x i16>* %c
+  %3 = load <8 x i16>, <8 x i16>* %c
   ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7)
   %4 = mul <8 x i16> %2, %3
   %5 = sub <8 x i16> %1, %4
@@ -433,11 +433,11 @@ define void @msubv_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b,
                          <4 x i32>* %c) nounwind {
   ; CHECK: msubv_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>* %b
+  %2 = load <4 x i32>, <4 x i32>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <4 x i32>* %c
+  %3 = load <4 x i32>, <4 x i32>* %c
   ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
   %4 = mul <4 x i32> %2, %3
   %5 = sub <4 x i32> %1, %4
@@ -453,11 +453,11 @@ define void @msubv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
                          <2 x i64>* %c) nounwind {
   ; CHECK: msubv_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>* %b
+  %2 = load <2 x i64>, <2 x i64>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <2 x i64>* %c
+  %3 = load <2 x i64>, <2 x i64>* %c
   ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
   %4 = mul <2 x i64> %2, %3
   %5 = sub <2 x i64> %1, %4
@@ -472,9 +472,9 @@ define void @msubv_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b,
 define void @div_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
   ; CHECK: div_s_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>* %b
+  %2 = load <16 x i8>, <16 x i8>* %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = sdiv <16 x i8> %1, %2
   ; CHECK-DAG: div_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -488,9 +488,9 @@ define void @div_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @div_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
   ; CHECK: div_s_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>* %b
+  %2 = load <8 x i16>, <8 x i16>* %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = sdiv <8 x i16> %1, %2
   ; CHECK-DAG: div_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -504,9 +504,9 @@ define void @div_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @div_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
   ; CHECK: div_s_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>* %b
+  %2 = load <4 x i32>, <4 x i32>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = sdiv <4 x i32> %1, %2
   ; CHECK-DAG: div_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -520,9 +520,9 @@ define void @div_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @div_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
   ; CHECK: div_s_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>* %b
+  %2 = load <2 x i64>, <2 x i64>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = sdiv <2 x i64> %1, %2
   ; CHECK-DAG: div_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -536,9 +536,9 @@ define void @div_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @div_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
   ; CHECK: div_u_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>* %b
+  %2 = load <16 x i8>, <16 x i8>* %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = udiv <16 x i8> %1, %2
   ; CHECK-DAG: div_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -552,9 +552,9 @@ define void @div_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @div_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
   ; CHECK: div_u_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>* %b
+  %2 = load <8 x i16>, <8 x i16>* %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = udiv <8 x i16> %1, %2
   ; CHECK-DAG: div_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -568,9 +568,9 @@ define void @div_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @div_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
   ; CHECK: div_u_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>* %b
+  %2 = load <4 x i32>, <4 x i32>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = udiv <4 x i32> %1, %2
   ; CHECK-DAG: div_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -584,9 +584,9 @@ define void @div_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @div_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
   ; CHECK: div_u_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>* %b
+  %2 = load <2 x i64>, <2 x i64>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = udiv <2 x i64> %1, %2
   ; CHECK-DAG: div_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -600,9 +600,9 @@ define void @div_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @mod_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
   ; CHECK: mod_s_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>* %b
+  %2 = load <16 x i8>, <16 x i8>* %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = srem <16 x i8> %1, %2
   ; CHECK-DAG: mod_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -616,9 +616,9 @@ define void @mod_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @mod_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
   ; CHECK: mod_s_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>* %b
+  %2 = load <8 x i16>, <8 x i16>* %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = srem <8 x i16> %1, %2
   ; CHECK-DAG: mod_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -632,9 +632,9 @@ define void @mod_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @mod_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
   ; CHECK: mod_s_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>* %b
+  %2 = load <4 x i32>, <4 x i32>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = srem <4 x i32> %1, %2
   ; CHECK-DAG: mod_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -648,9 +648,9 @@ define void @mod_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @mod_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
   ; CHECK: mod_s_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>* %b
+  %2 = load <2 x i64>, <2 x i64>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = srem <2 x i64> %1, %2
   ; CHECK-DAG: mod_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -664,9 +664,9 @@ define void @mod_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @mod_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
   ; CHECK: mod_u_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
   ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>* %b
+  %2 = load <16 x i8>, <16 x i8>* %b
   ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = urem <16 x i8> %1, %2
   ; CHECK-DAG: mod_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -680,9 +680,9 @@ define void @mod_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @mod_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
   ; CHECK: mod_u_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
   ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>* %b
+  %2 = load <8 x i16>, <8 x i16>* %b
   ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = urem <8 x i16> %1, %2
   ; CHECK-DAG: mod_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -696,9 +696,9 @@ define void @mod_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @mod_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
   ; CHECK: mod_u_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>* %b
+  %2 = load <4 x i32>, <4 x i32>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = urem <4 x i32> %1, %2
   ; CHECK-DAG: mod_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -712,9 +712,9 @@ define void @mod_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @mod_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
   ; CHECK: mod_u_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>* %b
+  %2 = load <2 x i64>, <2 x i64>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = urem <2 x i64> %1, %2
   ; CHECK-DAG: mod_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
diff --git a/llvm/test/CodeGen/Mips/msa/arithmetic_float.ll b/llvm/test/CodeGen/Mips/msa/arithmetic_float.ll
index 9aae284fe53..d2ead536804 100644
--- a/llvm/test/CodeGen/Mips/msa/arithmetic_float.ll
+++ b/llvm/test/CodeGen/Mips/msa/arithmetic_float.ll
@@ -4,9 +4,9 @@
 define void @add_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
   ; CHECK: add_v4f32:
-  %1 = load <4 x float>* %a
+  %1 = load <4 x float>, <4 x float>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>* %b
+  %2 = load <4 x float>, <4 x float>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fadd <4 x float> %1, %2
   ; CHECK-DAG: fadd.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -20,9 +20,9 @@ define void @add_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
 define void @add_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
   ; CHECK: add_v2f64:
-  %1 = load <2 x double>* %a
+  %1 = load <2 x double>, <2 x double>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>* %b
+  %2 = load <2 x double>, <2 x double>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fadd <2 x double> %1, %2
   ; CHECK-DAG: fadd.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -36,9 +36,9 @@ define void @add_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nou
 define void @sub_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
   ; CHECK: sub_v4f32:
-  %1 = load <4 x float>* %a
+  %1 = load <4 x float>, <4 x float>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>* %b
+  %2 = load <4 x float>, <4 x float>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fsub <4 x float> %1, %2
   ; CHECK-DAG: fsub.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -52,9 +52,9 @@ define void @sub_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
 define void @sub_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
   ; CHECK: sub_v2f64:
-  %1 = load <2 x double>* %a
+  %1 = load <2 x double>, <2 x double>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>* %b
+  %2 = load <2 x double>, <2 x double>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fsub <2 x double> %1, %2
   ; CHECK-DAG: fsub.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -68,9 +68,9 @@ define void @sub_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nou
 define void @mul_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
   ; CHECK: mul_v4f32:
-  %1 = load <4 x float>* %a
+  %1 = load <4 x float>, <4 x float>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>* %b
+  %2 = load <4 x float>, <4 x float>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fmul <4 x float> %1, %2
   ; CHECK-DAG: fmul.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -84,9 +84,9 @@ define void @mul_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
 define void @mul_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
   ; CHECK: mul_v2f64:
-  %1 = load <2 x double>* %a
+  %1 = load <2 x double>, <2 x double>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>* %b
+  %2 = load <2 x double>, <2 x double>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fmul <2 x double> %1, %2
   ; CHECK-DAG: fmul.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -101,11 +101,11 @@ define void @fma_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
                        <4 x float>* %c) nounwind {
   ; CHECK: fma_v4f32:
-  %1 = load <4 x float>* %a
+  %1 = load <4 x float>, <4 x float>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>* %b
+  %2 = load <4 x float>, <4 x float>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <4 x float>* %c
+  %3 = load <4 x float>, <4 x float>* %c
   ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
   %4 = tail call <4 x float> @llvm.fma.v4f32 (<4 x float> %1, <4 x float> %2, <4 x float> %3)
@@ -121,11 +121,11 @@ define void @fma_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
                        <2 x double>* %c) nounwind {
   ; CHECK: fma_v2f64:
-  %1 = load <2 x double>* %a
+  %1 = load <2 x double>, <2 x double>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>* %b
+  %2 = load <2 x double>, <2 x double>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <2 x double>* %c
+  %3 = load <2 x double>, <2 x double>* %c
   ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
   %4 = tail call <2 x double> @llvm.fma.v2f64 (<2 x double> %1, <2 x double> %2, <2 x double> %3)
@@ -141,11 +141,11 @@ define void @fmsub_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b,
                          <4 x float>* %c) nounwind {
   ; CHECK: fmsub_v4f32:
-  %1 = load <4 x float>* %a
+  %1 = load <4 x float>, <4 x float>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>* %b
+  %2 = load <4 x float>, <4 x float>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <4 x float>* %c
+  %3 = load <4 x float>, <4 x float>* %c
   ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7)
   %4 = fmul <4 x float> %2, %3
   %5 = fsub <4 x float> %1, %4
@@ -161,11 +161,11 @@ define void @fmsub_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
                          <2 x double>* %c) nounwind {
   ; CHECK: fmsub_v2f64:
-  %1 = load <2 x double>* %a
+  %1 = load <2 x double>, <2 x double>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>* %b
+  %2 = load <2 x double>, <2 x double>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
-  %3 = load <2 x double>* %c
+  %3 = load <2 x double>, <2 x double>* %c
   ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7)
   %4 = fmul <2 x double> %2, %3
   %5 = fsub <2 x double> %1, %4
@@ -180,9 +180,9 @@ define void @fmsub_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b,
 define void @fdiv_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
   ; CHECK: fdiv_v4f32:
-  %1 = load <4 x float>* %a
+  %1 = load <4 x float>, <4 x float>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>* %b
+  %2 = load <4 x float>, <4 x float>* %b
   ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fdiv <4 x float> %1, %2
   ; CHECK-DAG: fdiv.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -196,9 +196,9 @@ define void @fdiv_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounw
 define void @fdiv_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
   ; CHECK: fdiv_v2f64:
-  %1 = load <2 x double>* %a
+  %1 = load <2 x double>, <2 x double>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x double>* %b
+  %2 = load <2 x double>, <2 x double>* %b
   ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = fdiv <2 x double> %1, %2
   ; CHECK-DAG: fdiv.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -212,7 +212,7 @@ define void @fdiv_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) no
 define void @fabs_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
   ; CHECK: fabs_v4f32:
-  %1 = load <4 x float>* %a
+  %1 = load <4 x float>, <4 x float>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <4 x float> @llvm.fabs.v4f32 (<4 x float> %1)
   ; CHECK-DAG: fmax_a.w [[R3:\$w[0-9]+]], [[R1]], [[R1]]
@@ -226,7 +226,7 @@ define void @fabs_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
 define void @fabs_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
   ; CHECK: fabs_v2f64:
-  %1 = load <2 x double>* %a
+  %1 = load <2 x double>, <2 x double>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <2 x double> @llvm.fabs.v2f64 (<2 x double> %1)
   ; CHECK-DAG: fmax_a.d [[R3:\$w[0-9]+]], [[R1]], [[R1]]
@@ -240,7 +240,7 @@ define void @fabs_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
 define void @fexp2_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
   ; CHECK: fexp2_v4f32:
-  %1 = load <4 x float>* %a
+  %1 = load <4 x float>, <4 x float>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
   ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
@@ -256,7 +256,7 @@ define void @fexp2_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
 define void @fexp2_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
   ; CHECK: fexp2_v2f64:
-  %1 = load <2 x double>* %a
+  %1 = load <2 x double>, <2 x double>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
   ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
@@ -272,7 +272,7 @@ define void @fexp2_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
 define void @fexp2_v4f32_2(<4 x float>* %c, <4 x float>* %a) nounwind {
   ; CHECK: fexp2_v4f32_2:
-  %1 = load <4 x float>* %a
+  %1 = load <4 x float>, <4 x float>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <4 x float> @llvm.exp2.v4f32 (<4 x float> %1)
   %3 = fmul <4 x float> <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>, %2
@@ -289,7 +289,7 @@ define void @fexp2_v4f32_2(<4 x float>* %c, <4 x float>* %a) nounwind {
 define void @fexp2_v2f64_2(<2 x double>* %c, <2 x double>* %a) nounwind {
   ; CHECK: fexp2_v2f64_2:
-  %1 = load <2 x double>* %a
+  %1 = load <2 x double>, <2 x double>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <2 x double> @llvm.exp2.v2f64 (<2 x double> %1)
   %3 = fmul <2 x double> <double 2.000000e+00, double 2.000000e+00>, %2
@@ -306,7 +306,7 @@ define void @fexp2_v2f64_2(<2 x double>* %c, <2 x double>* %a) nounwind {
 define void @fsqrt_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
   ; CHECK: fsqrt_v4f32:
-  %1 = load <4 x float>* %a
+  %1 = load <4 x float>, <4 x float>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <4 x float> @llvm.sqrt.v4f32 (<4 x float> %1)
   ; CHECK-DAG: fsqrt.w [[R3:\$w[0-9]+]], [[R1]]
@@ -320,7 +320,7 @@ define void @fsqrt_v4f32(<4 x float>* %c, <4 x float>* %a) nounwind {
 define void @fsqrt_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
   ; CHECK: fsqrt_v2f64:
-  %1 = load <2 x double>* %a
+  %1 = load <2 x double>, <2 x double>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = tail call <2 x double> @llvm.sqrt.v2f64 (<2 x double> %1)
   ; CHECK-DAG: fsqrt.d [[R3:\$w[0-9]+]], [[R1]]
@@ -334,7 +334,7 @@ define void @fsqrt_v2f64(<2 x double>* %c, <2 x double>* %a) nounwind {
 define void @ffint_u_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
   ; CHECK: ffint_u_v4f32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = uitofp <4 x i32> %1 to <4 x float>
   ; CHECK-DAG: ffint_u.w [[R3:\$w[0-9]+]], [[R1]]
@@ -348,7 +348,7 @@ define void @ffint_u_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
 define void @ffint_u_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
   ; CHECK: ffint_u_v2f64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = uitofp <2 x i64> %1 to <2 x double>
   ; CHECK-DAG: ffint_u.d [[R3:\$w[0-9]+]], [[R1]]
@@ -362,7 +362,7 @@ define void @ffint_u_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
 define void @ffint_s_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
   ; CHECK: ffint_s_v4f32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = sitofp <4 x i32> %1 to <4 x float>
   ; CHECK-DAG: ffint_s.w [[R3:\$w[0-9]+]], [[R1]]
@@ -376,7 +376,7 @@ define void @ffint_s_v4f32(<4 x float>* %c, <4 x i32>* %a) nounwind {
 define void @ffint_s_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
   ; CHECK: ffint_s_v2f64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = sitofp <2 x i64> %1 to <2 x double>
   ; CHECK-DAG: ffint_s.d [[R3:\$w[0-9]+]], [[R1]]
@@ -390,7 +390,7 @@ define void @ffint_s_v2f64(<2 x double>* %c, <2 x i64>* %a) nounwind {
 define void @ftrunc_u_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
   ; CHECK: ftrunc_u_v4f32:
-  %1 = load <4 x float>* %a
+  %1 = load <4 x float>, <4 x float>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = fptoui <4 x float> %1 to <4 x i32>
   ; CHECK-DAG: ftrunc_u.w [[R3:\$w[0-9]+]], [[R1]]
@@ -404,7 +404,7 @@ define void @ftrunc_u_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
 define void @ftrunc_u_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
   ; CHECK: ftrunc_u_v2f64:
-  %1 = load <2 x double>* %a
+  %1 = load <2 x double>, <2 x double>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = fptoui <2 x double> %1 to <2 x i64>
   ; CHECK-DAG: ftrunc_u.d [[R3:\$w[0-9]+]], [[R1]]
@@ -418,7 +418,7 @@ define void @ftrunc_u_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
 define void @ftrunc_s_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
   ; CHECK: ftrunc_s_v4f32:
-  %1 = load <4 x float>* %a
+  %1 = load <4 x float>, <4 x float>* %a
   ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = fptosi <4 x float> %1 to <4 x i32>
   ; CHECK-DAG: ftrunc_s.w [[R3:\$w[0-9]+]], [[R1]]
@@ -432,7 +432,7 @@ define void @ftrunc_s_v4f32(<4 x i32>* %c, <4 x float>* %a) nounwind {
 define void @ftrunc_s_v2f64(<2 x i64>* %c, <2 x double>* %a) nounwind {
   ; CHECK: ftrunc_s_v2f64:
-  %1 = load <2 x double>* %a
+  %1 = load <2 x double>, <2 x double>* %a
   ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = fptosi <2 x double> %1 to <2 x i64>
   ; CHECK-DAG: ftrunc_s.d [[R3:\$w[0-9]+]], [[R1]]
diff --git a/llvm/test/CodeGen/Mips/msa/basic_operations.ll b/llvm/test/CodeGen/Mips/msa/basic_operations.ll
index dbdf42be49c..97525be0491 100644
--- a/llvm/test/CodeGen/Mips/msa/basic_operations.ll
+++ b/llvm/test/CodeGen/Mips/msa/basic_operations.ll
@@ -258,7 +258,7 @@ define void @nonconst_v2i64(i64 %a, i64 %b) nounwind {
 define i32 @extract_sext_v16i8() nounwind {
   ; MIPS32-AE-LABEL: extract_sext_v16i8:
-  %1 = load <16 x i8>* @v16i8
+  %1 = load <16 x i8>, <16 x i8>* @v16i8
   ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
   %2 = add <16 x i8> %1, %1
@@ -277,7 +277,7 @@ define i32 @extract_sext_v16i8() nounwind {
 define i32 @extract_sext_v8i16() nounwind {
   ; MIPS32-AE-LABEL: extract_sext_v8i16:
-  %1 = load <8 x i16>* @v8i16
+  %1 = load <8 x i16>, <8 x i16>* @v8i16
   ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
   %2 = add <8 x i16> %1, %1
@@ -296,7 +296,7 @@ define i32 @extract_sext_v8i16() nounwind {
 define i32 @extract_sext_v4i32() nounwind {
   ; MIPS32-AE-LABEL: extract_sext_v4i32:
-  %1 = load <4 x i32>* @v4i32
+  %1 = load <4 x i32>, <4 x i32>* @v4i32
   ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
   %2 = add <4 x i32> %1, %1
@@ -312,7 +312,7 @@ define i32 @extract_sext_v4i32() nounwind {
 define i64 @extract_sext_v2i64() nounwind {
   ; MIPS32-AE-LABEL: extract_sext_v2i64:
-  %1 = load <2 x i64>* @v2i64
+  %1 = load <2 x i64>, <2 x i64>* @v2i64
   ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]],
   %2 = add <2 x i64> %1, %1
@@ -331,7 +331,7 @@ define i64 @extract_sext_v2i64() nounwind {
 define i32 @extract_zext_v16i8() nounwind {
   ; MIPS32-AE-LABEL: extract_zext_v16i8:
-  %1 = load <16 x i8>* @v16i8
+  %1 = load <16 x i8>, <16 x i8>* @v16i8
   ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
   %2 = add <16 x i8> %1, %1
@@ -349,7 +349,7 @@ define i32 @extract_zext_v16i8() nounwind {
 define i32 @extract_zext_v8i16() nounwind {
   ; MIPS32-AE-LABEL: extract_zext_v8i16:
-  %1 = load <8 x i16>* @v8i16
+  %1 = load <8 x i16>, <8 x i16>* @v8i16
   ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
   %2 = add <8 x i16> %1, %1
@@ -367,7 +367,7 @@ define i32 @extract_zext_v8i16() nounwind {
 define i32 @extract_zext_v4i32() nounwind {
   ; MIPS32-AE-LABEL: extract_zext_v4i32:
-  %1 = load <4 x i32>* @v4i32
+  %1 = load <4 x i32>, <4 x i32>* @v4i32
   ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
   %2 = add <4 x i32> %1, %1
@@ -383,7 +383,7 @@ define i32 @extract_zext_v4i32() nounwind {
 define i64 @extract_zext_v2i64() nounwind {
   ; MIPS32-AE-LABEL: extract_zext_v2i64:
-  %1 = load <2 x i64>* @v2i64
+  %1 = load <2 x i64>, <2 x i64>* @v2i64
   ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]],
   %2 = add <2 x i64> %1, %1
@@ -401,14 +401,14 @@ define i64 @extract_zext_v2i64() nounwind {
 define i32 @extract_sext_v16i8_vidx() nounwind {
   ; MIPS32-AE-LABEL: extract_sext_v16i8_vidx:
-  %1 = load <16 x i8>* @v16i8
+  %1 = load <16 x i8>, <16 x i8>* @v16i8
   ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v16i8)(
   ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]], 0([[PTR_V]])
   %2 = add <16 x i8> %1, %1
   ; MIPS32-AE-DAG: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
-  %3 = load i32* @i32
+  %3 = load i32, i32* @i32
   ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -425,14 +425,14 @@ define i32 @extract_sext_v16i8_vidx() nounwind {
 define i32 @extract_sext_v8i16_vidx() nounwind {
   ; MIPS32-AE-LABEL: extract_sext_v8i16_vidx:
-  %1 = load <8 x i16>* @v8i16
+  %1 = load <8 x i16>, <8 x i16>* @v8i16
   ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v8i16)(
   ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]], 0([[PTR_V]])
   %2 = add <8 x i16> %1, %1
   ; MIPS32-AE-DAG: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
-  %3 = load i32* @i32
+  %3 = load i32, i32* @i32
   ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -449,14 +449,14 @@ define i32 @extract_sext_v8i16_vidx() nounwind {
 define i32 @extract_sext_v4i32_vidx() nounwind {
   ; MIPS32-AE-LABEL: extract_sext_v4i32_vidx:
-  %1 = load <4 x i32>* @v4i32
+  %1 = load <4 x i32>, <4 x i32>* @v4i32
   ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4i32)(
   ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
   %2 = add <4 x i32> %1, %1
   ; MIPS32-AE-DAG: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
-  %3 = load i32* @i32
+  %3 = load i32, i32* @i32
   ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -472,14 +472,14 @@ define i32 @extract_sext_v4i32_vidx() nounwind {
 define i64 @extract_sext_v2i64_vidx() nounwind {
   ; MIPS32-AE-LABEL: extract_sext_v2i64_vidx:
-  %1 = load <2 x i64>* @v2i64
+  %1 = load <2 x i64>, <2 x i64>* @v2i64
   ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2i64)(
   ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
   %2 = add <2 x i64> %1, %1
   ; MIPS32-AE-DAG: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
-  %3 = load i32* @i32
+  %3 = load i32, i32* @i32
   ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -497,14 +497,14 @@ define i64 @extract_sext_v2i64_vidx() nounwind {
 define i32 @extract_zext_v16i8_vidx() nounwind {
   ; MIPS32-AE-LABEL: extract_zext_v16i8_vidx:
-  %1 = load <16 x i8>* @v16i8
+  %1 = load <16 x i8>, <16 x i8>* @v16i8
   ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v16i8)(
   ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]], 0([[PTR_V]])
   %2 = add <16 x i8> %1, %1
   ; MIPS32-AE-DAG: addv.b [[R2:\$w[0-9]+]], [[R1]], [[R1]]
-  %3 = load i32* @i32
+  %3 = load i32, i32* @i32
   ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -521,14 +521,14 @@ define i32 @extract_zext_v16i8_vidx() nounwind {
 define i32 @extract_zext_v8i16_vidx() nounwind {
   ; MIPS32-AE-LABEL: extract_zext_v8i16_vidx:
-  %1 = load <8 x i16>* @v8i16
+  %1 = load <8 x i16>, <8 x i16>* @v8i16
   ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v8i16)(
   ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]], 0([[PTR_V]])
   %2 = add <8 x i16> %1, %1
   ; MIPS32-AE-DAG: addv.h [[R2:\$w[0-9]+]], [[R1]], [[R1]]
-  %3 = load i32* @i32
+  %3 = load i32, i32* @i32
   ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -545,14 +545,14 @@ define i32 @extract_zext_v8i16_vidx() nounwind {
 define i32 @extract_zext_v4i32_vidx() nounwind {
   ; MIPS32-AE-LABEL: extract_zext_v4i32_vidx:
-  %1 = load <4 x i32>* @v4i32
+  %1 = load <4 x i32>, <4 x i32>* @v4i32
   ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4i32)(
   ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
   %2 = add <4 x i32> %1, %1
   ; MIPS32-AE-DAG: addv.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
-  %3 = load i32* @i32
+  %3 = load i32, i32* @i32
   ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -568,14 +568,14 @@ define i32 @extract_zext_v4i32_vidx() nounwind {
 define i64 @extract_zext_v2i64_vidx() nounwind {
   ; MIPS32-AE-LABEL: extract_zext_v2i64_vidx:
-  %1 = load <2 x i64>* @v2i64
+  %1 = load <2 x i64>, <2 x i64>* @v2i64
   ; MIPS32-AE-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2i64)(
   ; MIPS32-AE-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
   %2 = add <2 x i64> %1, %1
   ; MIPS32-AE-DAG: addv.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
-  %3 = load i32* @i32
+  %3 = load i32, i32* @i32
   ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -593,7 +593,7 @@ define i64 @extract_zext_v2i64_vidx() nounwind {
 define void @insert_v16i8(i32 %a) nounwind {
   ; MIPS32-AE-LABEL: insert_v16i8:
-  %1 = load <16 x i8>* @v16i8
+  %1 = load <16 x i8>, <16 x i8>* @v16i8
   ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
   %a2 = trunc i32 %a to i8
@@ -615,7 +615,7 @@ define void @insert_v16i8(i32 %a) nounwind {
 define void @insert_v8i16(i32 %a) nounwind {
   ; MIPS32-AE-LABEL: insert_v8i16:
-  %1 = load <8 x i16>* @v8i16
+  %1 = load <8 x i16>, <8 x i16>* @v8i16
   ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
   %a2 = trunc i32 %a to i16
@@ -637,7 +637,7 @@ define void @insert_v8i16(i32 %a) nounwind {
 define void @insert_v4i32(i32 %a) nounwind {
   ; MIPS32-AE-LABEL: insert_v4i32:
-  %1 = load <4 x i32>* @v4i32
+  %1 = load <4 x i32>, <4 x i32>* @v4i32
   ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
   ; MIPS32-AE-NOT: andi
@@ -656,7 +656,7 @@ define void @insert_v4i32(i32 %a) nounwind {
 define void @insert_v2i64(i64 %a) nounwind {
   ; MIPS32-AE-LABEL: insert_v2i64:
-  %1 = load <2 x i64>* @v2i64
+  %1 = load <2 x i64>, <2 x i64>* @v2i64
   ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
   ; MIPS32-AE-NOT: andi
@@ -676,10 +676,10 @@ define void @insert_v2i64(i64 %a) nounwind {
 define void @insert_v16i8_vidx(i32 %a) nounwind {
   ; MIPS32-AE: insert_v16i8_vidx:
-  %1 = load <16 x i8>* @v16i8
+  %1 = load <16 x i8>, <16 x i8>* @v16i8
   ; MIPS32-AE-DAG: ld.b [[R1:\$w[0-9]+]],
-  %2 = load i32* @i32
+  %2 = load i32, i32* @i32
   ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -705,10 +705,10 @@ define void @insert_v16i8_vidx(i32 %a) nounwind {
 define void @insert_v8i16_vidx(i32 %a) nounwind {
   ; MIPS32-AE: insert_v8i16_vidx:
-  %1 = load <8 x i16>* @v8i16
+  %1 = load <8 x i16>, <8 x i16>* @v8i16
   ; MIPS32-AE-DAG: ld.h [[R1:\$w[0-9]+]],
-  %2 = load i32* @i32
+  %2 = load i32, i32* @i32
   ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -735,10 +735,10 @@ define void @insert_v8i16_vidx(i32 %a) nounwind {
 define void @insert_v4i32_vidx(i32 %a) nounwind {
   ; MIPS32-AE: insert_v4i32_vidx:
-  %1 = load <4 x i32>* @v4i32
+  %1 = load <4 x i32>, <4 x i32>* @v4i32
   ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
-  %2 = load i32* @i32
+  %2 = load i32, i32* @i32
   ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -762,10 +762,10 @@ define void @insert_v4i32_vidx(i32 %a) nounwind {
 define void @insert_v2i64_vidx(i64 %a) nounwind {
   ; MIPS32-AE: insert_v2i64_vidx:
-  %1 = load <2 x i64>* @v2i64
+  %1 = load <2 x i64>, <2 x i64>* @v2i64
   ; MIPS32-AE-DAG: ld.w [[R1:\$w[0-9]+]],
-  %2 = load i32* @i32
+  %2 = load i32, i32* @i32
   ; MIPS32-AE-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-AE-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
diff --git a/llvm/test/CodeGen/Mips/msa/basic_operations_float.ll b/llvm/test/CodeGen/Mips/msa/basic_operations_float.ll
index a0c9d29e231..53c1f11f3ad 100644
--- a/llvm/test/CodeGen/Mips/msa/basic_operations_float.ll
+++ b/llvm/test/CodeGen/Mips/msa/basic_operations_float.ll
@@ -75,7 +75,7 @@ define void @const_v2f64() nounwind {
 define void @nonconst_v4f32() nounwind {
   ; MIPS32-LABEL: nonconst_v4f32:
-  %1 = load float *@f32
+  %1 = load float , float *@f32
   %2 = insertelement <4 x float> undef, float %1, i32 0
   %3 = insertelement <4 x float> %2, float %1, i32 1
   %4 = insertelement <4 x float> %3, float %1, i32 2
@@ -91,7 +91,7 @@ define void @nonconst_v4f32() nounwind {
 define void @nonconst_v2f64() nounwind {
   ; MIPS32-LABEL: nonconst_v2f64:
-  %1 = load double *@f64
+  %1 = load double , double *@f64
   %2 = insertelement <2 x double> undef, double %1, i32 0
   %3 = insertelement <2 x double> %2, double %1, i32 1
   store volatile <2 x double> %3, <2 x double>*@v2f64
@@ -105,7 +105,7 @@ define void @nonconst_v2f64() nounwind {
 define float @extract_v4f32() nounwind {
   ; MIPS32-LABEL: extract_v4f32:
-  %1 = load <4 x float>* @v4f32
+  %1 = load <4 x float>, <4 x float>* @v4f32
   ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
   %2 = fadd <4 x float> %1, %1
@@ -123,7 +123,7 @@ define float @extract_v4f32() nounwind {
 define float @extract_v4f32_elt0() nounwind {
   ; MIPS32-LABEL: extract_v4f32_elt0:
-  %1 = load <4 x float>* @v4f32
+  %1 = load <4 x float>, <4 x float>* @v4f32
   ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
   %2 = fadd <4 x float> %1, %1
@@ -141,7 +141,7 @@ define float @extract_v4f32_elt0() nounwind {
 define float @extract_v4f32_elt2() nounwind {
   ; MIPS32-LABEL: extract_v4f32_elt2:
-  %1 = load <4 x float>* @v4f32
+  %1 = load <4 x float>, <4 x float>* @v4f32
   ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
   %2 = fadd <4 x float> %1, %1
@@ -159,14 +159,14 @@ define float @extract_v4f32_elt2() nounwind {
 define float @extract_v4f32_vidx() nounwind {
   ; MIPS32-LABEL: extract_v4f32_vidx:
-  %1 = load <4 x float>* @v4f32
+  %1 = load <4 x float>, <4 x float>* @v4f32
   ; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4f32)(
   ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
   %2 = fadd <4 x float> %1, %1
   ; MIPS32-DAG: fadd.w [[R2:\$w[0-9]+]], [[R1]], [[R1]]
-  %3 = load i32* @i32
+  %3 = load i32, i32* @i32
   ; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -180,7 +180,7 @@ define float @extract_v4f32_vidx() nounwind {
 define double @extract_v2f64() nounwind {
   ; MIPS32-LABEL: extract_v2f64:
-  %1 = load <2 x double>* @v2f64
+  %1 = load <2 x double>, <2 x double>* @v2f64
   ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
   %2 = fadd <2 x double> %1, %1
@@ -203,7 +203,7 @@ define double @extract_v2f64() nounwind {
 define double @extract_v2f64_elt0() nounwind {
   ; MIPS32-LABEL: extract_v2f64_elt0:
-  %1 = load <2 x double>* @v2f64
+  %1 = load <2 x double>, <2 x double>* @v2f64
   ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
   %2 = fadd <2 x double> %1, %1
@@ -224,14 +224,14 @@ define double @extract_v2f64_elt0() nounwind {
 define double @extract_v2f64_vidx() nounwind {
   ; MIPS32-LABEL: extract_v2f64_vidx:
-  %1 = load <2 x double>* @v2f64
+  %1 = load <2 x double>, <2 x double>* @v2f64
   ; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2f64)(
   ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
   %2 = fadd <2 x double> %1, %1
   ; MIPS32-DAG: fadd.d [[R2:\$w[0-9]+]], [[R1]], [[R1]]
-  %3 = load i32* @i32
+  %3 = load i32, i32* @i32
   ; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -245,7 +245,7 @@ define double @extract_v2f64_vidx() nounwind {
 define void @insert_v4f32(float %a) nounwind {
   ; MIPS32-LABEL: insert_v4f32:
-  %1 = load <4 x float>* @v4f32
+  %1 = load <4 x float>, <4 x float>* @v4f32
   ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]],
   %2 = insertelement <4 x float> %1, float %a, i32 1
@@ -262,7 +262,7 @@ define void @insert_v4f32(float %a) nounwind {
 define void @insert_v2f64(double %a) nounwind {
   ; MIPS32-LABEL: insert_v2f64:
-  %1 = load <2 x double>* @v2f64
+  %1 = load <2 x double>, <2 x double>* @v2f64
   ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]],
   %2 = insertelement <2 x double> %1, double %a, i32 1
@@ -279,11 +279,11 @@ define void @insert_v2f64(double %a) nounwind {
 define void @insert_v4f32_vidx(float %a) nounwind {
   ; MIPS32-LABEL: insert_v4f32_vidx:
-  %1 = load <4 x float>* @v4f32
+  %1 = load <4 x float>, <4 x float>* @v4f32
   ; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v4f32)(
   ; MIPS32-DAG: ld.w [[R1:\$w[0-9]+]], 0([[PTR_V]])
-  %2 = load i32* @i32
+  %2 = load i32, i32* @i32
   ; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
@@ -305,11 +305,11 @@ define void @insert_v4f32_vidx(float %a) nounwind {
 define void @insert_v2f64_vidx(double %a) nounwind {
   ; MIPS32-LABEL: insert_v2f64_vidx:
-  %1 = load <2 x double>* @v2f64
+  %1 = load <2 x double>, <2 x double>* @v2f64
   ; MIPS32-DAG: lw [[PTR_V:\$[0-9]+]], %got(v2f64)(
   ; MIPS32-DAG: ld.d [[R1:\$w[0-9]+]], 0([[PTR_V]])
-  %2 = load i32* @i32
+  %2 = load i32, i32* @i32
   ; MIPS32-DAG: lw [[PTR_I:\$[0-9]+]], %got(i32)(
   ; MIPS32-DAG: lw [[IDX:\$[0-9]+]], 0([[PTR_I]])
diff --git a/llvm/test/CodeGen/Mips/msa/bit.ll b/llvm/test/CodeGen/Mips/msa/bit.ll
index 59ddbe17a33..f0057307bbf 100644
--- a/llvm/test/CodeGen/Mips/msa/bit.ll
+++ b/llvm/test/CodeGen/Mips/msa/bit.ll
@@ -8,7 +8,7 @@
 define void @llvm_mips_sat_s_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_sat_s_b_ARG1
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sat_s_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.sat.s.b(<16 x i8> %0, i32 7)
   store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_s_b_RES
   ret void
@@ -27,7 +27,7 @@ declare <16 x i8> @llvm.mips.sat.s.b(<16 x i8>, i32) nounwind
 define void @llvm_mips_sat_s_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_sat_s_h_ARG1
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sat_s_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.sat.s.h(<8 x i16> %0, i32 7)
   store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_s_h_RES
   ret void
@@ -46,7 +46,7 @@ declare <8 x i16> @llvm.mips.sat.s.h(<8 x i16>, i32) nounwind
 define void @llvm_mips_sat_s_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_sat_s_w_ARG1
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sat_s_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.sat.s.w(<4 x i32> %0, i32 7)
   store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_s_w_RES
   ret void
@@ -65,7 +65,7 @@ declare <4 x i32> @llvm.mips.sat.s.w(<4 x i32>, i32) nounwind
 define void @llvm_mips_sat_s_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_sat_s_d_ARG1
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sat_s_d_ARG1
   %1 = tail call <2 x i64> @llvm.mips.sat.s.d(<2 x i64> %0, i32 7)
   store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_s_d_RES
   ret void
@@ -84,7 +84,7 @@ declare <2 x i64> @llvm.mips.sat.s.d(<2 x i64>, i32) nounwind
 define void @llvm_mips_sat_u_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_sat_u_b_ARG1
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sat_u_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.sat.u.b(<16 x i8> %0, i32 7)
   store <16 x i8> %1, <16 x i8>* @llvm_mips_sat_u_b_RES
   ret void
@@ -103,7 +103,7 @@ declare <16 x i8> @llvm.mips.sat.u.b(<16 x i8>, i32) nounwind
 define void @llvm_mips_sat_u_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_sat_u_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sat_u_h_ARG1
 %1 = tail call <8 x i16> @llvm.mips.sat.u.h(<8 x i16> %0, i32 7)
 store <8 x i16> %1, <8 x i16>* @llvm_mips_sat_u_h_RES
 ret void
@@ -122,7 +122,7 @@ declare <8 x i16> @llvm.mips.sat.u.h(<8 x i16>, i32) nounwind
 define void @llvm_mips_sat_u_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_sat_u_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sat_u_w_ARG1
 %1 = tail call <4 x i32> @llvm.mips.sat.u.w(<4 x i32> %0, i32 7)
 store <4 x i32> %1, <4 x i32>* @llvm_mips_sat_u_w_RES
 ret void
@@ -141,7 +141,7 @@ declare <4 x i32> @llvm.mips.sat.u.w(<4 x i32>, i32) nounwind
 define void @llvm_mips_sat_u_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_sat_u_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sat_u_d_ARG1
 %1 = tail call <2 x i64> @llvm.mips.sat.u.d(<2 x i64> %0, i32 7)
 store <2 x i64> %1, <2 x i64>* @llvm_mips_sat_u_d_RES
 ret void
@@ -160,7 +160,7 @@ declare <2 x i64> @llvm.mips.sat.u.d(<2 x i64>, i32) nounwind
 define void @llvm_mips_slli_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_slli_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_slli_b_ARG1
 %1 = tail call <16 x i8> @llvm.mips.slli.b(<16 x i8> %0, i32 7)
 store <16 x i8> %1, <16 x i8>* @llvm_mips_slli_b_RES
 ret void
@@ -179,7 +179,7 @@ declare <16 x i8> @llvm.mips.slli.b(<16 x i8>, i32) nounwind
 define void @llvm_mips_slli_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_slli_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_slli_h_ARG1
 %1 = tail call <8 x i16> @llvm.mips.slli.h(<8 x i16> %0, i32 7)
 store <8 x i16> %1, <8 x i16>* @llvm_mips_slli_h_RES
 ret void
@@ -198,7 +198,7 @@ declare <8 x i16> @llvm.mips.slli.h(<8 x i16>, i32) nounwind
 define void @llvm_mips_slli_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_slli_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_slli_w_ARG1
 %1 = tail call <4 x i32> @llvm.mips.slli.w(<4 x i32> %0, i32 7)
 store <4 x i32> %1, <4 x i32>* @llvm_mips_slli_w_RES
 ret void
@@ -217,7 +217,7 @@ declare <4 x i32> @llvm.mips.slli.w(<4 x i32>, i32) nounwind
 define void @llvm_mips_slli_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_slli_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_slli_d_ARG1
 %1 = tail call <2 x i64> @llvm.mips.slli.d(<2 x i64> %0, i32 7)
 store <2 x i64> %1, <2 x i64>* @llvm_mips_slli_d_RES
 ret void
@@ -236,7 +236,7 @@ declare <2 x i64> @llvm.mips.slli.d(<2 x i64>, i32) nounwind
 define void @llvm_mips_srai_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_srai_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srai_b_ARG1
 %1 = tail call <16 x i8> @llvm.mips.srai.b(<16 x i8> %0, i32 7)
 store <16 x i8> %1, <16 x i8>* @llvm_mips_srai_b_RES
 ret void
@@ -255,7 +255,7 @@ declare <16 x i8> @llvm.mips.srai.b(<16 x i8>, i32) nounwind
 define void @llvm_mips_srai_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_srai_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srai_h_ARG1
 %1 = tail call <8 x i16> @llvm.mips.srai.h(<8 x i16> %0, i32 7)
 store <8 x i16> %1, <8 x i16>* @llvm_mips_srai_h_RES
 ret void
@@ -274,7 +274,7 @@ declare <8 x i16> @llvm.mips.srai.h(<8 x i16>, i32) nounwind
 define void @llvm_mips_srai_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_srai_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srai_w_ARG1
 %1 = tail call <4 x i32> @llvm.mips.srai.w(<4 x i32> %0, i32 7)
 store <4 x i32> %1, <4 x i32>* @llvm_mips_srai_w_RES
 ret void
@@ -293,7 +293,7 @@ declare <4 x i32> @llvm.mips.srai.w(<4 x i32>, i32) nounwind
 define void @llvm_mips_srai_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_srai_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srai_d_ARG1
 %1 = tail call <2 x i64> @llvm.mips.srai.d(<2 x i64> %0, i32 7)
 store <2 x i64> %1, <2 x i64>* @llvm_mips_srai_d_RES
 ret void
@@ -312,7 +312,7 @@ declare <2 x i64> @llvm.mips.srai.d(<2 x i64>, i32) nounwind
 define void @llvm_mips_srari_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_srari_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srari_b_ARG1
 %1 = tail call <16 x i8> @llvm.mips.srari.b(<16 x i8> %0, i32 7)
 store <16 x i8> %1, <16 x i8>* @llvm_mips_srari_b_RES
 ret void
@@ -331,7 +331,7 @@ declare <16 x i8> @llvm.mips.srari.b(<16 x i8>, i32) nounwind
 define void @llvm_mips_srari_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_srari_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srari_h_ARG1
 %1 = tail call <8 x i16> @llvm.mips.srari.h(<8 x i16> %0, i32 7)
 store <8 x i16> %1, <8 x i16>* @llvm_mips_srari_h_RES
 ret void
@@ -350,7 +350,7 @@ declare <8 x i16> @llvm.mips.srari.h(<8 x i16>, i32) nounwind
 define void @llvm_mips_srari_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_srari_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srari_w_ARG1
 %1 = tail call <4 x i32> @llvm.mips.srari.w(<4 x i32> %0, i32 7)
 store <4 x i32> %1, <4 x i32>* @llvm_mips_srari_w_RES
 ret void
@@ -369,7 +369,7 @@ declare <4 x i32> @llvm.mips.srari.w(<4 x i32>, i32) nounwind
 define void @llvm_mips_srari_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_srari_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srari_d_ARG1
 %1 = tail call <2 x i64> @llvm.mips.srari.d(<2 x i64> %0, i32 7)
 store <2 x i64> %1, <2 x i64>* @llvm_mips_srari_d_RES
 ret void
@@ -388,7 +388,7 @@ declare <2 x i64> @llvm.mips.srari.d(<2 x i64>, i32) nounwind
 define void @llvm_mips_srli_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_srli_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srli_b_ARG1
 %1 = tail call <16 x i8> @llvm.mips.srli.b(<16 x i8> %0, i32 7)
 store <16 x i8> %1, <16 x i8>* @llvm_mips_srli_b_RES
 ret void
@@ -407,7 +407,7 @@ declare <16 x i8> @llvm.mips.srli.b(<16 x i8>, i32) nounwind
 define void @llvm_mips_srli_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_srli_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srli_h_ARG1
 %1 = tail call <8 x i16> @llvm.mips.srli.h(<8 x i16> %0, i32 7)
 store <8 x i16> %1, <8 x i16>* @llvm_mips_srli_h_RES
 ret void
@@ -426,7 +426,7 @@ declare <8 x i16> @llvm.mips.srli.h(<8 x i16>, i32) nounwind
 define void @llvm_mips_srli_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_srli_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srli_w_ARG1
 %1 = tail call <4 x i32> @llvm.mips.srli.w(<4 x i32> %0, i32 7)
 store <4 x i32> %1, <4 x i32>* @llvm_mips_srli_w_RES
 ret void
@@ -445,7 +445,7 @@ declare <4 x i32> @llvm.mips.srli.w(<4 x i32>, i32) nounwind
 define void @llvm_mips_srli_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_srli_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srli_d_ARG1
 %1 = tail call <2 x i64> @llvm.mips.srli.d(<2 x i64> %0, i32 7)
 store <2 x i64> %1, <2 x i64>* @llvm_mips_srli_d_RES
 ret void
@@ -464,7 +464,7 @@ declare <2 x i64> @llvm.mips.srli.d(<2 x i64>, i32) nounwind
 define void @llvm_mips_srlri_b_test() nounwind {
 entry:
- %0 = load <16 x i8>* @llvm_mips_srlri_b_ARG1
+ %0 = load <16 x i8>, <16 x i8>* @llvm_mips_srlri_b_ARG1
 %1 = tail call <16 x i8> @llvm.mips.srlri.b(<16 x i8> %0, i32 7)
 store <16 x i8> %1, <16 x i8>* @llvm_mips_srlri_b_RES
 ret void
@@ -483,7 +483,7 @@ declare <16 x i8> @llvm.mips.srlri.b(<16 x i8>, i32) nounwind
 define void @llvm_mips_srlri_h_test() nounwind {
 entry:
- %0 = load <8 x i16>* @llvm_mips_srlri_h_ARG1
+ %0 = load <8 x i16>, <8 x i16>* @llvm_mips_srlri_h_ARG1
 %1 = tail call <8 x i16> @llvm.mips.srlri.h(<8 x i16> %0, i32 7)
 store <8 x i16> %1, <8 x i16>* @llvm_mips_srlri_h_RES
 ret void
@@ -502,7 +502,7 @@ declare <8 x i16> @llvm.mips.srlri.h(<8 x i16>, i32) nounwind
 define void @llvm_mips_srlri_w_test() nounwind {
 entry:
- %0 = load <4 x i32>* @llvm_mips_srlri_w_ARG1
+ %0 = load <4 x i32>, <4 x i32>* @llvm_mips_srlri_w_ARG1
 %1 = tail call <4 x i32> @llvm.mips.srlri.w(<4 x i32> %0, i32 7)
 store <4 x i32> %1, <4 x i32>* @llvm_mips_srlri_w_RES
 ret void
@@ -521,7 +521,7 @@ declare <4 x i32> @llvm.mips.srlri.w(<4 x i32>, i32) nounwind
 define void @llvm_mips_srlri_d_test() nounwind {
 entry:
- %0 = load <2 x i64>* @llvm_mips_srlri_d_ARG1
+ %0 = load <2 x i64>, <2 x i64>* @llvm_mips_srlri_d_ARG1
 %1 = tail call <2 x i64> @llvm.mips.srlri.d(<2 x i64> %0, i32 7)
 store <2 x i64> %1, <2 x i64>* @llvm_mips_srlri_d_RES
 ret void
diff --git a/llvm/test/CodeGen/Mips/msa/bitcast.ll b/llvm/test/CodeGen/Mips/msa/bitcast.ll
index 8e880ecd9af..837cc28aa82 100644
--- a/llvm/test/CodeGen/Mips/msa/bitcast.ll
+++ b/llvm/test/CodeGen/Mips/msa/bitcast.ll
@@ -5,7 +5,7 @@
 define void @v16i8_to_v16i8(<16 x i8>* %src, <16 x i8>* %dst) nounwind {
 entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
 %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
 %2 = bitcast <16 x i8> %1 to <16 x i8>
 %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -29,7 +29,7 @@ entry:
 define void @v16i8_to_v8i16(<16 x i8>* %src, <8 x i16>* %dst) nounwind {
 entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
 %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
 %2 = bitcast <16 x i8> %1 to <8 x i16>
 %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -56,7 +56,7 @@ entry:
 ; are no operations for v8f16 to put in the way.
 define void @v16i8_to_v8f16(<16 x i8>* %src, <8 x half>* %dst) nounwind {
 entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
 %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
 %2 = bitcast <16 x i8> %1 to <8 x half>
 store <8 x half> %2, <8 x half>* %dst
@@ -77,7 +77,7 @@ entry:
 define void @v16i8_to_v4i32(<16 x i8>* %src, <4 x i32>* %dst) nounwind {
 entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
 %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
 %2 = bitcast <16 x i8> %1 to <4 x i32>
 %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -102,7 +102,7 @@ entry:
 define void @v16i8_to_v4f32(<16 x i8>* %src, <4 x float>* %dst) nounwind {
 entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
 %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
 %2 = bitcast <16 x i8> %1 to <4 x float>
 %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -127,7 +127,7 @@ entry:
 define void @v16i8_to_v2i64(<16 x i8>* %src, <2 x i64>* %dst) nounwind {
 entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
 %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
 %2 = bitcast <16 x i8> %1 to <2 x i64>
 %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -153,7 +153,7 @@ entry:
 define void @v16i8_to_v2f64(<16 x i8>* %src, <2 x double>* %dst) nounwind {
 entry:
- %0 = load volatile <16 x i8>* %src
+ %0 = load volatile <16 x i8>, <16 x i8>* %src
 %1 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> %0)
 %2 = bitcast <16 x i8> %1 to <2 x double>
 %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -179,7 +179,7 @@ entry:
 define void @v8i16_to_v16i8(<8 x i16>* %src, <16 x i8>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
 %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
 %2 = bitcast <8 x i16> %1 to <16 x i8>
 %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -204,7 +204,7 @@ entry:
 define void @v8i16_to_v8i16(<8 x i16>* %src, <8 x i16>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
 %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
 %2 = bitcast <8 x i16> %1 to <8 x i16>
 %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -230,7 +230,7 @@ entry:
 ; are no operations for v8f16 to put in the way.
 define void @v8i16_to_v8f16(<8 x i16>* %src, <8 x half>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
 %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
 %2 = bitcast <8 x i16> %1 to <8 x half>
 store <8 x half> %2, <8 x half>* %dst
@@ -251,7 +251,7 @@ entry:
 define void @v8i16_to_v4i32(<8 x i16>* %src, <4 x i32>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
 %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
 %2 = bitcast <8 x i16> %1 to <4 x i32>
 %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -276,7 +276,7 @@ entry:
 define void @v8i16_to_v4f32(<8 x i16>* %src, <4 x float>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
 %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
 %2 = bitcast <8 x i16> %1 to <4 x float>
 %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -301,7 +301,7 @@ entry:
 define void @v8i16_to_v2i64(<8 x i16>* %src, <2 x i64>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
 %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
 %2 = bitcast <8 x i16> %1 to <2 x i64>
 %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -326,7 +326,7 @@ entry:
 define void @v8i16_to_v2f64(<8 x i16>* %src, <2 x double>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x i16>* %src
+ %0 = load volatile <8 x i16>, <8 x i16>* %src
 %1 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %0)
 %2 = bitcast <8 x i16> %1 to <2 x double>
 %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -354,7 +354,7 @@ entry:
 ; are no operations for v8f16 to put in the way.
 define void @v8f16_to_v16i8(<8 x half>* %src, <16 x i8>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
 %1 = bitcast <8 x half> %0 to <16 x i8>
 %2 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %1, <16 x i8> %1)
 store <16 x i8> %2, <16 x i8>* %dst
@@ -378,7 +378,7 @@ entry:
 ; are no operations for v8f16 to put in the way.
 define void @v8f16_to_v8i16(<8 x half>* %src, <8 x i16>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
 %1 = bitcast <8 x half> %0 to <8 x i16>
 %2 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %1, <8 x i16> %1)
 store <8 x i16> %2, <8 x i16>* %dst
@@ -403,7 +403,7 @@ entry:
 ; are no operations for v8f16 to put in the way.
 define void @v8f16_to_v8f16(<8 x half>* %src, <8 x half>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
 %1 = bitcast <8 x half> %0 to <8 x half>
 store <8 x half> %1, <8 x half>* %dst
 ret void
@@ -423,7 +423,7 @@ entry:
 ; are no operations for v8f16 to put in the way.
 define void @v8f16_to_v4i32(<8 x half>* %src, <4 x i32>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
 %1 = bitcast <8 x half> %0 to <4 x i32>
 %2 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %1, <4 x i32> %1)
 store <4 x i32> %2, <4 x i32>* %dst
@@ -447,7 +447,7 @@ entry:
 ; are no operations for v8f16 to put in the way.
 define void @v8f16_to_v4f32(<8 x half>* %src, <4 x float>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
 %1 = bitcast <8 x half> %0 to <4 x float>
 %2 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %1, <4 x float> %1)
 store <4 x float> %2, <4 x float>* %dst
@@ -471,7 +471,7 @@ entry:
 ; are no operations for v8f16 to put in the way.
 define void @v8f16_to_v2i64(<8 x half>* %src, <2 x i64>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
 %1 = bitcast <8 x half> %0 to <2 x i64>
 %2 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %1, <2 x i64> %1)
 store <2 x i64> %2, <2 x i64>* %dst
@@ -495,7 +495,7 @@ entry:
 ; are no operations for v8f16 to put in the way.
 define void @v8f16_to_v2f64(<8 x half>* %src, <2 x double>* %dst) nounwind {
 entry:
- %0 = load volatile <8 x half>* %src
+ %0 = load volatile <8 x half>, <8 x half>* %src
 %1 = bitcast <8 x half> %0 to <2 x double>
 %2 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %1, <2 x double> %1)
 store <2 x double> %2, <2 x double>* %dst
@@ -518,7 +518,7 @@ entry:
 define void @v4i32_to_v16i8(<4 x i32>* %src, <16 x i8>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
 %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
 %2 = bitcast <4 x i32> %1 to <16 x i8>
 %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -543,7 +543,7 @@ entry:
 define void @v4i32_to_v8i16(<4 x i32>* %src, <8 x i16>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
 %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
 %2 = bitcast <4 x i32> %1 to <8 x i16>
 %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -570,7 +570,7 @@ entry:
 ; are no operations for v8f16 to put in the way.
 define void @v4i32_to_v8f16(<4 x i32>* %src, <8 x half>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
 %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
 %2 = bitcast <4 x i32> %1 to <8 x half>
 store <8 x half> %2, <8 x half>* %dst
@@ -591,7 +591,7 @@ entry:
 define void @v4i32_to_v4i32(<4 x i32>* %src, <4 x i32>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
 %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
 %2 = bitcast <4 x i32> %1 to <4 x i32>
 %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -615,7 +615,7 @@ entry:
 define void @v4i32_to_v4f32(<4 x i32>* %src, <4 x float>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
 %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
 %2 = bitcast <4 x i32> %1 to <4 x float>
 %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -639,7 +639,7 @@ entry:
 define void @v4i32_to_v2i64(<4 x i32>* %src, <2 x i64>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
 %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
 %2 = bitcast <4 x i32> %1 to <2 x i64>
 %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -664,7 +664,7 @@ entry:
 define void @v4i32_to_v2f64(<4 x i32>* %src, <2 x double>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x i32>* %src
+ %0 = load volatile <4 x i32>, <4 x i32>* %src
 %1 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %0)
 %2 = bitcast <4 x i32> %1 to <2 x double>
 %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -689,7 +689,7 @@ entry:
 define void @v4f32_to_v16i8(<4 x float>* %src, <16 x i8>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
 %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
 %2 = bitcast <4 x float> %1 to <16 x i8>
 %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -714,7 +714,7 @@ entry:
 define void @v4f32_to_v8i16(<4 x float>* %src, <8 x i16>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
 %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
 %2 = bitcast <4 x float> %1 to <8 x i16>
 %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -741,7 +741,7 @@ entry:
 ; are no operations for v8f16 to put in the way.
 define void @v4f32_to_v8f16(<4 x float>* %src, <8 x half>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
 %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
 %2 = bitcast <4 x float> %1 to <8 x half>
 store <8 x half> %2, <8 x half>* %dst
@@ -762,7 +762,7 @@ entry:
 define void @v4f32_to_v4i32(<4 x float>* %src, <4 x i32>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
 %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
 %2 = bitcast <4 x float> %1 to <4 x i32>
 %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -786,7 +786,7 @@ entry:
 define void @v4f32_to_v4f32(<4 x float>* %src, <4 x float>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
 %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
 %2 = bitcast <4 x float> %1 to <4 x float>
 %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -810,7 +810,7 @@ entry:
 define void @v4f32_to_v2i64(<4 x float>* %src, <2 x i64>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
 %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
 %2 = bitcast <4 x float> %1 to <2 x i64>
 %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -835,7 +835,7 @@ entry:
 define void @v4f32_to_v2f64(<4 x float>* %src, <2 x double>* %dst) nounwind {
 entry:
- %0 = load volatile <4 x float>* %src
+ %0 = load volatile <4 x float>, <4 x float>* %src
 %1 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %0, <4 x float> %0)
 %2 = bitcast <4 x float> %1 to <2 x double>
 %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -860,7 +860,7 @@ entry:
 define void @v2i64_to_v16i8(<2 x i64>* %src, <16 x i8>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
 %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
 %2 = bitcast <2 x i64> %1 to <16 x i8>
 %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -886,7 +886,7 @@ entry:
 define void @v2i64_to_v8i16(<2 x i64>* %src, <8 x i16>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
 %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
 %2 = bitcast <2 x i64> %1 to <8 x i16>
 %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -913,7 +913,7 @@ entry:
 ; are no operations for v8f16 to put in the way.
 define void @v2i64_to_v8f16(<2 x i64>* %src, <8 x half>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
 %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
 %2 = bitcast <2 x i64> %1 to <8 x half>
 store <8 x half> %2, <8 x half>* %dst
@@ -934,7 +934,7 @@ entry:
 define void @v2i64_to_v4i32(<2 x i64>* %src, <4 x i32>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
 %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
 %2 = bitcast <2 x i64> %1 to <4 x i32>
 %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -959,7 +959,7 @@ entry:
 define void @v2i64_to_v4f32(<2 x i64>* %src, <4 x float>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
 %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
 %2 = bitcast <2 x i64> %1 to <4 x float>
 %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -984,7 +984,7 @@ entry:
 define void @v2i64_to_v2i64(<2 x i64>* %src, <2 x i64>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
 %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
 %2 = bitcast <2 x i64> %1 to <2 x i64>
 %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -1008,7 +1008,7 @@ entry:
 define void @v2i64_to_v2f64(<2 x i64>* %src, <2 x double>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x i64>* %src
+ %0 = load volatile <2 x i64>, <2 x i64>* %src
 %1 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %0)
 %2 = bitcast <2 x i64> %1 to <2 x double>
 %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
@@ -1032,7 +1032,7 @@ entry:
 define void @v2f64_to_v16i8(<2 x double>* %src, <16 x i8>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
 %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
 %2 = bitcast <2 x double> %1 to <16 x i8>
 %3 = tail call <16 x i8> @llvm.mips.addv.b(<16 x i8> %2, <16 x i8> %2)
@@ -1058,7 +1058,7 @@ entry:
 define void @v2f64_to_v8i16(<2 x double>* %src, <8 x i16>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
 %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
 %2 = bitcast <2 x double> %1 to <8 x i16>
 %3 = tail call <8 x i16> @llvm.mips.addv.h(<8 x i16> %2, <8 x i16> %2)
@@ -1085,7 +1085,7 @@ entry:
 ; are no operations for v8f16 to put in the way.
 define void @v2f64_to_v8f16(<2 x double>* %src, <8 x half>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
 %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
 %2 = bitcast <2 x double> %1 to <8 x half>
 store <8 x half> %2, <8 x half>* %dst
@@ -1106,7 +1106,7 @@ entry:
 define void @v2f64_to_v4i32(<2 x double>* %src, <4 x i32>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
 %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
 %2 = bitcast <2 x double> %1 to <4 x i32>
 %3 = tail call <4 x i32> @llvm.mips.addv.w(<4 x i32> %2, <4 x i32> %2)
@@ -1131,7 +1131,7 @@ entry:
 define void @v2f64_to_v4f32(<2 x double>* %src, <4 x float>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
 %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
 %2 = bitcast <2 x double> %1 to <4 x float>
 %3 = tail call <4 x float> @llvm.mips.fadd.w(<4 x float> %2, <4 x float> %2)
@@ -1156,7 +1156,7 @@ entry:
 define void @v2f64_to_v2i64(<2 x double>* %src, <2 x i64>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
 %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
 %2 = bitcast <2 x double> %1 to <2 x i64>
 %3 = tail call <2 x i64> @llvm.mips.addv.d(<2 x i64> %2, <2 x i64> %2)
@@ -1180,7 +1180,7 @@ entry:
 define void @v2f64_to_v2f64(<2 x double>* %src, <2 x double>* %dst) nounwind {
 entry:
- %0 = load volatile <2 x double>* %src
+ %0 = load volatile <2 x double>, <2 x double>* %src
 %1 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %0, <2 x double> %0)
 %2 = bitcast <2 x double> %1 to <2 x double>
 %3 = tail call <2 x double> @llvm.mips.fadd.d(<2 x double> %2, <2 x double> %2)
diff --git a/llvm/test/CodeGen/Mips/msa/bitwise.ll b/llvm/test/CodeGen/Mips/msa/bitwise.ll
index 5d57198a935..2a260b2c573 100644
--- a/llvm/test/CodeGen/Mips/msa/bitwise.ll
+++ b/llvm/test/CodeGen/Mips/msa/bitwise.ll
@@ -4,9 +4,9 @@
 define void @and_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: and_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = and <16 x i8> %1, %2
 ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -20,9 +20,9 @@ define void @and_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @and_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: and_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = and <8 x i16> %1, %2
 ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -36,9 +36,9 @@ define void @and_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @and_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: and_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = and <4 x i32> %1, %2
 ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -52,9 +52,9 @@ define void @and_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @and_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: and_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = and <2 x i64> %1, %2
 ; CHECK-DAG: and.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -68,7 +68,7 @@ define void @and_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @and_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: and_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
 %2 = and <16 x i8> %1,
 ; CHECK-DAG: andi.b [[R4:\$w[0-9]+]], [[R1]], 1
@@ -82,7 +82,7 @@ define void @and_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @and_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: and_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
 %2 = and <8 x i16> %1,
 ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 1
@@ -97,7 +97,7 @@ define void @and_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @and_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: and_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = and <4 x i32> %1,
 ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 1
@@ -112,7 +112,7 @@ define void @and_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @and_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: and_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = and <2 x i64> %1,
 ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 1
@@ -127,9 +127,9 @@ define void @and_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @or_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: or_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = or <16 x i8> %1, %2
 ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -143,9 +143,9 @@ define void @or_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @or_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: or_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = or <8 x i16> %1, %2
 ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -159,9 +159,9 @@ define void @or_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @or_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: or_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = or <4 x i32> %1, %2
 ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -175,9 +175,9 @@ define void @or_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @or_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: or_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = or <2 x i64> %1, %2
 ; CHECK-DAG: or.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -191,7 +191,7 @@ define void @or_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @or_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: or_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
 %2 = or <16 x i8> %1,
 ; CHECK-DAG: ori.b [[R4:\$w[0-9]+]], [[R1]], 3
@@ -205,7 +205,7 @@ define void @or_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @or_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: or_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
 %2 = or <8 x i16> %1,
 ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 3
@@ -220,7 +220,7 @@ define void @or_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @or_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: or_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = or <4 x i32> %1,
 ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 3
@@ -235,7 +235,7 @@ define void @or_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @or_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: or_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = or <2 x i64> %1,
 ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 3
@@ -250,9 +250,9 @@ define void @or_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @nor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: nor_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = or <16 x i8> %1, %2
 %4 = xor <16 x i8> %3,
@@ -267,9 +267,9 @@ define void @nor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @nor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: nor_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = or <8 x i16> %1, %2
 %4 = xor <8 x i16> %3,
@@ -284,9 +284,9 @@ define void @nor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @nor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: nor_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = or <4 x i32> %1, %2
 %4 = xor <4 x i32> %3,
@@ -301,9 +301,9 @@ define void @nor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @nor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: nor_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = or <2 x i64> %1, %2
 %4 = xor <2 x i64> %3,
@@ -318,7 +318,7 @@ define void @nor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @nor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: nor_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
 %2 = or <16 x i8> %1,
 %3 = xor <16 x i8> %2,
@@ -333,7 +333,7 @@ define void @nor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @nor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: nor_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
 %2 = or <8 x i16> %1,
 %3 = xor <8 x i16> %2,
@@ -349,7 +349,7 @@ define void @nor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @nor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: nor_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = or <4 x i32> %1,
 %3 = xor <4 x i32> %2,
@@ -365,7 +365,7 @@ define void @nor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @nor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: nor_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = or <2 x i64> %1,
 %3 = xor <2 x i64> %2,
@@ -381,9 +381,9 @@ define void @nor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @xor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: xor_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = xor <16 x i8> %1, %2
 ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -397,9 +397,9 @@ define void @xor_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @xor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: xor_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = xor <8 x i16> %1, %2
 ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -413,9 +413,9 @@ define void @xor_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @xor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: xor_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = xor <4 x i32> %1, %2
 ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -429,9 +429,9 @@ define void @xor_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @xor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: xor_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = xor <2 x i64> %1, %2
 ; CHECK-DAG: xor.v [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -445,7 +445,7 @@ define void @xor_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @xor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: xor_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
 %2 = xor <16 x i8> %1,
 ; CHECK-DAG: xori.b [[R4:\$w[0-9]+]], [[R1]], 3
@@ -459,7 +459,7 @@ define void @xor_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @xor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: xor_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
 %2 = xor <8 x i16> %1,
 ; CHECK-DAG: ldi.h [[R3:\$w[0-9]+]], 3
@@ -474,7 +474,7 @@ define void @xor_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @xor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: xor_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = xor <4 x i32> %1,
 ; CHECK-DAG: ldi.w [[R3:\$w[0-9]+]], 3
@@ -489,7 +489,7 @@ define void @xor_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @xor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: xor_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = xor <2 x i64> %1,
 ; CHECK-DAG: ldi.d [[R3:\$w[0-9]+]], 3
@@ -504,9 +504,9 @@ define void @xor_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @sll_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: sll_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <16 x i8> %1, %2
 ; CHECK-DAG: sll.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -520,9 +520,9 @@ define void @sll_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @sll_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: sll_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <8 x i16> %1, %2
 ; CHECK-DAG: sll.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -536,9 +536,9 @@ define void @sll_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @sll_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: sll_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <4 x i32> %1, %2
 ; CHECK-DAG: sll.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -552,9 +552,9 @@ define void @sll_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @sll_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: sll_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <2 x i64> %1, %2
 ; CHECK-DAG: sll.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -568,7 +568,7 @@ define void @sll_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @sll_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: sll_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
 %2 = shl <16 x i8> %1,
 ; CHECK-DAG: slli.b [[R4:\$w[0-9]+]], [[R1]], 1
@@ -582,7 +582,7 @@ define void @sll_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @sll_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: sll_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
 %2 = shl <8 x i16> %1,
 ; CHECK-DAG: slli.h [[R4:\$w[0-9]+]], [[R1]], 1
@@ -596,7 +596,7 @@ define void @sll_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @sll_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: sll_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = shl <4 x i32> %1,
 ; CHECK-DAG: slli.w [[R4:\$w[0-9]+]], [[R1]], 1
@@ -610,7 +610,7 @@ define void @sll_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @sll_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: sll_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = shl <2 x i64> %1,
 ; CHECK-DAG: slli.d [[R4:\$w[0-9]+]], [[R1]], 1
@@ -624,9 +624,9 @@ define void @sll_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @sra_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: sra_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = ashr <16 x i8> %1, %2
 ; CHECK-DAG: sra.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -640,9 +640,9 @@ define void @sra_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @sra_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: sra_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = ashr <8 x i16> %1, %2
 ; CHECK-DAG: sra.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -656,9 +656,9 @@ define void @sra_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @sra_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: sra_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = ashr <4 x i32> %1, %2
 ; CHECK-DAG: sra.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -672,9 +672,9 @@ define void @sra_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @sra_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: sra_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = ashr <2 x i64> %1, %2
 ; CHECK-DAG: sra.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -688,7 +688,7 @@ define void @sra_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @sra_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: sra_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
 %2 = ashr <16 x i8> %1,
 ; CHECK-DAG: srai.b [[R4:\$w[0-9]+]], [[R1]], 1
@@ -702,7 +702,7 @@ define void @sra_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @sra_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: sra_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
 %2 = ashr <8 x i16> %1,
 ; CHECK-DAG: srai.h [[R4:\$w[0-9]+]], [[R1]], 1
@@ -716,7 +716,7 @@ define void @sra_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @sra_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: sra_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = ashr <4 x i32> %1,
 ; CHECK-DAG: srai.w [[R4:\$w[0-9]+]], [[R1]], 1
@@ -730,7 +730,7 @@ define void @sra_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @sra_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: sra_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = ashr <2 x i64> %1,
 ; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R1]], 1
@@ -744,9 +744,9 @@ define void @sra_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @srl_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: srl_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = lshr <16 x i8> %1, %2
 ; CHECK-DAG: srl.b [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -760,9 +760,9 @@ define void @srl_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @srl_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: srl_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = lshr <8 x i16> %1, %2
 ; CHECK-DAG: srl.h [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -776,9 +776,9 @@ define void @srl_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @srl_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: srl_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = lshr <4 x i32> %1, %2
 ; CHECK-DAG: srl.w [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -792,9 +792,9 @@ define void @srl_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @srl_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: srl_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = lshr <2 x i64> %1, %2
 ; CHECK-DAG: srl.d [[R3:\$w[0-9]+]], [[R1]], [[R2]]
@@ -808,7 +808,7 @@ define void @srl_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @srl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: srl_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
 %2 = lshr <16 x i8> %1,
 ; CHECK-DAG: srli.b [[R4:\$w[0-9]+]], [[R1]], 1
@@ -822,7 +822,7 @@ define void @srl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @srl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: srl_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
 %2 = lshr <8 x i16> %1,
 ; CHECK-DAG: srli.h [[R4:\$w[0-9]+]], [[R1]], 1
@@ -836,7 +836,7 @@ define void @srl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @srl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: srl_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = lshr <4 x i32> %1,
 ; CHECK-DAG: srli.w [[R4:\$w[0-9]+]], [[R1]], 1
@@ -850,7 +850,7 @@ define void @srl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @srl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: srl_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = lshr <2 x i64> %1,
 ; CHECK-DAG: srli.d [[R4:\$w[0-9]+]], [[R1]], 1
@@ -864,7 +864,7 @@ define void @srl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @ctpop_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: ctpop_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <16 x i8> @llvm.ctpop.v16i8 (<16 x i8> %1)
 ; CHECK-DAG: pcnt.b [[R3:\$w[0-9]+]], [[R1]]
@@ -878,7 +878,7 @@ define void @ctpop_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @ctpop_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: ctpop_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <8 x i16> @llvm.ctpop.v8i16 (<8 x i16> %1)
 ; CHECK-DAG: pcnt.h [[R3:\$w[0-9]+]], [[R1]]
@@ -892,7 +892,7 @@ define void @ctpop_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @ctpop_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: ctpop_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <4 x i32> @llvm.ctpop.v4i32 (<4 x i32> %1)
 ; CHECK-DAG: pcnt.w [[R3:\$w[0-9]+]], [[R1]]
@@ -906,7 +906,7 @@ define void @ctpop_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @ctpop_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: ctpop_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <2 x i64> @llvm.ctpop.v2i64 (<2 x i64> %1)
 ; CHECK-DAG: pcnt.d [[R3:\$w[0-9]+]], [[R1]]
@@ -920,7 +920,7 @@ define void @ctpop_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @ctlz_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: ctlz_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <16 x i8> @llvm.ctlz.v16i8 (<16 x i8> %1)
 ; CHECK-DAG: nlzc.b [[R3:\$w[0-9]+]], [[R1]]
@@ -934,7 +934,7 @@ define void @ctlz_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @ctlz_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: ctlz_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <8 x i16> @llvm.ctlz.v8i16 (<8 x i16> %1)
 ; CHECK-DAG: nlzc.h [[R3:\$w[0-9]+]], [[R1]]
@@ -948,7 +948,7 @@ define void @ctlz_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @ctlz_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: ctlz_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <4 x i32> @llvm.ctlz.v4i32 (<4 x i32> %1)
 ; CHECK-DAG: nlzc.w [[R3:\$w[0-9]+]], [[R1]]
@@ -962,7 +962,7 @@ define void @ctlz_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @ctlz_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: ctlz_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
 %2 = tail call <2 x i64> @llvm.ctlz.v2i64 (<2 x i64> %1)
 ; CHECK-DAG: nlzc.d [[R3:\$w[0-9]+]], [[R1]]
@@ -976,11 +976,11 @@ define void @ctlz_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @bsel_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %m) nounwind {
 ; CHECK: bsel_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
- %3 = load <16 x i8>* %m
+ %3 = load <16 x i8>, <16 x i8>* %m
 ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7)
 %4 = xor <16 x i8> %3,
* %c, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>*
 define void @bsel_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %m) nounwind {
 ; CHECK: bsel_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %m
+ %2 = load <16 x i8>, <16 x i8>* %m
 ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($6)
 %3 = xor <16 x i8> %2,
* %c, <16 x i8>* %a, <16 x i8>* %m) nounwind
 define void @bsel_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: bsel_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = and <8 x i16> %1,
@@ -1048,9 +1048,9 @@ define void @bsel_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @bsel_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: bsel_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = and <4 x i32> %1,
 %4 = and <4 x i32> %2,
@@ -1067,9 +1067,9 @@ define void @bsel_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @bsel_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: bsel_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = and <2 x i64> %1,
 %4 = and <2 x i64> %2,
@@ -1086,9 +1086,9 @@ define void @bsel_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @binsl_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: binsl_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = and <16 x i8> %1,
* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
 define void @binsl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: binsl_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = and <8 x i16> %1,
@@ -1130,9 +1130,9 @@ define void @binsl_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
 define void @binsl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: binsl_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = and <4 x i32> %1,
 %4 = and <4 x i32> %2,
@@ -1148,9 +1148,9 @@ define void @binsl_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
 define void @binsl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: binsl_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = and <2 x i64> %1,
 %4 = and <2 x i64> %2,
@@ -1170,9 +1170,9 @@ define void @binsl_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
 define void @binsr_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: binsr_v16i8_i:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = and <16 x i8> %1,
@@ -1192,9 +1192,9 @@ define void @binsr_v16i8_i(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind
 define void @binsr_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: binsr_v8i16_i:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = and <8 x i16> %1,
@@ -1212,9 +1212,9 @@ define void @binsr_v8i16_i(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind
 define void @binsr_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: binsr_v4i32_i:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = and <4 x i32> %1,
 %4 = and <4 x i32> %2,
@@ -1230,9 +1230,9 @@ define void @binsr_v4i32_i(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind
 define void @binsr_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: binsr_v2i64_i:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = and <2 x i64> %1,
 %4 = and <2 x i64> %2,
@@ -1248,9 +1248,9 @@ define void @binsr_v2i64_i(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind
 define void @bclr_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: bclr_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <16 x i8> , %2
 %4 = xor <16 x i8> %3,
@@ -1266,9 +1266,9 @@ define void @bclr_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @bclr_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: bclr_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <8 x i16> , %2
 %4 = xor <8 x i16> %3,
@@ -1284,9 +1284,9 @@ define void @bclr_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @bclr_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: bclr_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <4 x i32> , %2
 %4 = xor <4 x i32> %3,
@@ -1302,9 +1302,9 @@ define void @bclr_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @bclr_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: bclr_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <2 x i64> , %2
 %4 = xor <2 x i64> %3,
@@ -1320,9 +1320,9 @@ define void @bclr_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @bset_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: bset_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <16 x i8> , %2
 %4 = or <16 x i8> %1, %3
@@ -1337,9 +1337,9 @@ define void @bset_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @bset_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: bset_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <8 x i16> , %2
 %4 = or <8 x i16> %1, %3
@@ -1354,9 +1354,9 @@ define void @bset_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @bset_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: bset_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <4 x i32> , %2
 %4 = or <4 x i32> %1, %3
@@ -1371,9 +1371,9 @@ define void @bset_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @bset_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: bset_v2i64:
- %1 = load <2 x i64>* %a
+ %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
- %2 = load <2 x i64>* %b
+ %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <2 x i64> , %2
 %4 = or <2 x i64> %1, %3
@@ -1388,9 +1388,9 @@ define void @bset_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @bneg_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: bneg_v16i8:
- %1 = load <16 x i8>* %a
+ %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
- %2 = load <16 x i8>* %b
+ %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <16 x i8> , %2
 %4 = xor <16 x i8> %1, %3
@@ -1405,9 +1405,9 @@ define void @bneg_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @bneg_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: bneg_v8i16:
- %1 = load <8 x i16>* %a
+ %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
- %2 = load <8 x i16>* %b
+ %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <8 x i16> , %2
 %4 = xor <8 x i16> %1, %3
@@ -1422,9 +1422,9 @@ define void @bneg_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @bneg_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: bneg_v4i32:
- %1 = load <4 x i32>* %a
+ %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
- %2 = load <4 x i32>* %b
+ %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
 %3 = shl <4 x
i32> , %2 %4 = xor <4 x i32> %1, %3 @@ -1439,9 +1439,9 @@ define void @bneg_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @bneg_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: bneg_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shl <2 x i64> , %2 %4 = xor <2 x i64> %1, %3 @@ -1456,7 +1456,7 @@ define void @bneg_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @bclri_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: bclri_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = xor <16 x i8> , @@ -1473,7 +1473,7 @@ define void @bclri_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @bclri_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: bclri_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = xor <8 x i16> , @@ -1489,7 +1489,7 @@ define void @bclri_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @bclri_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: bclri_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = xor <4 x i32> , @@ -1505,7 +1505,7 @@ define void @bclri_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @bclri_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: bclri_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = xor <2 x i64> , @@ -1521,7 +1521,7 @@ define void @bclri_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @bseti_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: bseti_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = or <16 x i8> %1, ; CHECK-DAG: bseti.b [[R3:\$w[0-9]+]], [[R1]], 3 @@ -1535,7 +1535,7 @@ define void @bseti_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @bseti_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: bseti_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = or <8 x i16> %1, ; CHECK-DAG: bseti.h [[R3:\$w[0-9]+]], [[R1]], 3 @@ -1549,7 +1549,7 @@ define void @bseti_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @bseti_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: bseti_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = or <4 x i32> %1, ; CHECK-DAG: bseti.w [[R3:\$w[0-9]+]], [[R1]], 3 @@ -1563,7 +1563,7 @@ define void @bseti_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @bseti_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: bseti_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = or <2 x i64> %1, ; CHECK-DAG: bseti.d [[R3:\$w[0-9]+]], [[R1]], 3 @@ -1577,7 +1577,7 @@ define void @bseti_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @bnegi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: bnegi_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = xor <16 x i8> %1, ; CHECK-DAG: bnegi.b [[R3:\$w[0-9]+]], [[R1]], 3 @@ -1591,7 +1591,7 @@ define void @bnegi_v16i8(<16 x i8>* %c, <16 x i8>* 
%a) nounwind { define void @bnegi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: bnegi_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = xor <8 x i16> %1, ; CHECK-DAG: bnegi.h [[R3:\$w[0-9]+]], [[R1]], 3 @@ -1605,7 +1605,7 @@ define void @bnegi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @bnegi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: bnegi_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = xor <4 x i32> %1, ; CHECK-DAG: bnegi.w [[R3:\$w[0-9]+]], [[R1]], 3 @@ -1619,7 +1619,7 @@ define void @bnegi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @bnegi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: bnegi_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = xor <2 x i64> %1, ; CHECK-DAG: bnegi.d [[R3:\$w[0-9]+]], [[R1]], 3 diff --git a/llvm/test/CodeGen/Mips/msa/compare.ll b/llvm/test/CodeGen/Mips/msa/compare.ll index 87ca1482da8..bc4f6e7e394 100644 --- a/llvm/test/CodeGen/Mips/msa/compare.ll +++ b/llvm/test/CodeGen/Mips/msa/compare.ll @@ -4,9 +4,9 @@ define void @ceq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: ceq_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp eq <16 x i8> %1, %2 %4 = sext <16 x i1> %3 to <16 x i8> @@ -21,9 +21,9 @@ define void @ceq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @ceq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: ceq_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp eq <8 x i16> %1, %2 %4 = sext <8 x i1> %3 to <8 x i16> @@ -38,9 +38,9 @@ define void @ceq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @ceq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: ceq_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp eq <4 x i32> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -55,9 +55,9 @@ define void @ceq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @ceq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: ceq_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp eq <2 x i64> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -72,9 +72,9 @@ define void @ceq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @cle_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: cle_s_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp sle <16 x i8> %1, %2 %4 = sext <16 x i1> %3 to <16 x i8> @@ -89,9 +89,9 @@ define void @cle_s_v16i8(<16 x i8>* %c, <16 x i8>* 
%a, <16 x i8>* %b) nounwind { define void @cle_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: cle_s_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp sle <8 x i16> %1, %2 %4 = sext <8 x i1> %3 to <8 x i16> @@ -106,9 +106,9 @@ define void @cle_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @cle_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: cle_s_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp sle <4 x i32> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -123,9 +123,9 @@ define void @cle_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @cle_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: cle_s_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp sle <2 x i64> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -140,9 +140,9 @@ define void @cle_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @cle_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: cle_u_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp ule <16 x i8> %1, %2 %4 = sext <16 x i1> %3 to <16 x i8> @@ -157,9 +157,9 @@ define void @cle_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @cle_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: cle_u_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp ule <8 x i16> %1, %2 %4 = sext <8 x i1> %3 to <8 x i16> @@ -174,9 +174,9 @@ define void @cle_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @cle_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: cle_u_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp ule <4 x i32> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -191,9 +191,9 @@ define void @cle_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @cle_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: cle_u_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp ule <2 x i64> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -208,9 +208,9 @@ define void @cle_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @clt_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: clt_s_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* 
%a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp slt <16 x i8> %1, %2 %4 = sext <16 x i1> %3 to <16 x i8> @@ -225,9 +225,9 @@ define void @clt_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @clt_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: clt_s_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp slt <8 x i16> %1, %2 %4 = sext <8 x i1> %3 to <8 x i16> @@ -242,9 +242,9 @@ define void @clt_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @clt_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: clt_s_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp slt <4 x i32> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -259,9 +259,9 @@ define void @clt_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @clt_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: clt_s_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp slt <2 x i64> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -276,9 +276,9 @@ define void @clt_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @clt_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: clt_u_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp ult <16 x i8> %1, %2 %4 = sext <16 x i1> %3 to <16 x i8> @@ -293,9 +293,9 @@ define void @clt_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @clt_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: clt_u_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp ult <8 x i16> %1, %2 %4 = sext <8 x i1> %3 to <8 x i16> @@ -310,9 +310,9 @@ define void @clt_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @clt_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: clt_u_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp ult <4 x i32> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -327,9 +327,9 @@ define void @clt_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @clt_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: clt_u_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp ult <2 x i64> %1, %2 %4 = sext 
<2 x i1> %3 to <2 x i64> @@ -345,9 +345,9 @@ define void @clt_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; issues in this area. define void @cne_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: cne_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp ne <16 x i8> %1, %2 %4 = sext <16 x i1> %3 to <16 x i8> @@ -365,9 +365,9 @@ define void @cne_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @cne_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: cne_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp ne <8 x i16> %1, %2 %4 = sext <8 x i1> %3 to <8 x i16> @@ -387,9 +387,9 @@ define void @cne_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @cne_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: cne_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp ne <4 x i32> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -409,9 +409,9 @@ define void @cne_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @cne_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: cne_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp ne <2 x i64> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -429,7 +429,7 @@ define void @cne_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @ceqi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: ceqi_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = icmp eq <16 x i8> %1, %3 = sext <16 x i1> %2 to <16 x i8> @@ -444,7 +444,7 @@ define void @ceqi_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @ceqi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: ceqi_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = icmp eq <8 x i16> %1, %3 = sext <8 x i1> %2 to <8 x i16> @@ -459,7 +459,7 @@ define void @ceqi_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @ceqi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: ceqi_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = icmp eq <4 x i32> %1, %3 = sext <4 x i1> %2 to <4 x i32> @@ -474,7 +474,7 @@ define void @ceqi_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @ceqi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: ceqi_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = icmp eq <2 x i64> %1, %3 = sext <2 x i1> %2 to <2 x i64> @@ -489,7 +489,7 @@ define void @ceqi_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @clei_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: clei_s_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; 
CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = icmp sle <16 x i8> %1, %3 = sext <16 x i1> %2 to <16 x i8> @@ -504,7 +504,7 @@ define void @clei_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @clei_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: clei_s_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = icmp sle <8 x i16> %1, %3 = sext <8 x i1> %2 to <8 x i16> @@ -519,7 +519,7 @@ define void @clei_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @clei_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: clei_s_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = icmp sle <4 x i32> %1, %3 = sext <4 x i1> %2 to <4 x i32> @@ -534,7 +534,7 @@ define void @clei_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @clei_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: clei_s_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = icmp sle <2 x i64> %1, %3 = sext <2 x i1> %2 to <2 x i64> @@ -549,7 +549,7 @@ define void @clei_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @clei_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: clei_u_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = icmp ule <16 x i8> %1, %3 = sext <16 x i1> %2 to <16 x i8> @@ -564,7 +564,7 @@ define void @clei_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @clei_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: clei_u_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = icmp ule <8 x i16> %1, %3 = sext <8 x i1> %2 to <8 x i16> @@ -579,7 +579,7 @@ define void @clei_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @clei_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: clei_u_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = icmp ule <4 x i32> %1, %3 = sext <4 x i1> %2 to <4 x i32> @@ -594,7 +594,7 @@ define void @clei_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @clei_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: clei_u_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = icmp ule <2 x i64> %1, %3 = sext <2 x i1> %2 to <2 x i64> @@ -609,7 +609,7 @@ define void @clei_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @clti_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: clti_s_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = icmp slt <16 x i8> %1, %3 = sext <16 x i1> %2 to <16 x i8> @@ -624,7 +624,7 @@ define void @clti_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @clti_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: clti_s_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = icmp slt <8 x i16> %1, %3 = sext <8 x i1> %2 to <8 x i16> @@ -639,7 +639,7 @@ define void @clti_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @clti_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: clti_s_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = icmp slt <4 x i32> %1, %3 = sext <4 x i1> %2 to <4 x 
i32> @@ -654,7 +654,7 @@ define void @clti_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @clti_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: clti_s_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = icmp slt <2 x i64> %1, %3 = sext <2 x i1> %2 to <2 x i64> @@ -669,7 +669,7 @@ define void @clti_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { define void @clti_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: clti_u_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = icmp ult <16 x i8> %1, %3 = sext <16 x i1> %2 to <16 x i8> @@ -684,7 +684,7 @@ define void @clti_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @clti_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: clti_u_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = icmp ult <8 x i16> %1, %3 = sext <8 x i1> %2 to <8 x i16> @@ -699,7 +699,7 @@ define void @clti_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @clti_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: clti_u_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = icmp ult <4 x i32> %1, %3 = sext <4 x i1> %2 to <4 x i32> @@ -714,7 +714,7 @@ define void @clti_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @clti_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: clti_u_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = icmp ult <2 x i64> %1, %3 = sext <2 x i1> %2 to <2 x i64> @@ -730,11 +730,11 @@ define void @bsel_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) nounwind { ; CHECK: bsel_s_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) - %3 = load <16 x i8>* %c + %3 = load <16 x i8>, <16 x i8>* %c ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7) %4 = icmp sgt <16 x i8> %1, %2 ; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -752,11 +752,11 @@ define void @bsel_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) nounwind { ; CHECK: bsel_s_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) - %3 = load <8 x i16>* %c + %3 = load <8 x i16>, <8 x i16>* %c ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7) %4 = icmp sgt <8 x i16> %1, %2 ; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -774,11 +774,11 @@ define void @bsel_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) nounwind { ; CHECK: bsel_s_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) - %3 = load <4 x i32>* %c + %3 = load <4 x i32>, <4 x i32>* %c ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7) %4 = icmp sgt <4 x i32> %1, %2 ; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -796,11 +796,11 @@ define void @bsel_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) nounwind { ; CHECK: bsel_s_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x 
i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) - %3 = load <2 x i64>* %c + %3 = load <2 x i64>, <2 x i64>* %c ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7) %4 = icmp sgt <2 x i64> %1, %2 ; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -818,11 +818,11 @@ define void @bsel_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) nounwind { ; CHECK: bsel_u_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) - %3 = load <16 x i8>* %c + %3 = load <16 x i8>, <16 x i8>* %c ; CHECK-DAG: ld.b [[R3:\$w[0-9]+]], 0($7) %4 = icmp ugt <16 x i8> %1, %2 ; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -840,11 +840,11 @@ define void @bsel_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) nounwind { ; CHECK: bsel_u_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) - %3 = load <8 x i16>* %c + %3 = load <8 x i16>, <8 x i16>* %c ; CHECK-DAG: ld.h [[R3:\$w[0-9]+]], 0($7) %4 = icmp ugt <8 x i16> %1, %2 ; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -862,11 +862,11 @@ define void @bsel_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) nounwind { ; CHECK: bsel_u_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) - %3 = load <4 x i32>* %c + %3 = load <4 x i32>, <4 x i32>* %c ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7) %4 = icmp ugt <4 x i32> %1, %2 ; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -884,11 +884,11 @@ define void @bsel_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) nounwind { ; CHECK: bsel_u_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) - %3 = load <2 x i64>* %c + %3 = load <2 x i64>, <2 x i64>* %c ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7) %4 = icmp ugt <2 x i64> %1, %2 ; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -906,9 +906,9 @@ define void @bseli_s_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) nounwind { ; CHECK: bseli_s_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp sgt <16 x i8> %1, %2 ; CHECK-DAG: clt_s.b [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -925,9 +925,9 @@ define void @bseli_s_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) nounwind { ; CHECK: bseli_s_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp sgt <8 x i16> %1, %2 ; CHECK-DAG: clt_s.h [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -945,9 +945,9 @@ define void @bseli_s_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) nounwind { ; CHECK: bseli_s_v4i32: - %1 = load <4 x i32>* 
%a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp sgt <4 x i32> %1, %2 ; CHECK-DAG: clt_s.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -965,9 +965,9 @@ define void @bseli_s_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) nounwind { ; CHECK: bseli_s_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp sgt <2 x i64> %1, %2 ; CHECK-DAG: clt_s.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -985,9 +985,9 @@ define void @bseli_u_v16i8(<16 x i8>* %d, <16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) nounwind { ; CHECK: bseli_u_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <16 x i8> %1, %2 ; CHECK-DAG: clt_u.b [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -1004,9 +1004,9 @@ define void @bseli_u_v8i16(<8 x i16>* %d, <8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) nounwind { ; CHECK: bseli_u_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <8 x i16> %1, %2 ; CHECK-DAG: clt_u.h [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -1024,9 +1024,9 @@ define void @bseli_u_v4i32(<4 x i32>* %d, <4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) nounwind { ; CHECK: bseli_u_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <4 x i32> %1, %2 ; CHECK-DAG: clt_u.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -1044,9 +1044,9 @@ define void @bseli_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) nounwind { ; CHECK: bseli_u_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <2 x i64> %1, %2 ; CHECK-DAG: clt_u.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -1063,9 +1063,9 @@ define void @bseli_u_v2i64(<2 x i64>* %d, <2 x i64>* %a, <2 x i64>* %b, define void @max_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: max_s_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp sgt <16 x i8> %1, %2 %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2 @@ -1080,9 +1080,9 @@ define void @max_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @max_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: max_s_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp sgt <8 x i16> %1, %2 %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2 @@ -1097,9 +1097,9 @@ define void @max_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind 
{ define void @max_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: max_s_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp sgt <4 x i32> %1, %2 %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2 @@ -1114,9 +1114,9 @@ define void @max_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @max_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: max_s_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp sgt <2 x i64> %1, %2 %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2 @@ -1131,9 +1131,9 @@ define void @max_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @max_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: max_u_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <16 x i8> %1, %2 %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2 @@ -1148,9 +1148,9 @@ define void @max_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @max_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: max_u_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <8 x i16> %1, %2 %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2 @@ -1165,9 +1165,9 @@ define void @max_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @max_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: max_u_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <4 x i32> %1, %2 %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2 @@ -1182,9 +1182,9 @@ define void @max_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @max_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: max_u_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp ugt <2 x i64> %1, %2 %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2 @@ -1199,9 +1199,9 @@ define void @max_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { define void @max_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: max_s_eq_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp sge <16 x i8> %1, %2 %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2 @@ -1216,9 +1216,9 @@ define void @max_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwin define void @max_s_eq_v8i16(<8 x i16>* %c, <8 x 
i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: max_s_eq_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp sge <8 x i16> %1, %2 %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2 @@ -1233,9 +1233,9 @@ define void @max_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwin define void @max_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: max_s_eq_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp sge <4 x i32> %1, %2 %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2 @@ -1250,9 +1250,9 @@ define void @max_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwin define void @max_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: max_s_eq_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp sge <2 x i64> %1, %2 %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2 @@ -1267,9 +1267,9 @@ define void @max_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwin define void @max_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: max_u_eq_v16i8: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = icmp uge <16 x i8> %1, %2 %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2 @@ -1284,9 +1284,9 @@ define void @max_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwin define void @max_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: max_u_eq_v8i16: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = icmp uge <8 x i16> %1, %2 %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2 @@ -1301,9 +1301,9 @@ define void @max_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwin define void @max_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: max_u_eq_v4i32: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = icmp uge <4 x i32> %1, %2 %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2 @@ -1318,9 +1318,9 @@ define void @max_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwin define void @max_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: max_u_eq_v2i64: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = icmp uge <2 x i64> %1, %2 %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2 @@ -1335,7 +1335,7 @@ define void @max_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwin define void @maxi_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind { 
 ; CHECK: maxi_s_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sgt <16 x i8> %1,
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8>
@@ -1350,7 +1350,7 @@ define void @maxi_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @maxi_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: maxi_s_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sgt <8 x i16> %1,
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16>
@@ -1365,7 +1365,7 @@ define void @maxi_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @maxi_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: maxi_s_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sgt <4 x i32> %1,
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32>
@@ -1380,7 +1380,7 @@ define void @maxi_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @maxi_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: maxi_s_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sgt <2 x i64> %1,
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64>
@@ -1395,7 +1395,7 @@ define void @maxi_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @maxi_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: maxi_u_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ugt <16 x i8> %1,
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8>
@@ -1410,7 +1410,7 @@ define void @maxi_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @maxi_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: maxi_u_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ugt <8 x i16> %1,
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16>
@@ -1425,7 +1425,7 @@ define void @maxi_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @maxi_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: maxi_u_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ugt <4 x i32> %1,
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32>
@@ -1440,7 +1440,7 @@ define void @maxi_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @maxi_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: maxi_u_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ugt <2 x i64> %1,
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64>
@@ -1455,7 +1455,7 @@ define void @maxi_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @maxi_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: maxi_s_eq_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sge <16 x i8> %1,
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8>
@@ -1470,7 +1470,7 @@ define void @maxi_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @maxi_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: maxi_s_eq_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sge <8 x i16> %1,
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16>
@@ -1485,7 +1485,7 @@ define void @maxi_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @maxi_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: maxi_s_eq_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sge <4 x i32> %1,
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32>
@@ -1500,7 +1500,7 @@ define void @maxi_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @maxi_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: maxi_s_eq_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sge <2 x i64> %1,
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64>
@@ -1515,7 +1515,7 @@ define void @maxi_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @maxi_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: maxi_u_eq_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp uge <16 x i8> %1,
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8>
@@ -1530,7 +1530,7 @@ define void @maxi_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @maxi_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: maxi_u_eq_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp uge <8 x i16> %1,
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16>
@@ -1545,7 +1545,7 @@ define void @maxi_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @maxi_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: maxi_u_eq_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp uge <4 x i32> %1,
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32>
@@ -1560,7 +1560,7 @@ define void @maxi_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @maxi_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: maxi_u_eq_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp uge <2 x i64> %1,
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64>
@@ -1575,9 +1575,9 @@ define void @maxi_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @min_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: min_s_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>* %b
+  %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <16 x i8> %1, %2
   %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1592,9 +1592,9 @@ define void @min_s_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @min_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: min_s_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>* %b
+  %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp slt <8 x i16> %1, %2
   %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
@@ -1609,9 +1609,9 @@ define void @min_s_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @min_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: min_s_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>* %b
+  %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp slt <4 x i32> %1, %2
   %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
@@ -1626,9 +1626,9 @@ define void @min_s_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @min_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: min_s_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>* %b
+  %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp slt <2 x i64> %1, %2
   %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
@@ -1643,9 +1643,9 @@ define void @min_s_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @min_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: min_u_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>* %b
+  %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ult <16 x i8> %1, %2
   %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1660,9 +1660,9 @@ define void @min_u_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 define void @min_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: min_u_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>* %b
+  %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ult <8 x i16> %1, %2
   %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
@@ -1677,9 +1677,9 @@ define void @min_u_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 define void @min_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: min_u_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>* %b
+  %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ult <4 x i32> %1, %2
   %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
@@ -1694,9 +1694,9 @@ define void @min_u_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 define void @min_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: min_u_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>* %b
+  %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ult <2 x i64> %1, %2
   %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
@@ -1711,9 +1711,9 @@ define void @min_u_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 define void @min_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: min_s_eq_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>* %b
+  %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <16 x i8> %1, %2
   %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1728,9 +1728,9 @@ define void @min_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwin
 define void @min_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: min_s_eq_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>* %b
+  %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <8 x i16> %1, %2
   %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
@@ -1745,9 +1745,9 @@ define void @min_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwin
 define void @min_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: min_s_eq_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>* %b
+  %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <4 x i32> %1, %2
   %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
@@ -1762,9 +1762,9 @@ define void @min_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwin
 define void @min_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: min_s_eq_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>* %b
+  %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp sle <2 x i64> %1, %2
   %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
@@ -1779,9 +1779,9 @@ define void @min_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwin
 define void @min_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind {
 ; CHECK: min_u_eq_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <16 x i8>* %b
+  %2 = load <16 x i8>, <16 x i8>* %b
 ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ule <16 x i8> %1, %2
   %4 = select <16 x i1> %3, <16 x i8> %1, <16 x i8> %2
@@ -1796,9 +1796,9 @@ define void @min_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwin
 define void @min_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind {
 ; CHECK: min_u_eq_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <8 x i16>* %b
+  %2 = load <8 x i16>, <8 x i16>* %b
 ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ule <8 x i16> %1, %2
   %4 = select <8 x i1> %3, <8 x i16> %1, <8 x i16> %2
@@ -1813,9 +1813,9 @@ define void @min_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwin
 define void @min_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind {
 ; CHECK: min_u_eq_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x i32>* %b
+  %2 = load <4 x i32>, <4 x i32>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ule <4 x i32> %1, %2
   %4 = select <4 x i1> %3, <4 x i32> %1, <4 x i32> %2
@@ -1830,9 +1830,9 @@ define void @min_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwin
 define void @min_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind {
 ; CHECK: min_u_eq_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <2 x i64>* %b
+  %2 = load <2 x i64>, <2 x i64>* %b
 ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6)
   %3 = icmp ule <2 x i64> %1, %2
   %4 = select <2 x i1> %3, <2 x i64> %1, <2 x i64> %2
@@ -1847,7 +1847,7 @@ define void @min_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwin
 define void @mini_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: mini_s_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp slt <16 x i8> %1,
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8>
@@ -1862,7 +1862,7 @@ define void @mini_s_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @mini_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: mini_s_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp slt <8 x i16> %1,
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16>
@@ -1877,7 +1877,7 @@ define void @mini_s_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @mini_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: mini_s_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp slt <4 x i32> %1,
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32>
@@ -1892,7 +1892,7 @@ define void @mini_s_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @mini_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: mini_s_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp slt <2 x i64> %1,
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64>
@@ -1907,7 +1907,7 @@ define void @mini_s_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @mini_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: mini_u_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ult <16 x i8> %1,
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8>
@@ -1922,7 +1922,7 @@ define void @mini_u_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @mini_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: mini_u_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ult <8 x i16> %1,
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16>
@@ -1937,7 +1937,7 @@ define void @mini_u_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @mini_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: mini_u_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ult <4 x i32> %1,
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32>
@@ -1952,7 +1952,7 @@ define void @mini_u_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @mini_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: mini_u_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ult <2 x i64> %1,
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64>
@@ -1967,7 +1967,7 @@ define void @mini_u_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @mini_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: mini_s_eq_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sle <16 x i8> %1,
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8>
@@ -1982,7 +1982,7 @@ define void @mini_s_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @mini_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: mini_s_eq_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sle <8 x i16> %1,
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16>
@@ -1997,7 +1997,7 @@ define void @mini_s_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @mini_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: mini_s_eq_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sle <4 x i32> %1,
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32>
@@ -2012,7 +2012,7 @@ define void @mini_s_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @mini_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: mini_s_eq_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp sle <2 x i64> %1,
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64>
@@ -2027,7 +2027,7 @@ define void @mini_s_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 define void @mini_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 ; CHECK: mini_u_eq_v16i8:
-  %1 = load <16 x i8>* %a
+  %1 = load <16 x i8>, <16 x i8>* %a
 ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ule <16 x i8> %1,
   %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8>
@@ -2042,7 +2042,7 @@ define void @mini_u_eq_v16i8(<16 x i8>* %c, <16 x i8>* %a) nounwind {
 define void @mini_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 ; CHECK: mini_u_eq_v8i16:
-  %1 = load <8 x i16>* %a
+  %1 = load <8 x i16>, <8 x i16>* %a
 ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ule <8 x i16> %1,
   %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16>
@@ -2057,7 +2057,7 @@ define void @mini_u_eq_v8i16(<8 x i16>* %c, <8 x i16>* %a) nounwind {
 define void @mini_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 ; CHECK: mini_u_eq_v4i32:
-  %1 = load <4 x i32>* %a
+  %1 = load <4 x i32>, <4 x i32>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ule <4 x i32> %1,
   %3 = select <4 x i1> %2, <4 x i32> %1, <4 x i32>
@@ -2072,7 +2072,7 @@ define void @mini_u_eq_v4i32(<4 x i32>* %c, <4 x i32>* %a) nounwind {
 define void @mini_u_eq_v2i64(<2 x i64>* %c, <2 x i64>* %a) nounwind {
 ; CHECK: mini_u_eq_v2i64:
-  %1 = load <2 x i64>* %a
+  %1 = load <2 x i64>, <2 x i64>* %a
 ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5)
   %2 = icmp ule <2 x i64> %1,
   %3 = select <2 x i1> %2, <2 x i64> %1, <2 x i64>
diff --git a/llvm/test/CodeGen/Mips/msa/compare_float.ll b/llvm/test/CodeGen/Mips/msa/compare_float.ll
index e93221b9361..3229d027d95 100644
--- a/llvm/test/CodeGen/Mips/msa/compare_float.ll
+++ b/llvm/test/CodeGen/Mips/msa/compare_float.ll
@@ -9,8 +9,8 @@ declare <2 x double> @llvm.mips.fmin.d(<2 x double>, <2 x double>) nounwind
 define void @false_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
 ; CHECK: false_v4f32:
-  %1 = load <4 x float>* %a
-  %2 = load <4 x float>* %b
+  %1 = load <4 x float>, <4 x float>* %a
+  %2 = load <4 x float>, <4 x float>* %b
   %3 = fcmp false <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
   store <4 x i32> %4, <4 x i32>* %c
@@ -25,8 +25,8 @@ define void @false_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwi
 define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind {
 ; CHECK: false_v2f64:
-  %1 = load <2 x double>* %a
-  %2 = load <2 x double>* %b
+  %1 = load <2 x double>, <2 x double>* %a
+  %2 = load <2 x double>, <2 x double>* %b
   %3 = fcmp false <2 x double> %1, %2
   %4 = sext <2 x i1> %3 to <2 x i64>
   store <2 x i64> %4, <2 x i64>* %c
@@ -41,9 +41,9 @@ define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) noun
 define void @oeq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
 ; CHECK: oeq_v4f32:
-  %1 = load <4 x float>* %a
+  %1 = load <4 x float>, <4 x float>* %a
 ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5)
-  %2 = load <4 x float>* %b
+  %2 = load <4 x float>, <4 x float>* %b
 ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6)
   %3 = fcmp oeq <4 x float> %1, %2
   %4 = sext <4 x i1> %3 to <4 x i32>
@@ -58,9 +58,9 @@ define void @oeq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind
 define
void @oeq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: oeq_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp oeq <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -75,9 +75,9 @@ define void @oeq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @oge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: oge_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp oge <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -92,9 +92,9 @@ define void @oge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @oge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: oge_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp oge <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -109,9 +109,9 @@ define void @oge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @ogt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ogt_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ogt <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -126,9 +126,9 @@ define void @ogt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @ogt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: ogt_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ogt <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -143,9 +143,9 @@ define void @ogt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @ole_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ole_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ole <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -160,9 +160,9 @@ define void @ole_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @ole_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: ole_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ole <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -177,9 +177,9 @@ define void @ole_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @olt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) 
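; These compare_float.ll tests all follow one three-step shape: load the two
; vector operands, fcmp them into an <N x i1> lane mask, then sext the mask
; so each lane widens to all-ones or all-zeros at the element width the
; CHECK lines expect. A self-contained sketch of that shape (the function
; name is invented for illustration):
;
;   define void @oeq_sketch(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind {
;     %1 = load <4 x float>, <4 x float>* %a
;     %2 = load <4 x float>, <4 x float>* %b
;     %3 = fcmp oeq <4 x float> %1, %2      ; lanewise compare, yields <4 x i1>
;     %4 = sext <4 x i1> %3 to <4 x i32>    ; true becomes -1, false becomes 0
;     store <4 x i32> %4, <4 x i32>* %c
;     ret void
;   }
;
; Note in the bsel CHECK lines further down that an ogt compare is matched as
; fclt with the operands swapped; greater-than is canonicalized into
; less-than form.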
nounwind { ; CHECK: olt_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp olt <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -194,9 +194,9 @@ define void @olt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @olt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: olt_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp olt <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -211,9 +211,9 @@ define void @olt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @one_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: one_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp one <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -228,9 +228,9 @@ define void @one_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @one_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: one_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp one <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -245,9 +245,9 @@ define void @one_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @ord_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ord_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ord <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -262,9 +262,9 @@ define void @ord_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @ord_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: ord_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ord <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -279,9 +279,9 @@ define void @ord_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @ueq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ueq_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ueq <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -296,9 +296,9 @@ define void @ueq_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @ueq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: ueq_v2f64: - %1 = load <2 x double>* %a + %1 = 
load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ueq <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -313,9 +313,9 @@ define void @ueq_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @uge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: uge_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp uge <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -330,9 +330,9 @@ define void @uge_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @uge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: uge_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp uge <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -347,9 +347,9 @@ define void @uge_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @ugt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ugt_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ugt <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -364,9 +364,9 @@ define void @ugt_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @ugt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: ugt_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ugt <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -381,9 +381,9 @@ define void @ugt_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @ule_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ule_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ule <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -398,9 +398,9 @@ define void @ule_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @ule_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: ule_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ule <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -415,9 +415,9 @@ define void @ule_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @ult_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: ult_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w 
[[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ult <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -432,9 +432,9 @@ define void @ult_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @ult_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: ult_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ult <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -449,9 +449,9 @@ define void @ult_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @uno_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: uno_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp uno <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> @@ -466,9 +466,9 @@ define void @uno_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind define void @uno_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: uno_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp uno <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> @@ -483,8 +483,8 @@ define void @uno_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwi define void @true_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: true_v4f32: - %1 = load <4 x float>* %a - %2 = load <4 x float>* %b + %1 = load <4 x float>, <4 x float>* %a + %2 = load <4 x float>, <4 x float>* %b %3 = fcmp true <4 x float> %1, %2 %4 = sext <4 x i1> %3 to <4 x i32> store <4 x i32> %4, <4 x i32>* %c @@ -499,8 +499,8 @@ define void @true_v4f32(<4 x i32>* %c, <4 x float>* %a, <4 x float>* %b) nounwin define void @true_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: true_v2f64: - %1 = load <2 x double>* %a - %2 = load <2 x double>* %b + %1 = load <2 x double>, <2 x double>* %a + %2 = load <2 x double>, <2 x double>* %b %3 = fcmp true <2 x double> %1, %2 %4 = sext <2 x i1> %3 to <2 x i64> store <2 x i64> %4, <2 x i64>* %c @@ -516,11 +516,11 @@ define void @bsel_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b, <4 x float>* %c) nounwind { ; CHECK: bsel_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) - %3 = load <4 x float>* %c + %3 = load <4 x float>, <4 x float>* %c ; CHECK-DAG: ld.w [[R3:\$w[0-9]+]], 0($7) %4 = fcmp ogt <4 x float> %1, %2 ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -538,11 +538,11 @@ define void @bsel_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b, <2 x double>* %c) nounwind { ; CHECK: bsel_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) - %3 
= load <2 x double>* %c + %3 = load <2 x double>, <2 x double>* %c ; CHECK-DAG: ld.d [[R3:\$w[0-9]+]], 0($7) %4 = fcmp ogt <2 x double> %1, %2 ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -560,9 +560,9 @@ define void @bseli_v4f32(<4 x float>* %d, <4 x float>* %a, <4 x float>* %b, <4 x float>* %c) nounwind { ; CHECK: bseli_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ogt <4 x float> %1, %2 ; CHECK-DAG: fclt.w [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -580,9 +580,9 @@ define void @bseli_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b, <2 x double>* %c) nounwind { ; CHECK: bseli_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = fcmp ogt <2 x double> %1, %2 ; CHECK-DAG: fclt.d [[R4:\$w[0-9]+]], [[R2]], [[R1]] @@ -599,9 +599,9 @@ define void @bseli_v2f64(<2 x double>* %d, <2 x double>* %a, <2 x double>* %b, define void @max_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: max_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = tail call <4 x float> @llvm.mips.fmax.w(<4 x float> %1, <4 x float> %2) ; CHECK-DAG: fmax.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -615,9 +615,9 @@ define void @max_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi define void @max_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: max_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = tail call <2 x double> @llvm.mips.fmax.d(<2 x double> %1, <2 x double> %2) ; CHECK-DAG: fmax.d [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -631,9 +631,9 @@ define void @max_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nou define void @min_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwind { ; CHECK: min_v4f32: - %1 = load <4 x float>* %a + %1 = load <4 x float>, <4 x float>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x float>* %b + %2 = load <4 x float>, <4 x float>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = tail call <4 x float> @llvm.mips.fmin.w(<4 x float> %1, <4 x float> %2) ; CHECK-DAG: fmin.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -647,9 +647,9 @@ define void @min_v4f32(<4 x float>* %c, <4 x float>* %a, <4 x float>* %b) nounwi define void @min_v2f64(<2 x double>* %c, <2 x double>* %a, <2 x double>* %b) nounwind { ; CHECK: min_v2f64: - %1 = load <2 x double>* %a + %1 = load <2 x double>, <2 x double>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x double>* %b + %2 = load <2 x double>, <2 x double>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = tail call <2 x double> @llvm.mips.fmin.d(<2 x double> %1, <2 x double> %2) ; CHECK-DAG: fmin.d [[R3:\$w[0-9]+]], [[R1]], [[R2]] diff --git a/llvm/test/CodeGen/Mips/msa/elm_copy.ll b/llvm/test/CodeGen/Mips/msa/elm_copy.ll index 0dd75fa3db1..2a0d74f4452 100644 --- a/llvm/test/CodeGen/Mips/msa/elm_copy.ll +++ 
b/llvm/test/CodeGen/Mips/msa/elm_copy.ll @@ -15,7 +15,7 @@ define void @llvm_mips_copy_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_copy_s_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_copy_s_b_ARG1 %1 = tail call i32 @llvm.mips.copy.s.b(<16 x i8> %0, i32 1) store i32 %1, i32* @llvm_mips_copy_s_b_RES ret void @@ -38,7 +38,7 @@ declare i32 @llvm.mips.copy.s.b(<16 x i8>, i32) nounwind define void @llvm_mips_copy_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_copy_s_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_copy_s_h_ARG1 %1 = tail call i32 @llvm.mips.copy.s.h(<8 x i16> %0, i32 1) store i32 %1, i32* @llvm_mips_copy_s_h_RES ret void @@ -61,7 +61,7 @@ declare i32 @llvm.mips.copy.s.h(<8 x i16>, i32) nounwind define void @llvm_mips_copy_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_copy_s_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_copy_s_w_ARG1 %1 = tail call i32 @llvm.mips.copy.s.w(<4 x i32> %0, i32 1) store i32 %1, i32* @llvm_mips_copy_s_w_RES ret void @@ -84,7 +84,7 @@ declare i32 @llvm.mips.copy.s.w(<4 x i32>, i32) nounwind define void @llvm_mips_copy_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_copy_s_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_copy_s_d_ARG1 %1 = tail call i64 @llvm.mips.copy.s.d(<2 x i64> %0, i32 1) store i64 %1, i64* @llvm_mips_copy_s_d_RES ret void @@ -112,7 +112,7 @@ declare i64 @llvm.mips.copy.s.d(<2 x i64>, i32) nounwind define void @llvm_mips_copy_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_copy_u_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_copy_u_b_ARG1 %1 = tail call i32 @llvm.mips.copy.u.b(<16 x i8> %0, i32 1) store i32 %1, i32* @llvm_mips_copy_u_b_RES ret void @@ -135,7 +135,7 @@ declare i32 @llvm.mips.copy.u.b(<16 x i8>, i32) nounwind define void @llvm_mips_copy_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_copy_u_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_copy_u_h_ARG1 %1 = tail call i32 @llvm.mips.copy.u.h(<8 x i16> %0, i32 1) store i32 %1, i32* @llvm_mips_copy_u_h_RES ret void @@ -158,7 +158,7 @@ declare i32 @llvm.mips.copy.u.h(<8 x i16>, i32) nounwind define void @llvm_mips_copy_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_copy_u_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_copy_u_w_ARG1 %1 = tail call i32 @llvm.mips.copy.u.w(<4 x i32> %0, i32 1) store i32 %1, i32* @llvm_mips_copy_u_w_RES ret void @@ -181,7 +181,7 @@ declare i32 @llvm.mips.copy.u.w(<4 x i32>, i32) nounwind define void @llvm_mips_copy_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_copy_u_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_copy_u_d_ARG1 %1 = tail call i64 @llvm.mips.copy.u.d(<2 x i64> %0, i32 1) store i64 %1, i64* @llvm_mips_copy_u_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/elm_insv.ll b/llvm/test/CodeGen/Mips/msa/elm_insv.ll index c746e523def..46e6289189d 100644 --- a/llvm/test/CodeGen/Mips/msa/elm_insv.ll +++ b/llvm/test/CodeGen/Mips/msa/elm_insv.ll @@ -16,8 +16,8 @@ define void @llvm_mips_insert_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_insert_b_ARG1 - %1 = load i32* @llvm_mips_insert_b_ARG3 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_insert_b_ARG1 + %1 = load i32, i32* @llvm_mips_insert_b_ARG3 %2 = tail call <16 x i8> @llvm.mips.insert.b(<16 x i8> %0, i32 1, i32 %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_insert_b_RES ret void @@ -38,8 +38,8 @@ declare <16 x i8> @llvm.mips.insert.b(<16 x i8>, i32, i32) nounwind define void @llvm_mips_insert_h_test() 
nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_insert_h_ARG1 - %1 = load i32* @llvm_mips_insert_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_insert_h_ARG1 + %1 = load i32, i32* @llvm_mips_insert_h_ARG3 %2 = tail call <8 x i16> @llvm.mips.insert.h(<8 x i16> %0, i32 1, i32 %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_insert_h_RES ret void @@ -60,8 +60,8 @@ declare <8 x i16> @llvm.mips.insert.h(<8 x i16>, i32, i32) nounwind define void @llvm_mips_insert_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_insert_w_ARG1 - %1 = load i32* @llvm_mips_insert_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_insert_w_ARG1 + %1 = load i32, i32* @llvm_mips_insert_w_ARG3 %2 = tail call <4 x i32> @llvm.mips.insert.w(<4 x i32> %0, i32 1, i32 %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_insert_w_RES ret void @@ -82,8 +82,8 @@ declare <4 x i32> @llvm.mips.insert.w(<4 x i32>, i32, i32) nounwind define void @llvm_mips_insert_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_insert_d_ARG1 - %1 = load i64* @llvm_mips_insert_d_ARG3 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_insert_d_ARG1 + %1 = load i64, i64* @llvm_mips_insert_d_ARG3 %2 = tail call <2 x i64> @llvm.mips.insert.d(<2 x i64> %0, i32 1, i64 %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_insert_d_RES ret void @@ -110,8 +110,8 @@ declare <2 x i64> @llvm.mips.insert.d(<2 x i64>, i32, i64) nounwind define void @llvm_mips_insve_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_insve_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_insve_b_ARG3 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_insve_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_insve_b_ARG3 %2 = tail call <16 x i8> @llvm.mips.insve.b(<16 x i8> %0, i32 1, <16 x i8> %1) store <16 x i8> %2, <16 x i8>* @llvm_mips_insve_b_RES ret void @@ -136,8 +136,8 @@ declare <16 x i8> @llvm.mips.insve.b(<16 x i8>, i32, <16 x i8>) nounwind define void @llvm_mips_insve_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_insve_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_insve_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_insve_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_insve_h_ARG3 %2 = tail call <8 x i16> @llvm.mips.insve.h(<8 x i16> %0, i32 1, <8 x i16> %1) store <8 x i16> %2, <8 x i16>* @llvm_mips_insve_h_RES ret void @@ -162,8 +162,8 @@ declare <8 x i16> @llvm.mips.insve.h(<8 x i16>, i32, <8 x i16>) nounwind define void @llvm_mips_insve_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_insve_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_insve_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_insve_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_insve_w_ARG3 %2 = tail call <4 x i32> @llvm.mips.insve.w(<4 x i32> %0, i32 1, <4 x i32> %1) store <4 x i32> %2, <4 x i32>* @llvm_mips_insve_w_RES ret void @@ -188,8 +188,8 @@ declare <4 x i32> @llvm.mips.insve.w(<4 x i32>, i32, <4 x i32>) nounwind define void @llvm_mips_insve_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_insve_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_insve_d_ARG3 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_insve_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_insve_d_ARG3 %2 = tail call <2 x i64> @llvm.mips.insve.d(<2 x i64> %0, i32 1, <2 x i64> %1) store <2 x i64> %2, <2 x i64>* @llvm_mips_insve_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/elm_move.ll b/llvm/test/CodeGen/Mips/msa/elm_move.ll index 98c06c732c3..9665b6d688f 100644 --- a/llvm/test/CodeGen/Mips/msa/elm_move.ll +++ b/llvm/test/CodeGen/Mips/msa/elm_move.ll @@ -9,7 +9,7 @@ define void 
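; Between them, elm_copy.ll and elm_insv.ll cover both directions of lane
; traffic: copy.s/copy.u read one element out to a scalar register (sign- or
; zero-extending it), insert writes a scalar into one element, and insve
; copies element 0 of one vector into a chosen element of another. A sketch
; built from two of the declarations above (the wrapper name is made up):
;
;   define <4 x i32> @lane1_swap_sketch(<4 x i32> %v, <4 x i32> %w) nounwind {
;     ; read lane 1 of %w, then write that scalar into lane 1 of %v
;     %s = tail call i32 @llvm.mips.copy.s.w(<4 x i32> %w, i32 1)
;     %r = tail call <4 x i32> @llvm.mips.insert.w(<4 x i32> %v, i32 1, i32 %s)
;     ret <4 x i32> %r
;   }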
@llvm_mips_move_vb_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_move_vb_ARG1
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_move_vb_ARG1
   %1 = tail call <16 x i8> @llvm.mips.move.v(<16 x i8> %0)
   store <16 x i8> %1, <16 x i8>* @llvm_mips_move_vb_RES
   ret void
diff --git a/llvm/test/CodeGen/Mips/msa/elm_shift_slide.ll b/llvm/test/CodeGen/Mips/msa/elm_shift_slide.ll
index 00a6544b120..87f15f1a8c9 100644
--- a/llvm/test/CodeGen/Mips/msa/elm_shift_slide.ll
+++ b/llvm/test/CodeGen/Mips/msa/elm_shift_slide.ll
@@ -10,8 +10,8 @@
 define void @llvm_mips_sldi_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_sldi_b_ARG1
-  %1 = load <16 x i8>* @llvm_mips_sldi_b_ARG2
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_sldi_b_ARG1
+  %1 = load <16 x i8>, <16 x i8>* @llvm_mips_sldi_b_ARG2
   %2 = tail call <16 x i8> @llvm.mips.sldi.b(<16 x i8> %0, <16 x i8> %1, i32 1)
   store <16 x i8> %2, <16 x i8>* @llvm_mips_sldi_b_RES
   ret void
@@ -31,8 +31,8 @@ declare <16 x i8> @llvm.mips.sldi.b(<16 x i8>, <16 x i8>, i32) nounwind
 define void @llvm_mips_sldi_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_sldi_h_ARG1
-  %1 = load <8 x i16>* @llvm_mips_sldi_h_ARG2
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_sldi_h_ARG1
+  %1 = load <8 x i16>, <8 x i16>* @llvm_mips_sldi_h_ARG2
   %2 = tail call <8 x i16> @llvm.mips.sldi.h(<8 x i16> %0, <8 x i16> %1, i32 1)
   store <8 x i16> %2, <8 x i16>* @llvm_mips_sldi_h_RES
   ret void
@@ -52,8 +52,8 @@ declare <8 x i16> @llvm.mips.sldi.h(<8 x i16>, <8 x i16>, i32) nounwind
 define void @llvm_mips_sldi_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_sldi_w_ARG1
-  %1 = load <4 x i32>* @llvm_mips_sldi_w_ARG2
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_sldi_w_ARG1
+  %1 = load <4 x i32>, <4 x i32>* @llvm_mips_sldi_w_ARG2
   %2 = tail call <4 x i32> @llvm.mips.sldi.w(<4 x i32> %0, <4 x i32> %1, i32 1)
   store <4 x i32> %2, <4 x i32>* @llvm_mips_sldi_w_RES
   ret void
@@ -73,8 +73,8 @@ declare <4 x i32> @llvm.mips.sldi.w(<4 x i32>, <4 x i32>, i32) nounwind
 define void @llvm_mips_sldi_d_test() nounwind {
 entry:
-  %0 = load <2 x i64>* @llvm_mips_sldi_d_ARG1
-  %1 = load <2 x i64>* @llvm_mips_sldi_d_ARG2
+  %0 = load <2 x i64>, <2 x i64>* @llvm_mips_sldi_d_ARG1
+  %1 = load <2 x i64>, <2 x i64>* @llvm_mips_sldi_d_ARG2
   %2 = tail call <2 x i64> @llvm.mips.sldi.d(<2 x i64> %0, <2 x i64> %1, i32 1)
   store <2 x i64> %2, <2 x i64>* @llvm_mips_sldi_d_RES
   ret void
@@ -93,7 +93,7 @@ declare <2 x i64> @llvm.mips.sldi.d(<2 x i64>, <2 x i64>, i32) nounwind
 define void @llvm_mips_splati_b_test() nounwind {
 entry:
-  %0 = load <16 x i8>* @llvm_mips_splati_b_ARG1
+  %0 = load <16 x i8>, <16 x i8>* @llvm_mips_splati_b_ARG1
   %1 = tail call <16 x i8> @llvm.mips.splati.b(<16 x i8> %0, i32 1)
   store <16 x i8> %1, <16 x i8>* @llvm_mips_splati_b_RES
   ret void
@@ -112,7 +112,7 @@ declare <16 x i8> @llvm.mips.splati.b(<16 x i8>, i32) nounwind
 define void @llvm_mips_splati_h_test() nounwind {
 entry:
-  %0 = load <8 x i16>* @llvm_mips_splati_h_ARG1
+  %0 = load <8 x i16>, <8 x i16>* @llvm_mips_splati_h_ARG1
   %1 = tail call <8 x i16> @llvm.mips.splati.h(<8 x i16> %0, i32 1)
   store <8 x i16> %1, <8 x i16>* @llvm_mips_splati_h_RES
   ret void
@@ -131,7 +131,7 @@ declare <8 x i16> @llvm.mips.splati.h(<8 x i16>, i32) nounwind
 define void @llvm_mips_splati_w_test() nounwind {
 entry:
-  %0 = load <4 x i32>* @llvm_mips_splati_w_ARG1
+  %0 = load <4 x i32>, <4 x i32>* @llvm_mips_splati_w_ARG1
   %1 = tail call <4 x i32> @llvm.mips.splati.w(<4 x i32> %0, i32 1)
   store <4 x i32> %1, <4 x i32>* @llvm_mips_splati_w_RES
   ret void
@@
-150,7 +150,7 @@ declare <4 x i32> @llvm.mips.splati.w(<4 x i32>, i32) nounwind define void @llvm_mips_splati_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_splati_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_splati_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.splati.d(<2 x i64> %0, i32 1) store <2 x i64> %1, <2 x i64>* @llvm_mips_splati_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/frameindex.ll b/llvm/test/CodeGen/Mips/msa/frameindex.ll index 3c0119008ce..afd28ae184d 100644 --- a/llvm/test/CodeGen/Mips/msa/frameindex.ll +++ b/llvm/test/CodeGen/Mips/msa/frameindex.ll @@ -5,7 +5,7 @@ define void @loadstore_v16i8_near() nounwind { ; MIPS32-AE: loadstore_v16i8_near: %1 = alloca <16 x i8> - %2 = load volatile <16 x i8>* %1 + %2 = load volatile <16 x i8>, <16 x i8>* %1 ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0($sp) store volatile <16 x i8> %2, <16 x i8>* %1 ; MIPS32-AE: st.b [[R1]], 0($sp) @@ -20,7 +20,7 @@ define void @loadstore_v16i8_just_under_simm10() nounwind { %1 = alloca <16 x i8> %2 = alloca [496 x i8] ; Push the frame right up to 512 bytes - %3 = load volatile <16 x i8>* %1 + %3 = load volatile <16 x i8>, <16 x i8>* %1 ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 496($sp) store volatile <16 x i8> %3, <16 x i8>* %1 ; MIPS32-AE: st.b [[R1]], 496($sp) @@ -35,7 +35,7 @@ define void @loadstore_v16i8_just_over_simm10() nounwind { %1 = alloca <16 x i8> %2 = alloca [497 x i8] ; Push the frame just over 512 bytes - %3 = load volatile <16 x i8>* %1 + %3 = load volatile <16 x i8>, <16 x i8>* %1 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 512 ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <16 x i8> %3, <16 x i8>* %1 @@ -52,7 +52,7 @@ define void @loadstore_v16i8_just_under_simm16() nounwind { %1 = alloca <16 x i8> %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes - %3 = load volatile <16 x i8>* %1 + %3 = load volatile <16 x i8>, <16 x i8>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]]) @@ -71,7 +71,7 @@ define void @loadstore_v16i8_just_over_simm16() nounwind { %1 = alloca <16 x i8> %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes - %3 = load volatile <16 x i8>* %1 + %3 = load volatile <16 x i8>, <16 x i8>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.b [[R1:\$w[0-9]+]], 0([[BASE]]) @@ -88,7 +88,7 @@ define void @loadstore_v8i16_near() nounwind { ; MIPS32-AE: loadstore_v8i16_near: %1 = alloca <8 x i16> - %2 = load volatile <8 x i16>* %1 + %2 = load volatile <8 x i16>, <8 x i16>* %1 ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0($sp) store volatile <8 x i16> %2, <8 x i16>* %1 ; MIPS32-AE: st.h [[R1]], 0($sp) @@ -106,7 +106,7 @@ define void @loadstore_v8i16_unaligned() nounwind { %4 = bitcast i8* %3 to [2 x <8 x i16>]* %5 = getelementptr [2 x <8 x i16>], [2 x <8 x i16>]* %4, i32 0, i32 0 - %6 = load volatile <8 x i16>* %5 + %6 = load volatile <8 x i16>, <8 x i16>* %5 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1 ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <8 x i16> %6, <8 x i16>* %5 @@ -123,7 +123,7 @@ define void @loadstore_v8i16_just_under_simm10() nounwind { %1 = alloca <8 x i16> %2 = alloca [1008 x i8] ; Push the frame right up to 1024 bytes - %3 = load volatile <8 x i16>* %1 + %3 = load volatile <8 x i16>, <8 x i16>* %1 ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 1008($sp) store volatile <8 x i16> %3, <8 x i16>* %1 ; MIPS32-AE: st.h 
[[R1]], 1008($sp) @@ -138,7 +138,7 @@ define void @loadstore_v8i16_just_over_simm10() nounwind { %1 = alloca <8 x i16> %2 = alloca [1009 x i8] ; Push the frame just over 1024 bytes - %3 = load volatile <8 x i16>* %1 + %3 = load volatile <8 x i16>, <8 x i16>* %1 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1024 ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <8 x i16> %3, <8 x i16>* %1 @@ -155,7 +155,7 @@ define void @loadstore_v8i16_just_under_simm16() nounwind { %1 = alloca <8 x i16> %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes - %3 = load volatile <8 x i16>* %1 + %3 = load volatile <8 x i16>, <8 x i16>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]]) @@ -174,7 +174,7 @@ define void @loadstore_v8i16_just_over_simm16() nounwind { %1 = alloca <8 x i16> %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes - %3 = load volatile <8 x i16>* %1 + %3 = load volatile <8 x i16>, <8 x i16>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.h [[R1:\$w[0-9]+]], 0([[BASE]]) @@ -191,7 +191,7 @@ define void @loadstore_v4i32_near() nounwind { ; MIPS32-AE: loadstore_v4i32_near: %1 = alloca <4 x i32> - %2 = load volatile <4 x i32>* %1 + %2 = load volatile <4 x i32>, <4 x i32>* %1 ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0($sp) store volatile <4 x i32> %2, <4 x i32>* %1 ; MIPS32-AE: st.w [[R1]], 0($sp) @@ -209,7 +209,7 @@ define void @loadstore_v4i32_unaligned() nounwind { %4 = bitcast i8* %3 to [2 x <4 x i32>]* %5 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %4, i32 0, i32 0 - %6 = load volatile <4 x i32>* %5 + %6 = load volatile <4 x i32>, <4 x i32>* %5 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1 ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <4 x i32> %6, <4 x i32>* %5 @@ -226,7 +226,7 @@ define void @loadstore_v4i32_just_under_simm10() nounwind { %1 = alloca <4 x i32> %2 = alloca [2032 x i8] ; Push the frame right up to 2048 bytes - %3 = load volatile <4 x i32>* %1 + %3 = load volatile <4 x i32>, <4 x i32>* %1 ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 2032($sp) store volatile <4 x i32> %3, <4 x i32>* %1 ; MIPS32-AE: st.w [[R1]], 2032($sp) @@ -241,7 +241,7 @@ define void @loadstore_v4i32_just_over_simm10() nounwind { %1 = alloca <4 x i32> %2 = alloca [2033 x i8] ; Push the frame just over 2048 bytes - %3 = load volatile <4 x i32>* %1 + %3 = load volatile <4 x i32>, <4 x i32>* %1 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 2048 ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <4 x i32> %3, <4 x i32>* %1 @@ -258,7 +258,7 @@ define void @loadstore_v4i32_just_under_simm16() nounwind { %1 = alloca <4 x i32> %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes - %3 = load volatile <4 x i32>* %1 + %3 = load volatile <4 x i32>, <4 x i32>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]]) @@ -277,7 +277,7 @@ define void @loadstore_v4i32_just_over_simm16() nounwind { %1 = alloca <4 x i32> %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes - %3 = load volatile <4 x i32>* %1 + %3 = load volatile <4 x i32>, <4 x i32>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.w [[R1:\$w[0-9]+]], 0([[BASE]]) @@ -294,7 +294,7 @@ define void 
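; The frameindex.ll tests probe the reach of the MSA load/store offset field,
; a 10-bit signed immediate scaled by the element size: 512 bytes of direct
; reach for ld.b, 1024 for ld.h, 2048 for ld.w and 4096 for ld.d. Each pair
; of [N x i8] allocas sizes the frame so the vector slot lands just inside or
; just outside that window; once outside, the CHECK lines expect the base
; address to be materialized first (addiu while the offset fits a 16-bit
; signed field, ori plus addu for 32768). A sketch of the just-over case for
; ld.b, mirroring the v16i8 tests above (the function name is invented):
;
;   define void @just_over_simm10_sketch() nounwind {
;     %1 = alloca <16 x i8>
;     %2 = alloca [497 x i8]  ; pushes the vector slot to 512($sp), one past ld.b reach
;     %3 = load volatile <16 x i8>, <16 x i8>* %1
;     store volatile <16 x i8> %3, <16 x i8>* %1
;     ret void
;   }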
@loadstore_v2i64_near() nounwind { ; MIPS32-AE: loadstore_v2i64_near: %1 = alloca <2 x i64> - %2 = load volatile <2 x i64>* %1 + %2 = load volatile <2 x i64>, <2 x i64>* %1 ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0($sp) store volatile <2 x i64> %2, <2 x i64>* %1 ; MIPS32-AE: st.d [[R1]], 0($sp) @@ -312,7 +312,7 @@ define void @loadstore_v2i64_unaligned() nounwind { %4 = bitcast i8* %3 to [2 x <2 x i64>]* %5 = getelementptr [2 x <2 x i64>], [2 x <2 x i64>]* %4, i32 0, i32 0 - %6 = load volatile <2 x i64>* %5 + %6 = load volatile <2 x i64>, <2 x i64>* %5 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1 ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <2 x i64> %6, <2 x i64>* %5 @@ -329,7 +329,7 @@ define void @loadstore_v2i64_just_under_simm10() nounwind { %1 = alloca <2 x i64> %2 = alloca [4080 x i8] ; Push the frame right up to 4096 bytes - %3 = load volatile <2 x i64>* %1 + %3 = load volatile <2 x i64>, <2 x i64>* %1 ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 4080($sp) store volatile <2 x i64> %3, <2 x i64>* %1 ; MIPS32-AE: st.d [[R1]], 4080($sp) @@ -344,7 +344,7 @@ define void @loadstore_v2i64_just_over_simm10() nounwind { %1 = alloca <2 x i64> %2 = alloca [4081 x i8] ; Push the frame just over 4096 bytes - %3 = load volatile <2 x i64>* %1 + %3 = load volatile <2 x i64>, <2 x i64>* %1 ; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 4096 ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]]) store volatile <2 x i64> %3, <2 x i64>* %1 @@ -361,7 +361,7 @@ define void @loadstore_v2i64_just_under_simm16() nounwind { %1 = alloca <2 x i64> %2 = alloca [32752 x i8] ; Push the frame right up to 32768 bytes - %3 = load volatile <2 x i64>* %1 + %3 = load volatile <2 x i64>, <2 x i64>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]]) @@ -380,7 +380,7 @@ define void @loadstore_v2i64_just_over_simm16() nounwind { %1 = alloca <2 x i64> %2 = alloca [32753 x i8] ; Push the frame just over 32768 bytes - %3 = load volatile <2 x i64>* %1 + %3 = load volatile <2 x i64>, <2 x i64>* %1 ; MIPS32-AE: ori [[R2:\$([0-9]+|gp)]], $zero, 32768 ; MIPS32-AE: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]] ; MIPS32-AE: ld.d [[R1:\$w[0-9]+]], 0([[BASE]]) diff --git a/llvm/test/CodeGen/Mips/msa/i10.ll b/llvm/test/CodeGen/Mips/msa/i10.ll index c5a96174a73..204884bbf02 100644 --- a/llvm/test/CodeGen/Mips/msa/i10.ll +++ b/llvm/test/CodeGen/Mips/msa/i10.ll @@ -7,7 +7,7 @@ define i32 @llvm_mips_bnz_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bnz_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnz_b_ARG1 %1 = tail call i32 @llvm.mips.bnz.b(<16 x i8> %0) %2 = icmp eq i32 %1, 0 br i1 %2, label %true, label %false @@ -28,7 +28,7 @@ declare i32 @llvm.mips.bnz.b(<16 x i8>) nounwind define i32 @llvm_mips_bnz_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bnz_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bnz_h_ARG1 %1 = tail call i32 @llvm.mips.bnz.h(<8 x i16> %0) %2 = icmp eq i32 %1, 0 br i1 %2, label %true, label %false @@ -49,7 +49,7 @@ declare i32 @llvm.mips.bnz.h(<8 x i16>) nounwind define i32 @llvm_mips_bnz_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bnz_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bnz_w_ARG1 %1 = tail call i32 @llvm.mips.bnz.w(<4 x i32> %0) %2 = icmp eq i32 %1, 0 br i1 %2, label %true, label %false @@ -70,7 +70,7 @@ declare i32 @llvm.mips.bnz.w(<4 x i32>) nounwind define i32 @llvm_mips_bnz_d_test() nounwind { entry: - %0 = load <2 x i64>* 
@llvm_mips_bnz_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bnz_d_ARG1 %1 = tail call i32 @llvm.mips.bnz.d(<2 x i64> %0) %2 = icmp eq i32 %1, 0 br i1 %2, label %true, label %false diff --git a/llvm/test/CodeGen/Mips/msa/i5-a.ll b/llvm/test/CodeGen/Mips/msa/i5-a.ll index 0b507208f42..f9486b17e0a 100644 --- a/llvm/test/CodeGen/Mips/msa/i5-a.ll +++ b/llvm/test/CodeGen/Mips/msa/i5-a.ll @@ -9,7 +9,7 @@ define void @llvm_mips_addvi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_addvi_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_addvi_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_addvi_b_RES ret void @@ -28,7 +28,7 @@ declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32) nounwind define void @llvm_mips_addvi_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_addvi_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_addvi_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.addvi.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_addvi_h_RES ret void @@ -47,7 +47,7 @@ declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32) nounwind define void @llvm_mips_addvi_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_addvi_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_addvi_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.addvi.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_addvi_w_RES ret void @@ -66,7 +66,7 @@ declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32) nounwind define void @llvm_mips_addvi_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_addvi_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_addvi_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.addvi.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_addvi_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/i5-b.ll b/llvm/test/CodeGen/Mips/msa/i5-b.ll index da6be669f0d..40ab095f680 100644 --- a/llvm/test/CodeGen/Mips/msa/i5-b.ll +++ b/llvm/test/CodeGen/Mips/msa/i5-b.ll @@ -9,7 +9,7 @@ define void @llvm_mips_bclri_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bclri_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bclri_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %0, i32 7) store <16 x i8> %1, <16 x i8>* @llvm_mips_bclri_b_RES ret void @@ -29,7 +29,7 @@ declare <16 x i8> @llvm.mips.bclri.b(<16 x i8>, i32) nounwind define void @llvm_mips_bclri_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bclri_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bclri_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.bclri.h(<8 x i16> %0, i32 7) store <8 x i16> %1, <8 x i16>* @llvm_mips_bclri_h_RES ret void @@ -48,7 +48,7 @@ declare <8 x i16> @llvm.mips.bclri.h(<8 x i16>, i32) nounwind define void @llvm_mips_bclri_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bclri_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bclri_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.bclri.w(<4 x i32> %0, i32 7) store <4 x i32> %1, <4 x i32>* @llvm_mips_bclri_w_RES ret void @@ -67,7 +67,7 @@ declare <4 x i32> @llvm.mips.bclri.w(<4 x i32>, i32) nounwind define void @llvm_mips_bclri_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_bclri_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bclri_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.bclri.d(<2 x i64> %0, i32 7) store <2 x i64> %1, <2 x i64>* @llvm_mips_bclri_d_RES ret void @@ -87,8 +87,8 @@ declare <2 x i64> @llvm.mips.bclri.d(<2 x i64>, i32) nounwind define void @llvm_mips_binsli_b_test() nounwind { 
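; The i10.ll functions earlier in this hunk sequence all share one shape:
; collapse a whole vector to a scalar flag with llvm.mips.bnz.<df>, then
; branch on an integer compare of that flag. Condensed from the bnz.b test
; above:
;
;   %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnz_b_ARG1
;   %1 = tail call i32 @llvm.mips.bnz.b(<16 x i8> %0)
;   %2 = icmp eq i32 %1, 0
;   br i1 %2, label %true, label %false
;
; which presumably lets instruction selection fold the intrinsic and the
; branch into a single BNZ.B on the vector register.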
entry: - %0 = load <16 x i8>* @llvm_mips_binsli_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_binsli_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsli_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.binsli.b(<16 x i8> %0, <16 x i8> %1, i32 7) store <16 x i8> %2, <16 x i8>* @llvm_mips_binsli_b_RES ret void @@ -112,8 +112,8 @@ declare <16 x i8> @llvm.mips.binsli.b(<16 x i8>, <16 x i8>, i32) nounwind define void @llvm_mips_binsli_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_binsli_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_binsli_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsli_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsli_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.binsli.h(<8 x i16> %0, <8 x i16> %1, i32 7) store <8 x i16> %2, <8 x i16>* @llvm_mips_binsli_h_RES ret void @@ -137,8 +137,8 @@ declare <8 x i16> @llvm.mips.binsli.h(<8 x i16>, <8 x i16>, i32) nounwind define void @llvm_mips_binsli_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_binsli_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_binsli_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsli_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsli_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.binsli.w(<4 x i32> %0, <4 x i32> %1, i32 7) store <4 x i32> %2, <4 x i32>* @llvm_mips_binsli_w_RES ret void @@ -162,8 +162,8 @@ declare <4 x i32> @llvm.mips.binsli.w(<4 x i32>, <4 x i32>, i32) nounwind define void @llvm_mips_binsli_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_binsli_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_binsli_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsli_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsli_d_ARG2 ; TODO: We use a particularly wide mask here to work around a legalization ; issue. If the mask doesn't fit within a 10-bit immediate, it gets ; legalized into a constant pool. 
We should add a test to cover the @@ -191,8 +191,8 @@ declare <2 x i64> @llvm.mips.binsli.d(<2 x i64>, <2 x i64>, i32) nounwind define void @llvm_mips_binsri_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_binsri_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_binsri_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_binsri_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.binsri.b(<16 x i8> %0, <16 x i8> %1, i32 7) store <16 x i8> %2, <16 x i8>* @llvm_mips_binsri_b_RES ret void @@ -216,8 +216,8 @@ declare <16 x i8> @llvm.mips.binsri.b(<16 x i8>, <16 x i8>, i32) nounwind define void @llvm_mips_binsri_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_binsri_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_binsri_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_binsri_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_binsri_h_ARG2 %2 = tail call <8 x i16> @llvm.mips.binsri.h(<8 x i16> %0, <8 x i16> %1, i32 7) store <8 x i16> %2, <8 x i16>* @llvm_mips_binsri_h_RES ret void @@ -241,8 +241,8 @@ declare <8 x i16> @llvm.mips.binsri.h(<8 x i16>, <8 x i16>, i32) nounwind define void @llvm_mips_binsri_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_binsri_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_binsri_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_binsri_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_binsri_w_ARG2 %2 = tail call <4 x i32> @llvm.mips.binsri.w(<4 x i32> %0, <4 x i32> %1, i32 7) store <4 x i32> %2, <4 x i32>* @llvm_mips_binsri_w_RES ret void @@ -266,8 +266,8 @@ declare <4 x i32> @llvm.mips.binsri.w(<4 x i32>, <4 x i32>, i32) nounwind define void @llvm_mips_binsri_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_binsri_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_binsri_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_binsri_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_binsri_d_ARG2 %2 = tail call <2 x i64> @llvm.mips.binsri.d(<2 x i64> %0, <2 x i64> %1, i32 7) store <2 x i64> %2, <2 x i64>* @llvm_mips_binsri_d_RES ret void @@ -290,7 +290,7 @@ declare <2 x i64> @llvm.mips.binsri.d(<2 x i64>, <2 x i64>, i32) nounwind define void @llvm_mips_bnegi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bnegi_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnegi_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.bnegi.b(<16 x i8> %0, i32 7) store <16 x i8> %1, <16 x i8>* @llvm_mips_bnegi_b_RES ret void @@ -309,7 +309,7 @@ declare <16 x i8> @llvm.mips.bnegi.b(<16 x i8>, i32) nounwind define void @llvm_mips_bnegi_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bnegi_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bnegi_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.bnegi.h(<8 x i16> %0, i32 7) store <8 x i16> %1, <8 x i16>* @llvm_mips_bnegi_h_RES ret void @@ -328,7 +328,7 @@ declare <8 x i16> @llvm.mips.bnegi.h(<8 x i16>, i32) nounwind define void @llvm_mips_bnegi_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bnegi_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bnegi_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.bnegi.w(<4 x i32> %0, i32 7) store <4 x i32> %1, <4 x i32>* @llvm_mips_bnegi_w_RES ret void @@ -347,7 +347,7 @@ declare <4 x i32> @llvm.mips.bnegi.w(<4 x i32>, i32) nounwind define void @llvm_mips_bnegi_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_bnegi_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bnegi_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.bnegi.d(<2 x i64> %0, i32 7) store <2 x i64> %1, <2 x i64>* 
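; i5-b.ll walks the immediate bit-field operations: bclri clears bit <imm>
; in every lane, bseti sets it, bnegi flips it, and binsli/binsri copy the
; most- or least-significant bits of one vector into another. The TODO above
; records why binsli.d is exercised with an unusually wide mask: a mask that
; does not fit a 10-bit immediate would otherwise be legalized through a
; constant pool. A one-line sketch of the simplest of these intrinsics:
;
;   %r = tail call <16 x i8> @llvm.mips.bclri.b(<16 x i8> %v, i32 7)  ; clear bit 7 per lane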
@llvm_mips_bnegi_d_RES ret void @@ -366,7 +366,7 @@ declare <2 x i64> @llvm.mips.bnegi.d(<2 x i64>, i32) nounwind define void @llvm_mips_bseti_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bseti_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bseti_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.bseti.b(<16 x i8> %0, i32 7) store <16 x i8> %1, <16 x i8>* @llvm_mips_bseti_b_RES ret void @@ -385,7 +385,7 @@ declare <16 x i8> @llvm.mips.bseti.b(<16 x i8>, i32) nounwind define void @llvm_mips_bseti_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bseti_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bseti_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.bseti.h(<8 x i16> %0, i32 7) store <8 x i16> %1, <8 x i16>* @llvm_mips_bseti_h_RES ret void @@ -404,7 +404,7 @@ declare <8 x i16> @llvm.mips.bseti.h(<8 x i16>, i32) nounwind define void @llvm_mips_bseti_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bseti_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bseti_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.bseti.w(<4 x i32> %0, i32 7) store <4 x i32> %1, <4 x i32>* @llvm_mips_bseti_w_RES ret void @@ -423,7 +423,7 @@ declare <4 x i32> @llvm.mips.bseti.w(<4 x i32>, i32) nounwind define void @llvm_mips_bseti_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_bseti_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bseti_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.bseti.d(<2 x i64> %0, i32 7) store <2 x i64> %1, <2 x i64>* @llvm_mips_bseti_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/i5-c.ll b/llvm/test/CodeGen/Mips/msa/i5-c.ll index bf1578f30f3..815825013ea 100644 --- a/llvm/test/CodeGen/Mips/msa/i5-c.ll +++ b/llvm/test/CodeGen/Mips/msa/i5-c.ll @@ -9,7 +9,7 @@ define void @llvm_mips_ceqi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_ceqi_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ceqi_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.ceqi.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_ceqi_b_RES ret void @@ -28,7 +28,7 @@ declare <16 x i8> @llvm.mips.ceqi.b(<16 x i8>, i32) nounwind define void @llvm_mips_ceqi_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_ceqi_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_ceqi_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.ceqi.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_ceqi_h_RES ret void @@ -47,7 +47,7 @@ declare <8 x i16> @llvm.mips.ceqi.h(<8 x i16>, i32) nounwind define void @llvm_mips_ceqi_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_ceqi_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_ceqi_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.ceqi.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_ceqi_w_RES ret void @@ -66,7 +66,7 @@ declare <4 x i32> @llvm.mips.ceqi.w(<4 x i32>, i32) nounwind define void @llvm_mips_ceqi_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_ceqi_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_ceqi_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.ceqi.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_ceqi_d_RES ret void @@ -85,7 +85,7 @@ declare <2 x i64> @llvm.mips.ceqi.d(<2 x i64>, i32) nounwind define void @llvm_mips_clei_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_clei_s_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clei_s_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.clei.s.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_s_b_RES ret void @@ -104,7 +104,7 @@ declare <16 x i8> @llvm.mips.clei.s.b(<16 x 
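; The i5-c.ll tests around this point run each compare-with-immediate
; intrinsic (ceqi, then the signed and unsigned clei/clti variants) over
; every element type, always with a constant that fits the 5-bit immediate
; field. For example, for ceqi.b (the wrapper name is invented):
;
;   define <16 x i8> @ceqi_sketch(<16 x i8> %v) nounwind {
;     %r = tail call <16 x i8> @llvm.mips.ceqi.b(<16 x i8> %v, i32 14)  ; lanewise %v == 14
;     ret <16 x i8> %r
;   }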
i8>, i32) nounwind define void @llvm_mips_clei_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_clei_s_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clei_s_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.clei.s.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_s_h_RES ret void @@ -123,7 +123,7 @@ declare <8 x i16> @llvm.mips.clei.s.h(<8 x i16>, i32) nounwind define void @llvm_mips_clei_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_clei_s_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clei_s_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.clei.s.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_s_w_RES ret void @@ -142,7 +142,7 @@ declare <4 x i32> @llvm.mips.clei.s.w(<4 x i32>, i32) nounwind define void @llvm_mips_clei_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_clei_s_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clei_s_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.clei.s.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_s_d_RES ret void @@ -161,7 +161,7 @@ declare <2 x i64> @llvm.mips.clei.s.d(<2 x i64>, i32) nounwind define void @llvm_mips_clei_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_clei_u_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clei_u_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.clei.u.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_clei_u_b_RES ret void @@ -180,7 +180,7 @@ declare <16 x i8> @llvm.mips.clei.u.b(<16 x i8>, i32) nounwind define void @llvm_mips_clei_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_clei_u_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clei_u_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.clei.u.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_clei_u_h_RES ret void @@ -199,7 +199,7 @@ declare <8 x i16> @llvm.mips.clei.u.h(<8 x i16>, i32) nounwind define void @llvm_mips_clei_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_clei_u_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clei_u_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.clei.u.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_clei_u_w_RES ret void @@ -218,7 +218,7 @@ declare <4 x i32> @llvm.mips.clei.u.w(<4 x i32>, i32) nounwind define void @llvm_mips_clei_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_clei_u_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clei_u_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.clei.u.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_clei_u_d_RES ret void @@ -237,7 +237,7 @@ declare <2 x i64> @llvm.mips.clei.u.d(<2 x i64>, i32) nounwind define void @llvm_mips_clti_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_clti_s_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clti_s_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.clti.s.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_s_b_RES ret void @@ -256,7 +256,7 @@ declare <16 x i8> @llvm.mips.clti.s.b(<16 x i8>, i32) nounwind define void @llvm_mips_clti_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_clti_s_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clti_s_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.clti.s.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_s_h_RES ret void @@ -275,7 +275,7 @@ declare <8 x i16> @llvm.mips.clti.s.h(<8 x i16>, i32) nounwind define void @llvm_mips_clti_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_clti_s_w_ARG1 + %0 = load <4 x i32>, <4 x 
i32>* @llvm_mips_clti_s_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.clti.s.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_s_w_RES ret void @@ -294,7 +294,7 @@ declare <4 x i32> @llvm.mips.clti.s.w(<4 x i32>, i32) nounwind define void @llvm_mips_clti_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_clti_s_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clti_s_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.clti.s.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_s_d_RES ret void @@ -313,7 +313,7 @@ declare <2 x i64> @llvm.mips.clti.s.d(<2 x i64>, i32) nounwind define void @llvm_mips_clti_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_clti_u_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_clti_u_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.clti.u.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_clti_u_b_RES ret void @@ -332,7 +332,7 @@ declare <16 x i8> @llvm.mips.clti.u.b(<16 x i8>, i32) nounwind define void @llvm_mips_clti_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_clti_u_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_clti_u_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.clti.u.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_clti_u_h_RES ret void @@ -351,7 +351,7 @@ declare <8 x i16> @llvm.mips.clti.u.h(<8 x i16>, i32) nounwind define void @llvm_mips_clti_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_clti_u_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_clti_u_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.clti.u.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_clti_u_w_RES ret void @@ -370,7 +370,7 @@ declare <4 x i32> @llvm.mips.clti.u.w(<4 x i32>, i32) nounwind define void @llvm_mips_clti_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_clti_u_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_clti_u_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.clti.u.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_clti_u_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/i5-m.ll b/llvm/test/CodeGen/Mips/msa/i5-m.ll index 27663494324..ba6e9d2384a 100644 --- a/llvm/test/CodeGen/Mips/msa/i5-m.ll +++ b/llvm/test/CodeGen/Mips/msa/i5-m.ll @@ -9,7 +9,7 @@ define void @llvm_mips_maxi_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_maxi_s_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maxi_s_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.maxi.s.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_s_b_RES ret void @@ -28,7 +28,7 @@ declare <16 x i8> @llvm.mips.maxi.s.b(<16 x i8>, i32) nounwind define void @llvm_mips_maxi_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_maxi_s_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maxi_s_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.maxi.s.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_s_h_RES ret void @@ -47,7 +47,7 @@ declare <8 x i16> @llvm.mips.maxi.s.h(<8 x i16>, i32) nounwind define void @llvm_mips_maxi_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_maxi_s_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maxi_s_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.maxi.s.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_s_w_RES ret void @@ -66,7 +66,7 @@ declare <4 x i32> @llvm.mips.maxi.s.w(<4 x i32>, i32) nounwind define void @llvm_mips_maxi_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_maxi_s_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* 
@llvm_mips_maxi_s_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.maxi.s.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_s_d_RES ret void @@ -85,7 +85,7 @@ declare <2 x i64> @llvm.mips.maxi.s.d(<2 x i64>, i32) nounwind define void @llvm_mips_maxi_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_maxi_u_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_maxi_u_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.maxi.u.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_maxi_u_b_RES ret void @@ -104,7 +104,7 @@ declare <16 x i8> @llvm.mips.maxi.u.b(<16 x i8>, i32) nounwind define void @llvm_mips_maxi_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_maxi_u_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_maxi_u_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.maxi.u.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_maxi_u_h_RES ret void @@ -123,7 +123,7 @@ declare <8 x i16> @llvm.mips.maxi.u.h(<8 x i16>, i32) nounwind define void @llvm_mips_maxi_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_maxi_u_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_maxi_u_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.maxi.u.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_maxi_u_w_RES ret void @@ -142,7 +142,7 @@ declare <4 x i32> @llvm.mips.maxi.u.w(<4 x i32>, i32) nounwind define void @llvm_mips_maxi_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_maxi_u_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_maxi_u_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.maxi.u.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_maxi_u_d_RES ret void @@ -161,7 +161,7 @@ declare <2 x i64> @llvm.mips.maxi.u.d(<2 x i64>, i32) nounwind define void @llvm_mips_mini_s_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_mini_s_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mini_s_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.mini.s.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_s_b_RES ret void @@ -180,7 +180,7 @@ declare <16 x i8> @llvm.mips.mini.s.b(<16 x i8>, i32) nounwind define void @llvm_mips_mini_s_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_mini_s_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mini_s_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.mini.s.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_s_h_RES ret void @@ -199,7 +199,7 @@ declare <8 x i16> @llvm.mips.mini.s.h(<8 x i16>, i32) nounwind define void @llvm_mips_mini_s_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_mini_s_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mini_s_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.mini.s.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_s_w_RES ret void @@ -218,7 +218,7 @@ declare <4 x i32> @llvm.mips.mini.s.w(<4 x i32>, i32) nounwind define void @llvm_mips_mini_s_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_mini_s_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mini_s_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.mini.s.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_s_d_RES ret void @@ -237,7 +237,7 @@ declare <2 x i64> @llvm.mips.mini.s.d(<2 x i64>, i32) nounwind define void @llvm_mips_mini_u_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_mini_u_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_mini_u_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.mini.u.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_mini_u_b_RES ret 
void @@ -256,7 +256,7 @@ declare <16 x i8> @llvm.mips.mini.u.b(<16 x i8>, i32) nounwind define void @llvm_mips_mini_u_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_mini_u_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_mini_u_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.mini.u.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_mini_u_h_RES ret void @@ -275,7 +275,7 @@ declare <8 x i16> @llvm.mips.mini.u.h(<8 x i16>, i32) nounwind define void @llvm_mips_mini_u_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_mini_u_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_mini_u_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.mini.u.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_mini_u_w_RES ret void @@ -294,7 +294,7 @@ declare <4 x i32> @llvm.mips.mini.u.w(<4 x i32>, i32) nounwind define void @llvm_mips_mini_u_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_mini_u_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_mini_u_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.mini.u.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_mini_u_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/i5-s.ll b/llvm/test/CodeGen/Mips/msa/i5-s.ll index 184172f63b8..db331b1476c 100644 --- a/llvm/test/CodeGen/Mips/msa/i5-s.ll +++ b/llvm/test/CodeGen/Mips/msa/i5-s.ll @@ -9,7 +9,7 @@ define void @llvm_mips_subvi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_subvi_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_subvi_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.subvi.b(<16 x i8> %0, i32 14) store <16 x i8> %1, <16 x i8>* @llvm_mips_subvi_b_RES ret void @@ -28,7 +28,7 @@ declare <16 x i8> @llvm.mips.subvi.b(<16 x i8>, i32) nounwind define void @llvm_mips_subvi_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_subvi_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_subvi_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.subvi.h(<8 x i16> %0, i32 14) store <8 x i16> %1, <8 x i16>* @llvm_mips_subvi_h_RES ret void @@ -47,7 +47,7 @@ declare <8 x i16> @llvm.mips.subvi.h(<8 x i16>, i32) nounwind define void @llvm_mips_subvi_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_subvi_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_subvi_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.subvi.w(<4 x i32> %0, i32 14) store <4 x i32> %1, <4 x i32>* @llvm_mips_subvi_w_RES ret void @@ -66,7 +66,7 @@ declare <4 x i32> @llvm.mips.subvi.w(<4 x i32>, i32) nounwind define void @llvm_mips_subvi_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_subvi_d_ARG1 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_subvi_d_ARG1 %1 = tail call <2 x i64> @llvm.mips.subvi.d(<2 x i64> %0, i32 14) store <2 x i64> %1, <2 x i64>* @llvm_mips_subvi_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/i5_ld_st.ll b/llvm/test/CodeGen/Mips/msa/i5_ld_st.ll index 7cc55f2904b..991bb8436b3 100644 --- a/llvm/test/CodeGen/Mips/msa/i5_ld_st.ll +++ b/llvm/test/CodeGen/Mips/msa/i5_ld_st.ll @@ -81,7 +81,7 @@ declare <2 x i64> @llvm.mips.ld.d(i8*, i32) nounwind define void @llvm_mips_st_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_st_b_ARG + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_st_b_ARG %1 = bitcast <16 x i8>* @llvm_mips_st_b_RES to i8* tail call void @llvm.mips.st.b(<16 x i8> %0, i8* %1, i32 16) ret void @@ -99,7 +99,7 @@ declare void @llvm.mips.st.b(<16 x i8>, i8*, i32) nounwind define void @llvm_mips_st_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_st_h_ARG + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_st_h_ARG %1 = 
bitcast <8 x i16>* @llvm_mips_st_h_RES to i8* tail call void @llvm.mips.st.h(<8 x i16> %0, i8* %1, i32 16) ret void @@ -117,7 +117,7 @@ declare void @llvm.mips.st.h(<8 x i16>, i8*, i32) nounwind define void @llvm_mips_st_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_st_w_ARG + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_st_w_ARG %1 = bitcast <4 x i32>* @llvm_mips_st_w_RES to i8* tail call void @llvm.mips.st.w(<4 x i32> %0, i8* %1, i32 16) ret void @@ -135,7 +135,7 @@ declare void @llvm.mips.st.w(<4 x i32>, i8*, i32) nounwind define void @llvm_mips_st_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_st_d_ARG + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_st_d_ARG %1 = bitcast <2 x i64>* @llvm_mips_st_d_RES to i8* tail call void @llvm.mips.st.d(<2 x i64> %0, i8* %1, i32 16) ret void diff --git a/llvm/test/CodeGen/Mips/msa/i8.ll b/llvm/test/CodeGen/Mips/msa/i8.ll index d2931a72fea..4af9c588fde 100644 --- a/llvm/test/CodeGen/Mips/msa/i8.ll +++ b/llvm/test/CodeGen/Mips/msa/i8.ll @@ -8,7 +8,7 @@ define void @llvm_mips_andi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_andi_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_andi_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.andi.b(<16 x i8> %0, i32 25) store <16 x i8> %1, <16 x i8>* @llvm_mips_andi_b_RES ret void @@ -28,8 +28,8 @@ declare <16 x i8> @llvm.mips.andi.b(<16 x i8>, i32) nounwind define void @llvm_mips_bmnzi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bmnzi_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_bmnzi_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnzi_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.bmnzi.b(<16 x i8> %0, <16 x i8> %1, i32 25) store <16 x i8> %2, <16 x i8>* @llvm_mips_bmnzi_b_RES ret void @@ -52,8 +52,8 @@ declare <16 x i8> @llvm.mips.bmnzi.b(<16 x i8>, <16 x i8>, i32) nounwind define void @llvm_mips_bmzi_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bmzi_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_bmzi_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmzi_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmzi_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.bmzi.b(<16 x i8> %0, <16 x i8> %1, i32 25) store <16 x i8> %2, <16 x i8>* @llvm_mips_bmzi_b_RES ret void @@ -77,8 +77,8 @@ declare <16 x i8> @llvm.mips.bmzi.b(<16 x i8>, <16 x i8>, i32) nounwind define void @llvm_mips_bseli_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bseli_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_bseli_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bseli_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bseli_b_ARG2 %2 = tail call <16 x i8> @llvm.mips.bseli.b(<16 x i8> %0, <16 x i8> %1, i32 25) store <16 x i8> %2, <16 x i8>* @llvm_mips_bseli_b_RES ret void @@ -100,7 +100,7 @@ declare <16 x i8> @llvm.mips.bseli.b(<16 x i8>, <16 x i8>, i32) nounwind define void @llvm_mips_nori_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_nori_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nori_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.nori.b(<16 x i8> %0, i32 25) store <16 x i8> %1, <16 x i8>* @llvm_mips_nori_b_RES ret void @@ -119,7 +119,7 @@ declare <16 x i8> @llvm.mips.nori.b(<16 x i8>, i32) nounwind define void @llvm_mips_ori_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_ori_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_ori_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.ori.b(<16 x i8> %0, i32 25) store <16 x i8> %1, <16 x i8>* @llvm_mips_ori_b_RES ret void @@ -138,7 +138,7 @@ 
declare <16 x i8> @llvm.mips.ori.b(<16 x i8>, i32) nounwind define void @llvm_mips_shf_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_shf_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_shf_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.shf.b(<16 x i8> %0, i32 25) store <16 x i8> %1, <16 x i8>* @llvm_mips_shf_b_RES ret void @@ -157,7 +157,7 @@ declare <16 x i8> @llvm.mips.shf.b(<16 x i8>, i32) nounwind define void @llvm_mips_shf_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_shf_h_ARG1 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_shf_h_ARG1 %1 = tail call <8 x i16> @llvm.mips.shf.h(<8 x i16> %0, i32 25) store <8 x i16> %1, <8 x i16>* @llvm_mips_shf_h_RES ret void @@ -176,7 +176,7 @@ declare <8 x i16> @llvm.mips.shf.h(<8 x i16>, i32) nounwind define void @llvm_mips_shf_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_shf_w_ARG1 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_shf_w_ARG1 %1 = tail call <4 x i32> @llvm.mips.shf.w(<4 x i32> %0, i32 25) store <4 x i32> %1, <4 x i32>* @llvm_mips_shf_w_RES ret void @@ -195,7 +195,7 @@ declare <4 x i32> @llvm.mips.shf.w(<4 x i32>, i32) nounwind define void @llvm_mips_xori_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_xori_b_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xori_b_ARG1 %1 = tail call <16 x i8> @llvm.mips.xori.b(<16 x i8> %0, i32 25) store <16 x i8> %1, <16 x i8>* @llvm_mips_xori_b_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/inline-asm.ll b/llvm/test/CodeGen/Mips/msa/inline-asm.ll index 4a34273f3c0..85da87b6f8a 100644 --- a/llvm/test/CodeGen/Mips/msa/inline-asm.ll +++ b/llvm/test/CodeGen/Mips/msa/inline-asm.ll @@ -16,7 +16,7 @@ entry: define void @test2() nounwind { entry: ; CHECK-LABEL: test2: - %0 = load <4 x i32>* @v4i32_r + %0 = load <4 x i32>, <4 x i32>* @v4i32_r %1 = call <4 x i32> asm "addvi.w ${0:w}, ${1:w}, 1", "=f,f"(<4 x i32> %0) ; CHECK: addvi.w $w{{[1-3]?[0-9]}}, $w{{[1-3]?[0-9]}}, 1 store <4 x i32> %1, <4 x i32>* @v4i32_r @@ -26,7 +26,7 @@ entry: define void @test3() nounwind { entry: ; CHECK-LABEL: test3: - %0 = load <4 x i32>* @v4i32_r + %0 = load <4 x i32>, <4 x i32>* @v4i32_r %1 = call <4 x i32> asm sideeffect "addvi.w ${0:w}, ${1:w}, 1", "=f,f,~{$w0}"(<4 x i32> %0) ; CHECK: addvi.w $w{{([1-9]|[1-3][0-9])}}, $w{{([1-9]|[1-3][0-9])}}, 1 store <4 x i32> %1, <4 x i32>* @v4i32_r diff --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll index 4beaaa9c184..beb361bc9f3 100644 --- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll +++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll @@ -14,7 +14,7 @@ BB: %A2 = alloca <1 x double> %A1 = alloca double %A = alloca i32 - %L = load i8* %0 + %L = load i8, i8* %0 store i8 77, i8* %0 %E = extractelement <8 x i64> zeroinitializer, i32 2 %Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> @@ -24,7 +24,7 @@ BB: br label %CF CF: ; preds = %CF, %CF78, %BB - %L5 = load i8* %Sl + %L5 = load i8, i8* %Sl store i8 %L, i8* %Sl %E6 = extractelement <8 x i32> zeroinitializer, i32 2 %Shuff7 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> @@ -33,7 +33,7 @@ CF: ; preds = %CF, %CF78, %BB %FC = sitofp <8 x i64> zeroinitializer to <8 x float> %Sl9 = select i1 %Cmp, i8 77, i8 77 %Cmp10 = icmp uge <8 x i64> %Shuff, zeroinitializer - %L11 = load i8* %0 + %L11 = load i8, i8* %0 store i8 %Sl9, i8* %0 %E12 = extractelement <1 x i16> zeroinitializer, i32 0 %Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x 
i64> %Shuff, <8 x i32> @@ -42,7 +42,7 @@ CF: ; preds = %CF, %CF78, %BB %Tr = trunc <8 x i64> %Shuff to <8 x i32> %Sl16 = select i1 %Cmp, i8 77, i8 %5 %Cmp17 = icmp ult <8 x i1> %Cmp10, %Cmp10 - %L18 = load i8* %Sl + %L18 = load i8, i8* %Sl store i8 -1, i8* %Sl %E19 = extractelement <8 x i32> zeroinitializer, i32 3 %Shuff20 = shufflevector <8 x float> %FC, <8 x float> %FC, <8 x i32> @@ -54,7 +54,7 @@ CF: ; preds = %CF, %CF78, %BB br i1 %Cmp25, label %CF, label %CF78 CF78: ; preds = %CF - %L26 = load i8* %Sl + %L26 = load i8, i8* %Sl store i32 50347, i32* %A %E27 = extractelement <8 x i1> %Cmp10, i32 2 br i1 %E27, label %CF, label %CF77 @@ -65,7 +65,7 @@ CF77: ; preds = %CF77, %CF81, %CF78 %B30 = urem <8 x i32> %Tr, zeroinitializer %Tr31 = trunc i32 0 to i16 %Sl32 = select i1 %Cmp, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer - %L33 = load i8* %Sl + %L33 = load i8, i8* %Sl store i8 %L26, i8* %Sl %E34 = extractelement <4 x i32> zeroinitializer, i32 0 %Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> %B, <1 x i32> undef @@ -73,7 +73,7 @@ CF77: ; preds = %CF77, %CF81, %CF78 %B37 = srem <1 x i16> %I29, zeroinitializer %FC38 = sitofp <8 x i32> %B30 to <8 x double> %Sl39 = select i1 %Cmp, double 0.000000e+00, double %Sl24 - %L40 = load i8* %Sl + %L40 = load i8, i8* %Sl store i8 %Sl16, i8* %Sl %E41 = extractelement <1 x i16> zeroinitializer, i32 0 %Shuff42 = shufflevector <8 x i1> %Cmp17, <8 x i1> %Cmp10, <8 x i32> @@ -85,7 +85,7 @@ CF77: ; preds = %CF77, %CF81, %CF78 br i1 %Cmp46, label %CF77, label %CF80 CF80: ; preds = %CF80, %CF77 - %L47 = load i64* %PC + %L47 = load i64, i64* %PC store i8 77, i8* %Sl %E48 = extractelement <8 x i64> zeroinitializer, i32 2 %Shuff49 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff7, <8 x i32> @@ -97,7 +97,7 @@ CF80: ; preds = %CF80, %CF77 br i1 %Cmp54, label %CF80, label %CF81 CF81: ; preds = %CF80 - %L55 = load i8* %Sl + %L55 = load i8, i8* %Sl store i8 %Sl16, i8* %Sl %E56 = extractelement <1 x i16> %B, i32 0 %Shuff57 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> @@ -105,7 +105,7 @@ CF81: ; preds = %CF80 %B59 = srem i32 %E19, %E19 %Sl60 = select i1 %Cmp, i8 77, i8 77 %Cmp61 = icmp ult <1 x i16> zeroinitializer, %B - %L62 = load i8* %Sl + %L62 = load i8, i8* %Sl store i64 %L47, i64* %PC52 %E63 = extractelement <4 x i32> %I43, i32 2 %Shuff64 = shufflevector <4 x i1> zeroinitializer, <4 x i1> zeroinitializer, <4 x i32> @@ -117,7 +117,7 @@ CF81: ; preds = %CF80 br i1 %Cmp69, label %CF77, label %CF79 CF79: ; preds = %CF81 - %L70 = load i32* %A + %L70 = load i32, i32* %A store i64 %4, i64* %PC %E71 = extractelement <4 x i32> zeroinitializer, i32 0 %Shuff72 = shufflevector <8 x i32> zeroinitializer, <8 x i32> %B44, <8 x i32> diff --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll index f9cab037e7c..bdf6eafdf4e 100644 --- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll +++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll @@ -14,7 +14,7 @@ BB: %A2 = alloca i64 %A1 = alloca i32 %A = alloca <2 x i64> - %L = load i8* %0 + %L = load i8, i8* %0 store i8 -1, i8* %0 %E = extractelement <2 x i32> zeroinitializer, i32 0 %Shuff = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> @@ -22,7 +22,7 @@ BB: %B = lshr i8 %L, -69 %ZE = fpext float 0xBF2AA5FE80000000 to double %Sl = select i1 true, <1 x i64> , <1 x i64> - %L5 = load i8* %0 + %L5 = load i8, i8* %0 store i8 -69, i8* %0 %E6 = extractelement <16 x i64> , i32 
14 %Shuff7 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> @@ -31,7 +31,7 @@ BB: %FC = uitofp i32 %3 to double %Sl10 = select i1 true, <1 x i1> zeroinitializer, <1 x i1> zeroinitializer %Cmp = icmp ne <1 x i64> %I, - %L11 = load i8* %0 + %L11 = load i8, i8* %0 store i8 %L11, i8* %0 %E12 = extractelement <1 x i64> , i32 0 %Shuff13 = shufflevector <1 x i64> %Sl, <1 x i64> , <1 x i32> @@ -42,7 +42,7 @@ BB: br label %CF74 CF74: ; preds = %CF74, %CF80, %CF76, %BB - %L18 = load i8* %0 + %L18 = load i8, i8* %0 store i8 -69, i8* %0 %E19 = extractelement <1 x i64> %Sl, i32 0 %Shuff20 = shufflevector <8 x i8> , <8 x i8> , <8 x i32> @@ -50,7 +50,7 @@ CF74: ; preds = %CF74, %CF80, %CF76, %B22 = urem i32 135673, %3 %FC23 = sitofp i8 %L to float %Sl24 = select i1 true, i8 %B, i8 %L18 - %L25 = load i8* %0 + %L25 = load i8, i8* %0 store i8 %L, i8* %0 %E26 = extractelement <2 x i32> %Shuff, i32 1 %Shuff27 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> @@ -62,7 +62,7 @@ CF74: ; preds = %CF74, %CF80, %CF76, br i1 %Cmp31, label %CF74, label %CF80 CF80: ; preds = %CF74 - %L32 = load i8* %0 + %L32 = load i8, i8* %0 store i8 -1, i8* %0 %E33 = extractelement <2 x i32> zeroinitializer, i32 1 %Shuff34 = shufflevector <1 x i64> %Shuff13, <1 x i64> , <1 x i32> zeroinitializer @@ -70,7 +70,7 @@ CF80: ; preds = %CF74 %FC36 = sitofp <1 x i1> %Cmp to <1 x float> %Sl37 = select i1 true, <8 x i8> %Shuff20, <8 x i8> %Cmp38 = icmp sgt <2 x i32> %I21, %Shuff27 - %L39 = load i8* %0 + %L39 = load i8, i8* %0 store i8 %Sl24, i8* %0 %E40 = extractelement <8 x i64> zeroinitializer, i32 1 %Shuff41 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Cmp38, <2 x i32> @@ -81,7 +81,7 @@ CF80: ; preds = %CF74 br i1 %Cmp45, label %CF74, label %CF76 CF76: ; preds = %CF80 - %L46 = load i8* %0 + %L46 = load i8, i8* %0 store i8 %L39, i8* %0 %E47 = extractelement <2 x i32> %Shuff27, i32 0 %Shuff48 = shufflevector <1 x i1> %Sl10, <1 x i1> %Sl10, <1 x i32> @@ -92,7 +92,7 @@ CF76: ; preds = %CF80 br i1 %Cmp52, label %CF74, label %CF75 CF75: ; preds = %CF75, %CF76 - %L53 = load i8* %0 + %L53 = load i8, i8* %0 store i8 %L18, i8* %0 %E54 = extractelement <8 x i8> %Shuff20, i32 5 %Shuff55 = shufflevector <2 x i32> %Shuff, <2 x i32> zeroinitializer, <2 x i32> @@ -103,7 +103,7 @@ CF75: ; preds = %CF75, %CF76 br i1 %Cmp59, label %CF75, label %CF78 CF78: ; preds = %CF75 - %L60 = load i8* %0 + %L60 = load i8, i8* %0 store i8 -69, i8* %0 %E61 = extractelement <2 x i32> zeroinitializer, i32 0 %Shuff62 = shufflevector <2 x i32> %Shuff7, <2 x i32> %I21, <2 x i32> @@ -115,7 +115,7 @@ CF78: ; preds = %CF75 br label %CF CF: ; preds = %CF, %CF78 - %L68 = load i8* %0 + %L68 = load i8, i8* %0 store i64 %B57, i64* %2 %E69 = extractelement <2 x i1> %Shuff41, i32 1 br i1 %E69, label %CF, label %CF77 diff --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll index e14f405320c..8f23a8ca517 100644 --- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll +++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll @@ -13,7 +13,7 @@ BB: %A2 = alloca i8 %A1 = alloca i32 %A = alloca i8 - %L = load i8* %0 + %L = load i8, i8* %0 store i8 %5, i8* %0 %E = extractelement <2 x i16> zeroinitializer, i32 0 %Shuff = shufflevector <1 x i8> , <1 x i8> , <1 x i32> undef @@ -25,7 +25,7 @@ BB: br label %CF83 CF83: ; preds = %BB - %L5 = load i8* %0 + %L5 = load i8, i8* %0 store i8 85, i8* %0 %E6 = extractelement <1 x i8> , i32 0 %Shuff7 = shufflevector <2 x 
i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> @@ -37,7 +37,7 @@ CF83: ; preds = %BB br label %CF CF: ; preds = %CF, %CF81, %CF83 - %L13 = load i8* %0 + %L13 = load i8, i8* %0 store i8 0, i8* %0 %E14 = extractelement <2 x i64> zeroinitializer, i32 0 %Shuff15 = shufflevector <4 x i64> , <4 x i64> , <4 x i32> @@ -52,7 +52,7 @@ CF80: ; preds = %CF80, %CF br i1 %Cmp19, label %CF80, label %CF81 CF81: ; preds = %CF80 - %L20 = load i8* %0 + %L20 = load i8, i8* %0 store i8 85, i8* %0 %E21 = extractelement <1 x i8> , i32 0 %Shuff22 = shufflevector <1 x i8> , <1 x i8> %Shuff, <1 x i32> zeroinitializer @@ -60,7 +60,7 @@ CF81: ; preds = %CF80 %FC24 = fptoui <4 x float> %FC to <4 x i16> %Sl25 = select i1 %Cmp, <2 x i32> zeroinitializer, <2 x i32> %Cmp26 = icmp ult <4 x i64> %I16, %Shuff15 - %L27 = load i8* %0 + %L27 = load i8, i8* %0 store i8 %L, i8* %0 %E28 = extractelement <1 x i8> , i32 0 %Shuff29 = shufflevector <8 x i16> zeroinitializer, <8 x i16> zeroinitializer, <8 x i32> @@ -68,7 +68,7 @@ CF81: ; preds = %CF80 %B31 = mul i8 %E28, 85 %PC = bitcast i32* %A3 to i32* %Sl32 = select i1 %Cmp12, float %FC10, float 0x4712BFE680000000 - %L33 = load i32* %PC + %L33 = load i32, i32* %PC store i32 %L33, i32* %PC %E34 = extractelement <2 x i16> zeroinitializer, i32 1 %Shuff35 = shufflevector <1 x i8> %Shuff, <1 x i8> , <1 x i32> zeroinitializer @@ -79,7 +79,7 @@ CF81: ; preds = %CF80 br i1 %Cmp39, label %CF, label %CF77 CF77: ; preds = %CF77, %CF81 - %L40 = load i32* %PC + %L40 = load i32, i32* %PC store i32 %3, i32* %PC %E41 = extractelement <2 x i32> zeroinitializer, i32 0 %Shuff42 = shufflevector <2 x i32> , <2 x i32> zeroinitializer, <2 x i32> @@ -88,7 +88,7 @@ CF77: ; preds = %CF77, %CF81 %Se = sext i32 %3 to i64 %Sl45 = select i1 true, <1 x i8> %Shuff, <1 x i8> %I43 %Cmp46 = icmp sge <1 x i8> %I36, %Shuff - %L47 = load i32* %PC + %L47 = load i32, i32* %PC store i32 %L33, i32* %PC %E48 = extractelement <2 x i16> zeroinitializer, i32 0 %Shuff49 = shufflevector <1 x i8> , <1 x i8> , <1 x i32> @@ -100,7 +100,7 @@ CF77: ; preds = %CF77, %CF81 br i1 %Cmp54, label %CF77, label %CF78 CF78: ; preds = %CF78, %CF77 - %L55 = load i32* %PC + %L55 = load i32, i32* %PC store i32 %L33, i32* %PC %E56 = extractelement <8 x i16> %Shuff29, i32 4 %Shuff57 = shufflevector <1 x i8> , <1 x i8> , <1 x i32> @@ -111,7 +111,7 @@ CF78: ; preds = %CF78, %CF77 br i1 %Cmp60, label %CF78, label %CF79 CF79: ; preds = %CF79, %CF78 - %L61 = load i32* %PC + %L61 = load i32, i32* %PC store i32 %L33, i32* %A3 %E62 = extractelement <4 x i64> %Shuff15, i32 1 %Shuff63 = shufflevector <8 x i16> %Shuff29, <8 x i16> %Shuff29, <8 x i32> @@ -123,7 +123,7 @@ CF79: ; preds = %CF79, %CF78 br i1 %Cmp68, label %CF79, label %CF82 CF82: ; preds = %CF79 - %L69 = load i32* %PC + %L69 = load i32, i32* %PC store i32 %L33, i32* %PC %E70 = extractelement <8 x i16> zeroinitializer, i32 3 %Shuff71 = shufflevector <4 x i64> %Shuff15, <4 x i64> , <4 x i32> diff --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll index 1a03e55d9d5..e3cf7964497 100644 --- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll +++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll @@ -14,7 +14,7 @@ BB: %A2 = alloca i64 %A1 = alloca i64 %A = alloca double - %L = load i8* %0 + %L = load i8, i8* %0 store i8 -101, i8* %0 %E = extractelement <4 x i32> , i32 0 %Shuff = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> @@ -22,7 +22,7 @@ BB: %B = and i64 116376, 57247 %FC = uitofp 
i8 7 to double %Sl = select i1 false, <8 x i8> , <8 x i8> - %L5 = load i8* %0 + %L5 = load i8, i8* %0 store i8 %L, i8* %0 %E6 = extractelement <4 x i32> , i32 3 %Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> @@ -33,7 +33,7 @@ BB: br label %CF CF: ; preds = %CF, %BB - %L11 = load i8* %0 + %L11 = load i8, i8* %0 store i8 -87, i8* %0 %E12 = extractelement <4 x i64> zeroinitializer, i32 0 %Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> zeroinitializer, <8 x i32> @@ -45,7 +45,7 @@ CF: ; preds = %CF, %BB br i1 %Cmp18, label %CF, label %CF80 CF80: ; preds = %CF80, %CF88, %CF - %L19 = load i8* %0 + %L19 = load i8, i8* %0 store i8 -101, i8* %0 %E20 = extractelement <4 x i64> zeroinitializer, i32 0 %Shuff21 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff7, <4 x i32> @@ -56,7 +56,7 @@ CF80: ; preds = %CF80, %CF88, %CF br i1 %Cmp25, label %CF80, label %CF83 CF83: ; preds = %CF83, %CF80 - %L26 = load i8* %0 + %L26 = load i8, i8* %0 store i8 -87, i8* %0 %E27 = extractelement <4 x i32> , i32 0 %Shuff28 = shufflevector <4 x i32> , <4 x i32> , <4 x i32> @@ -68,7 +68,7 @@ CF83: ; preds = %CF83, %CF80 br i1 %Cmp33, label %CF83, label %CF88 CF88: ; preds = %CF83 - %L34 = load i8* %0 + %L34 = load i8, i8* %0 store i8 -87, i8* %0 %E35 = extractelement <8 x i64> %Shuff, i32 7 %Shuff36 = shufflevector <4 x i32> , <4 x i32> %Shuff28, <4 x i32> @@ -80,7 +80,7 @@ CF88: ; preds = %CF83 br i1 %Cmp40, label %CF80, label %CF81 CF81: ; preds = %CF81, %CF85, %CF87, %CF88 - %L41 = load i8* %0 + %L41 = load i8, i8* %0 store i8 %L34, i8* %0 %E42 = extractelement <8 x i64> %Shuff13, i32 6 %Shuff43 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> @@ -92,7 +92,7 @@ CF81: ; preds = %CF81, %CF85, %CF87, br i1 %Cmp47, label %CF81, label %CF85 CF85: ; preds = %CF81 - %L48 = load i8* %0 + %L48 = load i8, i8* %0 store i8 -101, i8* %0 %E49 = extractelement <8 x i8> , i32 2 %Shuff50 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> @@ -101,7 +101,7 @@ CF85: ; preds = %CF81 %FC53 = uitofp i8 %L48 to double %Sl54 = select i1 %Cmp47, i32 %3, i32 %Sl24 %Cmp55 = icmp ne <8 x i64> %Shuff13, zeroinitializer - %L56 = load i8* %0 + %L56 = load i8, i8* %0 store i8 %L11, i8* %0 %E57 = extractelement <4 x i64> %Shuff21, i32 1 %Shuff58 = shufflevector <8 x i64> %Shuff, <8 x i64> zeroinitializer, <8 x i32> @@ -113,7 +113,7 @@ CF85: ; preds = %CF81 CF84: ; preds = %CF84, %CF85 %Sl62 = select i1 false, i8 %L, i8 %L48 %Cmp63 = icmp ne <8 x i64> %I, zeroinitializer - %L64 = load i8* %0 + %L64 = load i8, i8* %0 store i8 %5, i8* %0 %E65 = extractelement <8 x i1> %Cmp55, i32 0 br i1 %E65, label %CF84, label %CF87 @@ -125,7 +125,7 @@ CF87: ; preds = %CF84 %ZE69 = zext <8 x i8> %Sl32 to <8 x i64> %Sl70 = select i1 %Tr61, i64 %E20, i64 %E12 %Cmp71 = icmp slt <8 x i64> %I, %Shuff - %L72 = load i8* %0 + %L72 = load i8, i8* %0 store i8 %L72, i8* %0 %E73 = extractelement <8 x i1> %Cmp55, i32 6 br i1 %E73, label %CF81, label %CF82 diff --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll index 96547d90cb4..6f338107825 100644 --- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll +++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll @@ -14,7 +14,7 @@ BB: %A2 = alloca double %A1 = alloca float %A = alloca double - %L = load i8* %0 + %L = load i8, i8* %0 store i8 -123, i8* %0 %E = extractelement <4 x i64> zeroinitializer, i32 1 %Shuff = shufflevector <4 x i32> , <4 x 
i32> zeroinitializer, <4 x i32> @@ -22,7 +22,7 @@ BB: %BC = bitcast i64 181325 to double %Sl = select i1 false, <2 x i32> zeroinitializer, <2 x i32> zeroinitializer %Cmp = icmp ne <4 x i64> zeroinitializer, zeroinitializer - %L5 = load i8* %0 + %L5 = load i8, i8* %0 store i8 %L, i8* %0 %E6 = extractelement <4 x i64> zeroinitializer, i32 3 %Shuff7 = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> @@ -33,7 +33,7 @@ BB: br label %CF80 CF80: ; preds = %BB - %L11 = load i8* %0 + %L11 = load i8, i8* %0 store i8 -123, i8* %0 %E12 = extractelement <2 x i16> zeroinitializer, i32 1 %Shuff13 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> @@ -42,7 +42,7 @@ CF80: ; preds = %BB %PC = bitcast i1* %A4 to i64* %Sl16 = select i1 %Cmp10, <4 x i32> zeroinitializer, <4 x i32> %Cmp17 = icmp ule <4 x i32> , %Sl16 - %L18 = load double* %A2 + %L18 = load double, double* %A2 store i64 498254, i64* %PC %E19 = extractelement <4 x i64> zeroinitializer, i32 0 %Shuff20 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> @@ -51,7 +51,7 @@ CF80: ; preds = %BB %ZE = zext <2 x i1> %Shuff20 to <2 x i32> %Sl23 = select i1 %Cmp10, <2 x i1> %Shuff20, <2 x i1> zeroinitializer %Cmp24 = icmp ult <2 x i32> zeroinitializer, zeroinitializer - %L25 = load i8* %0 + %L25 = load i8, i8* %0 store i8 %L25, i8* %0 %E26 = extractelement <4 x i8> , i32 3 %Shuff27 = shufflevector <4 x i32> %Shuff, <4 x i32> %I14, <4 x i32> @@ -63,7 +63,7 @@ CF80: ; preds = %BB CF79: ; preds = %CF80 %Sl30 = select i1 false, i8 %B29, i8 -123 %Cmp31 = icmp sge <2 x i1> %I, %I - %L32 = load i64* %PC + %L32 = load i64, i64* %PC store i8 -123, i8* %0 %E33 = extractelement <8 x i64> , i32 2 %Shuff34 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> @@ -75,7 +75,7 @@ CF79: ; preds = %CF80 br label %CF CF: ; preds = %CF, %CF79 - %L40 = load double* %A + %L40 = load double, double* %A store i1 %Cmp39, i1* %PC37 %E41 = extractelement <4 x i64> zeroinitializer, i32 3 %Shuff42 = shufflevector <2 x i32> zeroinitializer, <2 x i32> %ZE, <2 x i32> @@ -90,7 +90,7 @@ CF77: ; preds = %CF77, %CF br i1 %Cmp46, label %CF77, label %CF78 CF78: ; preds = %CF78, %CF83, %CF82, %CF77 - %L47 = load i64* %PC + %L47 = load i64, i64* %PC store i8 -123, i8* %0 %E48 = extractelement <4 x i64> zeroinitializer, i32 3 %Shuff49 = shufflevector <4 x i32> , <4 x i32> zeroinitializer, <4 x i32> @@ -105,7 +105,7 @@ CF83: ; preds = %CF78 br i1 %Cmp54, label %CF78, label %CF82 CF82: ; preds = %CF83 - %L55 = load i64* %PC + %L55 = load i64, i64* %PC store i64 %L32, i64* %PC %E56 = extractelement <2 x i16> %Shuff7, i32 1 %Shuff57 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> @@ -114,7 +114,7 @@ CF82: ; preds = %CF83 %FC = sitofp i64 498254 to double %Sl60 = select i1 false, i64 %E6, i64 -1 %Cmp61 = icmp sgt <4 x i32> %Shuff27, %I43 - %L62 = load i64* %PC + %L62 = load i64, i64* %PC store i64 %Sl9, i64* %PC %E63 = extractelement <2 x i32> %ZE, i32 0 %Shuff64 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff13, <4 x i32> @@ -126,7 +126,7 @@ CF82: ; preds = %CF83 CF81: ; preds = %CF82 %Cmp69 = icmp ne <8 x i64> , %B36 - %L70 = load i8* %0 + %L70 = load i8, i8* %0 store i64 %L55, i64* %PC %E71 = extractelement <4 x i32> %Shuff49, i32 1 %Shuff72 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %Shuff34, <4 x i32> diff --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll index bef75f3645c..181f72abd37 
100644 --- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll +++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll @@ -14,7 +14,7 @@ BB: %A2 = alloca float %A1 = alloca double %A = alloca double - %L = load i8* %0 + %L = load i8, i8* %0 store i8 97, i8* %0 %E = extractelement <16 x i64> , i32 14 %Shuff = shufflevector <2 x i1> zeroinitializer, <2 x i1> zeroinitializer, <2 x i32> @@ -22,7 +22,7 @@ BB: %Tr = trunc <1 x i64> zeroinitializer to <1 x i8> %Sl = select i1 false, double* %A1, double* %A %Cmp = icmp ne <2 x i64> zeroinitializer, zeroinitializer - %L5 = load double* %Sl + %L5 = load double, double* %Sl store float -4.374162e+06, float* %A2 %E6 = extractelement <4 x i64> zeroinitializer, i32 3 %Shuff7 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I, <4 x i32> @@ -34,7 +34,7 @@ BB: br label %CF72 CF72: ; preds = %CF72, %CF80, %CF78, %BB - %L11 = load double* %Sl + %L11 = load double, double* %Sl store double 0.000000e+00, double* %Sl %E12 = extractelement <2 x i1> zeroinitializer, i32 0 br i1 %E12, label %CF72, label %CF80 @@ -49,7 +49,7 @@ CF80: ; preds = %CF72 br i1 %Cmp17, label %CF72, label %CF77 CF77: ; preds = %CF77, %CF80 - %L18 = load double* %Sl + %L18 = load double, double* %Sl store double 0.000000e+00, double* %Sl %E19 = extractelement <2 x i1> zeroinitializer, i32 0 br i1 %E19, label %CF77, label %CF78 @@ -60,7 +60,7 @@ CF78: ; preds = %CF77 %B22 = sdiv <4 x i64> %Shuff7, zeroinitializer %FC = uitofp i8 97 to double %Sl23 = select i1 %Cmp10, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer - %L24 = load double* %Sl + %L24 = load double, double* %Sl store float %Sl16, float* %PC %E25 = extractelement <2 x i1> %Shuff, i32 1 br i1 %E25, label %CF72, label %CF76 @@ -71,7 +71,7 @@ CF76: ; preds = %CF78 %B28 = mul <4 x i64> %I27, zeroinitializer %ZE = zext <8 x i1> zeroinitializer to <8 x i64> %Sl29 = select i1 %Cmp17, float -4.374162e+06, float -4.374162e+06 - %L30 = load i8* %0 + %L30 = load i8, i8* %0 store double %L5, double* %Sl %E31 = extractelement <8 x i1> zeroinitializer, i32 5 br label %CF @@ -85,7 +85,7 @@ CF: ; preds = %CF, %CF81, %CF76 br i1 %Cmp36, label %CF, label %CF74 CF74: ; preds = %CF74, %CF - %L37 = load float* %PC + %L37 = load float, float* %PC store double 0.000000e+00, double* %Sl %E38 = extractelement <2 x i1> %Sl23, i32 1 br i1 %E38, label %CF74, label %CF75 @@ -95,7 +95,7 @@ CF75: ; preds = %CF75, %CF82, %CF74 %I40 = insertelement <4 x i64> zeroinitializer, i64 %4, i32 2 %Sl41 = select i1 %Cmp10, i32 0, i32 %3 %Cmp42 = icmp ne <1 x i64> zeroinitializer, zeroinitializer - %L43 = load double* %Sl + %L43 = load double, double* %Sl store i64 %4, i64* %2 %E44 = extractelement <2 x i1> %Shuff20, i32 1 br i1 %E44, label %CF75, label %CF82 @@ -109,7 +109,7 @@ CF82: ; preds = %CF75 br i1 %Cmp49, label %CF75, label %CF81 CF81: ; preds = %CF82 - %L50 = load i8* %0 + %L50 = load i8, i8* %0 store double %L43, double* %Sl %E51 = extractelement <4 x i64> %Shuff7, i32 3 %Shuff52 = shufflevector <4 x float> %BC34, <4 x float> %BC34, <4 x i32> @@ -117,7 +117,7 @@ CF81: ; preds = %CF82 %B54 = fdiv double %L24, %L43 %BC55 = bitcast <4 x i64> zeroinitializer to <4 x double> %Sl56 = select i1 false, i8 %5, i8 97 - %L57 = load i8* %0 + %L57 = load i8, i8* %0 store i8 %L50, i8* %0 %E58 = extractelement <2 x i1> %Shuff20, i32 1 br i1 %E58, label %CF, label %CF73 @@ -129,7 +129,7 @@ CF73: ; preds = %CF73, %CF81 %PC62 = bitcast double* %A3 to float* %Sl63 = select i1 %Cmp10, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer %Cmp64 = icmp ne <2 x i1> 
%Cmp, %Shuff - %L65 = load double* %A1 + %L65 = load double, double* %A1 store float -4.374162e+06, float* %PC62 %E66 = extractelement <8 x i1> %I21, i32 3 br i1 %E66, label %CF73, label %CF79 diff --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll index 697871df797..c0bc9056348 100644 --- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll +++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll @@ -14,7 +14,7 @@ BB: %A2 = alloca <1 x double> %A1 = alloca <8 x double> %A = alloca i64 - %L = load i8* %0 + %L = load i8, i8* %0 store i64 33695, i64* %A %E = extractelement <4 x i32> zeroinitializer, i32 3 %Shuff = shufflevector <2 x i32> , <2 x i32> , <2 x i32> @@ -22,7 +22,7 @@ BB: %B = lshr <8 x i32> , %ZE = fpext float 0x3B64A2B880000000 to double %Sl = select i1 true, i16 -1, i16 -11642 - %L5 = load i8* %0 + %L5 = load i8, i8* %0 store i8 0, i8* %0 %E6 = extractelement <4 x i32> zeroinitializer, i32 2 %Shuff7 = shufflevector <8 x i1> zeroinitializer, <8 x i1> zeroinitializer, <8 x i32> @@ -31,7 +31,7 @@ BB: %BC = bitcast <2 x i32> to <2 x float> %Sl10 = select i1 true, i32* %1, i32* %1 %Cmp = icmp sge <8 x i64> zeroinitializer, zeroinitializer - %L11 = load i32* %Sl10 + %L11 = load i32, i32* %Sl10 store <1 x double> zeroinitializer, <1 x double>* %A2 %E12 = extractelement <4 x i16> zeroinitializer, i32 0 %Shuff13 = shufflevector <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i32> undef @@ -43,7 +43,7 @@ BB: br label %CF75 CF75: ; preds = %CF75, %BB - %L19 = load i32* %Sl10 + %L19 = load i32, i32* %Sl10 store i32 %L11, i32* %Sl10 %E20 = extractelement <4 x i32> zeroinitializer, i32 1 %Shuff21 = shufflevector <4 x i32> zeroinitializer, <4 x i32> %I8, <4 x i32> @@ -55,7 +55,7 @@ CF75: ; preds = %CF75, %BB br i1 %Cmp26, label %CF75, label %CF76 CF76: ; preds = %CF75 - %L27 = load i32* %Sl10 + %L27 = load i32, i32* %Sl10 store i32 439732, i32* %Sl10 %E28 = extractelement <4 x i32> %Shuff21, i32 3 %Shuff29 = shufflevector <8 x i32> , <8 x i32> , <8 x i32> @@ -65,7 +65,7 @@ CF76: ; preds = %CF75 br label %CF74 CF74: ; preds = %CF74, %CF80, %CF78, %CF76 - %L33 = load i64* %2 + %L33 = load i64, i64* %2 store i32 71140, i32* %Sl10 %E34 = extractelement <4 x i32> zeroinitializer, i32 1 %Shuff35 = shufflevector <1 x i16> zeroinitializer, <1 x i16> zeroinitializer, <1 x i32> undef @@ -76,7 +76,7 @@ CF74: ; preds = %CF74, %CF80, %CF78, br i1 %Cmp39, label %CF74, label %CF80 CF80: ; preds = %CF74 - %L40 = load i8* %0 + %L40 = load i8, i8* %0 store i32 0, i32* %Sl10 %E41 = extractelement <8 x i64> zeroinitializer, i32 1 %Shuff42 = shufflevector <1 x i16> %I14, <1 x i16> %I14, <1 x i32> undef @@ -86,7 +86,7 @@ CF80: ; preds = %CF74 br i1 %Sl44, label %CF74, label %CF78 CF78: ; preds = %CF80 - %L45 = load i32* %Sl10 + %L45 = load i32, i32* %Sl10 store i8 %L5, i8* %0 %E46 = extractelement <8 x i1> %Shuff7, i32 2 br i1 %E46, label %CF74, label %CF77 @@ -101,7 +101,7 @@ CF77: ; preds = %CF77, %CF78 br i1 %Cmp52, label %CF77, label %CF79 CF79: ; preds = %CF77 - %L53 = load i32* %Sl10 + %L53 = load i32, i32* %Sl10 store i8 %L40, i8* %0 %E54 = extractelement <4 x i32> zeroinitializer, i32 1 %Shuff55 = shufflevector <4 x i32> %Shuff21, <4 x i32> %I8, <4 x i32> @@ -109,7 +109,7 @@ CF79: ; preds = %CF77 %Tr = trunc <1 x i64> %Shuff13 to <1 x i16> %Sl57 = select i1 %Cmp18, <2 x i32> , <2 x i32> %Cmp58 = icmp uge <4 x i32> , %I56 - %L59 = load i8* %0 + %L59 = load i8, i8* %0 store <1 x double> zeroinitializer, <1 x double>* %A2 %E60 = 
extractelement <4 x i32> zeroinitializer, i32 0 %Shuff61 = shufflevector <4 x i32> %I8, <4 x i32> %I8, <4 x i32> @@ -121,7 +121,7 @@ CF79: ; preds = %CF77 br label %CF CF: ; preds = %CF79 - %L66 = load i32* %Sl10 + %L66 = load i32, i32* %Sl10 store i32 %E6, i32* %PC %E67 = extractelement <4 x i32> , i32 2 %Shuff68 = shufflevector <4 x i32> %Sl64, <4 x i32> %I8, <4 x i32> diff --git a/llvm/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll b/llvm/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll index dc4200ad428..a3150e9a67d 100644 --- a/llvm/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll +++ b/llvm/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll @@ -14,14 +14,14 @@ BB: %A2 = alloca <4 x i1> %A1 = alloca <4 x i16> %A = alloca <2 x i32> - %L = load i8* %0 + %L = load i8, i8* %0 store i8 %L, i8* %0 %E = extractelement <4 x i32> zeroinitializer, i32 0 %Shuff = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> %I = insertelement <2 x i1> zeroinitializer, i1 false, i32 1 %FC = sitofp <4 x i32> zeroinitializer to <4 x double> %Sl = select i1 false, <4 x i64> %Shuff, <4 x i64> %Shuff - %L5 = load i8* %0 + %L5 = load i8, i8* %0 store i8 %5, i8* %0 %E6 = extractelement <1 x i16> zeroinitializer, i32 0 %Shuff7 = shufflevector <2 x i1> %I, <2 x i1> %I, <2 x i32> @@ -30,7 +30,7 @@ BB: %FC9 = fptoui float 0x406DB70180000000 to i64 %Sl10 = select i1 false, <8 x i32> , <8 x i32> %Cmp = icmp ult <4 x i64> zeroinitializer, zeroinitializer - %L11 = load i8* %0 + %L11 = load i8, i8* %0 store i8 %L, i8* %0 %E12 = extractelement <4 x i64> zeroinitializer, i32 2 %Shuff13 = shufflevector <4 x i32> zeroinitializer, <4 x i32> zeroinitializer, <4 x i32> @@ -42,7 +42,7 @@ BB: br label %CF CF: ; preds = %CF, %CF79, %CF84, %BB - %L18 = load i8* %0 + %L18 = load i8, i8* %0 store i8 %L, i8* %0 %E19 = extractelement <4 x i64> %Sl, i32 3 %Shuff20 = shufflevector <2 x i1> %Shuff7, <2 x i1> %I, <2 x i32> @@ -54,7 +54,7 @@ CF: ; preds = %CF, %CF79, %CF84, % br i1 %Cmp25, label %CF, label %CF79 CF79: ; preds = %CF - %L26 = load i8* %0 + %L26 = load i8, i8* %0 store i8 %L26, i8* %0 %E27 = extractelement <1 x i16> zeroinitializer, i32 0 %Shuff28 = shufflevector <16 x i32> , <16 x i32> , <16 x i32> @@ -65,7 +65,7 @@ CF79: ; preds = %CF br i1 %Cmp32, label %CF, label %CF78 CF78: ; preds = %CF78, %CF79 - %L33 = load i8* %0 + %L33 = load i8, i8* %0 store i8 %L, i8* %0 %E34 = extractelement <16 x i32> %Shuff28, i32 1 %Shuff35 = shufflevector <4 x i64> zeroinitializer, <4 x i64> %I21, <4 x i32> @@ -76,7 +76,7 @@ CF78: ; preds = %CF78, %CF79 br i1 %Cmp38, label %CF78, label %CF80 CF80: ; preds = %CF80, %CF82, %CF78 - %L39 = load i8* %0 + %L39 = load i8, i8* %0 store i8 %L, i8* %0 %E40 = extractelement <2 x i1> %Shuff20, i32 1 br i1 %E40, label %CF80, label %CF82 @@ -87,7 +87,7 @@ CF82: ; preds = %CF80 %B43 = sub i32 %E, 0 %Sl44 = select i1 %Cmp32, <16 x i32> %Shuff28, <16 x i32> %Shuff28 %Cmp45 = icmp sgt <4 x i64> zeroinitializer, %I21 - %L46 = load i8* %0 + %L46 = load i8, i8* %0 store i8 %L11, i8* %0 %E47 = extractelement <8 x i32> %Sl16, i32 4 %Shuff48 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %Shuff7, <2 x i32> @@ -99,7 +99,7 @@ CF82: ; preds = %CF80 CF81: ; preds = %CF81, %CF82 %Sl52 = select i1 false, float -6.749110e+06, float 0x406DB70180000000 %Cmp53 = icmp uge <2 x i32> , - %L54 = load i8* %0 + %L54 = load i8, i8* %0 store i8 %L5, i8* %0 %E55 = extractelement <8 x i32> zeroinitializer, i32 7 %Shuff56 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> @@ -108,7 
+108,7 @@ CF81: ; preds = %CF81, %CF82 %FC59 = fptoui <4 x double> %I36 to <4 x i16> %Sl60 = select i1 %Cmp17, <2 x i1> %I, <2 x i1> %I57 %Cmp61 = icmp ule <8 x i32> %B50, - %L62 = load i8* %0 + %L62 = load i8, i8* %0 store i8 %L33, i8* %0 %E63 = extractelement <4 x i64> %Shuff, i32 2 %Shuff64 = shufflevector <4 x i64> %Shuff56, <4 x i64> %Shuff56, <4 x i32> @@ -126,7 +126,7 @@ CF84: ; preds = %CF83 br i1 %Cmp69, label %CF, label %CF77 CF77: ; preds = %CF84 - %L70 = load i8* %0 + %L70 = load i8, i8* %0 store i8 %L, i8* %0 %E71 = extractelement <4 x i64> %Shuff, i32 0 %Shuff72 = shufflevector <2 x i1> zeroinitializer, <2 x i1> %I, <2 x i32> diff --git a/llvm/test/CodeGen/Mips/msa/shuffle.ll b/llvm/test/CodeGen/Mips/msa/shuffle.ll index faeec5d58dd..7feed927026 100644 --- a/llvm/test/CodeGen/Mips/msa/shuffle.ll +++ b/llvm/test/CodeGen/Mips/msa/shuffle.ll @@ -4,7 +4,7 @@ define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: vshf_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -20,7 +20,7 @@ define void @vshf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @vshf_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: vshf_v16i8_1: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> ; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1] @@ -34,8 +34,8 @@ define void @vshf_v16i8_1(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: vshf_v16i8_2: - %1 = load <16 x i8>* %a - %2 = load <16 x i8>* %b + %1 = load <16 x i8>, <16 x i8>* %a + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -51,9 +51,9 @@ define void @vshf_v16i8_2(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: vshf_v16i8_3: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -71,7 +71,7 @@ define void @vshf_v16i8_3(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @vshf_v16i8_4(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: vshf_v16i8_4: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <16 x i8> %1, <16 x i8> %1, <16 x i32> ; CHECK-DAG: splati.b [[R3:\$w[0-9]+]], [[R1]][1] @@ -85,7 +85,7 @@ define void @vshf_v16i8_4(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: vshf_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -101,7 +101,7 @@ define void @vshf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* 
%b) nounwind define void @vshf_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: vshf_v8i16_1: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][1] @@ -115,8 +115,8 @@ define void @vshf_v8i16_1(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: vshf_v8i16_2: - %1 = load <8 x i16>* %a - %2 = load <8 x i16>* %b + %1 = load <8 x i16>, <8 x i16>* %a + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -132,9 +132,9 @@ define void @vshf_v8i16_2(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: vshf_v8i16_3: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -152,7 +152,7 @@ define void @vshf_v8i16_3(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @vshf_v8i16_4(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: vshf_v8i16_4: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <8 x i16> %1, <8 x i16> %1, <8 x i32> ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][1] @@ -169,7 +169,7 @@ define void @vshf_v8i16_4(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @vshf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: vshf_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27 @@ -183,7 +183,7 @@ define void @vshf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @vshf_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: vshf_v4i32_1: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 85 @@ -197,8 +197,8 @@ define void @vshf_v4i32_1(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @vshf_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: vshf_v4i32_2: - %1 = load <4 x i32>* %a - %2 = load <4 x i32>* %b + %1 = load <4 x i32>, <4 x i32>* %a + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R2]], 36 @@ -212,9 +212,9 @@ define void @vshf_v4i32_2(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: vshf_v4i32_3: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> ; 
CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -232,7 +232,7 @@ define void @vshf_v4i32_3(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @vshf_v4i32_4(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: vshf_v4i32_4: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <4 x i32> %1, <4 x i32> %1, <4 x i32> ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 85 @@ -246,7 +246,7 @@ define void @vshf_v4i32_4(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: vshf_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -262,7 +262,7 @@ define void @vshf_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @vshf_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: vshf_v2i64_1: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1] @@ -276,8 +276,8 @@ define void @vshf_v2i64_1(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: vshf_v2i64_2: - %1 = load <2 x i64>* %a - %2 = load <2 x i64>* %b + %1 = load <2 x i64>, <2 x i64>* %a + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -293,9 +293,9 @@ define void @vshf_v2i64_2(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: vshf_v2i64_3: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> ; CHECK-DAG: addiu [[PTR_A:\$[0-9]+]], {{.*}}, %lo($ @@ -313,7 +313,7 @@ define void @vshf_v2i64_3(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @vshf_v2i64_4(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: vshf_v2i64_4: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <2 x i64> %1, <2 x i64> %1, <2 x i32> ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1] @@ -327,7 +327,7 @@ define void @vshf_v2i64_4(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @shf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: shf_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> ; CHECK-DAG: shf.b [[R3:\$w[0-9]+]], [[R1]], 45 @@ -341,7 +341,7 @@ define void @shf_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { define void @shf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: shf_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> ; CHECK-DAG: shf.h 
[[R3:\$w[0-9]+]], [[R1]], 27 @@ -355,7 +355,7 @@ define void @shf_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { define void @shf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: shf_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> ; CHECK-DAG: shf.w [[R3:\$w[0-9]+]], [[R1]], 27 @@ -371,9 +371,9 @@ define void @shf_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { define void @ilvev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: ilvev_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> @@ -388,9 +388,9 @@ define void @ilvev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @ilvev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: ilvev_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> ; CHECK-DAG: ilvev.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -404,9 +404,9 @@ define void @ilvev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @ilvev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: ilvev_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> ; CHECK-DAG: ilvev.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -420,9 +420,9 @@ define void @ilvev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @ilvev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: ilvev_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> ; CHECK-DAG: ilvev.d [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -436,9 +436,9 @@ define void @ilvev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @ilvod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: ilvod_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> @@ -453,9 +453,9 @@ define void @ilvod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @ilvod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: ilvod_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> ; CHECK-DAG: ilvod.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -469,9 +469,9 @@ define void 
@ilvod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @ilvod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: ilvod_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> ; CHECK-DAG: ilvod.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -485,9 +485,9 @@ define void @ilvod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @ilvod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: ilvod_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> ; CHECK-DAG: ilvod.d [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -501,9 +501,9 @@ define void @ilvod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @ilvl_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: ilvl_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> @@ -518,9 +518,9 @@ define void @ilvl_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @ilvl_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: ilvl_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> ; CHECK-DAG: ilvl.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -534,9 +534,9 @@ define void @ilvl_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @ilvl_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: ilvl_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> ; CHECK-DAG: ilvl.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -550,9 +550,9 @@ define void @ilvl_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @ilvl_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: ilvl_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> ; ilvl.d and ilvev.d are equivalent for v2i64 @@ -567,9 +567,9 @@ define void @ilvl_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @ilvr_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: ilvr_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> @@ -584,9 
+584,9 @@ define void @ilvr_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @ilvr_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: ilvr_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> ; CHECK-DAG: ilvr.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -600,9 +600,9 @@ define void @ilvr_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @ilvr_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: ilvr_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> ; CHECK-DAG: ilvr.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -616,9 +616,9 @@ define void @ilvr_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @ilvr_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: ilvr_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> ; ilvr.d and ilvod.d are equivalent for v2i64 @@ -633,9 +633,9 @@ define void @ilvr_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @pckev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: pckev_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> @@ -650,9 +650,9 @@ define void @pckev_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @pckev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: pckev_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> ; CHECK-DAG: pckev.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -666,9 +666,9 @@ define void @pckev_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @pckev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: pckev_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> ; CHECK-DAG: pckev.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -682,9 +682,9 @@ define void @pckev_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @pckev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: pckev_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> 
%2, <2 x i32> ; pckev.d and ilvev.d are equivalent for v2i64 @@ -699,9 +699,9 @@ define void @pckev_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @pckod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind { ; CHECK: pckod_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) - %2 = load <16 x i8>* %b + %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> @@ -716,9 +716,9 @@ define void @pckod_v16i8_0(<16 x i8>* %c, <16 x i8>* %a, <16 x i8>* %b) nounwind define void @pckod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind { ; CHECK: pckod_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) - %2 = load <8 x i16>* %b + %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> ; CHECK-DAG: pckod.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -732,9 +732,9 @@ define void @pckod_v8i16_0(<8 x i16>* %c, <8 x i16>* %a, <8 x i16>* %b) nounwind define void @pckod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind { ; CHECK: pckod_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) - %2 = load <4 x i32>* %b + %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> ; CHECK-DAG: pckod.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] @@ -748,9 +748,9 @@ define void @pckod_v4i32_0(<4 x i32>* %c, <4 x i32>* %a, <4 x i32>* %b) nounwind define void @pckod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind { ; CHECK: pckod_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) - %2 = load <2 x i64>* %b + %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <2 x i32> ; pckod.d and ilvod.d are equivalent for v2i64 @@ -765,7 +765,7 @@ define void @pckod_v2i64_0(<2 x i64>* %c, <2 x i64>* %a, <2 x i64>* %b) nounwind define void @splati_v16i8_0(<16 x i8>* %c, <16 x i8>* %a) nounwind { ; CHECK: splati_v16i8_0: - %1 = load <16 x i8>* %a + %1 = load <16 x i8>, <16 x i8>* %a ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> @@ -780,7 +780,7 @@ define void @splati_v16i8_0(<16 x i8>* %c, <16 x i8>* %a) nounwind { define void @splati_v8i16_0(<8 x i16>* %c, <8 x i16>* %a) nounwind { ; CHECK: splati_v8i16_0: - %1 = load <8 x i16>* %a + %1 = load <8 x i16>, <8 x i16>* %a ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> ; CHECK-DAG: splati.h [[R3:\$w[0-9]+]], [[R1]][4] @@ -794,7 +794,7 @@ define void @splati_v8i16_0(<8 x i16>* %c, <8 x i16>* %a) nounwind { define void @splati_v4i32_0(<4 x i32>* %c, <4 x i32>* %a) nounwind { ; CHECK: splati_v4i32_0: - %1 = load <4 x i32>* %a + %1 = load <4 x i32>, <4 x i32>* %a ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> ; shf.w and splati.w are equivalent @@ -809,7 +809,7 @@ define void @splati_v4i32_0(<4 x i32>* %c, <4 x i32>* %a) nounwind { define void @splati_v2i64_0(<2 x i64>* %c, <2 x i64>* %a) nounwind { ; CHECK: splati_v2i64_0: - %1 = load <2 x i64>* %a + %1 = load <2 x i64>, <2 x i64>* %a ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = 
shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> ; CHECK-DAG: splati.d [[R3:\$w[0-9]+]], [[R1]][1] diff --git a/llvm/test/CodeGen/Mips/msa/spill.ll b/llvm/test/CodeGen/Mips/msa/spill.ll index 085a16e80ae..8c9a7991235 100644 --- a/llvm/test/CodeGen/Mips/msa/spill.ll +++ b/llvm/test/CodeGen/Mips/msa/spill.ll @@ -39,40 +39,40 @@ entry: %p31 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 31 %p32 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 32 %p33 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 33 - %0 = load <16 x i8>* %p0, align 16 - %1 = load <16 x i8>* %p1, align 16 - %2 = load <16 x i8>* %p2, align 16 - %3 = load <16 x i8>* %p3, align 16 - %4 = load <16 x i8>* %p4, align 16 - %5 = load <16 x i8>* %p5, align 16 - %6 = load <16 x i8>* %p6, align 16 - %7 = load <16 x i8>* %p7, align 16 - %8 = load <16 x i8>* %p8, align 16 - %9 = load <16 x i8>* %p9, align 16 - %10 = load <16 x i8>* %p10, align 16 - %11 = load <16 x i8>* %p11, align 16 - %12 = load <16 x i8>* %p12, align 16 - %13 = load <16 x i8>* %p13, align 16 - %14 = load <16 x i8>* %p14, align 16 - %15 = load <16 x i8>* %p15, align 16 - %16 = load <16 x i8>* %p16, align 16 - %17 = load <16 x i8>* %p17, align 16 - %18 = load <16 x i8>* %p18, align 16 - %19 = load <16 x i8>* %p19, align 16 - %20 = load <16 x i8>* %p20, align 16 - %21 = load <16 x i8>* %p21, align 16 - %22 = load <16 x i8>* %p22, align 16 - %23 = load <16 x i8>* %p23, align 16 - %24 = load <16 x i8>* %p24, align 16 - %25 = load <16 x i8>* %p25, align 16 - %26 = load <16 x i8>* %p26, align 16 - %27 = load <16 x i8>* %p27, align 16 - %28 = load <16 x i8>* %p28, align 16 - %29 = load <16 x i8>* %p29, align 16 - %30 = load <16 x i8>* %p30, align 16 - %31 = load <16 x i8>* %p31, align 16 - %32 = load <16 x i8>* %p32, align 16 - %33 = load <16 x i8>* %p33, align 16 + %0 = load <16 x i8>, <16 x i8>* %p0, align 16 + %1 = load <16 x i8>, <16 x i8>* %p1, align 16 + %2 = load <16 x i8>, <16 x i8>* %p2, align 16 + %3 = load <16 x i8>, <16 x i8>* %p3, align 16 + %4 = load <16 x i8>, <16 x i8>* %p4, align 16 + %5 = load <16 x i8>, <16 x i8>* %p5, align 16 + %6 = load <16 x i8>, <16 x i8>* %p6, align 16 + %7 = load <16 x i8>, <16 x i8>* %p7, align 16 + %8 = load <16 x i8>, <16 x i8>* %p8, align 16 + %9 = load <16 x i8>, <16 x i8>* %p9, align 16 + %10 = load <16 x i8>, <16 x i8>* %p10, align 16 + %11 = load <16 x i8>, <16 x i8>* %p11, align 16 + %12 = load <16 x i8>, <16 x i8>* %p12, align 16 + %13 = load <16 x i8>, <16 x i8>* %p13, align 16 + %14 = load <16 x i8>, <16 x i8>* %p14, align 16 + %15 = load <16 x i8>, <16 x i8>* %p15, align 16 + %16 = load <16 x i8>, <16 x i8>* %p16, align 16 + %17 = load <16 x i8>, <16 x i8>* %p17, align 16 + %18 = load <16 x i8>, <16 x i8>* %p18, align 16 + %19 = load <16 x i8>, <16 x i8>* %p19, align 16 + %20 = load <16 x i8>, <16 x i8>* %p20, align 16 + %21 = load <16 x i8>, <16 x i8>* %p21, align 16 + %22 = load <16 x i8>, <16 x i8>* %p22, align 16 + %23 = load <16 x i8>, <16 x i8>* %p23, align 16 + %24 = load <16 x i8>, <16 x i8>* %p24, align 16 + %25 = load <16 x i8>, <16 x i8>* %p25, align 16 + %26 = load <16 x i8>, <16 x i8>* %p26, align 16 + %27 = load <16 x i8>, <16 x i8>* %p27, align 16 + %28 = load <16 x i8>, <16 x i8>* %p28, align 16 + %29 = load <16 x i8>, <16 x i8>* %p29, align 16 + %30 = load <16 x i8>, <16 x i8>* %p30, align 16 + %31 = load <16 x i8>, <16 x i8>* %p31, align 16 + %32 = load <16 x i8>, <16 x i8>* %p32, align 16 + %33 = load <16 x i8>, <16 x i8>* %p33, align 16 %r1 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %0, <16 x i8> 
%1) %r2 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r1, <16 x i8> %2) %r3 = call <16 x i8> @llvm.mips.addv.b(<16 x i8> %r2, <16 x i8> %3) @@ -188,40 +188,40 @@ entry: %p31 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 31 %p32 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 32 %p33 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 33 - %0 = load <8 x i16>* %p0, align 16 - %1 = load <8 x i16>* %p1, align 16 - %2 = load <8 x i16>* %p2, align 16 - %3 = load <8 x i16>* %p3, align 16 - %4 = load <8 x i16>* %p4, align 16 - %5 = load <8 x i16>* %p5, align 16 - %6 = load <8 x i16>* %p6, align 16 - %7 = load <8 x i16>* %p7, align 16 - %8 = load <8 x i16>* %p8, align 16 - %9 = load <8 x i16>* %p9, align 16 - %10 = load <8 x i16>* %p10, align 16 - %11 = load <8 x i16>* %p11, align 16 - %12 = load <8 x i16>* %p12, align 16 - %13 = load <8 x i16>* %p13, align 16 - %14 = load <8 x i16>* %p14, align 16 - %15 = load <8 x i16>* %p15, align 16 - %16 = load <8 x i16>* %p16, align 16 - %17 = load <8 x i16>* %p17, align 16 - %18 = load <8 x i16>* %p18, align 16 - %19 = load <8 x i16>* %p19, align 16 - %20 = load <8 x i16>* %p20, align 16 - %21 = load <8 x i16>* %p21, align 16 - %22 = load <8 x i16>* %p22, align 16 - %23 = load <8 x i16>* %p23, align 16 - %24 = load <8 x i16>* %p24, align 16 - %25 = load <8 x i16>* %p25, align 16 - %26 = load <8 x i16>* %p26, align 16 - %27 = load <8 x i16>* %p27, align 16 - %28 = load <8 x i16>* %p28, align 16 - %29 = load <8 x i16>* %p29, align 16 - %30 = load <8 x i16>* %p30, align 16 - %31 = load <8 x i16>* %p31, align 16 - %32 = load <8 x i16>* %p32, align 16 - %33 = load <8 x i16>* %p33, align 16 + %0 = load <8 x i16>, <8 x i16>* %p0, align 16 + %1 = load <8 x i16>, <8 x i16>* %p1, align 16 + %2 = load <8 x i16>, <8 x i16>* %p2, align 16 + %3 = load <8 x i16>, <8 x i16>* %p3, align 16 + %4 = load <8 x i16>, <8 x i16>* %p4, align 16 + %5 = load <8 x i16>, <8 x i16>* %p5, align 16 + %6 = load <8 x i16>, <8 x i16>* %p6, align 16 + %7 = load <8 x i16>, <8 x i16>* %p7, align 16 + %8 = load <8 x i16>, <8 x i16>* %p8, align 16 + %9 = load <8 x i16>, <8 x i16>* %p9, align 16 + %10 = load <8 x i16>, <8 x i16>* %p10, align 16 + %11 = load <8 x i16>, <8 x i16>* %p11, align 16 + %12 = load <8 x i16>, <8 x i16>* %p12, align 16 + %13 = load <8 x i16>, <8 x i16>* %p13, align 16 + %14 = load <8 x i16>, <8 x i16>* %p14, align 16 + %15 = load <8 x i16>, <8 x i16>* %p15, align 16 + %16 = load <8 x i16>, <8 x i16>* %p16, align 16 + %17 = load <8 x i16>, <8 x i16>* %p17, align 16 + %18 = load <8 x i16>, <8 x i16>* %p18, align 16 + %19 = load <8 x i16>, <8 x i16>* %p19, align 16 + %20 = load <8 x i16>, <8 x i16>* %p20, align 16 + %21 = load <8 x i16>, <8 x i16>* %p21, align 16 + %22 = load <8 x i16>, <8 x i16>* %p22, align 16 + %23 = load <8 x i16>, <8 x i16>* %p23, align 16 + %24 = load <8 x i16>, <8 x i16>* %p24, align 16 + %25 = load <8 x i16>, <8 x i16>* %p25, align 16 + %26 = load <8 x i16>, <8 x i16>* %p26, align 16 + %27 = load <8 x i16>, <8 x i16>* %p27, align 16 + %28 = load <8 x i16>, <8 x i16>* %p28, align 16 + %29 = load <8 x i16>, <8 x i16>* %p29, align 16 + %30 = load <8 x i16>, <8 x i16>* %p30, align 16 + %31 = load <8 x i16>, <8 x i16>* %p31, align 16 + %32 = load <8 x i16>, <8 x i16>* %p32, align 16 + %33 = load <8 x i16>, <8 x i16>* %p33, align 16 %r1 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %0, <8 x i16> %1) %r2 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r1, <8 x i16> %2) %r3 = call <8 x i16> @llvm.mips.addv.h(<8 x i16> %r2, <8 x i16> %3) @@ -337,40 +337,40 @@ entry: 
%p31 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 31 %p32 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 32 %p33 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 33 - %0 = load <4 x i32>* %p0, align 16 - %1 = load <4 x i32>* %p1, align 16 - %2 = load <4 x i32>* %p2, align 16 - %3 = load <4 x i32>* %p3, align 16 - %4 = load <4 x i32>* %p4, align 16 - %5 = load <4 x i32>* %p5, align 16 - %6 = load <4 x i32>* %p6, align 16 - %7 = load <4 x i32>* %p7, align 16 - %8 = load <4 x i32>* %p8, align 16 - %9 = load <4 x i32>* %p9, align 16 - %10 = load <4 x i32>* %p10, align 16 - %11 = load <4 x i32>* %p11, align 16 - %12 = load <4 x i32>* %p12, align 16 - %13 = load <4 x i32>* %p13, align 16 - %14 = load <4 x i32>* %p14, align 16 - %15 = load <4 x i32>* %p15, align 16 - %16 = load <4 x i32>* %p16, align 16 - %17 = load <4 x i32>* %p17, align 16 - %18 = load <4 x i32>* %p18, align 16 - %19 = load <4 x i32>* %p19, align 16 - %20 = load <4 x i32>* %p20, align 16 - %21 = load <4 x i32>* %p21, align 16 - %22 = load <4 x i32>* %p22, align 16 - %23 = load <4 x i32>* %p23, align 16 - %24 = load <4 x i32>* %p24, align 16 - %25 = load <4 x i32>* %p25, align 16 - %26 = load <4 x i32>* %p26, align 16 - %27 = load <4 x i32>* %p27, align 16 - %28 = load <4 x i32>* %p28, align 16 - %29 = load <4 x i32>* %p29, align 16 - %30 = load <4 x i32>* %p30, align 16 - %31 = load <4 x i32>* %p31, align 16 - %32 = load <4 x i32>* %p32, align 16 - %33 = load <4 x i32>* %p33, align 16 + %0 = load <4 x i32>, <4 x i32>* %p0, align 16 + %1 = load <4 x i32>, <4 x i32>* %p1, align 16 + %2 = load <4 x i32>, <4 x i32>* %p2, align 16 + %3 = load <4 x i32>, <4 x i32>* %p3, align 16 + %4 = load <4 x i32>, <4 x i32>* %p4, align 16 + %5 = load <4 x i32>, <4 x i32>* %p5, align 16 + %6 = load <4 x i32>, <4 x i32>* %p6, align 16 + %7 = load <4 x i32>, <4 x i32>* %p7, align 16 + %8 = load <4 x i32>, <4 x i32>* %p8, align 16 + %9 = load <4 x i32>, <4 x i32>* %p9, align 16 + %10 = load <4 x i32>, <4 x i32>* %p10, align 16 + %11 = load <4 x i32>, <4 x i32>* %p11, align 16 + %12 = load <4 x i32>, <4 x i32>* %p12, align 16 + %13 = load <4 x i32>, <4 x i32>* %p13, align 16 + %14 = load <4 x i32>, <4 x i32>* %p14, align 16 + %15 = load <4 x i32>, <4 x i32>* %p15, align 16 + %16 = load <4 x i32>, <4 x i32>* %p16, align 16 + %17 = load <4 x i32>, <4 x i32>* %p17, align 16 + %18 = load <4 x i32>, <4 x i32>* %p18, align 16 + %19 = load <4 x i32>, <4 x i32>* %p19, align 16 + %20 = load <4 x i32>, <4 x i32>* %p20, align 16 + %21 = load <4 x i32>, <4 x i32>* %p21, align 16 + %22 = load <4 x i32>, <4 x i32>* %p22, align 16 + %23 = load <4 x i32>, <4 x i32>* %p23, align 16 + %24 = load <4 x i32>, <4 x i32>* %p24, align 16 + %25 = load <4 x i32>, <4 x i32>* %p25, align 16 + %26 = load <4 x i32>, <4 x i32>* %p26, align 16 + %27 = load <4 x i32>, <4 x i32>* %p27, align 16 + %28 = load <4 x i32>, <4 x i32>* %p28, align 16 + %29 = load <4 x i32>, <4 x i32>* %p29, align 16 + %30 = load <4 x i32>, <4 x i32>* %p30, align 16 + %31 = load <4 x i32>, <4 x i32>* %p31, align 16 + %32 = load <4 x i32>, <4 x i32>* %p32, align 16 + %33 = load <4 x i32>, <4 x i32>* %p33, align 16 %r1 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %0, <4 x i32> %1) %r2 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r1, <4 x i32> %2) %r3 = call <4 x i32> @llvm.mips.addv.w(<4 x i32> %r2, <4 x i32> %3) @@ -486,40 +486,40 @@ entry: %p31 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 31 %p32 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 32 %p33 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 33 - %0 = 
load <2 x i64>* %p0, align 16 - %1 = load <2 x i64>* %p1, align 16 - %2 = load <2 x i64>* %p2, align 16 - %3 = load <2 x i64>* %p3, align 16 - %4 = load <2 x i64>* %p4, align 16 - %5 = load <2 x i64>* %p5, align 16 - %6 = load <2 x i64>* %p6, align 16 - %7 = load <2 x i64>* %p7, align 16 - %8 = load <2 x i64>* %p8, align 16 - %9 = load <2 x i64>* %p9, align 16 - %10 = load <2 x i64>* %p10, align 16 - %11 = load <2 x i64>* %p11, align 16 - %12 = load <2 x i64>* %p12, align 16 - %13 = load <2 x i64>* %p13, align 16 - %14 = load <2 x i64>* %p14, align 16 - %15 = load <2 x i64>* %p15, align 16 - %16 = load <2 x i64>* %p16, align 16 - %17 = load <2 x i64>* %p17, align 16 - %18 = load <2 x i64>* %p18, align 16 - %19 = load <2 x i64>* %p19, align 16 - %20 = load <2 x i64>* %p20, align 16 - %21 = load <2 x i64>* %p21, align 16 - %22 = load <2 x i64>* %p22, align 16 - %23 = load <2 x i64>* %p23, align 16 - %24 = load <2 x i64>* %p24, align 16 - %25 = load <2 x i64>* %p25, align 16 - %26 = load <2 x i64>* %p26, align 16 - %27 = load <2 x i64>* %p27, align 16 - %28 = load <2 x i64>* %p28, align 16 - %29 = load <2 x i64>* %p29, align 16 - %30 = load <2 x i64>* %p30, align 16 - %31 = load <2 x i64>* %p31, align 16 - %32 = load <2 x i64>* %p32, align 16 - %33 = load <2 x i64>* %p33, align 16 + %0 = load <2 x i64>, <2 x i64>* %p0, align 16 + %1 = load <2 x i64>, <2 x i64>* %p1, align 16 + %2 = load <2 x i64>, <2 x i64>* %p2, align 16 + %3 = load <2 x i64>, <2 x i64>* %p3, align 16 + %4 = load <2 x i64>, <2 x i64>* %p4, align 16 + %5 = load <2 x i64>, <2 x i64>* %p5, align 16 + %6 = load <2 x i64>, <2 x i64>* %p6, align 16 + %7 = load <2 x i64>, <2 x i64>* %p7, align 16 + %8 = load <2 x i64>, <2 x i64>* %p8, align 16 + %9 = load <2 x i64>, <2 x i64>* %p9, align 16 + %10 = load <2 x i64>, <2 x i64>* %p10, align 16 + %11 = load <2 x i64>, <2 x i64>* %p11, align 16 + %12 = load <2 x i64>, <2 x i64>* %p12, align 16 + %13 = load <2 x i64>, <2 x i64>* %p13, align 16 + %14 = load <2 x i64>, <2 x i64>* %p14, align 16 + %15 = load <2 x i64>, <2 x i64>* %p15, align 16 + %16 = load <2 x i64>, <2 x i64>* %p16, align 16 + %17 = load <2 x i64>, <2 x i64>* %p17, align 16 + %18 = load <2 x i64>, <2 x i64>* %p18, align 16 + %19 = load <2 x i64>, <2 x i64>* %p19, align 16 + %20 = load <2 x i64>, <2 x i64>* %p20, align 16 + %21 = load <2 x i64>, <2 x i64>* %p21, align 16 + %22 = load <2 x i64>, <2 x i64>* %p22, align 16 + %23 = load <2 x i64>, <2 x i64>* %p23, align 16 + %24 = load <2 x i64>, <2 x i64>* %p24, align 16 + %25 = load <2 x i64>, <2 x i64>* %p25, align 16 + %26 = load <2 x i64>, <2 x i64>* %p26, align 16 + %27 = load <2 x i64>, <2 x i64>* %p27, align 16 + %28 = load <2 x i64>, <2 x i64>* %p28, align 16 + %29 = load <2 x i64>, <2 x i64>* %p29, align 16 + %30 = load <2 x i64>, <2 x i64>* %p30, align 16 + %31 = load <2 x i64>, <2 x i64>* %p31, align 16 + %32 = load <2 x i64>, <2 x i64>* %p32, align 16 + %33 = load <2 x i64>, <2 x i64>* %p33, align 16 %r1 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %0, <2 x i64> %1) %r2 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r1, <2 x i64> %2) %r3 = call <2 x i64> @llvm.mips.addv.d(<2 x i64> %r2, <2 x i64> %3) diff --git a/llvm/test/CodeGen/Mips/msa/vec.ll b/llvm/test/CodeGen/Mips/msa/vec.ll index d5b97f52fb8..8790923ce72 100644 --- a/llvm/test/CodeGen/Mips/msa/vec.ll +++ b/llvm/test/CodeGen/Mips/msa/vec.ll @@ -9,8 +9,8 @@ define void @llvm_mips_and_v_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2 + %0 
= load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG2 %2 = bitcast <16 x i8> %0 to <16 x i8> %3 = bitcast <16 x i8> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3) @@ -32,8 +32,8 @@ entry: define void @llvm_mips_and_v_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG2 %2 = bitcast <8 x i16> %0 to <16 x i8> %3 = bitcast <8 x i16> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3) @@ -55,8 +55,8 @@ entry: define void @llvm_mips_and_v_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG2 %2 = bitcast <4 x i32> %0 to <16 x i8> %3 = bitcast <4 x i32> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3) @@ -78,8 +78,8 @@ entry: define void @llvm_mips_and_v_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG2 %2 = bitcast <2 x i64> %0 to <16 x i8> %3 = bitcast <2 x i64> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.and.v(<16 x i8> %2, <16 x i8> %3) @@ -97,8 +97,8 @@ entry: ; define void @and_v_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_and_v_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_and_v_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_and_v_b_ARG2 %2 = and <16 x i8> %0, %1 store <16 x i8> %2, <16 x i8>* @llvm_mips_and_v_b_RES ret void @@ -113,8 +113,8 @@ entry: ; define void @and_v_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_and_v_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_and_v_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_and_v_h_ARG2 %2 = and <8 x i16> %0, %1 store <8 x i16> %2, <8 x i16>* @llvm_mips_and_v_h_RES ret void @@ -130,8 +130,8 @@ entry: define void @and_v_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_and_v_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_and_v_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_and_v_w_ARG2 %2 = and <4 x i32> %0, %1 store <4 x i32> %2, <4 x i32>* @llvm_mips_and_v_w_RES ret void @@ -147,8 +147,8 @@ entry: define void @and_v_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_and_v_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_and_v_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_and_v_d_ARG2 %2 = and <2 x i64> %0, %1 store <2 x i64> %2, <2 x i64>* @llvm_mips_and_v_d_RES ret void @@ -168,9 +168,9 @@ entry: define void @llvm_mips_bmnz_v_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG2 - %2 = load <16 x i8>* @llvm_mips_bmnz_v_b_ARG3 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG2 + %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bmnz_v_b_ARG3 %3 = bitcast <16 x i8> %0 to <16 x i8> %4 = bitcast <16 x i8> %1 to <16 x i8> %5 = 
bitcast <16 x i8> %2 to <16 x i8> @@ -198,9 +198,9 @@ entry: define void @llvm_mips_bmnz_v_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_bmnz_v_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bmnz_v_h_ARG3 %3 = bitcast <8 x i16> %0 to <16 x i8> %4 = bitcast <8 x i16> %1 to <16 x i8> %5 = bitcast <8 x i16> %2 to <16 x i8> @@ -228,9 +228,9 @@ entry: define void @llvm_mips_bmnz_v_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_bmnz_v_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bmnz_v_w_ARG3 %3 = bitcast <4 x i32> %0 to <16 x i8> %4 = bitcast <4 x i32> %1 to <16 x i8> %5 = bitcast <4 x i32> %2 to <16 x i8> @@ -258,9 +258,9 @@ entry: define void @llvm_mips_bmnz_v_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG2 - %2 = load <2 x i64>* @llvm_mips_bmnz_v_d_ARG3 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG2 + %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bmnz_v_d_ARG3 %3 = bitcast <2 x i64> %0 to <16 x i8> %4 = bitcast <2 x i64> %1 to <16 x i8> %5 = bitcast <2 x i64> %2 to <16 x i8> @@ -288,9 +288,9 @@ entry: define void @llvm_mips_bmz_v_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG2 - %2 = load <16 x i8>* @llvm_mips_bmz_v_b_ARG3 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG2 + %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bmz_v_b_ARG3 %3 = bitcast <16 x i8> %0 to <16 x i8> %4 = bitcast <16 x i8> %1 to <16 x i8> %5 = bitcast <16 x i8> %2 to <16 x i8> @@ -319,9 +319,9 @@ entry: define void @llvm_mips_bmz_v_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_bmz_v_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bmz_v_h_ARG3 %3 = bitcast <8 x i16> %0 to <16 x i8> %4 = bitcast <8 x i16> %1 to <16 x i8> %5 = bitcast <8 x i16> %2 to <16 x i8> @@ -350,9 +350,9 @@ entry: define void @llvm_mips_bmz_v_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_bmz_v_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bmz_v_w_ARG3 %3 = bitcast <4 x i32> %0 to <16 x i8> %4 = bitcast <4 x i32> %1 to <16 x i8> %5 = bitcast <4 x i32> %2 to <16 x i8> @@ -381,9 +381,9 @@ entry: define void @llvm_mips_bmz_v_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG2 - %2 = load <2 x i64>* @llvm_mips_bmz_v_d_ARG3 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG2 + %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bmz_v_d_ARG3 %3 = 
bitcast <2 x i64> %0 to <16 x i8> %4 = bitcast <2 x i64> %1 to <16 x i8> %5 = bitcast <2 x i64> %2 to <16 x i8> @@ -412,9 +412,9 @@ entry: define void @llvm_mips_bsel_v_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG2 - %2 = load <16 x i8>* @llvm_mips_bsel_v_b_ARG3 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG2 + %2 = load <16 x i8>, <16 x i8>* @llvm_mips_bsel_v_b_ARG3 %3 = bitcast <16 x i8> %0 to <16 x i8> %4 = bitcast <16 x i8> %1 to <16 x i8> %5 = bitcast <16 x i8> %2 to <16 x i8> @@ -443,9 +443,9 @@ entry: define void @llvm_mips_bsel_v_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG2 - %2 = load <8 x i16>* @llvm_mips_bsel_v_h_ARG3 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG2 + %2 = load <8 x i16>, <8 x i16>* @llvm_mips_bsel_v_h_ARG3 %3 = bitcast <8 x i16> %0 to <16 x i8> %4 = bitcast <8 x i16> %1 to <16 x i8> %5 = bitcast <8 x i16> %2 to <16 x i8> @@ -474,9 +474,9 @@ entry: define void @llvm_mips_bsel_v_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG2 - %2 = load <4 x i32>* @llvm_mips_bsel_v_w_ARG3 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG2 + %2 = load <4 x i32>, <4 x i32>* @llvm_mips_bsel_v_w_ARG3 %3 = bitcast <4 x i32> %0 to <16 x i8> %4 = bitcast <4 x i32> %1 to <16 x i8> %5 = bitcast <4 x i32> %2 to <16 x i8> @@ -505,9 +505,9 @@ entry: define void @llvm_mips_bsel_v_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG2 - %2 = load <2 x i64>* @llvm_mips_bsel_v_d_ARG3 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG2 + %2 = load <2 x i64>, <2 x i64>* @llvm_mips_bsel_v_d_ARG3 %3 = bitcast <2 x i64> %0 to <16 x i8> %4 = bitcast <2 x i64> %1 to <16 x i8> %5 = bitcast <2 x i64> %2 to <16 x i8> @@ -535,8 +535,8 @@ entry: define void @llvm_mips_nor_v_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_nor_v_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_nor_v_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_nor_v_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_nor_v_b_ARG2 %2 = bitcast <16 x i8> %0 to <16 x i8> %3 = bitcast <16 x i8> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3) @@ -558,8 +558,8 @@ entry: define void @llvm_mips_nor_v_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_nor_v_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_nor_v_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_nor_v_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_nor_v_h_ARG2 %2 = bitcast <8 x i16> %0 to <16 x i8> %3 = bitcast <8 x i16> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3) @@ -581,8 +581,8 @@ entry: define void @llvm_mips_nor_v_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_nor_v_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_nor_v_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_nor_v_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_nor_v_w_ARG2 %2 = bitcast <4 x i32> %0 to <16 x i8> %3 = bitcast <4 x i32> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3) @@ -604,8 +604,8 @@ entry: 
define void @llvm_mips_nor_v_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_nor_v_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_nor_v_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_nor_v_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_nor_v_d_ARG2 %2 = bitcast <2 x i64> %0 to <16 x i8> %3 = bitcast <2 x i64> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.nor.v(<16 x i8> %2, <16 x i8> %3) @@ -627,8 +627,8 @@ entry: define void @llvm_mips_or_v_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG2 %2 = bitcast <16 x i8> %0 to <16 x i8> %3 = bitcast <16 x i8> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3) @@ -650,8 +650,8 @@ entry: define void @llvm_mips_or_v_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG2 %2 = bitcast <8 x i16> %0 to <16 x i8> %3 = bitcast <8 x i16> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3) @@ -673,8 +673,8 @@ entry: define void @llvm_mips_or_v_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG2 %2 = bitcast <4 x i32> %0 to <16 x i8> %3 = bitcast <4 x i32> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3) @@ -696,8 +696,8 @@ entry: define void @llvm_mips_or_v_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG2 %2 = bitcast <2 x i64> %0 to <16 x i8> %3 = bitcast <2 x i64> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.or.v(<16 x i8> %2, <16 x i8> %3) @@ -715,8 +715,8 @@ entry: ; define void @or_v_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_or_v_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_or_v_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_or_v_b_ARG2 %2 = or <16 x i8> %0, %1 store <16 x i8> %2, <16 x i8>* @llvm_mips_or_v_b_RES ret void @@ -731,8 +731,8 @@ entry: ; define void @or_v_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_or_v_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_or_v_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_or_v_h_ARG2 %2 = or <8 x i16> %0, %1 store <8 x i16> %2, <8 x i16>* @llvm_mips_or_v_h_RES ret void @@ -748,8 +748,8 @@ entry: define void @or_v_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_or_v_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_or_v_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_or_v_w_ARG2 %2 = or <4 x i32> %0, %1 store <4 x i32> %2, <4 x i32>* @llvm_mips_or_v_w_RES ret void @@ -765,8 +765,8 @@ entry: define void @or_v_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_or_v_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_or_v_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_or_v_d_ARG2 %2 = or <2 x i64> %0, 
%1 store <2 x i64> %2, <2 x i64>* @llvm_mips_or_v_d_RES ret void @@ -785,8 +785,8 @@ entry: define void @llvm_mips_xor_v_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG2 %2 = bitcast <16 x i8> %0 to <16 x i8> %3 = bitcast <16 x i8> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3) @@ -808,8 +808,8 @@ entry: define void @llvm_mips_xor_v_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG2 %2 = bitcast <8 x i16> %0 to <16 x i8> %3 = bitcast <8 x i16> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3) @@ -831,8 +831,8 @@ entry: define void @llvm_mips_xor_v_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG2 %2 = bitcast <4 x i32> %0 to <16 x i8> %3 = bitcast <4 x i32> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3) @@ -854,8 +854,8 @@ entry: define void @llvm_mips_xor_v_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG2 %2 = bitcast <2 x i64> %0 to <16 x i8> %3 = bitcast <2 x i64> %1 to <16 x i8> %4 = tail call <16 x i8> @llvm.mips.xor.v(<16 x i8> %2, <16 x i8> %3) @@ -873,8 +873,8 @@ entry: ; define void @xor_v_b_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_xor_v_b_ARG1 - %1 = load <16 x i8>* @llvm_mips_xor_v_b_ARG2 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG1 + %1 = load <16 x i8>, <16 x i8>* @llvm_mips_xor_v_b_ARG2 %2 = xor <16 x i8> %0, %1 store <16 x i8> %2, <16 x i8>* @llvm_mips_xor_v_b_RES ret void @@ -889,8 +889,8 @@ entry: ; define void @xor_v_h_test() nounwind { entry: - %0 = load <8 x i16>* @llvm_mips_xor_v_h_ARG1 - %1 = load <8 x i16>* @llvm_mips_xor_v_h_ARG2 + %0 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG1 + %1 = load <8 x i16>, <8 x i16>* @llvm_mips_xor_v_h_ARG2 %2 = xor <8 x i16> %0, %1 store <8 x i16> %2, <8 x i16>* @llvm_mips_xor_v_h_RES ret void @@ -906,8 +906,8 @@ entry: define void @xor_v_w_test() nounwind { entry: - %0 = load <4 x i32>* @llvm_mips_xor_v_w_ARG1 - %1 = load <4 x i32>* @llvm_mips_xor_v_w_ARG2 + %0 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG1 + %1 = load <4 x i32>, <4 x i32>* @llvm_mips_xor_v_w_ARG2 %2 = xor <4 x i32> %0, %1 store <4 x i32> %2, <4 x i32>* @llvm_mips_xor_v_w_RES ret void @@ -923,8 +923,8 @@ entry: define void @xor_v_d_test() nounwind { entry: - %0 = load <2 x i64>* @llvm_mips_xor_v_d_ARG1 - %1 = load <2 x i64>* @llvm_mips_xor_v_d_ARG2 + %0 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG1 + %1 = load <2 x i64>, <2 x i64>* @llvm_mips_xor_v_d_ARG2 %2 = xor <2 x i64> %0, %1 store <2 x i64> %2, <2 x i64>* @llvm_mips_xor_v_d_RES ret void diff --git a/llvm/test/CodeGen/Mips/msa/vecs10.ll b/llvm/test/CodeGen/Mips/msa/vecs10.ll index e22e0755ef0..f442f772744 100644 --- a/llvm/test/CodeGen/Mips/msa/vecs10.ll +++ b/llvm/test/CodeGen/Mips/msa/vecs10.ll @@ -7,7 +7,7 @@ 
define i32 @llvm_mips_bnz_v_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bnz_v_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bnz_v_ARG1 %1 = tail call i32 @llvm.mips.bnz.v(<16 x i8> %0) %2 = icmp eq i32 %1, 0 br i1 %2, label %true, label %false @@ -28,7 +28,7 @@ declare i32 @llvm.mips.bnz.v(<16 x i8>) nounwind define i32 @llvm_mips_bz_v_test() nounwind { entry: - %0 = load <16 x i8>* @llvm_mips_bz_v_ARG1 + %0 = load <16 x i8>, <16 x i8>* @llvm_mips_bz_v_ARG1 %1 = tail call i32 @llvm.mips.bz.v(<16 x i8> %0) %2 = icmp eq i32 %1, 0 br i1 %2, label %true, label %false diff --git a/llvm/test/CodeGen/Mips/mul.ll b/llvm/test/CodeGen/Mips/mul.ll index 4ce801b1c9f..3231f9cac38 100644 --- a/llvm/test/CodeGen/Mips/mul.ll +++ b/llvm/test/CodeGen/Mips/mul.ll @@ -6,8 +6,8 @@ define void @test() nounwind { entry: - %0 = load i32* @iiii, align 4 - %1 = load i32* @jjjj, align 4 + %0 = load i32, i32* @iiii, align 4 + %1 = load i32, i32* @jjjj, align 4 %mul = mul nsw i32 %1, %0 ; 16: mult ${{[0-9]+}}, ${{[0-9]+}} ; 16: mflo ${{[0-9]+}} diff --git a/llvm/test/CodeGen/Mips/mulll.ll b/llvm/test/CodeGen/Mips/mulll.ll index e37b9197df8..6e5ba647b8b 100644 --- a/llvm/test/CodeGen/Mips/mulll.ll +++ b/llvm/test/CodeGen/Mips/mulll.ll @@ -6,8 +6,8 @@ define void @test() nounwind { entry: - %0 = load i64* @iiii, align 8 - %1 = load i64* @jjjj, align 8 + %0 = load i64, i64* @iiii, align 8 + %1 = load i64, i64* @jjjj, align 8 %mul = mul nsw i64 %1, %0 store i64 %mul, i64* @kkkk, align 8 ; 16: multu ${{[0-9]+}}, ${{[0-9]+}} diff --git a/llvm/test/CodeGen/Mips/mulull.ll b/llvm/test/CodeGen/Mips/mulull.ll index 4d23c693184..c1334484fb6 100644 --- a/llvm/test/CodeGen/Mips/mulull.ll +++ b/llvm/test/CodeGen/Mips/mulull.ll @@ -7,8 +7,8 @@ define void @test() nounwind { entry: - %0 = load i64* @iiii, align 8 - %1 = load i64* @jjjj, align 8 + %0 = load i64, i64* @iiii, align 8 + %1 = load i64, i64* @jjjj, align 8 %mul = mul nsw i64 %1, %0 store i64 %mul, i64* @kkkk, align 8 ; 16: multu ${{[0-9]+}}, ${{[0-9]+}} diff --git a/llvm/test/CodeGen/Mips/nacl-align.ll b/llvm/test/CodeGen/Mips/nacl-align.ll index 892a7edd17c..ec8f3f06afd 100644 --- a/llvm/test/CodeGen/Mips/nacl-align.ll +++ b/llvm/test/CodeGen/Mips/nacl-align.ll @@ -68,7 +68,7 @@ default: define i32 @test2(i32 %i) { entry: %elementptr = getelementptr inbounds [2 x i8*], [2 x i8*]* @bb_array, i32 0, i32 %i - %0 = load i8** %elementptr, align 4 + %0 = load i8*, i8** %elementptr, align 4 indirectbr i8* %0, [label %bb1, label %bb2] bb1: diff --git a/llvm/test/CodeGen/Mips/nacl-branch-delay.ll b/llvm/test/CodeGen/Mips/nacl-branch-delay.ll index d251eee0752..2927f39a416 100644 --- a/llvm/test/CodeGen/Mips/nacl-branch-delay.ll +++ b/llvm/test/CodeGen/Mips/nacl-branch-delay.ll @@ -10,7 +10,7 @@ declare void @f2() define void @test1() { - %1 = load i32* @x, align 4 + %1 = load i32, i32* @x, align 4 call void @f1(i32 %1) ret void diff --git a/llvm/test/CodeGen/Mips/nacl-reserved-regs.ll b/llvm/test/CodeGen/Mips/nacl-reserved-regs.ll index ae21283b1fb..efe2a663a3c 100644 --- a/llvm/test/CodeGen/Mips/nacl-reserved-regs.ll +++ b/llvm/test/CodeGen/Mips/nacl-reserved-regs.ll @@ -5,22 +5,22 @@ @var = external global i32 define void @f() { - %val1 = load volatile i32* @var - %val2 = load volatile i32* @var - %val3 = load volatile i32* @var - %val4 = load volatile i32* @var - %val5 = load volatile i32* @var - %val6 = load volatile i32* @var - %val7 = load volatile i32* @var - %val8 = load volatile i32* @var - %val9 = load volatile i32* @var - %val10 = load volatile i32* 
@var - %val11 = load volatile i32* @var - %val12 = load volatile i32* @var - %val13 = load volatile i32* @var - %val14 = load volatile i32* @var - %val15 = load volatile i32* @var - %val16 = load volatile i32* @var + %val1 = load volatile i32, i32* @var + %val2 = load volatile i32, i32* @var + %val3 = load volatile i32, i32* @var + %val4 = load volatile i32, i32* @var + %val5 = load volatile i32, i32* @var + %val6 = load volatile i32, i32* @var + %val7 = load volatile i32, i32* @var + %val8 = load volatile i32, i32* @var + %val9 = load volatile i32, i32* @var + %val10 = load volatile i32, i32* @var + %val11 = load volatile i32, i32* @var + %val12 = load volatile i32, i32* @var + %val13 = load volatile i32, i32* @var + %val14 = load volatile i32, i32* @var + %val15 = load volatile i32, i32* @var + %val16 = load volatile i32, i32* @var store volatile i32 %val1, i32* @var store volatile i32 %val2, i32* @var store volatile i32 %val3, i32* @var diff --git a/llvm/test/CodeGen/Mips/neg1.ll b/llvm/test/CodeGen/Mips/neg1.ll index 281e6262156..c24d78b16ec 100644 --- a/llvm/test/CodeGen/Mips/neg1.ll +++ b/llvm/test/CodeGen/Mips/neg1.ll @@ -5,7 +5,7 @@ define i32 @main() nounwind { entry: - %0 = load i32* @i, align 4 + %0 = load i32, i32* @i, align 4 %sub = sub nsw i32 0, %0 ; 16: neg ${{[0-9]+}}, ${{[0-9]+}} %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([5 x i8]* @.str, i32 0, i32 0), i32 %sub) diff --git a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll index 30dd1ff82d7..cf79557cc97 100644 --- a/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll +++ b/llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll @@ -8,7 +8,7 @@ entry: ; Force the float into an odd-numbered register using named registers and ; load the vector. %b = call float asm sideeffect "mov.s $0, $1", "={$f13},{$f12}" (float %a) - %0 = load volatile <4 x float>* @v4f32 + %0 = load volatile <4 x float>, <4 x float>* @v4f32 ; Clobber all except $f12/$w12 and $f13 ; @@ -42,7 +42,7 @@ entry: ; Force the float into an odd-numbered register using named registers and ; load the vector. 
%b = call float asm sideeffect "mov.s $0, $1", "={$f13},{$f12}" (float %a) - %0 = load volatile <4 x float>* @v4f32 + %0 = load volatile <4 x float>, <4 x float>* @v4f32 ; Clobber all except $f12/$w12 and $f13 ; @@ -73,7 +73,7 @@ entry: define float @msa_extract_0() { entry: - %0 = load volatile <4 x float>* @v4f32 + %0 = load volatile <4 x float>, <4 x float>* @v4f32 %1 = call <4 x float> asm sideeffect "move.v $0, $1", "={$w13},{$w12}" (<4 x float> %0) ; Clobber all except $f12, and $f13 @@ -101,7 +101,7 @@ entry: define float @msa_extract_1() { entry: - %0 = load volatile <4 x float>* @v4f32 + %0 = load volatile <4 x float>, <4 x float>* @v4f32 %1 = call <4 x float> asm sideeffect "move.v $0, $1", "={$w13},{$w12}" (<4 x float> %0) ; Clobber all except $f13 diff --git a/llvm/test/CodeGen/Mips/nomips16.ll b/llvm/test/CodeGen/Mips/nomips16.ll index 5f7d74e4197..418d8ead2c3 100644 --- a/llvm/test/CodeGen/Mips/nomips16.ll +++ b/llvm/test/CodeGen/Mips/nomips16.ll @@ -6,7 +6,7 @@ ; Function Attrs: nounwind define void @foo() #0 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %conv = fpext float %0 to double %add = fadd double %conv, 1.500000e+00 %conv1 = fptrunc double %add to float @@ -20,7 +20,7 @@ entry: ; Function Attrs: nounwind define void @nofoo() #1 { entry: - %0 = load float* @x, align 4 + %0 = load float, float* @x, align 4 %conv = fpext float %0 to double %add = fadd double %conv, 3.900000e+00 %conv1 = fptrunc double %add to float diff --git a/llvm/test/CodeGen/Mips/not1.ll b/llvm/test/CodeGen/Mips/not1.ll index 2163b236c56..52d29f01d06 100644 --- a/llvm/test/CodeGen/Mips/not1.ll +++ b/llvm/test/CodeGen/Mips/not1.ll @@ -6,7 +6,7 @@ define i32 @main() nounwind { entry: - %0 = load i32* @x, align 4 + %0 = load i32, i32* @x, align 4 %neg = xor i32 %0, -1 ; 16: not ${{[0-9]+}}, ${{[0-9]+}} %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0), i32 %neg) diff --git a/llvm/test/CodeGen/Mips/o32_cc_byval.ll b/llvm/test/CodeGen/Mips/o32_cc_byval.ll index dde5caa75d0..108c663ab1c 100644 --- a/llvm/test/CodeGen/Mips/o32_cc_byval.ll +++ b/llvm/test/CodeGen/Mips/o32_cc_byval.ll @@ -62,17 +62,17 @@ entry: ; CHECK: mfc1 $6, $f[[F0]] %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5 - %tmp = load i32* %i2, align 4 + %tmp = load i32, i32* %i2, align 4 %d = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 4 - %tmp1 = load double* %d, align 8 + %tmp1 = load double, double* %d, align 8 %ll = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 3 - %tmp2 = load i64* %ll, align 8 + %tmp2 = load i64, i64* %ll, align 8 %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2 - %tmp3 = load i32* %i, align 4 + %tmp3 = load i32, i32* %i, align 4 %s = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1 - %tmp4 = load i16* %s, align 2 + %tmp4 = load i16, i16* %s, align 2 %c = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 0 - %tmp5 = load i8* %c, align 1 + %tmp5 = load i8, i8* %c, align 1 tail call void @callee4(i32 %tmp, double %tmp1, i64 %tmp2, i32 %tmp3, i16 signext %tmp4, i8 signext %tmp5, float %f) nounwind ret void } @@ -91,9 +91,9 @@ entry: ; CHECK: sw $[[R0]], 24($sp) %arrayidx = getelementptr inbounds %struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 0 - %tmp = load i32* %arrayidx, align 4 + %tmp = load i32, i32* %arrayidx, align 4 %arrayidx2 = getelementptr inbounds %struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 3 - %tmp3 = load i32* %arrayidx2, 
align 4 + %tmp3 = load i32, i32* %arrayidx2, align 4 tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp3, i16 signext 4, i8 signext 5, float 6.000000e+00) nounwind ret void } @@ -111,11 +111,11 @@ entry: ; CHECK: sw $[[R1]], 24($sp) %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2 - %tmp = load i32* %i, align 4 + %tmp = load i32, i32* %i, align 4 %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5 - %tmp1 = load i32* %i2, align 4 + %tmp1 = load i32, i32* %i2, align 4 %c = getelementptr inbounds %struct.S3, %struct.S3* %s3, i32 0, i32 0 - %tmp2 = load i8* %c, align 1 + %tmp2 = load i8, i8* %c, align 1 tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp1, i16 signext 4, i8 signext %tmp2, float 6.000000e+00) nounwind ret void } diff --git a/llvm/test/CodeGen/Mips/o32_cc_vararg.ll b/llvm/test/CodeGen/Mips/o32_cc_vararg.ll index 10972e884ac..b4597a3214e 100644 --- a/llvm/test/CodeGen/Mips/o32_cc_vararg.ll +++ b/llvm/test/CodeGen/Mips/o32_cc_vararg.ll @@ -24,7 +24,7 @@ entry: store i32 %0, i32* %b, align 4 %ap2 = bitcast i8** %ap to i8* call void @llvm.va_end(i8* %ap2) - %tmp = load i32* %b, align 4 + %tmp = load i32, i32* %b, align 4 ret i32 %tmp ; CHECK-LABEL: va1: @@ -50,7 +50,7 @@ entry: store double %0, double* %b, align 8 %ap2 = bitcast i8** %ap to i8* call void @llvm.va_end(i8* %ap2) - %tmp = load double* %b, align 8 + %tmp = load double, double* %b, align 8 ret double %tmp ; CHECK-LABEL: va2: @@ -78,7 +78,7 @@ entry: store i32 %0, i32* %b, align 4 %ap2 = bitcast i8** %ap to i8* call void @llvm.va_end(i8* %ap2) - %tmp = load i32* %b, align 4 + %tmp = load i32, i32* %b, align 4 ret i32 %tmp ; CHECK-LABEL: va3: @@ -101,7 +101,7 @@ entry: store double %0, double* %b, align 8 %ap2 = bitcast i8** %ap to i8* call void @llvm.va_end(i8* %ap2) - %tmp = load double* %b, align 8 + %tmp = load double, double* %b, align 8 ret double %tmp ; CHECK-LABEL: va4: @@ -129,7 +129,7 @@ entry: store i32 %0, i32* %d, align 4 %ap2 = bitcast i8** %ap to i8* call void @llvm.va_end(i8* %ap2) - %tmp = load i32* %d, align 4 + %tmp = load i32, i32* %d, align 4 ret i32 %tmp ; CHECK-LABEL: va5: @@ -155,7 +155,7 @@ entry: store double %0, double* %d, align 8 %ap2 = bitcast i8** %ap to i8* call void @llvm.va_end(i8* %ap2) - %tmp = load double* %d, align 8 + %tmp = load double, double* %d, align 8 ret double %tmp ; CHECK-LABEL: va6: @@ -183,7 +183,7 @@ entry: store i32 %0, i32* %c, align 4 %ap2 = bitcast i8** %ap to i8* call void @llvm.va_end(i8* %ap2) - %tmp = load i32* %c, align 4 + %tmp = load i32, i32* %c, align 4 ret i32 %tmp ; CHECK-LABEL: va7: @@ -206,7 +206,7 @@ entry: store double %0, double* %c, align 8 %ap2 = bitcast i8** %ap to i8* call void @llvm.va_end(i8* %ap2) - %tmp = load double* %c, align 8 + %tmp = load double, double* %c, align 8 ret double %tmp ; CHECK-LABEL: va8: @@ -232,7 +232,7 @@ entry: store i32 %0, i32* %d, align 4 %ap2 = bitcast i8** %ap to i8* call void @llvm.va_end(i8* %ap2) - %tmp = load i32* %d, align 4 + %tmp = load i32, i32* %d, align 4 ret i32 %tmp ; CHECK-LABEL: va9: @@ -257,7 +257,7 @@ entry: store double %0, double* %d, align 8 %ap2 = bitcast i8** %ap to i8* call void @llvm.va_end(i8* %ap2) - %tmp = load double* %d, align 8 + %tmp = load double, double* %d, align 8 ret double %tmp ; CHECK-LABEL: va10: diff --git a/llvm/test/CodeGen/Mips/optimize-pic-o0.ll b/llvm/test/CodeGen/Mips/optimize-pic-o0.ll index 554d49e728c..454bc851484 100644 --- a/llvm/test/CodeGen/Mips/optimize-pic-o0.ll +++ 
b/llvm/test/CodeGen/Mips/optimize-pic-o0.ll @@ -10,7 +10,7 @@ entry: br label %for.cond for.cond: ; preds = %for.inc, %entry - %0 = load i32* %i, align 4 + %0 = load i32, i32* %i, align 4 %cmp = icmp slt i32 %0, 10 br i1 %cmp, label %for.body, label %for.end @@ -20,13 +20,13 @@ for.body: ; preds = %for.cond br label %for.inc for.inc: ; preds = %for.body - %1 = load i32* %i, align 4 + %1 = load i32, i32* %i, align 4 %inc = add nsw i32 %1, 1 store i32 %inc, i32* %i, align 4 br label %for.cond for.end: ; preds = %for.cond - %2 = load i32* %retval + %2 = load i32, i32* %retval ret i32 %2 } diff --git a/llvm/test/CodeGen/Mips/or1.ll b/llvm/test/CodeGen/Mips/or1.ll index b1c36961f92..719356cedeb 100644 --- a/llvm/test/CodeGen/Mips/or1.ll +++ b/llvm/test/CodeGen/Mips/or1.ll @@ -6,8 +6,8 @@ define i32 @main() nounwind { entry: - %0 = load i32* @x, align 4 - %1 = load i32* @y, align 4 + %0 = load i32, i32* @x, align 4 + %1 = load i32, i32* @y, align 4 %or = or i32 %0, %1 ; 16: or ${{[0-9]+}}, ${{[0-9]+}} %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0), i32 %or) diff --git a/llvm/test/CodeGen/Mips/prevent-hoisting.ll b/llvm/test/CodeGen/Mips/prevent-hoisting.ll index 3d902431a00..8a84ff01bff 100644 --- a/llvm/test/CodeGen/Mips/prevent-hoisting.ll +++ b/llvm/test/CodeGen/Mips/prevent-hoisting.ll @@ -46,7 +46,7 @@ define void @readLumaCoeff8x8_CABAC(%struct.img_par* %img, i32 %b8) { - %1 = load i32* undef, align 4 + %1 = load i32, i32* undef, align 4 br i1 false, label %2, label %3 ;
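
Every hunk above applies the same mechanical rewrite: the `load` instruction gains an explicit result-type operand, followed by a comma, ahead of the existing pointer operand, and nothing else in the instruction changes. As a minimal before/after sketch (both lines are copied from hunks in this patch, `@iiii` from mul.ll and `@v4f32` from no-odd-spreg-msa.ll, so no names here are invented):

    ; Before: the result type was implied by the pointee type of the operand.
    %0 = load i32* @iiii, align 4
    %1 = load volatile <4 x float>* @v4f32

    ; After: the result type is spelled out explicitly, then the pointer.
    %0 = load i32, i32* @iiii, align 4
    %1 = load volatile <4 x float>, <4 x float>* @v4f32

Note that qualifiers such as `volatile` (and `atomic`, where present) keep their position between `load` and the new type operand, and trailing attributes such as `align` are untouched; only the inserted `<type>,` distinguishes the two forms.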