From a79ac14fa68297f9888bc70a10df5ed9b8864e38 Mon Sep 17 00:00:00 2001
From: David Blaikie
Date: Fri, 27 Feb 2015 21:17:42 +0000
Subject: [opaque pointer type] Add textual IR support for explicit type
 parameter to load instruction

Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few more
test case improvements/changes were required this time around: (r229269-r229278)

import fileinput
import sys
import re

pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
---
llvm/test/CodeGen/AArch64/128bit_load_store.ll | 6 +- llvm/test/CodeGen/AArch64/PBQP-chain.ll | 48 +- llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll | 4 +- llvm/test/CodeGen/AArch64/PBQP-csr.ll | 18 +- llvm/test/CodeGen/AArch64/Redundantstore.ll | 2 +- llvm/test/CodeGen/AArch64/a57-csel.ll | 2 +- .../aarch64-2014-08-11-MachineCombinerCrash.ll | 2 +- .../AArch64/aarch64-2014-12-02-combine-soften.ll | 2 +- .../AArch64/aarch64-a57-fp-load-balancing.ll | 88 +-- .../aarch64-address-type-promotion-assertion.ll | 2 +- .../AArch64/aarch64-address-type-promotion.ll | 4 +- llvm/test/CodeGen/AArch64/aarch64-be-bv.ll | 64 +- .../AArch64/aarch64-fix-cortex-a53-835769.ll | 58 +- llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll | 8 +- llvm/test/CodeGen/AArch64/aarch64-smull.ll | 96 +-- llvm/test/CodeGen/AArch64/addsub-shifted.ll | 20 +- llvm/test/CodeGen/AArch64/addsub.ll | 18 +- llvm/test/CodeGen/AArch64/addsub_ext.ll | 16 +- llvm/test/CodeGen/AArch64/alloca.ll | 2 +- llvm/test/CodeGen/AArch64/and-mask-removal.ll | 4 +- .../CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll | 2 +- .../CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll | 2 +- .../AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll | 2 +- .../CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll | 10 +- .../CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll | 16 +- llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll | 34 +- llvm/test/CodeGen/AArch64/arm64-abi.ll | 12 +- llvm/test/CodeGen/AArch64/arm64-abi_align.ll | 52 +- .../CodeGen/AArch64/arm64-addr-mode-folding.ll | 36 +- .../CodeGen/AArch64/arm64-addr-type-promotion.ll | 14 +- llvm/test/CodeGen/AArch64/arm64-addrmode.ll | 34 +- .../AArch64/arm64-alloc-no-stack-realign.ll | 4 +- .../AArch64/arm64-alloca-frame-pointer-offset.ll | 10 +- llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-atomic-128.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-atomic.ll | 34 +- llvm/test/CodeGen/AArch64/arm64-basic-pic.ll | 8 +- llvm/test/CodeGen/AArch64/arm64-bcc.ll | 4 +- .../AArch64/arm64-big-endian-bitconverts.ll | 168 ++--- .../CodeGen/AArch64/arm64-big-endian-varargs.ll | 8 +- .../AArch64/arm64-big-endian-vector-caller.ll | 168 ++--- .../test/CodeGen/AArch64/arm64-bitfield-extract.ll | 40 +- llvm/test/CodeGen/AArch64/arm64-blockaddress.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-cast-opt.ll | 6 +- llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll | 74 +- llvm/test/CodeGen/AArch64/arm64-ccmp.ll | 2 +- .../CodeGen/AArch64/arm64-code-model-large-abs.ll | 8 +- .../AArch64/arm64-collect-loh-garbage-crash.ll | 6 +- llvm/test/CodeGen/AArch64/arm64-collect-loh.ll | 10 +-
.../CodeGen/AArch64/arm64-complex-copy-noneon.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-const-addr.ll | 6 +- llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-cse.ll | 4 +- .../AArch64/arm64-dagcombiner-dead-indexed-load.ll | 2 +- .../AArch64/arm64-dagcombiner-load-slicing.ll | 18 +- llvm/test/CodeGen/AArch64/arm64-dup.ll | 16 +- llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll | 6 +- llvm/test/CodeGen/AArch64/arm64-elf-globals.ll | 16 +- llvm/test/CodeGen/AArch64/arm64-ext.ll | 46 +- llvm/test/CodeGen/AArch64/arm64-extend.ll | 2 +- .../CodeGen/AArch64/arm64-extload-knownzero.ll | 2 +- .../CodeGen/AArch64/arm64-fast-isel-addr-offset.ll | 8 +- llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll | 18 +- llvm/test/CodeGen/AArch64/arm64-fast-isel-call.ll | 4 +- .../CodeGen/AArch64/arm64-fast-isel-conversion.ll | 26 +- llvm/test/CodeGen/AArch64/arm64-fast-isel-gv.ll | 4 +- .../CodeGen/AArch64/arm64-fast-isel-indirectbr.ll | 6 +- llvm/test/CodeGen/AArch64/arm64-fast-isel-ret.ll | 10 +- llvm/test/CodeGen/AArch64/arm64-fast-isel.ll | 10 +- .../arm64-fastisel-gep-promote-before-add.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-fmuladd.ll | 42 +- llvm/test/CodeGen/AArch64/arm64-fold-address.ll | 20 +- llvm/test/CodeGen/AArch64/arm64-fold-lsl.ll | 6 +- llvm/test/CodeGen/AArch64/arm64-fp128-folding.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-fp128.ll | 48 +- .../CodeGen/AArch64/arm64-i16-subreg-extract.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-indexed-memory.ll | 62 +- .../CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll | 4 +- .../CodeGen/AArch64/arm64-indexed-vector-ldst.ll | 124 ++-- llvm/test/CodeGen/AArch64/arm64-inline-asm.ll | 10 +- llvm/test/CodeGen/AArch64/arm64-ld1.ll | 56 +- llvm/test/CodeGen/AArch64/arm64-ldp.ll | 64 +- llvm/test/CodeGen/AArch64/arm64-ldur.ll | 14 +- .../CodeGen/AArch64/arm64-misched-basic-A53.ll | 24 +- .../CodeGen/AArch64/arm64-misched-basic-A57.ll | 24 +- .../CodeGen/AArch64/arm64-neon-simd-ldst-one.ll | 52 +- .../AArch64/arm64-patchpoint-scratch-regs.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-patchpoint.ll | 6 +- .../test/CodeGen/AArch64/arm64-pic-local-symbol.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-platform-reg.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-prefetch.ll | 26 +- llvm/test/CodeGen/AArch64/arm64-redzone.ll | 6 +- .../AArch64/arm64-register-offset-addressing.ll | 24 +- .../AArch64/arm64-regress-interphase-shift.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-return-vector.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-rev.ll | 36 +- llvm/test/CodeGen/AArch64/arm64-scaled_iv.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-scvt.ll | 110 +-- .../CodeGen/AArch64/arm64-sitofp-combine-chains.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-spill-lr.ll | 42 +- llvm/test/CodeGen/AArch64/arm64-spill.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-stack-no-frame.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-strict-align.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-tls-darwin.ll | 2 +- .../CodeGen/AArch64/arm64-tls-dynamic-together.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-tls-dynamics.ll | 8 +- llvm/test/CodeGen/AArch64/arm64-tls-execs.ll | 4 +- .../AArch64/arm64-triv-disjoint-mem-access.ll | 4 +- llvm/test/CodeGen/AArch64/arm64-trn.ll | 40 +- llvm/test/CodeGen/AArch64/arm64-trunc-store.ll | 6 +- llvm/test/CodeGen/AArch64/arm64-unaligned_ldst.ll | 6 +- llvm/test/CodeGen/AArch64/arm64-uzp.ll | 32 +- llvm/test/CodeGen/AArch64/arm64-vabs.ll | 288 ++++---- llvm/test/CodeGen/AArch64/arm64-vadd.ll | 248 +++---- llvm/test/CodeGen/AArch64/arm64-vbitwise.ll | 18 +- 
llvm/test/CodeGen/AArch64/arm64-vcmp.ll | 52 +- llvm/test/CodeGen/AArch64/arm64-vcnt.ll | 12 +- llvm/test/CodeGen/AArch64/arm64-vcvt.ll | 2 +- llvm/test/CodeGen/AArch64/arm64-vector-imm.ll | 8 +- llvm/test/CodeGen/AArch64/arm64-vector-ldst.ll | 128 ++-- llvm/test/CodeGen/AArch64/arm64-vext.ll | 176 ++--- llvm/test/CodeGen/AArch64/arm64-vhadd.ll | 96 +-- llvm/test/CodeGen/AArch64/arm64-vhsub.ll | 48 +- llvm/test/CodeGen/AArch64/arm64-vmax.ll | 264 +++---- llvm/test/CodeGen/AArch64/arm64-vmul.ll | 446 ++++++------ llvm/test/CodeGen/AArch64/arm64-volatile.ll | 8 +- llvm/test/CodeGen/AArch64/arm64-vqadd.ll | 112 +-- llvm/test/CodeGen/AArch64/arm64-vqsub.ll | 56 +- llvm/test/CodeGen/AArch64/arm64-vshift.ll | 608 ++++++++-------- llvm/test/CodeGen/AArch64/arm64-vshr.ll | 12 +- llvm/test/CodeGen/AArch64/arm64-vsqrt.ll | 56 +- llvm/test/CodeGen/AArch64/arm64-vsra.ll | 56 +- llvm/test/CodeGen/AArch64/arm64-vsub.ll | 120 ++-- llvm/test/CodeGen/AArch64/arm64-weak-reference.ll | 2 +- .../CodeGen/AArch64/arm64-zextload-unscaled.ll | 6 +- llvm/test/CodeGen/AArch64/arm64-zip.ll | 32 +- llvm/test/CodeGen/AArch64/assertion-rc-mismatch.ll | 2 +- .../CodeGen/AArch64/atomic-ops-not-barriers.ll | 2 +- llvm/test/CodeGen/AArch64/atomic-ops.ll | 14 +- llvm/test/CodeGen/AArch64/basic-pic.ll | 8 +- llvm/test/CodeGen/AArch64/bitfield-insert-0.ll | 4 +- llvm/test/CodeGen/AArch64/bitfield-insert.ll | 36 +- llvm/test/CodeGen/AArch64/bitfield.ll | 8 +- llvm/test/CodeGen/AArch64/blockaddress.ll | 2 +- llvm/test/CodeGen/AArch64/bool-loads.ll | 8 +- llvm/test/CodeGen/AArch64/breg.ll | 2 +- llvm/test/CodeGen/AArch64/callee-save.ll | 64 +- llvm/test/CodeGen/AArch64/cmpwithshort.ll | 6 +- llvm/test/CodeGen/AArch64/code-model-large-abs.ll | 8 +- .../CodeGen/AArch64/combine-comparisons-by-cse.ll | 84 +-- llvm/test/CodeGen/AArch64/compare-branch.ll | 8 +- llvm/test/CodeGen/AArch64/complex-copy-noneon.ll | 4 +- llvm/test/CodeGen/AArch64/complex-int-to-fp.ll | 2 +- .../test/CodeGen/AArch64/dag-combine-invaraints.ll | 8 +- llvm/test/CodeGen/AArch64/dp-3source.ll | 4 +- llvm/test/CodeGen/AArch64/dp1.ll | 28 +- llvm/test/CodeGen/AArch64/dp2.ll | 54 +- llvm/test/CodeGen/AArch64/eliminate-trunc.ll | 4 +- llvm/test/CodeGen/AArch64/f16-convert.ll | 20 +- .../CodeGen/AArch64/fast-isel-addressing-modes.ll | 90 +-- llvm/test/CodeGen/AArch64/fast-isel-int-ext.ll | 84 +-- llvm/test/CodeGen/AArch64/fast-isel-int-ext2.ll | 60 +- llvm/test/CodeGen/AArch64/fast-isel-int-ext3.ll | 20 +- llvm/test/CodeGen/AArch64/floatdp_1source.ll | 10 +- llvm/test/CodeGen/AArch64/floatdp_2source.ll | 4 +- llvm/test/CodeGen/AArch64/fp128-folding.ll | 2 +- llvm/test/CodeGen/AArch64/fp16-instructions.ll | 2 +- llvm/test/CodeGen/AArch64/fp16-v4-instructions.ll | 2 +- llvm/test/CodeGen/AArch64/fp16-v8-instructions.ll | 2 +- .../test/CodeGen/AArch64/fp16-vector-load-store.ll | 12 +- llvm/test/CodeGen/AArch64/fpimm.ll | 4 +- llvm/test/CodeGen/AArch64/free-zext.ll | 4 +- llvm/test/CodeGen/AArch64/func-argpassing.ll | 16 +- llvm/test/CodeGen/AArch64/func-calls.ll | 12 +- llvm/test/CodeGen/AArch64/funcptr_cast.ll | 2 +- llvm/test/CodeGen/AArch64/ghc-cc.ll | 6 +- llvm/test/CodeGen/AArch64/global-alignment.ll | 10 +- llvm/test/CodeGen/AArch64/global-merge-4.ll | 20 +- llvm/test/CodeGen/AArch64/half.ll | 8 +- llvm/test/CodeGen/AArch64/i1-contents.ll | 4 +- llvm/test/CodeGen/AArch64/ldst-opt.ll | 90 +-- llvm/test/CodeGen/AArch64/ldst-regoffset.ll | 78 +-- llvm/test/CodeGen/AArch64/ldst-unscaledimm.ll | 50 +- llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll | 64 +- 
llvm/test/CodeGen/AArch64/literal_pools_float.ll | 4 +- llvm/test/CodeGen/AArch64/local_vars.ll | 2 +- llvm/test/CodeGen/AArch64/logical_shifted_reg.ll | 12 +- llvm/test/CodeGen/AArch64/machine_cse.ll | 10 +- llvm/test/CodeGen/AArch64/neon-fpround_f128.ll | 4 +- .../CodeGen/AArch64/neon-truncStore-extLoad.ll | 6 +- llvm/test/CodeGen/AArch64/nzcv-save.ll | 4 +- llvm/test/CodeGen/AArch64/paired-load.ll | 4 +- llvm/test/CodeGen/AArch64/pic-eh-stubs.ll | 2 +- llvm/test/CodeGen/AArch64/ragreedy-csr.ll | 50 +- llvm/test/CodeGen/AArch64/regress-tail-livereg.ll | 2 +- llvm/test/CodeGen/AArch64/regress-tblgen-chains.ll | 4 +- .../AArch64/regress-w29-reserved-with-fp.ll | 18 +- llvm/test/CodeGen/AArch64/rm_redundant_cmp.ll | 32 +- llvm/test/CodeGen/AArch64/sibling-call.ll | 2 +- .../CodeGen/AArch64/stack-guard-remat-bitcast.ll | 2 +- llvm/test/CodeGen/AArch64/tbz-tbnz.ll | 2 +- llvm/test/CodeGen/AArch64/tst-br.ll | 4 +- llvm/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll | 2 +- llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll | 22 +- llvm/test/CodeGen/ARM/2007-03-07-CombinerCrash.ll | 2 +- llvm/test/CodeGen/ARM/2007-03-13-InstrSched.ll | 12 +- .../CodeGen/ARM/2007-03-21-JoinIntervalsCrash.ll | 4 +- .../CodeGen/ARM/2007-03-27-RegScavengerAssert.ll | 4 +- .../CodeGen/ARM/2007-04-02-RegScavengerAssert.ll | 4 +- .../test/CodeGen/ARM/2007-04-03-UndefinedSymbol.ll | 16 +- llvm/test/CodeGen/ARM/2007-04-30-CombinerCrash.ll | 10 +- .../CodeGen/ARM/2007-05-03-BadPostIndexedLd.ll | 6 +- llvm/test/CodeGen/ARM/2007-05-07-tailmerge-1.ll | 6 +- llvm/test/CodeGen/ARM/2007-05-09-tailmerge-2.ll | 8 +- llvm/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll | 8 +- llvm/test/CodeGen/ARM/2007-08-15-ReuseBug.ll | 6 +- .../CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll | 2 +- llvm/test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll | 2 +- .../CodeGen/ARM/2008-03-07-RegScavengerAssert.ll | 2 +- .../test/CodeGen/ARM/2008-04-10-ScavengerAssert.ll | 12 +- .../CodeGen/ARM/2008-05-19-LiveIntervalsBug.ll | 4 +- .../CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll | 2 +- llvm/test/CodeGen/ARM/2008-08-07-AsmPrintBug.ll | 2 +- llvm/test/CodeGen/ARM/2009-02-16-SpillerBug.ll | 2 +- .../CodeGen/ARM/2009-02-22-SoftenFloatVaArg.ll | 2 +- llvm/test/CodeGen/ARM/2009-02-27-SpillerBug.ll | 4 +- llvm/test/CodeGen/ARM/2009-03-07-SpillerBug.ll | 2 +- llvm/test/CodeGen/ARM/2009-04-06-AsmModifier.ll | 6 +- llvm/test/CodeGen/ARM/2009-04-08-AggregateAddr.ll | 6 +- llvm/test/CodeGen/ARM/2009-04-08-FloatUndef.ll | 2 +- .../test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll | 2 +- .../CodeGen/ARM/2009-05-11-CodePlacementCrash.ll | 2 +- llvm/test/CodeGen/ARM/2009-06-04-MissingLiveIn.ll | 2 +- llvm/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll | 4 +- .../CodeGen/ARM/2009-06-30-RegScavengerAssert.ll | 10 +- .../CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll | 6 +- .../CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll | 10 +- .../CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll | 10 +- .../CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll | 2 +- llvm/test/CodeGen/ARM/2009-07-01-CommuteBug.ll | 10 +- llvm/test/CodeGen/ARM/2009-07-18-RewriterBug.ll | 442 ++++++------ .../test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll | 2 +- .../test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll | 2 +- llvm/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll | 2 +- .../ARM/2009-08-02-RegScavengerAssert-Neon.ll | 8 +- .../ARM/2009-08-15-RegScavenger-EarlyClobber.ll | 2 +- llvm/test/CodeGen/ARM/2009-08-21-PostRAKill.ll | 8 +- llvm/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll | 16 +- llvm/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll | 2 +- 
llvm/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll | 4 +- llvm/test/CodeGen/ARM/2009-09-13-InvalidSubreg.ll | 6 +- .../test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll | 14 +- .../CodeGen/ARM/2009-09-23-LiveVariablesBug.ll | 2 +- llvm/test/CodeGen/ARM/2009-09-24-spill-align.ll | 2 +- llvm/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll | 4 +- llvm/test/CodeGen/ARM/2009-11-02-NegativeLane.ll | 2 +- .../CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll | 8 +- .../test/CodeGen/ARM/2009-11-13-ScavengerAssert.ll | 4 +- .../CodeGen/ARM/2009-11-13-ScavengerAssert2.ll | 30 +- .../test/CodeGen/ARM/2009-11-13-VRRewriterCrash.ll | 22 +- llvm/test/CodeGen/ARM/2010-03-04-eabi-fp-spill.ll | 16 +- llvm/test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll | 6 +- llvm/test/CodeGen/ARM/2010-05-17-FastAllocCrash.ll | 6 +- .../test/CodeGen/ARM/2010-05-18-LocalAllocCrash.ll | 2 +- llvm/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll | 2 +- llvm/test/CodeGen/ARM/2010-05-19-Shuffles.ll | 2 +- llvm/test/CodeGen/ARM/2010-05-21-BuildVector.ll | 10 +- .../test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll | 2 +- .../test/CodeGen/ARM/2010-06-21-LdStMultipleBug.ll | 18 +- llvm/test/CodeGen/ARM/2010-06-21-nondarwin-tc.ll | 20 +- .../ARM/2010-06-25-Thumb2ITInvalidIterator.ll | 2 +- llvm/test/CodeGen/ARM/2010-06-29-SubregImpDefs.ll | 2 +- llvm/test/CodeGen/ARM/2010-07-26-GlobalMerge.ll | 10 +- llvm/test/CodeGen/ARM/2010-08-04-EHCrash.ll | 12 +- llvm/test/CodeGen/ARM/2010-08-04-StackVariable.ll | 8 +- .../CodeGen/ARM/2010-11-15-SpillEarlyClobber.ll | 24 +- llvm/test/CodeGen/ARM/2010-12-08-tpsoft.ll | 2 +- llvm/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll | 4 +- .../test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll | 10 +- llvm/test/CodeGen/ARM/2011-02-07-AntidepClobber.ll | 10 +- .../test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll | 2 +- .../test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll | 6 +- llvm/test/CodeGen/ARM/2011-04-07-schediv.ll | 2 +- llvm/test/CodeGen/ARM/2011-04-11-MachineLICMBug.ll | 2 +- llvm/test/CodeGen/ARM/2011-04-12-FastRegAlloc.ll | 2 +- llvm/test/CodeGen/ARM/2011-04-26-SchedTweak.ll | 8 +- .../test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll | 10 +- llvm/test/CodeGen/ARM/2011-08-29-SchedCycle.ll | 2 +- llvm/test/CodeGen/ARM/2011-08-29-ldr_pre_imm.ll | 2 +- .../CodeGen/ARM/2011-09-09-OddVectorDivision.ll | 8 +- llvm/test/CodeGen/ARM/2011-09-28-CMovCombineBug.ll | 2 +- .../ARM/2011-10-26-ExpandUnalignedLoadCrash.ll | 4 +- .../ARM/2011-11-07-PromoteVectorLoadStore.ll | 8 +- .../CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll | 2 +- .../ARM/2011-11-09-IllegalVectorFPIntConvert.ll | 8 +- llvm/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll | 14 +- llvm/test/CodeGen/ARM/2011-11-28-DAGCombineBug.ll | 16 +- .../CodeGen/ARM/2011-11-29-128bitArithmetics.ll | 22 +- llvm/test/CodeGen/ARM/2011-11-30-MergeAlignment.ll | 4 +- llvm/test/CodeGen/ARM/2011-12-14-machine-sink.ll | 6 +- llvm/test/CodeGen/ARM/2011-12-19-sjlj-clobber.ll | 4 +- llvm/test/CodeGen/ARM/2012-01-23-PostRA-LICM.ll | 14 +- .../CodeGen/ARM/2012-01-24-RegSequenceLiveRange.ll | 2 +- llvm/test/CodeGen/ARM/2012-01-26-CopyPropKills.ll | 8 +- llvm/test/CodeGen/ARM/2012-02-01-CoalescerBug.ll | 2 +- llvm/test/CodeGen/ARM/2012-03-13-DAGCombineBug.ll | 2 +- .../test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll | 4 +- .../CodeGen/ARM/2012-08-04-DtripleSpillReload.ll | 2 +- .../CodeGen/ARM/2012-08-08-legalize-unaligned.ll | 2 +- llvm/test/CodeGen/ARM/2012-08-09-neon-extload.ll | 12 +- llvm/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll | 30 +- .../CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll | 2 +- 
.../CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll | 2 +- llvm/test/CodeGen/ARM/2013-01-21-PR14992.ll | 6 +- .../CodeGen/ARM/2013-04-18-load-overlap-PR14824.ll | 10 +- .../CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll | 18 +- .../CodeGen/ARM/2013-05-31-char-shift-crash.ll | 2 +- .../CodeGen/ARM/2013-07-29-vector-or-combine.ll | 2 +- .../ARM/2014-01-09-pseudo_expand_implicit_reg.ll | 6 +- .../CodeGen/ARM/2015-01-21-thumbv4t-ldstr-opt.ll | 16 +- llvm/test/CodeGen/ARM/MergeConsecutiveStores.ll | 18 +- .../CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll | 4 +- llvm/test/CodeGen/ARM/Windows/dllimport.ll | 4 +- llvm/test/CodeGen/ARM/Windows/frame-register.ll | 6 +- .../CodeGen/ARM/Windows/movw-movt-relocations.ll | 4 +- llvm/test/CodeGen/ARM/Windows/pic.ll | 2 +- .../CodeGen/ARM/Windows/stack-probe-non-default.ll | 2 +- llvm/test/CodeGen/ARM/Windows/vla.ll | 2 +- llvm/test/CodeGen/ARM/a15-partial-update.ll | 4 +- llvm/test/CodeGen/ARM/addrmode.ll | 4 +- llvm/test/CodeGen/ARM/aliases.ll | 6 +- llvm/test/CodeGen/ARM/alloc-no-stack-realign.ll | 8 +- llvm/test/CodeGen/ARM/arm-and-tst-peephole.ll | 6 +- llvm/test/CodeGen/ARM/arm-modifier.ll | 8 +- llvm/test/CodeGen/ARM/atomic-64bit.ll | 2 +- llvm/test/CodeGen/ARM/atomic-load-store.ll | 8 +- llvm/test/CodeGen/ARM/atomic-op.ll | 8 +- llvm/test/CodeGen/ARM/atomic-ops-v8.ll | 16 +- llvm/test/CodeGen/ARM/available_externally.ll | 2 +- llvm/test/CodeGen/ARM/avoid-cpsr-rmw.ll | 18 +- llvm/test/CodeGen/ARM/bfi.ll | 2 +- llvm/test/CodeGen/ARM/bfx.ll | 6 +- llvm/test/CodeGen/ARM/big-endian-neon-bitconv.ll | 96 +-- llvm/test/CodeGen/ARM/big-endian-neon-extend.ll | 14 +- .../CodeGen/ARM/big-endian-neon-trunc-store.ll | 4 +- llvm/test/CodeGen/ARM/big-endian-ret-f64.ll | 2 +- llvm/test/CodeGen/ARM/big-endian-vector-caller.ll | 168 ++--- llvm/test/CodeGen/ARM/bswap16.ll | 4 +- llvm/test/CodeGen/ARM/call-tc.ll | 6 +- llvm/test/CodeGen/ARM/call.ll | 6 +- llvm/test/CodeGen/ARM/call_nolink.ll | 20 +- llvm/test/CodeGen/ARM/coalesce-dbgvalue.ll | 6 +- llvm/test/CodeGen/ARM/coalesce-subregs.ll | 22 +- llvm/test/CodeGen/ARM/code-placement.ll | 4 +- llvm/test/CodeGen/ARM/commute-movcc.ll | 2 +- llvm/test/CodeGen/ARM/compare-call.ll | 4 +- llvm/test/CodeGen/ARM/copy-paired-reg.ll | 2 +- llvm/test/CodeGen/ARM/crash-greedy-v6.ll | 2 +- llvm/test/CodeGen/ARM/crash.ll | 2 +- llvm/test/CodeGen/ARM/cse-ldrlit.ll | 2 +- llvm/test/CodeGen/ARM/cse-libcalls.ll | 2 +- .../test/CodeGen/ARM/dagcombine-anyexttozeroext.ll | 4 +- llvm/test/CodeGen/ARM/debug-frame-large-stack.ll | 2 +- llvm/test/CodeGen/ARM/debug-frame-vararg.ll | 4 +- llvm/test/CodeGen/ARM/debug-info-blocks.ll | 26 +- llvm/test/CodeGen/ARM/divmod.ll | 4 +- llvm/test/CodeGen/ARM/dwarf-eh.ll | 8 +- llvm/test/CodeGen/ARM/dyn-stackalloc.ll | 4 +- llvm/test/CodeGen/ARM/emit-big-cst.ll | 2 +- llvm/test/CodeGen/ARM/extload-knownzero.ll | 2 +- llvm/test/CodeGen/ARM/extloadi1.ll | 2 +- llvm/test/CodeGen/ARM/fast-isel-GEP-coalesce.ll | 8 +- llvm/test/CodeGen/ARM/fast-isel-align.ll | 10 +- llvm/test/CodeGen/ARM/fast-isel-call.ll | 2 +- llvm/test/CodeGen/ARM/fast-isel-fold.ll | 12 +- llvm/test/CodeGen/ARM/fast-isel-ldr-str-arm.ll | 12 +- .../ARM/fast-isel-ldr-str-thumb-neg-index.ll | 18 +- llvm/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll | 22 +- .../CodeGen/ARM/fast-isel-load-store-verify.ll | 6 +- llvm/test/CodeGen/ARM/fast-isel-pic.ll | 4 +- llvm/test/CodeGen/ARM/fast-isel-pred.ll | 20 +- llvm/test/CodeGen/ARM/fast-isel-redefinition.ll | 2 +- llvm/test/CodeGen/ARM/fast-isel-static.ll | 10 +- llvm/test/CodeGen/ARM/fast-isel-vararg.ll | 12 
+- llvm/test/CodeGen/ARM/fast-isel.ll | 12 +- .../CodeGen/ARM/fastisel-gep-promote-before-add.ll | 4 +- llvm/test/CodeGen/ARM/flag-crash.ll | 6 +- llvm/test/CodeGen/ARM/fnegs.ll | 4 +- llvm/test/CodeGen/ARM/fold-stack-adjust.ll | 6 +- llvm/test/CodeGen/ARM/fp.ll | 2 +- llvm/test/CodeGen/ARM/fp16.ll | 4 +- llvm/test/CodeGen/ARM/fpcmp-opt.ll | 8 +- llvm/test/CodeGen/ARM/fpmem.ll | 6 +- llvm/test/CodeGen/ARM/fptoint.ll | 4 +- llvm/test/CodeGen/ARM/frame-register.ll | 6 +- llvm/test/CodeGen/ARM/fusedMAC.ll | 2 +- llvm/test/CodeGen/ARM/ghc-tcreturn-lowered.ll | 2 +- llvm/test/CodeGen/ARM/global-merge-1.ll | 8 +- llvm/test/CodeGen/ARM/globals.ll | 2 +- llvm/test/CodeGen/ARM/gv-stubs-crash.ll | 2 +- llvm/test/CodeGen/ARM/half.ll | 8 +- llvm/test/CodeGen/ARM/hidden-vis-2.ll | 2 +- llvm/test/CodeGen/ARM/hidden-vis-3.ll | 4 +- llvm/test/CodeGen/ARM/ifconv-kills.ll | 4 +- llvm/test/CodeGen/ARM/ifconv-regmask.ll | 2 +- llvm/test/CodeGen/ARM/ifcvt-branch-weight.ll | 4 +- llvm/test/CodeGen/ARM/ifcvt11.ll | 6 +- llvm/test/CodeGen/ARM/ifcvt5.ll | 2 +- llvm/test/CodeGen/ARM/ifcvt7.ll | 6 +- llvm/test/CodeGen/ARM/illegal-vector-bitcast.ll | 4 +- llvm/test/CodeGen/ARM/indirectbr-2.ll | 6 +- llvm/test/CodeGen/ARM/indirectbr.ll | 4 +- llvm/test/CodeGen/ARM/inline-diagnostics.ll | 2 +- llvm/test/CodeGen/ARM/interrupt-attr.ll | 8 +- llvm/test/CodeGen/ARM/intrinsics-crypto.ll | 10 +- llvm/test/CodeGen/ARM/invoke-donothing-assert.ll | 2 +- llvm/test/CodeGen/ARM/isel-v8i32-crash.ll | 2 +- llvm/test/CodeGen/ARM/krait-cpu-div-attribute.ll | 4 +- llvm/test/CodeGen/ARM/large-stack.ll | 2 +- llvm/test/CodeGen/ARM/ldm.ll | 16 +- llvm/test/CodeGen/ARM/ldr.ll | 14 +- llvm/test/CodeGen/ARM/ldr_ext.ll | 10 +- llvm/test/CodeGen/ARM/ldr_frame.ll | 8 +- llvm/test/CodeGen/ARM/ldr_post.ll | 4 +- llvm/test/CodeGen/ARM/ldr_pre.ll | 4 +- llvm/test/CodeGen/ARM/ldrd-memoper.ll | 4 +- llvm/test/CodeGen/ARM/ldrd.ll | 12 +- llvm/test/CodeGen/ARM/ldst-f32-2-i32.ll | 2 +- llvm/test/CodeGen/ARM/ldstrex-m.ll | 6 +- llvm/test/CodeGen/ARM/ldstrex.ll | 6 +- llvm/test/CodeGen/ARM/load-global.ll | 2 +- llvm/test/CodeGen/ARM/load.ll | 8 +- llvm/test/CodeGen/ARM/load_i1_select.ll | 2 +- llvm/test/CodeGen/ARM/long.ll | 2 +- llvm/test/CodeGen/ARM/lsr-code-insertion.ll | 12 +- llvm/test/CodeGen/ARM/lsr-icmp-imm.ll | 2 +- llvm/test/CodeGen/ARM/lsr-unfolded-offset.ll | 8 +- llvm/test/CodeGen/ARM/machine-cse-cmp.ll | 4 +- llvm/test/CodeGen/ARM/machine-licm.ll | 4 +- llvm/test/CodeGen/ARM/minsize-litpools.ll | 4 +- llvm/test/CodeGen/ARM/misched-copy-arm.ll | 4 +- llvm/test/CodeGen/ARM/mult-alt-generic-arm.ll | 38 +- llvm/test/CodeGen/ARM/negative-offset.ll | 4 +- llvm/test/CodeGen/ARM/neon_cmp.ll | 4 +- llvm/test/CodeGen/ARM/neon_div.ll | 16 +- llvm/test/CodeGen/ARM/neon_fpconv.ll | 4 +- llvm/test/CodeGen/ARM/neon_ld1.ll | 8 +- llvm/test/CodeGen/ARM/neon_ld2.ll | 12 +- llvm/test/CodeGen/ARM/neon_spill.ll | 2 +- llvm/test/CodeGen/ARM/no-fpu.ll | 2 +- llvm/test/CodeGen/ARM/no-tail-call.ll | 4 +- llvm/test/CodeGen/ARM/none-macho.ll | 6 +- llvm/test/CodeGen/ARM/nop_concat_vectors.ll | 2 +- llvm/test/CodeGen/ARM/optselect-regclass.ll | 2 +- llvm/test/CodeGen/ARM/phi.ll | 2 +- llvm/test/CodeGen/ARM/popcnt.ll | 36 +- llvm/test/CodeGen/ARM/pr13249.ll | 4 +- llvm/test/CodeGen/ARM/pr18364-movw.ll | 4 +- llvm/test/CodeGen/ARM/pr3502.ll | 2 +- llvm/test/CodeGen/ARM/private.ll | 2 +- llvm/test/CodeGen/ARM/reg_sequence.ll | 12 +- llvm/test/CodeGen/ARM/saxpy10-a9.ll | 40 +- llvm/test/CodeGen/ARM/segmented-stacks.ll | 2 +- llvm/test/CodeGen/ARM/select_xform.ll | 2 +- 
llvm/test/CodeGen/ARM/shifter_operand.ll | 6 +- llvm/test/CodeGen/ARM/smul.ll | 2 +- llvm/test/CodeGen/ARM/space-directive.ll | 2 +- llvm/test/CodeGen/ARM/spill-q.ll | 2 +- llvm/test/CodeGen/ARM/ssp-data-layout.ll | 42 +- llvm/test/CodeGen/ARM/stack-alignment.ll | 60 +- llvm/test/CodeGen/ARM/str_post.ll | 4 +- llvm/test/CodeGen/ARM/str_pre-2.ll | 4 +- llvm/test/CodeGen/ARM/str_pre.ll | 4 +- llvm/test/CodeGen/ARM/struct-byval-frame-index.ll | 36 +- llvm/test/CodeGen/ARM/sub-cmp-peephole.ll | 4 +- llvm/test/CodeGen/ARM/swift-atomics.ll | 4 +- llvm/test/CodeGen/ARM/swift-vldm.ll | 10 +- llvm/test/CodeGen/ARM/tail-dup.ll | 8 +- llvm/test/CodeGen/ARM/test-sharedidx.ll | 16 +- llvm/test/CodeGen/ARM/thumb1-varalloc.ll | 2 +- llvm/test/CodeGen/ARM/thumb1_return_sequence.ll | 24 +- llvm/test/CodeGen/ARM/thumb_indirect_calls.ll | 2 +- llvm/test/CodeGen/ARM/tls1.ll | 2 +- llvm/test/CodeGen/ARM/tls2.ll | 2 +- llvm/test/CodeGen/ARM/tls3.ll | 2 +- llvm/test/CodeGen/ARM/trunc_ldr.ll | 4 +- llvm/test/CodeGen/ARM/truncstore-dag-combine.ll | 4 +- llvm/test/CodeGen/ARM/twoaddrinstr.ll | 2 +- llvm/test/CodeGen/ARM/uint64tof64.ll | 2 +- llvm/test/CodeGen/ARM/umulo-32.ll | 2 +- llvm/test/CodeGen/ARM/unaligned_load_store.ll | 8 +- .../CodeGen/ARM/unaligned_load_store_vector.ll | 54 +- llvm/test/CodeGen/ARM/undef-sext.ll | 2 +- llvm/test/CodeGen/ARM/vaba.ll | 108 +-- llvm/test/CodeGen/ARM/vabd.ll | 80 +-- llvm/test/CodeGen/ARM/vabs.ll | 28 +- llvm/test/CodeGen/ARM/vadd.ll | 100 +-- llvm/test/CodeGen/ARM/vargs_align.ll | 4 +- llvm/test/CodeGen/ARM/vbits.ll | 208 +++--- llvm/test/CodeGen/ARM/vbsl-constant.ll | 48 +- llvm/test/CodeGen/ARM/vbsl.ll | 48 +- llvm/test/CodeGen/ARM/vceq.ll | 34 +- llvm/test/CodeGen/ARM/vcge.ll | 68 +- llvm/test/CodeGen/ARM/vcgt.ll | 72 +- llvm/test/CodeGen/ARM/vcnt.ll | 28 +- llvm/test/CodeGen/ARM/vcombine.ll | 24 +- llvm/test/CodeGen/ARM/vcvt-cost.ll | 20 +- llvm/test/CodeGen/ARM/vcvt-v8.ll | 32 +- llvm/test/CodeGen/ARM/vcvt.ll | 36 +- llvm/test/CodeGen/ARM/vcvt_combine.ll | 12 +- llvm/test/CodeGen/ARM/vdiv_combine.ll | 12 +- llvm/test/CodeGen/ARM/vdup.ll | 16 +- llvm/test/CodeGen/ARM/vector-DAGCombine.ll | 32 +- llvm/test/CodeGen/ARM/vector-extend-narrow.ll | 8 +- llvm/test/CodeGen/ARM/vector-load.ll | 104 +-- llvm/test/CodeGen/ARM/vector-promotion.ll | 76 +- llvm/test/CodeGen/ARM/vector-spilling.ll | 8 +- llvm/test/CodeGen/ARM/vector-store.ll | 52 +- llvm/test/CodeGen/ARM/vext.ll | 50 +- llvm/test/CodeGen/ARM/vfcmp.ll | 44 +- llvm/test/CodeGen/ARM/vfp.ll | 36 +- llvm/test/CodeGen/ARM/vget_lane.ll | 44 +- llvm/test/CodeGen/ARM/vhadd.ll | 96 +-- llvm/test/CodeGen/ARM/vhsub.ll | 48 +- llvm/test/CodeGen/ARM/vicmp.ll | 40 +- llvm/test/CodeGen/ARM/vld1.ll | 6 +- llvm/test/CodeGen/ARM/vld2.ll | 4 +- llvm/test/CodeGen/ARM/vld3.ll | 4 +- llvm/test/CodeGen/ARM/vld4.ll | 4 +- llvm/test/CodeGen/ARM/vlddup.ll | 18 +- llvm/test/CodeGen/ARM/vldlane.ll | 90 +-- llvm/test/CodeGen/ARM/vldm-liveness.ll | 8 +- llvm/test/CodeGen/ARM/vldm-sched-a9.ll | 18 +- llvm/test/CodeGen/ARM/vminmax.ll | 112 +-- llvm/test/CodeGen/ARM/vminmaxnm.ll | 16 +- llvm/test/CodeGen/ARM/vmla.ll | 84 +-- llvm/test/CodeGen/ARM/vmls.ll | 84 +-- llvm/test/CodeGen/ARM/vmov.ll | 40 +- llvm/test/CodeGen/ARM/vmul.ll | 102 +-- llvm/test/CodeGen/ARM/vneg.ll | 28 +- llvm/test/CodeGen/ARM/vpadal.ll | 48 +- llvm/test/CodeGen/ARM/vpadd.ll | 44 +- llvm/test/CodeGen/ARM/vpminmax.ll | 56 +- llvm/test/CodeGen/ARM/vqadd.ll | 64 +- llvm/test/CodeGen/ARM/vqdmul.ll | 64 +- llvm/test/CodeGen/ARM/vqshl.ll | 176 ++--- llvm/test/CodeGen/ARM/vqshrn.ll | 36 
+- llvm/test/CodeGen/ARM/vqsub.ll | 64 +- llvm/test/CodeGen/ARM/vrec.ll | 32 +- llvm/test/CodeGen/ARM/vrev.ll | 38 +- llvm/test/CodeGen/ARM/vselect_imax.ll | 24 +- llvm/test/CodeGen/ARM/vshift.ll | 144 ++-- llvm/test/CodeGen/ARM/vshiftins.ll | 64 +- llvm/test/CodeGen/ARM/vshl.ll | 208 +++--- llvm/test/CodeGen/ARM/vshll.ll | 24 +- llvm/test/CodeGen/ARM/vshrn.ll | 18 +- llvm/test/CodeGen/ARM/vsra.ll | 128 ++-- llvm/test/CodeGen/ARM/vst1.ll | 30 +- llvm/test/CodeGen/ARM/vst2.ll | 30 +- llvm/test/CodeGen/ARM/vst3.ll | 30 +- llvm/test/CodeGen/ARM/vst4.ll | 30 +- llvm/test/CodeGen/ARM/vstlane.ll | 78 +-- llvm/test/CodeGen/ARM/vsub.ll | 100 +-- llvm/test/CodeGen/ARM/vtbl.ll | 40 +- llvm/test/CodeGen/ARM/vtrn.ll | 40 +- llvm/test/CodeGen/ARM/vuzp.ll | 32 +- llvm/test/CodeGen/ARM/vzip.ll | 32 +- llvm/test/CodeGen/ARM/zextload_demandedbits.ll | 2 +- llvm/test/CodeGen/BPF/basictest.ll | 2 +- llvm/test/CodeGen/BPF/ex1.ll | 2 +- llvm/test/CodeGen/BPF/intrinsics.ll | 4 +- llvm/test/CodeGen/BPF/load.ll | 10 +- llvm/test/CodeGen/BPF/loops.ll | 10 +- llvm/test/CodeGen/BPF/struct_ret1.ll | 4 +- llvm/test/CodeGen/CPP/2009-05-01-Long-Double.ll | 2 +- llvm/test/CodeGen/CPP/2009-05-04-CondBr.ll | 10 +- llvm/test/CodeGen/Generic/2003-05-28-ManyArgs.ll | 96 +-- llvm/test/CodeGen/Generic/2003-05-30-BadFoldGEP.ll | 2 +- .../CodeGen/Generic/2003-07-29-BadConstSbyte.ll | 2 +- .../Generic/2004-05-09-LiveVarPartialRegister.ll | 4 +- .../CodeGen/Generic/2006-02-12-InsertLibcall.ll | 2 +- .../Generic/2006-03-01-dagcombineinfloop.ll | 22 +- llvm/test/CodeGen/Generic/2006-04-26-SetCCAnd.ll | 4 +- .../Generic/2006-06-13-ComputeMaskedBitsCrash.ll | 4 +- .../Generic/2006-06-28-SimplifySetCCCrash.ll | 8 +- .../CodeGen/Generic/2006-09-02-LocalAllocCrash.ll | 14 +- .../CodeGen/Generic/2006-11-20-DAGCombineCrash.ll | 4 +- .../CodeGen/Generic/2007-01-15-LoadSelectCycle.ll | 4 +- .../CodeGen/Generic/2008-01-25-dag-combine-mul.ll | 12 +- llvm/test/CodeGen/Generic/2008-01-30-LoadCrash.ll | 2 +- llvm/test/CodeGen/Generic/2008-02-25-NegateZero.ll | 4 +- .../Generic/2009-03-29-SoftFloatVectorExtract.ll | 2 +- .../CodeGen/Generic/2009-04-28-i128-cmp-crash.ll | 4 +- .../CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll | 4 +- llvm/test/CodeGen/Generic/2012-06-08-APIntCrash.ll | 2 +- .../CodeGen/Generic/2014-02-05-OpaqueConstants.ll | 2 +- llvm/test/CodeGen/Generic/APIntLoadStore.ll | 512 +++++++------- llvm/test/CodeGen/Generic/badFoldGEP.ll | 2 +- llvm/test/CodeGen/Generic/builtin-expect.ll | 34 +- llvm/test/CodeGen/Generic/cast-fp.ll | 4 +- llvm/test/CodeGen/Generic/constindices.ll | 8 +- llvm/test/CodeGen/Generic/crash.ll | 8 +- llvm/test/CodeGen/Generic/dag-combine-crash.ll | 2 +- llvm/test/CodeGen/Generic/empty-load-store.ll | 4 +- llvm/test/CodeGen/Generic/empty-phi.ll | 2 +- llvm/test/CodeGen/Generic/fp-to-int-invalid.ll | 4 +- llvm/test/CodeGen/Generic/fwdtwice.ll | 2 +- llvm/test/CodeGen/Generic/global-ret0.ll | 2 +- .../test/CodeGen/Generic/inline-asm-mem-clobber.ll | 6 +- llvm/test/CodeGen/Generic/pr2625.ll | 4 +- llvm/test/CodeGen/Generic/print-arith-fp.ll | 4 +- llvm/test/CodeGen/Generic/print-arith-int.ll | 4 +- llvm/test/CodeGen/Generic/print-mul-exp.ll | 2 +- llvm/test/CodeGen/Generic/print-mul.ll | 4 +- llvm/test/CodeGen/Generic/print-shift.ll | 4 +- llvm/test/CodeGen/Generic/select.ll | 6 +- llvm/test/CodeGen/Generic/undef-phi.ll | 4 +- llvm/test/CodeGen/Generic/v-split.ll | 4 +- llvm/test/CodeGen/Generic/vector-casts.ll | 14 +- .../CodeGen/Generic/vector-identity-shuffle.ll | 2 +- llvm/test/CodeGen/Generic/vector.ll | 42 
+- llvm/test/CodeGen/Hexagon/BranchPredict.ll | 2 +- llvm/test/CodeGen/Hexagon/absaddr-store.ll | 6 +- llvm/test/CodeGen/Hexagon/absimm.ll | 2 +- llvm/test/CodeGen/Hexagon/always-ext.ll | 4 +- llvm/test/CodeGen/Hexagon/block-addr.ll | 14 +- llvm/test/CodeGen/Hexagon/cext-check.ll | 10 +- llvm/test/CodeGen/Hexagon/cext-valid-packet2.ll | 10 +- llvm/test/CodeGen/Hexagon/cmp_pred2.ll | 8 +- llvm/test/CodeGen/Hexagon/cmpb_pred.ll | 6 +- llvm/test/CodeGen/Hexagon/combine.ll | 4 +- llvm/test/CodeGen/Hexagon/combine_ir.ll | 10 +- llvm/test/CodeGen/Hexagon/convertdptoint.ll | 8 +- llvm/test/CodeGen/Hexagon/convertdptoll.ll | 8 +- llvm/test/CodeGen/Hexagon/convertsptoint.ll | 8 +- llvm/test/CodeGen/Hexagon/convertsptoll.ll | 8 +- llvm/test/CodeGen/Hexagon/dadd.ll | 4 +- llvm/test/CodeGen/Hexagon/dmul.ll | 4 +- llvm/test/CodeGen/Hexagon/double.ll | 10 +- .../CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll | 8 +- llvm/test/CodeGen/Hexagon/dsub.ll | 4 +- llvm/test/CodeGen/Hexagon/extload-combine.ll | 12 +- llvm/test/CodeGen/Hexagon/fadd.ll | 4 +- llvm/test/CodeGen/Hexagon/fcmp.ll | 6 +- llvm/test/CodeGen/Hexagon/float.ll | 10 +- .../CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll | 10 +- llvm/test/CodeGen/Hexagon/fmul.ll | 4 +- llvm/test/CodeGen/Hexagon/frame.ll | 10 +- llvm/test/CodeGen/Hexagon/fsub.ll | 4 +- llvm/test/CodeGen/Hexagon/fusedandshift.ll | 2 +- llvm/test/CodeGen/Hexagon/gp-plus-offset-load.ll | 6 +- llvm/test/CodeGen/Hexagon/gp-rel.ll | 6 +- llvm/test/CodeGen/Hexagon/hwloop-cleanup.ll | 4 +- llvm/test/CodeGen/Hexagon/hwloop-dbg.ll | 2 +- llvm/test/CodeGen/Hexagon/hwloop-le.ll | 30 +- llvm/test/CodeGen/Hexagon/hwloop-lt.ll | 30 +- llvm/test/CodeGen/Hexagon/hwloop-ne.ll | 30 +- llvm/test/CodeGen/Hexagon/i16_VarArg.ll | 4 +- llvm/test/CodeGen/Hexagon/i1_VarArg.ll | 4 +- llvm/test/CodeGen/Hexagon/i8_VarArg.ll | 4 +- .../CodeGen/Hexagon/idxload-with-zero-offset.ll | 12 +- llvm/test/CodeGen/Hexagon/macint.ll | 2 +- llvm/test/CodeGen/Hexagon/memops.ll | 252 +++---- llvm/test/CodeGen/Hexagon/memops1.ll | 10 +- llvm/test/CodeGen/Hexagon/memops2.ll | 4 +- llvm/test/CodeGen/Hexagon/memops3.ll | 4 +- llvm/test/CodeGen/Hexagon/misaligned-access.ll | 4 +- llvm/test/CodeGen/Hexagon/mpy.ll | 6 +- llvm/test/CodeGen/Hexagon/newvaluejump.ll | 4 +- llvm/test/CodeGen/Hexagon/newvaluejump2.ll | 4 +- llvm/test/CodeGen/Hexagon/newvaluestore.ll | 6 +- llvm/test/CodeGen/Hexagon/opt-fabs.ll | 2 +- llvm/test/CodeGen/Hexagon/opt-fneg.ll | 2 +- llvm/test/CodeGen/Hexagon/postinc-load.ll | 4 +- llvm/test/CodeGen/Hexagon/postinc-store.ll | 4 +- llvm/test/CodeGen/Hexagon/pred-gp.ll | 4 +- llvm/test/CodeGen/Hexagon/pred-instrs.ll | 2 +- llvm/test/CodeGen/Hexagon/remove_lsr.ll | 6 +- llvm/test/CodeGen/Hexagon/static.ll | 6 +- llvm/test/CodeGen/Hexagon/struct_args.ll | 2 +- llvm/test/CodeGen/Hexagon/tfr-to-combine.ll | 2 +- llvm/test/CodeGen/Hexagon/union-1.ll | 4 +- llvm/test/CodeGen/Hexagon/vaddh.ll | 4 +- llvm/test/CodeGen/Hexagon/validate-offset.ll | 14 +- llvm/test/CodeGen/Hexagon/zextloadi1.ll | 4 +- llvm/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll | 2 +- llvm/test/CodeGen/MSP430/2009-05-17-Rot.ll | 6 +- llvm/test/CodeGen/MSP430/2009-05-17-Shift.ll | 4 +- .../CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll | 4 +- .../test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll | 8 +- llvm/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll | 2 +- .../test/CodeGen/MSP430/2009-11-08-InvalidResNo.ll | 4 +- llvm/test/CodeGen/MSP430/2010-05-01-CombinerAnd.ll | 2 +- llvm/test/CodeGen/MSP430/AddrMode-bis-rx.ll | 14 +- 
llvm/test/CodeGen/MSP430/AddrMode-bis-xr.ll | 14 +- llvm/test/CodeGen/MSP430/AddrMode-mov-rx.ll | 14 +- llvm/test/CodeGen/MSP430/Inst16mi.ll | 8 +- llvm/test/CodeGen/MSP430/Inst16mm.ll | 22 +- llvm/test/CodeGen/MSP430/Inst16mr.ll | 10 +- llvm/test/CodeGen/MSP430/Inst16rm.ll | 10 +- llvm/test/CodeGen/MSP430/Inst8mi.ll | 8 +- llvm/test/CodeGen/MSP430/Inst8mm.ll | 18 +- llvm/test/CodeGen/MSP430/Inst8mr.ll | 10 +- llvm/test/CodeGen/MSP430/Inst8rm.ll | 10 +- llvm/test/CodeGen/MSP430/bit.ll | 24 +- llvm/test/CodeGen/MSP430/byval.ll | 2 +- llvm/test/CodeGen/MSP430/indirectbr.ll | 4 +- llvm/test/CodeGen/MSP430/indirectbr2.ll | 2 +- llvm/test/CodeGen/MSP430/inline-asm.ll | 2 +- llvm/test/CodeGen/MSP430/jumptable.ll | 4 +- llvm/test/CodeGen/MSP430/memset.ll | 2 +- llvm/test/CodeGen/MSP430/misched-msp430.ll | 2 +- .../test/CodeGen/MSP430/mult-alt-generic-msp430.ll | 38 +- llvm/test/CodeGen/MSP430/postinc.ll | 10 +- llvm/test/CodeGen/Mips/2008-07-15-SmallSection.ll | 4 +- llvm/test/CodeGen/Mips/2008-08-01-AsmInline.ll | 10 +- llvm/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll | 4 +- llvm/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll | 2 +- llvm/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll | 14 +- llvm/test/CodeGen/Mips/2010-07-20-Switch.ll | 2 +- llvm/test/CodeGen/Mips/Fast-ISel/br1.ll | 2 +- llvm/test/CodeGen/Mips/Fast-ISel/callabi.ll | 8 +- llvm/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll | 48 +- llvm/test/CodeGen/Mips/Fast-ISel/fpext.ll | 2 +- llvm/test/CodeGen/Mips/Fast-ISel/fpintconv.ll | 4 +- llvm/test/CodeGen/Mips/Fast-ISel/fptrunc.ll | 2 +- llvm/test/CodeGen/Mips/Fast-ISel/icmpa.ll | 40 +- llvm/test/CodeGen/Mips/Fast-ISel/loadstore2.ll | 10 +- llvm/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll | 30 +- llvm/test/CodeGen/Mips/Fast-ISel/overflt.ll | 6 +- llvm/test/CodeGen/Mips/Fast-ISel/retabi.ll | 10 +- llvm/test/CodeGen/Mips/Fast-ISel/shift.ll | 2 +- llvm/test/CodeGen/Mips/addi.ll | 8 +- llvm/test/CodeGen/Mips/addressing-mode.ll | 4 +- llvm/test/CodeGen/Mips/align16.ll | 4 +- llvm/test/CodeGen/Mips/alloca.ll | 14 +- llvm/test/CodeGen/Mips/alloca16.ll | 44 +- llvm/test/CodeGen/Mips/and1.ll | 4 +- llvm/test/CodeGen/Mips/atomic.ll | 4 +- llvm/test/CodeGen/Mips/atomicops.ll | 6 +- llvm/test/CodeGen/Mips/beqzc.ll | 2 +- llvm/test/CodeGen/Mips/beqzc1.ll | 2 +- llvm/test/CodeGen/Mips/biggot.ll | 2 +- llvm/test/CodeGen/Mips/brconeq.ll | 4 +- llvm/test/CodeGen/Mips/brconeqk.ll | 2 +- llvm/test/CodeGen/Mips/brconeqz.ll | 2 +- llvm/test/CodeGen/Mips/brconge.ll | 6 +- llvm/test/CodeGen/Mips/brcongt.ll | 4 +- llvm/test/CodeGen/Mips/brconle.ll | 6 +- llvm/test/CodeGen/Mips/brconlt.ll | 4 +- llvm/test/CodeGen/Mips/brconne.ll | 4 +- llvm/test/CodeGen/Mips/brconnek.ll | 2 +- llvm/test/CodeGen/Mips/brconnez.ll | 2 +- llvm/test/CodeGen/Mips/brdelayslot.ll | 12 +- llvm/test/CodeGen/Mips/brind.ll | 2 +- .../cconv/arguments-varargs-small-structs-byte.ll | 38 +- ...arguments-varargs-small-structs-combinations.ll | 20 +- ...rguments-varargs-small-structs-multiple-args.ll | 36 +- llvm/test/CodeGen/Mips/cconv/return-float.ll | 4 +- llvm/test/CodeGen/Mips/cconv/return-hard-float.ll | 6 +- llvm/test/CodeGen/Mips/cconv/return-hard-fp128.ll | 2 +- .../CodeGen/Mips/cconv/return-hard-struct-f128.ll | 2 +- llvm/test/CodeGen/Mips/cconv/return-struct.ll | 8 +- llvm/test/CodeGen/Mips/cconv/return.ll | 6 +- llvm/test/CodeGen/Mips/cfi_offset.ll | 4 +- llvm/test/CodeGen/Mips/ci2.ll | 2 +- llvm/test/CodeGen/Mips/cmov.ll | 6 +- llvm/test/CodeGen/Mips/cmplarge.ll | 4 +- llvm/test/CodeGen/Mips/const4a.ll | 2 +- llvm/test/CodeGen/Mips/ctlz.ll | 2 
+- llvm/test/CodeGen/Mips/disable-tail-merge.ll | 6 +- llvm/test/CodeGen/Mips/div.ll | 4 +- llvm/test/CodeGen/Mips/div_rem.ll | 4 +- llvm/test/CodeGen/Mips/divrem.ll | 4 +- llvm/test/CodeGen/Mips/divu.ll | 4 +- llvm/test/CodeGen/Mips/divu_remu.ll | 4 +- llvm/test/CodeGen/Mips/dsp-patterns.ll | 6 +- llvm/test/CodeGen/Mips/dsp-vec-load-store.ll | 2 +- llvm/test/CodeGen/Mips/eh.ll | 2 +- llvm/test/CodeGen/Mips/emit-big-cst.ll | 2 +- llvm/test/CodeGen/Mips/ex2.ll | 2 +- llvm/test/CodeGen/Mips/extins.ll | 2 +- llvm/test/CodeGen/Mips/f16abs.ll | 4 +- llvm/test/CodeGen/Mips/fastcc.ll | 120 ++-- llvm/test/CodeGen/Mips/fixdfsf.ll | 2 +- llvm/test/CodeGen/Mips/fp-indexed-ls.ll | 18 +- llvm/test/CodeGen/Mips/fp-spill-reload.ll | 16 +- llvm/test/CodeGen/Mips/fp16instrinsmc.ll | 60 +- llvm/test/CodeGen/Mips/fp16static.ll | 4 +- llvm/test/CodeGen/Mips/fpneeded.ll | 6 +- llvm/test/CodeGen/Mips/fpnotneeded.ll | 2 +- llvm/test/CodeGen/Mips/global-address.ll | 4 +- llvm/test/CodeGen/Mips/gpreg-lazy-binding.ll | 2 +- llvm/test/CodeGen/Mips/gprestore.ll | 6 +- llvm/test/CodeGen/Mips/hf16_1.ll | 80 +-- llvm/test/CodeGen/Mips/hf16call32.ll | 530 +++++++------- llvm/test/CodeGen/Mips/hf16call32_body.ll | 54 +- llvm/test/CodeGen/Mips/hf1_body.ll | 2 +- llvm/test/CodeGen/Mips/hfptrcall.ll | 32 +- .../CodeGen/Mips/inlineasm-assembler-directives.ll | 2 +- llvm/test/CodeGen/Mips/inlineasm-operand-code.ll | 6 +- llvm/test/CodeGen/Mips/inlineasm64.ll | 4 +- llvm/test/CodeGen/Mips/internalfunc.ll | 4 +- llvm/test/CodeGen/Mips/jtstat.ll | 2 +- llvm/test/CodeGen/Mips/l3mc.ll | 32 +- llvm/test/CodeGen/Mips/lb1.ll | 4 +- llvm/test/CodeGen/Mips/lbu1.ll | 4 +- llvm/test/CodeGen/Mips/lcb2.ll | 16 +- llvm/test/CodeGen/Mips/lcb3c.ll | 4 +- llvm/test/CodeGen/Mips/lcb4a.ll | 4 +- llvm/test/CodeGen/Mips/lcb5.ll | 32 +- llvm/test/CodeGen/Mips/lh1.ll | 4 +- llvm/test/CodeGen/Mips/lhu1.ll | 4 +- llvm/test/CodeGen/Mips/llcarry.ll | 10 +- llvm/test/CodeGen/Mips/load-store-left-right.ll | 14 +- llvm/test/CodeGen/Mips/machineverifier.ll | 2 +- llvm/test/CodeGen/Mips/mbrsize4a.ll | 2 +- llvm/test/CodeGen/Mips/micromips-addiu.ll | 6 +- llvm/test/CodeGen/Mips/micromips-and16.ll | 4 +- llvm/test/CodeGen/Mips/micromips-andi.ll | 4 +- .../CodeGen/Mips/micromips-compact-branches.ll | 2 +- llvm/test/CodeGen/Mips/micromips-delay-slot-jr.ll | 2 +- llvm/test/CodeGen/Mips/micromips-delay-slot.ll | 2 +- llvm/test/CodeGen/Mips/micromips-gp-rc.ll | 2 +- llvm/test/CodeGen/Mips/micromips-jal.ll | 10 +- .../Mips/micromips-load-effective-address.ll | 8 +- llvm/test/CodeGen/Mips/micromips-or16.ll | 4 +- .../CodeGen/Mips/micromips-rdhwr-directives.ll | 2 +- llvm/test/CodeGen/Mips/micromips-shift.ll | 8 +- llvm/test/CodeGen/Mips/micromips-sw-lw-16.ll | 12 +- llvm/test/CodeGen/Mips/micromips-xor16.ll | 4 +- llvm/test/CodeGen/Mips/mips16_32_8.ll | 10 +- llvm/test/CodeGen/Mips/mips16_fpret.ll | 16 +- llvm/test/CodeGen/Mips/mips16ex.ll | 12 +- llvm/test/CodeGen/Mips/mips16fpe.ll | 112 +-- llvm/test/CodeGen/Mips/mips64-f128-call.ll | 4 +- llvm/test/CodeGen/Mips/mips64-f128.ll | 72 +- llvm/test/CodeGen/Mips/mips64directive.ll | 2 +- llvm/test/CodeGen/Mips/mips64fpldst.ll | 8 +- llvm/test/CodeGen/Mips/mips64instrs.ll | 8 +- llvm/test/CodeGen/Mips/mips64intldst.ll | 22 +- llvm/test/CodeGen/Mips/mips64sinttofpsf.ll | 2 +- llvm/test/CodeGen/Mips/mipslopat.ll | 4 +- llvm/test/CodeGen/Mips/misha.ll | 8 +- llvm/test/CodeGen/Mips/mno-ldc1-sdc1.ll | 4 +- llvm/test/CodeGen/Mips/msa/2r.ll | 24 +- llvm/test/CodeGen/Mips/msa/2r_vector_scalar.ll | 8 +- 
llvm/test/CodeGen/Mips/msa/2rf.ll | 32 +- llvm/test/CodeGen/Mips/msa/2rf_exup.ll | 8 +- llvm/test/CodeGen/Mips/msa/2rf_float_int.ll | 8 +- llvm/test/CodeGen/Mips/msa/2rf_fq.ll | 8 +- llvm/test/CodeGen/Mips/msa/2rf_int_float.ll | 20 +- llvm/test/CodeGen/Mips/msa/2rf_tq.ll | 8 +- llvm/test/CodeGen/Mips/msa/3r-a.ll | 192 +++--- llvm/test/CodeGen/Mips/msa/3r-b.ll | 96 +-- llvm/test/CodeGen/Mips/msa/3r-c.ll | 80 +-- llvm/test/CodeGen/Mips/msa/3r-d.ll | 88 +-- llvm/test/CodeGen/Mips/msa/3r-i.ll | 64 +- llvm/test/CodeGen/Mips/msa/3r-m.ll | 160 ++--- llvm/test/CodeGen/Mips/msa/3r-p.ll | 32 +- llvm/test/CodeGen/Mips/msa/3r-s.ll | 248 +++---- llvm/test/CodeGen/Mips/msa/3r-v.ll | 24 +- llvm/test/CodeGen/Mips/msa/3r_4r.ll | 48 +- llvm/test/CodeGen/Mips/msa/3r_4r_widen.ll | 72 +- llvm/test/CodeGen/Mips/msa/3r_splat.ll | 8 +- llvm/test/CodeGen/Mips/msa/3rf.ll | 96 +-- llvm/test/CodeGen/Mips/msa/3rf_4rf.ll | 24 +- llvm/test/CodeGen/Mips/msa/3rf_4rf_q.ll | 48 +- llvm/test/CodeGen/Mips/msa/3rf_exdo.ll | 8 +- llvm/test/CodeGen/Mips/msa/3rf_float_int.ll | 8 +- llvm/test/CodeGen/Mips/msa/3rf_int_float.ll | 176 ++--- llvm/test/CodeGen/Mips/msa/3rf_q.ll | 16 +- llvm/test/CodeGen/Mips/msa/arithmetic.ll | 176 ++--- llvm/test/CodeGen/Mips/msa/arithmetic_float.ll | 88 +-- llvm/test/CodeGen/Mips/msa/basic_operations.ll | 72 +- .../CodeGen/Mips/msa/basic_operations_float.ll | 34 +- llvm/test/CodeGen/Mips/msa/bit.ll | 56 +- llvm/test/CodeGen/Mips/msa/bitcast.ll | 98 +-- llvm/test/CodeGen/Mips/msa/bitwise.ll | 310 ++++----- llvm/test/CodeGen/Mips/msa/compare.ll | 408 +++++------ llvm/test/CodeGen/Mips/msa/compare_float.ll | 156 ++--- llvm/test/CodeGen/Mips/msa/elm_copy.ll | 16 +- llvm/test/CodeGen/Mips/msa/elm_insv.ll | 32 +- llvm/test/CodeGen/Mips/msa/elm_move.ll | 2 +- llvm/test/CodeGen/Mips/msa/elm_shift_slide.ll | 24 +- llvm/test/CodeGen/Mips/msa/frameindex.ll | 46 +- llvm/test/CodeGen/Mips/msa/i10.ll | 8 +- llvm/test/CodeGen/Mips/msa/i5-a.ll | 8 +- llvm/test/CodeGen/Mips/msa/i5-b.ll | 56 +- llvm/test/CodeGen/Mips/msa/i5-c.ll | 40 +- llvm/test/CodeGen/Mips/msa/i5-m.ll | 32 +- llvm/test/CodeGen/Mips/msa/i5-s.ll | 8 +- llvm/test/CodeGen/Mips/msa/i5_ld_st.ll | 8 +- llvm/test/CodeGen/Mips/msa/i8.ll | 26 +- llvm/test/CodeGen/Mips/msa/inline-asm.ll | 4 +- .../CodeGen/Mips/msa/llvm-stress-s1704963983.ll | 22 +- .../CodeGen/Mips/msa/llvm-stress-s1935737938.ll | 22 +- .../CodeGen/Mips/msa/llvm-stress-s2704903805.ll | 22 +- .../CodeGen/Mips/msa/llvm-stress-s3861334421.ll | 22 +- .../CodeGen/Mips/msa/llvm-stress-s3926023935.ll | 22 +- .../CodeGen/Mips/msa/llvm-stress-s3997499501.ll | 22 +- .../CodeGen/Mips/msa/llvm-stress-s525530439.ll | 22 +- .../CodeGen/Mips/msa/llvm-stress-s997348632.ll | 22 +- llvm/test/CodeGen/Mips/msa/shuffle.ll | 166 ++--- llvm/test/CodeGen/Mips/msa/spill.ll | 272 ++++---- llvm/test/CodeGen/Mips/msa/vec.ll | 184 ++--- llvm/test/CodeGen/Mips/msa/vecs10.ll | 4 +- llvm/test/CodeGen/Mips/mul.ll | 4 +- llvm/test/CodeGen/Mips/mulll.ll | 4 +- llvm/test/CodeGen/Mips/mulull.ll | 4 +- llvm/test/CodeGen/Mips/nacl-align.ll | 2 +- llvm/test/CodeGen/Mips/nacl-branch-delay.ll | 2 +- llvm/test/CodeGen/Mips/nacl-reserved-regs.ll | 32 +- llvm/test/CodeGen/Mips/neg1.ll | 2 +- llvm/test/CodeGen/Mips/no-odd-spreg-msa.ll | 8 +- llvm/test/CodeGen/Mips/nomips16.ll | 4 +- llvm/test/CodeGen/Mips/not1.ll | 2 +- llvm/test/CodeGen/Mips/o32_cc_byval.ll | 22 +- llvm/test/CodeGen/Mips/o32_cc_vararg.ll | 20 +- llvm/test/CodeGen/Mips/optimize-pic-o0.ll | 6 +- llvm/test/CodeGen/Mips/or1.ll | 4 +- 
llvm/test/CodeGen/Mips/prevent-hoisting.ll | 12 +- llvm/test/CodeGen/Mips/private.ll | 2 +- llvm/test/CodeGen/Mips/ra-allocatable.ll | 242 +++---- llvm/test/CodeGen/Mips/rdhwr-directives.ll | 2 +- llvm/test/CodeGen/Mips/rem.ll | 4 +- llvm/test/CodeGen/Mips/remu.ll | 4 +- llvm/test/CodeGen/Mips/s2rem.ll | 4 +- llvm/test/CodeGen/Mips/sb1.ll | 6 +- llvm/test/CodeGen/Mips/sel1c.ll | 4 +- llvm/test/CodeGen/Mips/sel2c.ll | 4 +- llvm/test/CodeGen/Mips/selTBteqzCmpi.ll | 6 +- llvm/test/CodeGen/Mips/selTBtnezCmpi.ll | 6 +- llvm/test/CodeGen/Mips/selTBtnezSlti.ll | 6 +- llvm/test/CodeGen/Mips/select.ll | 12 +- llvm/test/CodeGen/Mips/seleq.ll | 32 +- llvm/test/CodeGen/Mips/seleqk.ll | 24 +- llvm/test/CodeGen/Mips/selgek.ll | 24 +- llvm/test/CodeGen/Mips/selgt.ll | 34 +- llvm/test/CodeGen/Mips/selle.ll | 32 +- llvm/test/CodeGen/Mips/selltk.ll | 24 +- llvm/test/CodeGen/Mips/selne.ll | 32 +- llvm/test/CodeGen/Mips/selnek.ll | 32 +- llvm/test/CodeGen/Mips/selpat.ll | 136 ++-- llvm/test/CodeGen/Mips/seteq.ll | 4 +- llvm/test/CodeGen/Mips/seteqz.ll | 4 +- llvm/test/CodeGen/Mips/setge.ll | 6 +- llvm/test/CodeGen/Mips/setgek.ll | 2 +- llvm/test/CodeGen/Mips/setle.ll | 6 +- llvm/test/CodeGen/Mips/setlt.ll | 4 +- llvm/test/CodeGen/Mips/setltk.ll | 2 +- llvm/test/CodeGen/Mips/setne.ll | 4 +- llvm/test/CodeGen/Mips/setuge.ll | 6 +- llvm/test/CodeGen/Mips/setugt.ll | 4 +- llvm/test/CodeGen/Mips/setule.ll | 6 +- llvm/test/CodeGen/Mips/setult.ll | 4 +- llvm/test/CodeGen/Mips/setultk.ll | 2 +- llvm/test/CodeGen/Mips/sh1.ll | 6 +- llvm/test/CodeGen/Mips/simplebr.ll | 2 +- llvm/test/CodeGen/Mips/sitofp-selectcc-opt.ll | 2 +- llvm/test/CodeGen/Mips/sll1.ll | 4 +- llvm/test/CodeGen/Mips/sll2.ll | 6 +- llvm/test/CodeGen/Mips/small-section-reserve-gp.ll | 2 +- llvm/test/CodeGen/Mips/spill-copy-acreg.ll | 6 +- llvm/test/CodeGen/Mips/sra1.ll | 2 +- llvm/test/CodeGen/Mips/sra2.ll | 4 +- llvm/test/CodeGen/Mips/srl1.ll | 4 +- llvm/test/CodeGen/Mips/srl2.ll | 6 +- llvm/test/CodeGen/Mips/stackcoloring.ll | 4 +- llvm/test/CodeGen/Mips/stchar.ll | 28 +- llvm/test/CodeGen/Mips/stldst.ll | 16 +- llvm/test/CodeGen/Mips/sub1.ll | 2 +- llvm/test/CodeGen/Mips/sub2.ll | 4 +- llvm/test/CodeGen/Mips/tailcall.ll | 20 +- llvm/test/CodeGen/Mips/tls.ll | 6 +- llvm/test/CodeGen/Mips/tls16.ll | 2 +- llvm/test/CodeGen/Mips/tls16_2.ll | 2 +- llvm/test/CodeGen/Mips/uitofp.ll | 2 +- llvm/test/CodeGen/Mips/vector-load-store.ll | 4 +- llvm/test/CodeGen/Mips/vector-setcc.ll | 4 +- llvm/test/CodeGen/Mips/xor1.ll | 4 +- llvm/test/CodeGen/Mips/zeroreg.ll | 8 +- llvm/test/CodeGen/NVPTX/access-non-generic.ll | 14 +- llvm/test/CodeGen/NVPTX/addrspacecast.ll | 16 +- llvm/test/CodeGen/NVPTX/bug21465.ll | 2 +- llvm/test/CodeGen/NVPTX/bug22322.ll | 2 +- llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll | 8 +- llvm/test/CodeGen/NVPTX/fp16.ll | 8 +- llvm/test/CodeGen/NVPTX/generic-to-nvvm.ll | 4 +- llvm/test/CodeGen/NVPTX/half.ll | 14 +- llvm/test/CodeGen/NVPTX/i1-global.ll | 2 +- llvm/test/CodeGen/NVPTX/i8-param.ll | 2 +- llvm/test/CodeGen/NVPTX/ld-addrspace.ll | 36 +- llvm/test/CodeGen/NVPTX/ld-generic.ll | 12 +- llvm/test/CodeGen/NVPTX/load-sext-i1.ll | 2 +- llvm/test/CodeGen/NVPTX/machine-sink.ll | 4 +- llvm/test/CodeGen/NVPTX/misaligned-vector-ldst.ll | 8 +- llvm/test/CodeGen/NVPTX/noduplicate-syncthreads.ll | 24 +- llvm/test/CodeGen/NVPTX/nounroll.ll | 2 +- llvm/test/CodeGen/NVPTX/pr13291-i1-store.ll | 2 +- llvm/test/CodeGen/NVPTX/pr16278.ll | 2 +- llvm/test/CodeGen/NVPTX/refl1.ll | 2 +- llvm/test/CodeGen/NVPTX/sched1.ll | 8 +- 
llvm/test/CodeGen/NVPTX/sched2.ll | 8 +- llvm/test/CodeGen/NVPTX/shift-parts.ll | 8 +- llvm/test/CodeGen/NVPTX/simple-call.ll | 2 +- llvm/test/CodeGen/NVPTX/vector-compare.ll | 4 +- llvm/test/CodeGen/NVPTX/vector-loads.ll | 12 +- llvm/test/CodeGen/NVPTX/vector-select.ll | 6 +- llvm/test/CodeGen/NVPTX/weak-global.ll | 2 +- .../CodeGen/PowerPC/2005-11-30-vastart-crash.ll | 2 +- .../CodeGen/PowerPC/2006-01-20-ShiftPartsCrash.ll | 4 +- llvm/test/CodeGen/PowerPC/2006-04-05-splat-ish.ll | 2 +- .../CodeGen/PowerPC/2006-05-12-rlwimi-crash.ll | 14 +- .../PowerPC/2006-07-07-ComputeMaskedBits.ll | 8 +- .../CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll | 2 +- .../CodeGen/PowerPC/2006-08-15-SelectionCrash.ll | 2 +- .../test/CodeGen/PowerPC/2006-12-07-SelectCrash.ll | 2 +- llvm/test/CodeGen/PowerPC/2007-01-15-AsmDialect.ll | 10 +- llvm/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll | 2 +- .../CodeGen/PowerPC/2007-03-30-SpillerCrash.ll | 242 +++---- .../PowerPC/2007-04-30-InlineAsmEarlyClobber.ll | 2 +- .../test/CodeGen/PowerPC/2007-05-22-tailmerge-3.ll | 8 +- .../PowerPC/2007-09-07-LoadStoreIdxForms.ll | 2 +- llvm/test/CodeGen/PowerPC/2007-09-08-unaligned.ll | 12 +- .../CodeGen/PowerPC/2007-10-18-PtrArithmetic.ll | 4 +- .../PowerPC/2007-10-21-LocalRegAllocAssert.ll | 4 +- .../PowerPC/2007-10-21-LocalRegAllocAssert2.ll | 8 +- .../CodeGen/PowerPC/2007-11-16-landingpad-split.ll | 2 +- .../CodeGen/PowerPC/2007-11-19-VectorSplitting.ll | 2 +- .../PowerPC/2008-02-09-LocalRegAllocAssert.ll | 2 +- .../PowerPC/2008-03-05-RegScavengerAssert.ll | 2 +- .../PowerPC/2008-03-17-RegScavengerCrash.ll | 2 +- .../CodeGen/PowerPC/2008-03-24-AddressRegImm.ll | 2 +- .../CodeGen/PowerPC/2008-03-26-CoalescerBug.ll | 2 +- .../CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll | 4 +- .../CodeGen/PowerPC/2008-06-21-F128LoadStore.ll | 2 +- .../PowerPC/2008-06-23-LiveVariablesCrash.ll | 2 +- llvm/test/CodeGen/PowerPC/2008-07-15-Bswap.ll | 40 +- .../CodeGen/PowerPC/2008-07-15-SignExtendInreg.ll | 2 +- .../CodeGen/PowerPC/2008-09-12-CoalescerBug.ll | 46 +- .../CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll | 2 +- .../CodeGen/PowerPC/2008-10-31-PPCF128Libcalls.ll | 10 +- .../2009-08-17-inline-asm-addr-mode-breakage.ll | 2 +- .../CodeGen/PowerPC/2010-03-09-indirect-call.ll | 2 +- .../CodeGen/PowerPC/2010-12-18-PPCStackRefs.ll | 4 +- .../CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll | 20 +- .../PowerPC/2011-12-06-SpillAndRestoreCR.ll | 34 +- .../PowerPC/2011-12-08-DemandedBitsMiscompile.ll | 2 +- llvm/test/CodeGen/PowerPC/Atomics-64.ll | 160 ++--- llvm/test/CodeGen/PowerPC/a2-fp-basic.ll | 12 +- llvm/test/CodeGen/PowerPC/addi-licm.ll | 4 +- llvm/test/CodeGen/PowerPC/addi-reassoc.ll | 4 +- llvm/test/CodeGen/PowerPC/alias.ll | 4 +- llvm/test/CodeGen/PowerPC/and-elim.ll | 2 +- llvm/test/CodeGen/PowerPC/anon_aggr.ll | 8 +- llvm/test/CodeGen/PowerPC/asm-constraints.ll | 2 +- llvm/test/CodeGen/PowerPC/atomic-2.ll | 2 +- llvm/test/CodeGen/PowerPC/atomics-indexed.ll | 8 +- llvm/test/CodeGen/PowerPC/atomics.ll | 8 +- llvm/test/CodeGen/PowerPC/bdzlr.ll | 4 +- llvm/test/CodeGen/PowerPC/bswap-load-store.ll | 6 +- llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll | 4 +- llvm/test/CodeGen/PowerPC/byval-aliased.ll | 2 +- llvm/test/CodeGen/PowerPC/code-align.ll | 6 +- llvm/test/CodeGen/PowerPC/complex-return.ll | 12 +- llvm/test/CodeGen/PowerPC/cr-spills.ll | 94 +-- llvm/test/CodeGen/PowerPC/crbits.ll | 2 +- llvm/test/CodeGen/PowerPC/crsave.ll | 4 +- llvm/test/CodeGen/PowerPC/ctrloop-cpsgn.ll | 2 +- llvm/test/CodeGen/PowerPC/ctrloop-fp64.ll | 4 +- 
llvm/test/CodeGen/PowerPC/ctrloop-i64.ll | 8 +- llvm/test/CodeGen/PowerPC/ctrloop-le.ll | 30 +- llvm/test/CodeGen/PowerPC/ctrloop-lt.ll | 30 +- llvm/test/CodeGen/PowerPC/ctrloop-ne.ll | 30 +- llvm/test/CodeGen/PowerPC/ctrloop-s000.ll | 32 +- llvm/test/CodeGen/PowerPC/ctrloop-sh.ll | 12 +- llvm/test/CodeGen/PowerPC/ctrloop-sums.ll | 4 +- llvm/test/CodeGen/PowerPC/ctrloops.ll | 6 +- llvm/test/CodeGen/PowerPC/dcbt-sched.ll | 4 +- llvm/test/CodeGen/PowerPC/delete-node.ll | 4 +- llvm/test/CodeGen/PowerPC/dyn-alloca-aligned.ll | 4 +- llvm/test/CodeGen/PowerPC/emptystruct.ll | 2 +- llvm/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll | 8 +- .../test/CodeGen/PowerPC/fast-isel-GEP-coalesce.ll | 8 +- llvm/test/CodeGen/PowerPC/fast-isel-call.ll | 2 +- llvm/test/CodeGen/PowerPC/fast-isel-fold.ll | 26 +- llvm/test/CodeGen/PowerPC/fast-isel-load-store.ll | 18 +- .../test/CodeGen/PowerPC/fast-isel-redefinition.ll | 2 +- .../PowerPC/fastisel-gep-promote-before-add.ll | 4 +- llvm/test/CodeGen/PowerPC/floatPSA.ll | 30 +- llvm/test/CodeGen/PowerPC/flt-preinc.ll | 8 +- llvm/test/CodeGen/PowerPC/fp-to-int-ext.ll | 8 +- llvm/test/CodeGen/PowerPC/frounds.ll | 4 +- llvm/test/CodeGen/PowerPC/glob-comp-aa-crash.ll | 8 +- llvm/test/CodeGen/PowerPC/hidden-vis-2.ll | 4 +- llvm/test/CodeGen/PowerPC/hidden-vis.ll | 2 +- llvm/test/CodeGen/PowerPC/ia-mem-r0.ll | 6 +- llvm/test/CodeGen/PowerPC/indexed-load.ll | 2 +- llvm/test/CodeGen/PowerPC/indirectbr.ll | 4 +- llvm/test/CodeGen/PowerPC/inlineasm-i64-reg.ll | 20 +- llvm/test/CodeGen/PowerPC/isel-rc-nox0.ll | 2 +- llvm/test/CodeGen/PowerPC/lbz-from-ld-shift.ll | 2 +- llvm/test/CodeGen/PowerPC/lbzux.ll | 4 +- llvm/test/CodeGen/PowerPC/ld-st-upd.ll | 2 +- llvm/test/CodeGen/PowerPC/ldtoc-inv.ll | 2 +- llvm/test/CodeGen/PowerPC/lha.ll | 2 +- llvm/test/CodeGen/PowerPC/load-constant-addr.ll | 2 +- llvm/test/CodeGen/PowerPC/load-shift-combine.ll | 12 +- llvm/test/CodeGen/PowerPC/loop-data-prefetch.ll | 2 +- llvm/test/CodeGen/PowerPC/lsa.ll | 4 +- llvm/test/CodeGen/PowerPC/lsr-postinc-pos.ll | 4 +- llvm/test/CodeGen/PowerPC/mask64.ll | 4 +- llvm/test/CodeGen/PowerPC/mcm-1.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-10.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-11.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-2.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-3.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-5.ll | 12 +- llvm/test/CodeGen/PowerPC/mcm-6.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-7.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-8.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-9.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-default.ll | 2 +- llvm/test/CodeGen/PowerPC/mcm-obj-2.ll | 4 +- llvm/test/CodeGen/PowerPC/mcm-obj.ll | 22 +- llvm/test/CodeGen/PowerPC/mem-rr-addr-mode.ll | 6 +- llvm/test/CodeGen/PowerPC/mem_update.ll | 12 +- .../CodeGen/PowerPC/misched-inorder-latency.ll | 4 +- .../CodeGen/PowerPC/mult-alt-generic-powerpc.ll | 38 +- .../CodeGen/PowerPC/mult-alt-generic-powerpc64.ll | 38 +- llvm/test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll | 4 +- llvm/test/CodeGen/PowerPC/novrsave.ll | 2 +- llvm/test/CodeGen/PowerPC/or-addressing-mode.ll | 4 +- llvm/test/CodeGen/PowerPC/post-ra-ec.ll | 4 +- llvm/test/CodeGen/PowerPC/ppc-prologue.ll | 4 +- llvm/test/CodeGen/PowerPC/ppc32-lshrti3.ll | 2 +- llvm/test/CodeGen/PowerPC/ppc32-pic-large.ll | 2 +- llvm/test/CodeGen/PowerPC/ppc32-pic.ll | 2 +- llvm/test/CodeGen/PowerPC/ppc440-fp-basic.ll | 12 +- llvm/test/CodeGen/PowerPC/ppc64-abi-extend.ll | 8 +- .../CodeGen/PowerPC/ppc64-align-long-double.ll | 2 +- llvm/test/CodeGen/PowerPC/ppc64-byval-align.ll | 2 +- 
llvm/test/CodeGen/PowerPC/ppc64-calls.ll | 2 +- llvm/test/CodeGen/PowerPC/ppc64-gep-opt.ll | 8 +- llvm/test/CodeGen/PowerPC/ppc64-patchpoint.ll | 6 +- llvm/test/CodeGen/PowerPC/ppc64-smallarg.ll | 4 +- llvm/test/CodeGen/PowerPC/ppc64-toc.ll | 6 +- llvm/test/CodeGen/PowerPC/ppc64le-aggregates.ll | 14 +- llvm/test/CodeGen/PowerPC/ppc64le-localentry.ll | 2 +- llvm/test/CodeGen/PowerPC/ppc64le-smallarg.ll | 4 +- llvm/test/CodeGen/PowerPC/ppcf128-1.ll | 32 +- llvm/test/CodeGen/PowerPC/ppcf128-endian.ll | 6 +- llvm/test/CodeGen/PowerPC/pr13891.ll | 2 +- llvm/test/CodeGen/PowerPC/pr15031.ll | 14 +- llvm/test/CodeGen/PowerPC/pr15630.ll | 2 +- llvm/test/CodeGen/PowerPC/pr16556-2.ll | 6 +- llvm/test/CodeGen/PowerPC/pr17168.ll | 2 +- llvm/test/CodeGen/PowerPC/pr18663.ll | 18 +- llvm/test/CodeGen/PowerPC/pr20442.ll | 10 +- llvm/test/CodeGen/PowerPC/preincprep-invoke.ll | 2 +- llvm/test/CodeGen/PowerPC/private.ll | 2 +- llvm/test/CodeGen/PowerPC/pwr7-gt-nop.ll | 6 +- llvm/test/CodeGen/PowerPC/qpx-load.ll | 4 +- llvm/test/CodeGen/PowerPC/qpx-s-load.ll | 4 +- llvm/test/CodeGen/PowerPC/qpx-s-sel.ll | 2 +- llvm/test/CodeGen/PowerPC/qpx-sel.ll | 2 +- llvm/test/CodeGen/PowerPC/qpx-unalperm.ll | 24 +- llvm/test/CodeGen/PowerPC/quadint-return.ll | 2 +- llvm/test/CodeGen/PowerPC/reg-coalesce-simple.ll | 2 +- llvm/test/CodeGen/PowerPC/reloc-align.ll | 2 +- llvm/test/CodeGen/PowerPC/resolvefi-basereg.ll | 140 ++-- llvm/test/CodeGen/PowerPC/resolvefi-disp.ll | 8 +- llvm/test/CodeGen/PowerPC/return-val-i128.ll | 10 +- llvm/test/CodeGen/PowerPC/rlwimi-and.ll | 4 +- llvm/test/CodeGen/PowerPC/rlwimi-commute.ll | 8 +- llvm/test/CodeGen/PowerPC/rlwimi-dyn-and.ll | 8 +- llvm/test/CodeGen/PowerPC/rm-zext.ll | 4 +- llvm/test/CodeGen/PowerPC/rs-undef-use.ll | 6 +- llvm/test/CodeGen/PowerPC/s000-alias-misched.ll | 8 +- llvm/test/CodeGen/PowerPC/sjlj.ll | 4 +- llvm/test/CodeGen/PowerPC/small-arguments.ll | 6 +- llvm/test/CodeGen/PowerPC/split-index-tc.ll | 6 +- llvm/test/CodeGen/PowerPC/stack-protector.ll | 2 +- llvm/test/CodeGen/PowerPC/stack-realign.ll | 14 +- llvm/test/CodeGen/PowerPC/std-unal-fi.ll | 14 +- llvm/test/CodeGen/PowerPC/store-load-fwd.ll | 2 +- llvm/test/CodeGen/PowerPC/structsinmem.ll | 28 +- llvm/test/CodeGen/PowerPC/structsinregs.ll | 28 +- llvm/test/CodeGen/PowerPC/subreg-postra-2.ll | 4 +- llvm/test/CodeGen/PowerPC/subreg-postra.ll | 6 +- llvm/test/CodeGen/PowerPC/subsumes-pred-regs.ll | 2 +- llvm/test/CodeGen/PowerPC/tls-cse.ll | 4 +- llvm/test/CodeGen/PowerPC/tls-pic.ll | 4 +- llvm/test/CodeGen/PowerPC/tls.ll | 2 +- llvm/test/CodeGen/PowerPC/toc-load-sched-bug.ll | 50 +- llvm/test/CodeGen/PowerPC/trampoline.ll | 44 +- llvm/test/CodeGen/PowerPC/unal-altivec-wint.ll | 4 +- llvm/test/CodeGen/PowerPC/unal-altivec.ll | 4 +- llvm/test/CodeGen/PowerPC/unal-altivec2.ll | 34 +- llvm/test/CodeGen/PowerPC/unaligned.ll | 12 +- llvm/test/CodeGen/PowerPC/vaddsplat.ll | 24 +- llvm/test/CodeGen/PowerPC/varargs-struct-float.ll | 2 +- llvm/test/CodeGen/PowerPC/vcmp-fold.ll | 8 +- llvm/test/CodeGen/PowerPC/vec-abi-align.ll | 8 +- llvm/test/CodeGen/PowerPC/vec_auto_constant.ll | 4 +- llvm/test/CodeGen/PowerPC/vec_br_cmp.ll | 4 +- .../CodeGen/PowerPC/vec_buildvector_loadstore.ll | 2 +- llvm/test/CodeGen/PowerPC/vec_constants.ll | 6 +- llvm/test/CodeGen/PowerPC/vec_conv.ll | 8 +- llvm/test/CodeGen/PowerPC/vec_fneg.ll | 2 +- llvm/test/CodeGen/PowerPC/vec_misaligned.ll | 8 +- llvm/test/CodeGen/PowerPC/vec_mul.ll | 16 +- llvm/test/CodeGen/PowerPC/vec_perf_shuffle.ll | 20 +- llvm/test/CodeGen/PowerPC/vec_shuffle.ll | 56 +- 
llvm/test/CodeGen/PowerPC/vec_shuffle_le.ll | 54 +- llvm/test/CodeGen/PowerPC/vec_splat.ll | 10 +- llvm/test/CodeGen/PowerPC/vec_splat_constant.ll | 4 +- llvm/test/CodeGen/PowerPC/vec_zero.ll | 2 +- .../CodeGen/PowerPC/vector-identity-shuffle.ll | 2 +- llvm/test/CodeGen/PowerPC/vector.ll | 46 +- llvm/test/CodeGen/PowerPC/vsx-div.ll | 4 +- llvm/test/CodeGen/PowerPC/vsx-infl-copy1.ll | 36 +- llvm/test/CodeGen/PowerPC/vsx-infl-copy2.ll | 18 +- llvm/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll | 72 +- llvm/test/CodeGen/PowerPC/vsx-ldst.ll | 12 +- llvm/test/CodeGen/PowerPC/vsx-minmax.ll | 22 +- llvm/test/CodeGen/PowerPC/vsx-p8.ll | 4 +- llvm/test/CodeGen/PowerPC/vsx.ll | 14 +- llvm/test/CodeGen/PowerPC/vsx_insert_extract_le.ll | 12 +- llvm/test/CodeGen/PowerPC/vsx_shuffle_le.ll | 64 +- .../test/CodeGen/PowerPC/weak_def_can_be_hidden.ll | 4 +- llvm/test/CodeGen/PowerPC/zero-not-run.ll | 2 +- llvm/test/CodeGen/PowerPC/zext-free.ll | 6 +- .../CodeGen/R600/32-bit-local-address-space.ll | 12 +- llvm/test/CodeGen/R600/add-debug.ll | 2 +- llvm/test/CodeGen/R600/add.ll | 16 +- llvm/test/CodeGen/R600/add_i64.ll | 12 +- llvm/test/CodeGen/R600/address-space.ll | 4 +- llvm/test/CodeGen/R600/and.ll | 30 +- llvm/test/CodeGen/R600/array-ptr-calc-i32.ll | 6 +- llvm/test/CodeGen/R600/array-ptr-calc-i64.ll | 4 +- llvm/test/CodeGen/R600/big_alu.ll | 40 +- llvm/test/CodeGen/R600/bitcast.ll | 16 +- llvm/test/CodeGen/R600/bswap.ll | 14 +- llvm/test/CodeGen/R600/call.ll | 8 +- llvm/test/CodeGen/R600/combine_vloads.ll | 2 +- llvm/test/CodeGen/R600/commute_modifiers.ll | 32 +- llvm/test/CodeGen/R600/copy-illegal-type.ll | 18 +- llvm/test/CodeGen/R600/copy-to-reg.ll | 2 +- llvm/test/CodeGen/R600/ctlz_zero_undef.ll | 6 +- llvm/test/CodeGen/R600/ctpop.ll | 32 +- llvm/test/CodeGen/R600/ctpop64.ll | 8 +- llvm/test/CodeGen/R600/cttz_zero_undef.ll | 6 +- llvm/test/CodeGen/R600/cvt_f32_ubyte.ll | 24 +- .../R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll | 4 +- llvm/test/CodeGen/R600/dot4-folding.ll | 4 +- .../ds-negative-offset-addressing-mode-loop.ll | 10 +- llvm/test/CodeGen/R600/ds_read2.ll | 120 ++-- llvm/test/CodeGen/R600/ds_read2_offset_order.ll | 14 +- llvm/test/CodeGen/R600/ds_read2st64.ll | 52 +- llvm/test/CodeGen/R600/ds_write2.ll | 52 +- llvm/test/CodeGen/R600/ds_write2st64.ll | 16 +- llvm/test/CodeGen/R600/extload-private.ll | 8 +- llvm/test/CodeGen/R600/extload.ll | 8 +- llvm/test/CodeGen/R600/fabs.f64.ll | 2 +- llvm/test/CodeGen/R600/fadd.ll | 4 +- llvm/test/CodeGen/R600/fadd64.ll | 4 +- llvm/test/CodeGen/R600/fcmp-cnd.ll | 2 +- llvm/test/CodeGen/R600/fcmp-cnde-int-args.ll | 2 +- llvm/test/CodeGen/R600/fcmp.ll | 4 +- llvm/test/CodeGen/R600/fcmp64.ll | 24 +- llvm/test/CodeGen/R600/fconst64.ll | 2 +- llvm/test/CodeGen/R600/fdiv.f64.ll | 16 +- llvm/test/CodeGen/R600/fdiv.ll | 4 +- llvm/test/CodeGen/R600/fetch-limits.r600.ll | 18 +- llvm/test/CodeGen/R600/fetch-limits.r700+.ll | 34 +- llvm/test/CodeGen/R600/flat-address-space.ll | 18 +- llvm/test/CodeGen/R600/fma-combine.ll | 90 +-- llvm/test/CodeGen/R600/fma.f64.ll | 18 +- llvm/test/CodeGen/R600/fma.ll | 26 +- llvm/test/CodeGen/R600/fmax3.ll | 12 +- llvm/test/CodeGen/R600/fmax_legacy.f64.ll | 16 +- llvm/test/CodeGen/R600/fmax_legacy.ll | 20 +- llvm/test/CodeGen/R600/fmin3.ll | 12 +- llvm/test/CodeGen/R600/fmin_legacy.f64.ll | 16 +- llvm/test/CodeGen/R600/fmin_legacy.ll | 20 +- llvm/test/CodeGen/R600/fmul.ll | 4 +- llvm/test/CodeGen/R600/fmul64.ll | 12 +- llvm/test/CodeGen/R600/fmuladd.ll | 44 +- llvm/test/CodeGen/R600/fneg-fabs.f64.ll | 4 +- 
llvm/test/CodeGen/R600/fneg-fabs.ll | 2 +- llvm/test/CodeGen/R600/fp16_to_fp.ll | 4 +- llvm/test/CodeGen/R600/fp32_to_fp16.ll | 2 +- llvm/test/CodeGen/R600/fp_to_sint.f64.ll | 2 +- llvm/test/CodeGen/R600/fp_to_sint.ll | 2 +- llvm/test/CodeGen/R600/fp_to_uint.f64.ll | 2 +- llvm/test/CodeGen/R600/fp_to_uint.ll | 2 +- llvm/test/CodeGen/R600/frem.ll | 28 +- llvm/test/CodeGen/R600/fsqrt.ll | 4 +- llvm/test/CodeGen/R600/fsub.ll | 8 +- llvm/test/CodeGen/R600/fsub64.ll | 16 +- llvm/test/CodeGen/R600/ftrunc.f64.ll | 2 +- llvm/test/CodeGen/R600/global-directive.ll | 4 +- llvm/test/CodeGen/R600/global-extload-i1.ll | 64 +- llvm/test/CodeGen/R600/global-extload-i16.ll | 64 +- llvm/test/CodeGen/R600/global-extload-i32.ll | 28 +- llvm/test/CodeGen/R600/global-extload-i8.ll | 64 +- llvm/test/CodeGen/R600/global-zero-initializer.ll | 2 +- llvm/test/CodeGen/R600/gv-const-addrspace-fail.ll | 8 +- llvm/test/CodeGen/R600/gv-const-addrspace.ll | 10 +- llvm/test/CodeGen/R600/half.ll | 12 +- llvm/test/CodeGen/R600/i8-to-double-to-float.ll | 2 +- .../CodeGen/R600/icmp-select-sete-reverse-args.ll | 4 +- llvm/test/CodeGen/R600/imm.ll | 4 +- llvm/test/CodeGen/R600/indirect-private-64.ll | 16 +- llvm/test/CodeGen/R600/insert_vector_elt.ll | 4 +- llvm/test/CodeGen/R600/jump-address.ll | 6 +- llvm/test/CodeGen/R600/kcache-fold.ll | 48 +- llvm/test/CodeGen/R600/large-alloca.ll | 2 +- .../CodeGen/R600/large-constant-initializer.ll | 2 +- llvm/test/CodeGen/R600/lds-initializer.ll | 2 +- llvm/test/CodeGen/R600/lds-oqap-crash.ll | 2 +- llvm/test/CodeGen/R600/lds-output-queue.ll | 12 +- llvm/test/CodeGen/R600/lds-zero-initializer.ll | 2 +- llvm/test/CodeGen/R600/llvm.AMDGPU.abs.ll | 4 +- .../CodeGen/R600/llvm.AMDGPU.barrier.global.ll | 2 +- .../test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll | 2 +- llvm/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll | 24 +- llvm/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll | 44 +- llvm/test/CodeGen/R600/llvm.AMDGPU.brev.ll | 2 +- llvm/test/CodeGen/R600/llvm.AMDGPU.class.ll | 24 +- llvm/test/CodeGen/R600/llvm.AMDGPU.cube.ll | 8 +- .../test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll | 8 +- llvm/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll | 14 +- llvm/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll | 44 +- llvm/test/CodeGen/R600/llvm.AMDGPU.fract.ll | 4 +- llvm/test/CodeGen/R600/llvm.AMDGPU.imax.ll | 2 +- llvm/test/CodeGen/R600/llvm.AMDGPU.imin.ll | 2 +- llvm/test/CodeGen/R600/llvm.AMDGPU.tex.ll | 2 +- llvm/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll | 6 +- llvm/test/CodeGen/R600/llvm.AMDGPU.umad24.ll | 4 +- llvm/test/CodeGen/R600/llvm.AMDGPU.umax.ll | 4 +- llvm/test/CodeGen/R600/llvm.AMDGPU.umin.ll | 4 +- llvm/test/CodeGen/R600/llvm.SI.imageload.ll | 10 +- llvm/test/CodeGen/R600/llvm.SI.load.dword.ll | 2 +- llvm/test/CodeGen/R600/llvm.amdgpu.dp4.ll | 4 +- llvm/test/CodeGen/R600/llvm.round.f64.ll | 2 +- llvm/test/CodeGen/R600/load-i1.ll | 14 +- llvm/test/CodeGen/R600/load-input-fold.ll | 38 +- llvm/test/CodeGen/R600/load.ll | 92 +-- llvm/test/CodeGen/R600/load.vec.ll | 4 +- llvm/test/CodeGen/R600/load64.ll | 6 +- llvm/test/CodeGen/R600/local-64.ll | 16 +- llvm/test/CodeGen/R600/local-memory-two-objects.ll | 4 +- llvm/test/CodeGen/R600/local-memory.ll | 2 +- llvm/test/CodeGen/R600/loop-idiom.ll | 2 +- llvm/test/CodeGen/R600/m0-spill.ll | 2 +- llvm/test/CodeGen/R600/mad-combine.ll | 110 +-- llvm/test/CodeGen/R600/mad-sub.ll | 50 +- llvm/test/CodeGen/R600/madak.ll | 28 +- llvm/test/CodeGen/R600/madmk.ll | 28 +- llvm/test/CodeGen/R600/max.ll | 16 +- llvm/test/CodeGen/R600/max3.ll | 12 +- llvm/test/CodeGen/R600/min.ll 
| 20 +- llvm/test/CodeGen/R600/min3.ll | 28 +- llvm/test/CodeGen/R600/missing-store.ll | 4 +- llvm/test/CodeGen/R600/mubuf.ll | 12 +- llvm/test/CodeGen/R600/mul.ll | 28 +- .../R600/no-initializer-constant-addrspace.ll | 4 +- llvm/test/CodeGen/R600/no-shrink-extloads.ll | 16 +- llvm/test/CodeGen/R600/or.ll | 28 +- llvm/test/CodeGen/R600/parallelandifcollapse.ll | 16 +- llvm/test/CodeGen/R600/parallelorifcollapse.ll | 16 +- llvm/test/CodeGen/R600/private-memory.ll | 46 +- llvm/test/CodeGen/R600/pv-packing.ll | 4 +- llvm/test/CodeGen/R600/pv.ll | 68 +- llvm/test/CodeGen/R600/r600-export-fix.ll | 50 +- llvm/test/CodeGen/R600/r600cfg.ll | 2 +- llvm/test/CodeGen/R600/register-count-comments.ll | 4 +- llvm/test/CodeGen/R600/reorder-stores.ll | 16 +- llvm/test/CodeGen/R600/rotl.i64.ll | 4 +- llvm/test/CodeGen/R600/rotr.i64.ll | 8 +- llvm/test/CodeGen/R600/rsq.ll | 10 +- llvm/test/CodeGen/R600/s_movk_i32.ll | 26 +- llvm/test/CodeGen/R600/saddo.ll | 8 +- llvm/test/CodeGen/R600/salu-to-valu.ll | 16 +- llvm/test/CodeGen/R600/scalar_to_vector.ll | 6 +- llvm/test/CodeGen/R600/schedule-fs-loop-nested.ll | 8 +- llvm/test/CodeGen/R600/schedule-fs-loop.ll | 8 +- llvm/test/CodeGen/R600/schedule-global-loads.ll | 8 +- llvm/test/CodeGen/R600/schedule-if-2.ll | 8 +- llvm/test/CodeGen/R600/schedule-if.ll | 6 +- .../R600/schedule-vs-if-nested-loop-failure.ll | 32 +- .../CodeGen/R600/schedule-vs-if-nested-loop.ll | 32 +- llvm/test/CodeGen/R600/scratch-buffer.ll | 12 +- llvm/test/CodeGen/R600/sdiv.ll | 20 +- llvm/test/CodeGen/R600/sdivrem24.ll | 48 +- llvm/test/CodeGen/R600/select64.ll | 8 +- llvm/test/CodeGen/R600/selectcc-cnd.ll | 2 +- llvm/test/CodeGen/R600/selectcc-cnde-int.ll | 2 +- .../CodeGen/R600/selectcc-icmp-select-float.ll | 2 +- llvm/test/CodeGen/R600/setcc-opt.ll | 2 +- llvm/test/CodeGen/R600/setcc.ll | 12 +- llvm/test/CodeGen/R600/sext-in-reg.ll | 44 +- llvm/test/CodeGen/R600/sgpr-control-flow.ll | 4 +- .../CodeGen/R600/sgpr-copy-duplicate-operand.ll | 2 +- llvm/test/CodeGen/R600/sgpr-copy.ll | 24 +- llvm/test/CodeGen/R600/shl.ll | 20 +- llvm/test/CodeGen/R600/shl_add_constant.ll | 6 +- llvm/test/CodeGen/R600/shl_add_ptr.ll | 12 +- llvm/test/CodeGen/R600/si-lod-bias.ll | 6 +- llvm/test/CodeGen/R600/si-sgpr-spill.ll | 96 +-- .../CodeGen/R600/si-triv-disjoint-mem-access.ll | 56 +- llvm/test/CodeGen/R600/si-vector-hang.ll | 32 +- llvm/test/CodeGen/R600/sign_extend.ll | 2 +- .../R600/simplify-demanded-bits-build-pair.ll | 2 +- llvm/test/CodeGen/R600/sint_to_fp.f64.ll | 2 +- llvm/test/CodeGen/R600/sint_to_fp.ll | 2 +- llvm/test/CodeGen/R600/smrd.ll | 14 +- llvm/test/CodeGen/R600/split-scalar-i64-add.ll | 2 +- llvm/test/CodeGen/R600/sra.ll | 20 +- llvm/test/CodeGen/R600/srem.ll | 38 +- llvm/test/CodeGen/R600/srl.ll | 24 +- llvm/test/CodeGen/R600/ssubo.ll | 8 +- llvm/test/CodeGen/R600/store-barrier.ll | 10 +- llvm/test/CodeGen/R600/store.ll | 4 +- llvm/test/CodeGen/R600/store.r600.ll | 4 +- llvm/test/CodeGen/R600/sub.ll | 24 +- llvm/test/CodeGen/R600/swizzle-export.ll | 42 +- llvm/test/CodeGen/R600/trunc-cmp-constant.ll | 26 +- llvm/test/CodeGen/R600/trunc.ll | 4 +- llvm/test/CodeGen/R600/uaddo.ll | 8 +- llvm/test/CodeGen/R600/udiv.ll | 12 +- llvm/test/CodeGen/R600/udivrem24.ll | 48 +- llvm/test/CodeGen/R600/uint_to_fp.f64.ll | 2 +- llvm/test/CodeGen/R600/uint_to_fp.ll | 2 +- llvm/test/CodeGen/R600/unaligned-load-store.ll | 24 +- .../R600/unhandled-loop-condition-assertion.ll | 30 +- llvm/test/CodeGen/R600/unroll.ll | 2 +- llvm/test/CodeGen/R600/urem.ll | 26 +- llvm/test/CodeGen/R600/usubo.ll | 8 +- 
llvm/test/CodeGen/R600/v_cndmask.ll | 2 +- llvm/test/CodeGen/R600/valu-i1.ll | 8 +- llvm/test/CodeGen/R600/vector-alloca.ll | 6 +- llvm/test/CodeGen/R600/vertex-fetch-encoding.ll | 4 +- llvm/test/CodeGen/R600/vselect.ll | 16 +- llvm/test/CodeGen/R600/vtx-fetch-branch.ll | 2 +- llvm/test/CodeGen/R600/vtx-schedule.ll | 4 +- llvm/test/CodeGen/R600/wait.ll | 6 +- llvm/test/CodeGen/R600/xor.ll | 34 +- .../SPARC/2008-10-10-InlineAsmMemoryOperand.ll | 2 +- llvm/test/CodeGen/SPARC/2009-08-28-PIC.ll | 4 +- llvm/test/CodeGen/SPARC/2011-01-11-CC.ll | 6 +- llvm/test/CodeGen/SPARC/2011-01-22-SRet.ll | 6 +- llvm/test/CodeGen/SPARC/64abi.ll | 16 +- llvm/test/CodeGen/SPARC/64bit.ll | 16 +- llvm/test/CodeGen/SPARC/atomics.ll | 8 +- llvm/test/CodeGen/SPARC/fp128.ll | 34 +- llvm/test/CodeGen/SPARC/globals.ll | 2 +- llvm/test/CodeGen/SPARC/leafproc.ll | 2 +- llvm/test/CodeGen/SPARC/mult-alt-generic-sparc.ll | 38 +- llvm/test/CodeGen/SPARC/obj-relocs.ll | 2 +- llvm/test/CodeGen/SPARC/private.ll | 2 +- llvm/test/CodeGen/SPARC/setjmp.ll | 4 +- llvm/test/CodeGen/SPARC/spillsize.ll | 4 +- llvm/test/CodeGen/SPARC/tls.ll | 4 +- llvm/test/CodeGen/SPARC/varargs.ll | 2 +- llvm/test/CodeGen/SystemZ/addr-01.ll | 16 +- llvm/test/CodeGen/SystemZ/addr-02.ll | 16 +- llvm/test/CodeGen/SystemZ/addr-03.ll | 10 +- llvm/test/CodeGen/SystemZ/alias-01.ll | 2 +- llvm/test/CodeGen/SystemZ/and-01.ll | 40 +- llvm/test/CodeGen/SystemZ/and-03.ll | 34 +- llvm/test/CodeGen/SystemZ/and-05.ll | 26 +- llvm/test/CodeGen/SystemZ/and-06.ll | 16 +- llvm/test/CodeGen/SystemZ/and-08.ll | 104 +-- llvm/test/CodeGen/SystemZ/asm-18.ll | 48 +- llvm/test/CodeGen/SystemZ/atomic-load-01.ll | 2 +- llvm/test/CodeGen/SystemZ/atomic-load-02.ll | 2 +- llvm/test/CodeGen/SystemZ/atomic-load-03.ll | 2 +- llvm/test/CodeGen/SystemZ/atomic-load-04.ll | 2 +- llvm/test/CodeGen/SystemZ/branch-02.ll | 12 +- llvm/test/CodeGen/SystemZ/branch-03.ll | 8 +- llvm/test/CodeGen/SystemZ/branch-04.ll | 28 +- llvm/test/CodeGen/SystemZ/branch-06.ll | 14 +- llvm/test/CodeGen/SystemZ/branch-08.ll | 2 +- llvm/test/CodeGen/SystemZ/bswap-02.ll | 48 +- llvm/test/CodeGen/SystemZ/bswap-03.ll | 48 +- llvm/test/CodeGen/SystemZ/cond-load-01.ll | 18 +- llvm/test/CodeGen/SystemZ/cond-load-02.ll | 18 +- llvm/test/CodeGen/SystemZ/cond-store-01.ll | 44 +- llvm/test/CodeGen/SystemZ/cond-store-02.ll | 44 +- llvm/test/CodeGen/SystemZ/cond-store-03.ll | 36 +- llvm/test/CodeGen/SystemZ/cond-store-04.ll | 24 +- llvm/test/CodeGen/SystemZ/cond-store-05.ll | 24 +- llvm/test/CodeGen/SystemZ/cond-store-06.ll | 24 +- llvm/test/CodeGen/SystemZ/cond-store-07.ll | 22 +- llvm/test/CodeGen/SystemZ/cond-store-08.ll | 14 +- llvm/test/CodeGen/SystemZ/fp-abs-01.ll | 4 +- llvm/test/CodeGen/SystemZ/fp-abs-02.ll | 4 +- llvm/test/CodeGen/SystemZ/fp-add-01.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-add-02.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-add-03.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-cmp-01.ll | 56 +- llvm/test/CodeGen/SystemZ/fp-cmp-02.ll | 34 +- llvm/test/CodeGen/SystemZ/fp-cmp-03.ll | 4 +- llvm/test/CodeGen/SystemZ/fp-cmp-04.ll | 6 +- llvm/test/CodeGen/SystemZ/fp-conv-01.ll | 8 +- llvm/test/CodeGen/SystemZ/fp-conv-02.ll | 44 +- llvm/test/CodeGen/SystemZ/fp-conv-03.ll | 44 +- llvm/test/CodeGen/SystemZ/fp-conv-04.ll | 44 +- llvm/test/CodeGen/SystemZ/fp-conv-09.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-conv-10.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-conv-11.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-conv-12.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-conv-14.ll | 4 +- llvm/test/CodeGen/SystemZ/fp-copysign-01.ll | 12 +- 
llvm/test/CodeGen/SystemZ/fp-div-01.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-div-02.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-div-03.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-move-01.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-move-02.ll | 48 +- llvm/test/CodeGen/SystemZ/fp-move-03.ll | 20 +- llvm/test/CodeGen/SystemZ/fp-move-04.ll | 20 +- llvm/test/CodeGen/SystemZ/fp-move-05.ll | 22 +- llvm/test/CodeGen/SystemZ/fp-move-09.ll | 6 +- llvm/test/CodeGen/SystemZ/fp-mul-01.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-mul-02.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-mul-03.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-mul-04.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-mul-05.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-mul-06.ll | 14 +- llvm/test/CodeGen/SystemZ/fp-mul-07.ll | 14 +- llvm/test/CodeGen/SystemZ/fp-mul-08.ll | 14 +- llvm/test/CodeGen/SystemZ/fp-mul-09.ll | 14 +- llvm/test/CodeGen/SystemZ/fp-neg-01.ll | 4 +- llvm/test/CodeGen/SystemZ/fp-round-01.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-round-02.ll | 12 +- llvm/test/CodeGen/SystemZ/fp-sqrt-01.ll | 44 +- llvm/test/CodeGen/SystemZ/fp-sqrt-02.ll | 44 +- llvm/test/CodeGen/SystemZ/fp-sqrt-03.ll | 2 +- llvm/test/CodeGen/SystemZ/fp-sub-01.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-sub-02.ll | 32 +- llvm/test/CodeGen/SystemZ/fp-sub-03.ll | 2 +- llvm/test/CodeGen/SystemZ/frame-02.ll | 96 +-- llvm/test/CodeGen/SystemZ/frame-03.ll | 96 +-- llvm/test/CodeGen/SystemZ/frame-04.ll | 48 +- llvm/test/CodeGen/SystemZ/frame-05.ll | 76 +- llvm/test/CodeGen/SystemZ/frame-06.ll | 76 +- llvm/test/CodeGen/SystemZ/frame-07.ll | 64 +- llvm/test/CodeGen/SystemZ/frame-08.ll | 92 +-- llvm/test/CodeGen/SystemZ/frame-09.ll | 26 +- llvm/test/CodeGen/SystemZ/frame-13.ll | 38 +- llvm/test/CodeGen/SystemZ/frame-14.ll | 38 +- llvm/test/CodeGen/SystemZ/frame-15.ll | 58 +- llvm/test/CodeGen/SystemZ/frame-16.ll | 34 +- llvm/test/CodeGen/SystemZ/frame-17.ll | 86 +-- llvm/test/CodeGen/SystemZ/frame-18.ll | 60 +- llvm/test/CodeGen/SystemZ/insert-01.ll | 34 +- llvm/test/CodeGen/SystemZ/insert-02.ll | 34 +- llvm/test/CodeGen/SystemZ/insert-06.ll | 6 +- llvm/test/CodeGen/SystemZ/int-add-01.ll | 20 +- llvm/test/CodeGen/SystemZ/int-add-02.ll | 40 +- llvm/test/CodeGen/SystemZ/int-add-03.ll | 34 +- llvm/test/CodeGen/SystemZ/int-add-04.ll | 34 +- llvm/test/CodeGen/SystemZ/int-add-05.ll | 34 +- llvm/test/CodeGen/SystemZ/int-add-08.ll | 38 +- llvm/test/CodeGen/SystemZ/int-add-09.ll | 8 +- llvm/test/CodeGen/SystemZ/int-add-10.ll | 34 +- llvm/test/CodeGen/SystemZ/int-add-11.ll | 84 +-- llvm/test/CodeGen/SystemZ/int-add-12.ll | 84 +-- llvm/test/CodeGen/SystemZ/int-cmp-01.ll | 22 +- llvm/test/CodeGen/SystemZ/int-cmp-02.ll | 22 +- llvm/test/CodeGen/SystemZ/int-cmp-03.ll | 22 +- llvm/test/CodeGen/SystemZ/int-cmp-04.ll | 16 +- llvm/test/CodeGen/SystemZ/int-cmp-05.ll | 42 +- llvm/test/CodeGen/SystemZ/int-cmp-06.ll | 42 +- llvm/test/CodeGen/SystemZ/int-cmp-07.ll | 16 +- llvm/test/CodeGen/SystemZ/int-cmp-08.ll | 16 +- llvm/test/CodeGen/SystemZ/int-cmp-15.ll | 38 +- llvm/test/CodeGen/SystemZ/int-cmp-16.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-17.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-18.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-19.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-20.ll | 32 +- llvm/test/CodeGen/SystemZ/int-cmp-21.ll | 32 +- llvm/test/CodeGen/SystemZ/int-cmp-22.ll | 18 +- llvm/test/CodeGen/SystemZ/int-cmp-23.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-24.ll | 8 +- llvm/test/CodeGen/SystemZ/int-cmp-25.ll | 8 +- llvm/test/CodeGen/SystemZ/int-cmp-26.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-27.ll | 20 +- 
llvm/test/CodeGen/SystemZ/int-cmp-28.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-29.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-30.ll | 32 +- llvm/test/CodeGen/SystemZ/int-cmp-31.ll | 32 +- llvm/test/CodeGen/SystemZ/int-cmp-32.ll | 36 +- llvm/test/CodeGen/SystemZ/int-cmp-33.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-34.ll | 36 +- llvm/test/CodeGen/SystemZ/int-cmp-35.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-36.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-37.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-38.ll | 14 +- llvm/test/CodeGen/SystemZ/int-cmp-39.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-40.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-41.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-42.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-43.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-44.ll | 20 +- llvm/test/CodeGen/SystemZ/int-cmp-45.ll | 12 +- llvm/test/CodeGen/SystemZ/int-cmp-48.ll | 32 +- llvm/test/CodeGen/SystemZ/int-conv-01.ll | 46 +- llvm/test/CodeGen/SystemZ/int-conv-02.ll | 46 +- llvm/test/CodeGen/SystemZ/int-conv-03.ll | 46 +- llvm/test/CodeGen/SystemZ/int-conv-04.ll | 46 +- llvm/test/CodeGen/SystemZ/int-conv-05.ll | 52 +- llvm/test/CodeGen/SystemZ/int-conv-06.ll | 46 +- llvm/test/CodeGen/SystemZ/int-conv-07.ll | 46 +- llvm/test/CodeGen/SystemZ/int-conv-08.ll | 46 +- llvm/test/CodeGen/SystemZ/int-conv-09.ll | 14 +- llvm/test/CodeGen/SystemZ/int-conv-10.ll | 14 +- llvm/test/CodeGen/SystemZ/int-conv-11.ll | 128 ++-- llvm/test/CodeGen/SystemZ/int-div-01.ll | 42 +- llvm/test/CodeGen/SystemZ/int-div-02.ll | 38 +- llvm/test/CodeGen/SystemZ/int-div-03.ll | 20 +- llvm/test/CodeGen/SystemZ/int-div-04.ll | 40 +- llvm/test/CodeGen/SystemZ/int-div-05.ll | 40 +- llvm/test/CodeGen/SystemZ/int-move-02.ll | 20 +- llvm/test/CodeGen/SystemZ/int-move-03.ll | 14 +- llvm/test/CodeGen/SystemZ/int-move-08.ll | 16 +- llvm/test/CodeGen/SystemZ/int-move-09.ll | 20 +- llvm/test/CodeGen/SystemZ/int-mul-01.ll | 20 +- llvm/test/CodeGen/SystemZ/int-mul-02.ll | 40 +- llvm/test/CodeGen/SystemZ/int-mul-03.ll | 34 +- llvm/test/CodeGen/SystemZ/int-mul-04.ll | 34 +- llvm/test/CodeGen/SystemZ/int-mul-08.ll | 34 +- llvm/test/CodeGen/SystemZ/int-sub-01.ll | 40 +- llvm/test/CodeGen/SystemZ/int-sub-02.ll | 34 +- llvm/test/CodeGen/SystemZ/int-sub-03.ll | 34 +- llvm/test/CodeGen/SystemZ/int-sub-04.ll | 34 +- llvm/test/CodeGen/SystemZ/int-sub-05.ll | 38 +- llvm/test/CodeGen/SystemZ/int-sub-06.ll | 34 +- llvm/test/CodeGen/SystemZ/int-sub-07.ll | 20 +- llvm/test/CodeGen/SystemZ/loop-01.ll | 6 +- llvm/test/CodeGen/SystemZ/memchr-02.ll | 4 +- llvm/test/CodeGen/SystemZ/memcpy-02.ll | 64 +- llvm/test/CodeGen/SystemZ/or-01.ll | 40 +- llvm/test/CodeGen/SystemZ/or-03.ll | 34 +- llvm/test/CodeGen/SystemZ/or-05.ll | 26 +- llvm/test/CodeGen/SystemZ/or-06.ll | 16 +- llvm/test/CodeGen/SystemZ/or-08.ll | 16 +- llvm/test/CodeGen/SystemZ/serialize-01.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-01.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-02.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-03.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-04.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-05.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-06.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-07.ll | 2 +- llvm/test/CodeGen/SystemZ/shift-08.ll | 2 +- llvm/test/CodeGen/SystemZ/spill-01.ll | 212 +++--- llvm/test/CodeGen/SystemZ/strcpy-01.ll | 2 +- llvm/test/CodeGen/SystemZ/tls-05.ll | 2 +- llvm/test/CodeGen/SystemZ/tls-06.ll | 4 +- llvm/test/CodeGen/SystemZ/tls-07.ll | 4 +- llvm/test/CodeGen/SystemZ/unaligned-01.ll | 10 +- llvm/test/CodeGen/SystemZ/xor-01.ll | 40 +- 
llvm/test/CodeGen/SystemZ/xor-03.ll | 34 +- llvm/test/CodeGen/SystemZ/xor-05.ll | 26 +- llvm/test/CodeGen/SystemZ/xor-06.ll | 16 +- llvm/test/CodeGen/SystemZ/xor-08.ll | 16 +- .../test/CodeGen/Thumb/2007-01-31-RegInfoAssert.ll | 2 +- .../CodeGen/Thumb/2007-05-05-InvalidPushPop.ll | 10 +- llvm/test/CodeGen/Thumb/2009-07-20-TwoAddrBug.ll | 2 +- .../CodeGen/Thumb/2009-08-12-ConstIslandAssert.ll | 310 ++++----- .../test/CodeGen/Thumb/2009-08-12-RegInfoAssert.ll | 6 +- llvm/test/CodeGen/Thumb/2009-08-20-ISelBug.ll | 8 +- .../Thumb/2009-12-17-pre-regalloc-taildup.ll | 12 +- llvm/test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll | 10 +- llvm/test/CodeGen/Thumb/2011-EpilogueBug.ll | 2 +- .../Thumb/2014-06-10-thumb1-ldst-opt-bug.ll | 4 +- llvm/test/CodeGen/Thumb/asmprinter-bug.ll | 30 +- .../CodeGen/Thumb/cortex-m0-unaligned-access.ll | 2 +- llvm/test/CodeGen/Thumb/dyn-stackalloc.ll | 4 +- llvm/test/CodeGen/Thumb/large-stack.ll | 2 +- llvm/test/CodeGen/Thumb/ldm-merge-call.ll | 4 +- llvm/test/CodeGen/Thumb/ldm-merge-struct.ll | 4 +- .../CodeGen/Thumb/ldm-stm-base-materialization.ll | 4 +- llvm/test/CodeGen/Thumb/ldr_ext.ll | 10 +- llvm/test/CodeGen/Thumb/ldr_frame.ll | 8 +- llvm/test/CodeGen/Thumb/long.ll | 2 +- llvm/test/CodeGen/Thumb/segmented-stacks.ll | 2 +- llvm/test/CodeGen/Thumb/stack-access.ll | 12 +- llvm/test/CodeGen/Thumb/stm-merge.ll | 2 +- llvm/test/CodeGen/Thumb/thumb-ldm.ll | 16 +- llvm/test/CodeGen/Thumb/vargs.ll | 4 +- .../CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll | 2 +- llvm/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll | 10 +- llvm/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll | 34 +- .../test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll | 2 +- .../test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll | 4 +- .../test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll | 12 +- .../CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll | 70 +- .../Thumb2/2009-08-04-SubregLoweringBug3.ll | 2 +- llvm/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll | 6 +- llvm/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll | 6 +- .../test/CodeGen/Thumb2/2009-09-01-PostRAProlog.ll | 22 +- llvm/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll | 14 +- .../CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll | 10 +- llvm/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll | 40 +- .../Thumb2/2010-01-06-TailDuplicateLabels.ll | 12 +- .../test/CodeGen/Thumb2/2010-03-08-addi12-ccout.ll | 8 +- .../test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll | 2 +- .../test/CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll | 2 +- .../test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll | 4 +- .../CodeGen/Thumb2/2010-08-10-VarSizedAllocaBug.ll | 2 +- .../Thumb2/2011-06-07-TwoAddrEarlyClobber.ll | 4 +- .../Thumb2/2011-12-16-T2SizeReduceAssert.ll | 8 +- llvm/test/CodeGen/Thumb2/2012-01-13-CBNZBug.ll | 12 +- .../Thumb2/2013-02-19-tail-call-register-hint.ll | 6 +- llvm/test/CodeGen/Thumb2/aligned-constants.ll | 4 +- llvm/test/CodeGen/Thumb2/aligned-spill.ll | 2 +- llvm/test/CodeGen/Thumb2/bfi.ll | 2 +- .../Thumb2/constant-islands-new-island-padding.ll | 8 +- llvm/test/CodeGen/Thumb2/constant-islands.ll | 254 +++---- llvm/test/CodeGen/Thumb2/crash.ll | 16 +- llvm/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll | 10 +- llvm/test/CodeGen/Thumb2/float-ops.ll | 4 +- llvm/test/CodeGen/Thumb2/frameless2.ll | 2 +- llvm/test/CodeGen/Thumb2/ifcvt-neon.ll | 4 +- llvm/test/CodeGen/Thumb2/inflate-regs.ll | 4 +- llvm/test/CodeGen/Thumb2/large-call.ll | 2 +- llvm/test/CodeGen/Thumb2/large-stack.ll | 2 +- llvm/test/CodeGen/Thumb2/lsr-deficiency.ll | 6 +- llvm/test/CodeGen/Thumb2/machine-licm.ll | 4 +- 
llvm/test/CodeGen/Thumb2/tail-call-r9.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-call-tc.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-call.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-ifcvt2.ll | 6 +- llvm/test/CodeGen/Thumb2/thumb2-ifcvt3.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-ldm.ll | 16 +- llvm/test/CodeGen/Thumb2/thumb2-ldr.ll | 14 +- llvm/test/CodeGen/Thumb2/thumb2-ldr_ext.ll | 8 +- llvm/test/CodeGen/Thumb2/thumb2-ldr_post.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-ldr_pre.ll | 6 +- llvm/test/CodeGen/Thumb2/thumb2-ldrb.ll | 14 +- llvm/test/CodeGen/Thumb2/thumb2-ldrd.ll | 4 +- llvm/test/CodeGen/Thumb2/thumb2-ldrh.ll | 14 +- llvm/test/CodeGen/Thumb2/thumb2-smul.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-spill-q.ll | 2 +- llvm/test/CodeGen/Thumb2/thumb2-str_post.ll | 4 +- llvm/test/CodeGen/Thumb2/thumb2-str_pre.ll | 4 +- llvm/test/CodeGen/Thumb2/thumb2-tbh.ll | 2 +- llvm/test/CodeGen/Thumb2/tls1.ll | 2 +- llvm/test/CodeGen/Thumb2/tls2.ll | 2 +- llvm/test/CodeGen/Thumb2/tpsoft.ll | 2 +- llvm/test/CodeGen/Thumb2/v8_IT_2.ll | 6 +- llvm/test/CodeGen/Thumb2/v8_IT_3.ll | 8 +- llvm/test/CodeGen/X86/2005-01-17-CycleInDAG.ll | 4 +- llvm/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll | 2 +- llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll | 6 +- .../CodeGen/X86/2006-05-01-SchedCausingSpills.ll | 8 +- llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll | 8 +- llvm/test/CodeGen/X86/2006-05-02-InstrSched2.ll | 4 +- .../CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll | 4 +- llvm/test/CodeGen/X86/2006-05-08-InstrSched.ll | 8 +- llvm/test/CodeGen/X86/2006-05-11-InstrSched.ll | 8 +- llvm/test/CodeGen/X86/2006-05-25-CycleInDAG.ll | 2 +- llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll | 4 +- llvm/test/CodeGen/X86/2006-08-07-CycleInDAG.ll | 4 +- llvm/test/CodeGen/X86/2006-08-16-CycleInDAG.ll | 2 +- llvm/test/CodeGen/X86/2006-09-01-CycleInDAG.ll | 10 +- llvm/test/CodeGen/X86/2006-10-09-CycleInDAG.ll | 4 +- .../X86/2006-10-10-FindModifiedNodeSlotBug.ll | 6 +- llvm/test/CodeGen/X86/2006-10-12-CycleInDAG.ll | 4 +- llvm/test/CodeGen/X86/2006-10-13-CycleInDAG.ll | 4 +- llvm/test/CodeGen/X86/2006-11-12-CSRetCC.ll | 20 +- llvm/test/CodeGen/X86/2006-11-17-IllegalMove.ll | 6 +- llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll | 2 +- llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll | 60 +- llvm/test/CodeGen/X86/2007-02-04-OrAddrMode.ll | 2 +- llvm/test/CodeGen/X86/2007-02-16-BranchFold.ll | 8 +- .../CodeGen/X86/2007-02-19-LiveIntervalAssert.ll | 4 +- llvm/test/CodeGen/X86/2007-03-01-SpillerCrash.ll | 2 +- llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll | 20 +- llvm/test/CodeGen/X86/2007-03-16-InlineAsm.ll | 8 +- llvm/test/CodeGen/X86/2007-03-26-CoalescerBug.ll | 2 +- .../CodeGen/X86/2007-04-17-LiveIntervalAssert.ll | 2 +- llvm/test/CodeGen/X86/2007-05-05-VecCastExpand.ll | 4 +- .../CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll | 2 +- llvm/test/CodeGen/X86/2007-07-10-StackerAssert.ll | 4 +- llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll | 4 +- .../CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll | 68 +- llvm/test/CodeGen/X86/2007-09-05-InvalidAsm.ll | 10 +- .../test/CodeGen/X86/2007-10-04-AvoidEFLAGSCopy.ll | 2 +- .../CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll | 8 +- llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll | 8 +- llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll | 8 +- llvm/test/CodeGen/X86/2007-10-14-CoalescerCrash.ll | 4 +- llvm/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll | 8 +- 
llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll | 2 +- .../CodeGen/X86/2007-10-31-extractelement-i64.ll | 24 +- .../CodeGen/X86/2007-11-04-LiveIntervalCrash.ll | 2 +- llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll | 4 +- llvm/test/CodeGen/X86/2007-11-07-MulBy4.ll | 4 +- llvm/test/CodeGen/X86/2007-12-16-BURRSchedCrash.ll | 4 +- llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll | 4 +- llvm/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll | 2 +- .../CodeGen/X86/2008-01-16-FPStackifierAssert.ll | 10 +- .../X86/2008-01-16-InvalidDAGCombineXform.ll | 22 +- llvm/test/CodeGen/X86/2008-02-05-ISelCrash.ll | 2 +- llvm/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll | 4 +- llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll | 24 +- .../CodeGen/X86/2008-02-20-InlineAsmClobber.ll | 2 +- .../CodeGen/X86/2008-02-22-LocalRegAllocBug.ll | 28 +- .../CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll | 8 +- .../test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll | 6 +- llvm/test/CodeGen/X86/2008-03-07-APIntBug.ll | 20 +- .../test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll | 2 +- .../CodeGen/X86/2008-03-12-ThreadLocalAlias.ll | 8 +- llvm/test/CodeGen/X86/2008-03-14-SpillerCrash.ll | 4 +- .../CodeGen/X86/2008-03-23-DarwinAsmComments.ll | 10 +- .../CodeGen/X86/2008-03-31-SpillerFoldingBug.ll | 2 +- llvm/test/CodeGen/X86/2008-04-09-BranchFolding.ll | 2 +- .../test/CodeGen/X86/2008-04-15-LiveVariableBug.ll | 4 +- llvm/test/CodeGen/X86/2008-04-16-CoalescerBug.ll | 2 +- llvm/test/CodeGen/X86/2008-04-17-CoalescerBug.ll | 4 +- .../CodeGen/X86/2008-04-24-pblendw-fold-crash.ll | 2 +- llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll | 2 +- .../CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll | 2 +- llvm/test/CodeGen/X86/2008-05-12-tailmerge-5.ll | 20 +- llvm/test/CodeGen/X86/2008-05-21-CoalescerBug.ll | 2 +- .../CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll | 2 +- .../CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll | 4 +- .../CodeGen/X86/2008-06-13-VolatileLoadStore.ll | 4 +- llvm/test/CodeGen/X86/2008-06-16-SubregsBug.ll | 2 +- .../CodeGen/X86/2008-07-07-DanglingDeadInsts.ll | 2 +- llvm/test/CodeGen/X86/2008-07-19-movups-spills.ll | 128 ++-- llvm/test/CodeGen/X86/2008-07-22-CombinerCrash.ll | 2 +- llvm/test/CodeGen/X86/2008-08-06-RewriterBug.ll | 10 +- llvm/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll | 2 +- llvm/test/CodeGen/X86/2008-09-09-LinearScanBug.ll | 2 +- llvm/test/CodeGen/X86/2008-09-11-CoalescerBug.ll | 4 +- llvm/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll | 4 +- llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll | 2 +- llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll | 6 +- llvm/test/CodeGen/X86/2008-09-19-RegAllocBug.ll | 2 +- llvm/test/CodeGen/X86/2008-09-29-ReMatBug.ll | 8 +- llvm/test/CodeGen/X86/2008-09-29-VolatileBug.ll | 2 +- llvm/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll | 2 +- llvm/test/CodeGen/X86/2008-10-07-SSEISelBug.ll | 6 +- llvm/test/CodeGen/X86/2008-10-11-CallCrash.ll | 2 +- llvm/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll | 2 +- llvm/test/CodeGen/X86/2008-10-27-CoalescerBug.ll | 2 +- llvm/test/CodeGen/X86/2008-11-06-testb.ll | 2 +- .../X86/2008-12-01-loop-iv-used-outside-loop.ll | 2 +- .../CodeGen/X86/2008-12-02-IllegalResultType.ll | 2 +- llvm/test/CodeGen/X86/2009-01-16-SchedulerBug.ll | 4 +- .../CodeGen/X86/2009-01-18-ConstantExprCrash.ll | 2 +- llvm/test/CodeGen/X86/2009-01-31-BigShift2.ll | 2 +- llvm/test/CodeGen/X86/2009-02-01-LargeMask.ll | 2 +- llvm/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll | 2 +- .../CodeGen/X86/2009-02-11-codegenprepare-reuse.ll | 6 +- llvm/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll | 26 +- 
llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll | 6 +- llvm/test/CodeGen/X86/2009-03-03-BTHang.ll | 4 +- .../test/CodeGen/X86/2009-03-05-burr-list-crash.ll | 2 +- llvm/test/CodeGen/X86/2009-03-09-APIntCrash.ll | 2 +- llvm/test/CodeGen/X86/2009-03-10-CoalescerBug.ll | 2 +- llvm/test/CodeGen/X86/2009-03-23-LinearScanBug.ll | 6 +- llvm/test/CodeGen/X86/2009-03-23-MultiUseSched.ll | 48 +- llvm/test/CodeGen/X86/2009-03-25-TestBug.ll | 2 +- llvm/test/CodeGen/X86/2009-04-14-IllegalRegs.ll | 6 +- llvm/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll | 16 +- llvm/test/CodeGen/X86/2009-04-24.ll | 2 +- llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll | 2 +- .../test/CodeGen/X86/2009-04-27-CoalescerAssert.ll | 170 ++--- .../CodeGen/X86/2009-04-29-IndirectDestOperands.ll | 8 +- llvm/test/CodeGen/X86/2009-04-29-LinearScanBug.ll | 26 +- llvm/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll | 2 +- llvm/test/CodeGen/X86/2009-04-scale.ll | 4 +- .../test/CodeGen/X86/2009-05-11-tailmerge-crash.ll | 2 +- .../test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll | 2 +- llvm/test/CodeGen/X86/2009-05-30-ISelBug.ll | 4 +- llvm/test/CodeGen/X86/2009-06-02-RewriterBug.ll | 40 +- llvm/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll | 2 +- llvm/test/CodeGen/X86/2009-06-05-VZextByteShort.ll | 8 +- llvm/test/CodeGen/X86/2009-07-15-CoalescerBug.ll | 2 +- llvm/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll | 2 +- .../CodeGen/X86/2009-08-06-branchfolder-crash.ll | 8 +- .../X86/2009-08-14-Win64MemoryIndirectArg.ll | 6 +- .../X86/2009-08-19-LoadNarrowingMiscompile.ll | 2 +- .../test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll | 6 +- llvm/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll | 2 +- llvm/test/CodeGen/X86/2009-09-10-SpillComments.ll | 28 +- llvm/test/CodeGen/X86/2009-09-16-CoalescerBug.ll | 2 +- .../CodeGen/X86/2009-09-21-NoSpillLoopCount.ll | 4 +- llvm/test/CodeGen/X86/2009-09-22-CoalescerBug.ll | 2 +- llvm/test/CodeGen/X86/2009-10-19-EmergencySpill.ll | 10 +- .../CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll | 4 +- llvm/test/CodeGen/X86/2009-10-25-RewriterBug.ll | 8 +- llvm/test/CodeGen/X86/2009-11-16-MachineLICM.ll | 8 +- llvm/test/CodeGen/X86/2009-11-25-ImpDefBug.ll | 2 +- .../test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll | 8 +- llvm/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll | 10 +- llvm/test/CodeGen/X86/20090313-signext.ll | 2 +- llvm/test/CodeGen/X86/2010-01-13-OptExtBug.ll | 10 +- .../CodeGen/X86/2010-01-15-SelectionDAGCycle.ll | 4 +- llvm/test/CodeGen/X86/2010-01-18-DbgValue.ll | 6 +- llvm/test/CodeGen/X86/2010-01-19-OptExtBug.ll | 2 +- llvm/test/CodeGen/X86/2010-02-04-SchedulerBug.ll | 8 +- llvm/test/CodeGen/X86/2010-02-11-NonTemporal.ll | 4 +- .../CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll | 2 +- .../CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll | 18 +- .../CodeGen/X86/2010-02-23-RematImplicitSubreg.ll | 4 +- llvm/test/CodeGen/X86/2010-03-17-ISelBug.ll | 2 +- .../CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll | 2 +- llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll | 2 +- .../CodeGen/X86/2010-04-13-AnalyzeBranchCrash.ll | 2 +- .../X86/2010-04-30-LocalAlloc-LandingPad.ll | 12 +- .../X86/2010-05-05-LocalAllocEarlyClobber.ll | 4 +- llvm/test/CodeGen/X86/2010-05-07-ldconvert.ll | 4 +- llvm/test/CodeGen/X86/2010-05-10-DAGCombinerBug.ll | 2 +- .../test/CodeGen/X86/2010-05-16-nosseconversion.ll | 2 +- llvm/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll | 2 +- .../test/CodeGen/X86/2010-05-26-FP_TO_INT-crash.ll | 2 +- .../CodeGen/X86/2010-06-14-fast-isel-fs-load.ll | 2 +- .../X86/2010-06-15-FastAllocEarlyCLobber.ll | 6 +- 
.../X86/2010-06-25-CoalescerSubRegDefDead.ll | 2 +- llvm/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll | 4 +- .../CodeGen/X86/2010-06-28-matched-g-constraint.ll | 2 +- llvm/test/CodeGen/X86/2010-07-02-UnfoldBug.ll | 2 +- llvm/test/CodeGen/X86/2010-07-11-FPStackLoneUse.ll | 2 +- .../CodeGen/X86/2010-08-04-MaskedSignedCompare.ll | 4 +- llvm/test/CodeGen/X86/2010-08-04-StackVariable.ll | 8 +- .../X86/2010-09-01-RemoveCopyByCommutingDef.ll | 2 +- .../CodeGen/X86/2010-09-17-SideEffectsInChain.ll | 4 +- llvm/test/CodeGen/X86/2010-11-09-MOVLPS.ll | 14 +- .../test/CodeGen/X86/2010-11-18-SelectOfExtload.ll | 4 +- llvm/test/CodeGen/X86/2011-02-12-shuffle.ll | 2 +- llvm/test/CodeGen/X86/2011-03-02-DAGCombiner.ll | 14 +- .../CodeGen/X86/2011-03-09-Physreg-Coalescing.ll | 2 +- llvm/test/CodeGen/X86/2011-04-13-SchedCmpJmp.ll | 6 +- llvm/test/CodeGen/X86/2011-05-09-loaduse.ll | 2 +- .../CodeGen/X86/2011-05-26-UnreachableBlockElim.ll | 2 +- .../CodeGen/X86/2011-05-27-CrossClassCoalescing.ll | 4 +- llvm/test/CodeGen/X86/2011-06-01-fildll.ll | 2 +- llvm/test/CodeGen/X86/2011-06-03-x87chain.ll | 6 +- llvm/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll | 6 +- .../X86/2011-07-13-BadFrameIndexDisplacement.ll | 2 +- llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll | 2 +- llvm/test/CodeGen/X86/2011-09-21-setcc-bug.ll | 16 +- llvm/test/CodeGen/X86/2011-10-11-srl.ll | 2 +- llvm/test/CodeGen/X86/2011-10-12-MachineCSE.ll | 32 +- .../X86/2011-10-18-FastISel-VectorParams.ll | 10 +- llvm/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll | 4 +- llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll | 4 +- llvm/test/CodeGen/X86/2011-10-27-tstore.ll | 2 +- llvm/test/CodeGen/X86/2011-11-22-AVX2-Domains.ll | 18 +- llvm/test/CodeGen/X86/2011-12-08-AVXISelBugs.ll | 8 +- .../2011-12-26-extractelement-duplicate-load.ll | 2 +- .../CodeGen/X86/2012-01-10-UndefExceptionEdge.ll | 2 +- llvm/test/CodeGen/X86/2012-01-11-split-cv.ll | 2 +- llvm/test/CodeGen/X86/2012-01-12-extract-sv.ll | 2 +- .../CodeGen/X86/2012-01-16-mfence-nosse-flags.ll | 2 +- llvm/test/CodeGen/X86/2012-02-12-dagco.ll | 4 +- llvm/test/CodeGen/X86/2012-02-29-CoalescerBug.ll | 4 +- llvm/test/CodeGen/X86/2012-03-26-PostRALICMBug.ll | 8 +- llvm/test/CodeGen/X86/2012-04-26-sdglue.ll | 4 +- llvm/test/CodeGen/X86/2012-07-10-extload64.ll | 4 +- llvm/test/CodeGen/X86/2012-07-15-broadcastfold.ll | 2 +- .../test/CodeGen/X86/2012-08-17-legalizer-crash.ll | 4 +- llvm/test/CodeGen/X86/2012-09-28-CGPBug.ll | 6 +- llvm/test/CodeGen/X86/2012-10-02-DAGCycle.ll | 8 +- llvm/test/CodeGen/X86/2012-10-03-DAGCycle.ll | 4 +- llvm/test/CodeGen/X86/2012-10-18-crash-dagco.ll | 14 +- .../CodeGen/X86/2012-11-28-merge-store-alias.ll | 4 +- llvm/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll | 2 +- llvm/test/CodeGen/X86/2012-11-30-misched-dbg.ll | 2 +- .../CodeGen/X86/2012-12-06-python27-miscompile.ll | 2 +- .../test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll | 2 +- llvm/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll | 2 +- .../X86/2013-10-14-FastISel-incorrect-vreg.ll | 6 +- llvm/test/CodeGen/X86/Atomics-64.ll | 200 +++--- llvm/test/CodeGen/X86/GC/alloc_loop.ll | 4 +- llvm/test/CodeGen/X86/GC/argpromotion.ll | 2 +- llvm/test/CodeGen/X86/GC/inline.ll | 2 +- llvm/test/CodeGen/X86/GC/inline2.ll | 2 +- llvm/test/CodeGen/X86/MachineBranchProb.ll | 2 +- llvm/test/CodeGen/X86/MachineSink-DbgValue.ll | 2 +- llvm/test/CodeGen/X86/MachineSink-eflags.ll | 12 +- llvm/test/CodeGen/X86/MergeConsecutiveStores.ll | 50 +- llvm/test/CodeGen/X86/StackColoring.ll | 2 +- llvm/test/CodeGen/X86/SwitchLowering.ll | 2 +- 
llvm/test/CodeGen/X86/SwizzleShuff.ll | 20 +- llvm/test/CodeGen/X86/abi-isel.ll | 182 ++--- llvm/test/CodeGen/X86/addr-mode-matcher.ll | 4 +- .../X86/address-type-promotion-constantexpr.ll | 2 +- llvm/test/CodeGen/X86/aliases.ll | 6 +- llvm/test/CodeGen/X86/aligned-variadic.ll | 2 +- llvm/test/CodeGen/X86/and-su.ll | 2 +- .../X86/atom-call-reg-indirect-foldedreload32.ll | 28 +- .../X86/atom-call-reg-indirect-foldedreload64.ll | 42 +- llvm/test/CodeGen/X86/atom-call-reg-indirect.ll | 8 +- llvm/test/CodeGen/X86/atom-cmpb.ll | 4 +- llvm/test/CodeGen/X86/atom-fixup-lea1.ll | 2 +- llvm/test/CodeGen/X86/atom-fixup-lea2.ll | 12 +- llvm/test/CodeGen/X86/atom-fixup-lea3.ll | 6 +- llvm/test/CodeGen/X86/atom-fixup-lea4.ll | 2 +- llvm/test/CodeGen/X86/atom-lea-addw-bug.ll | 6 +- llvm/test/CodeGen/X86/atom-sched.ll | 8 +- llvm/test/CodeGen/X86/atomic-dagsched.ll | 18 +- llvm/test/CodeGen/X86/atomic-load-store-wide.ll | 2 +- llvm/test/CodeGen/X86/atomic-load-store.ll | 2 +- llvm/test/CodeGen/X86/atomic-or.ll | 4 +- llvm/test/CodeGen/X86/atomic-pointer.ll | 2 +- llvm/test/CodeGen/X86/atomic128.ll | 4 +- llvm/test/CodeGen/X86/atomic_mi.ll | 60 +- llvm/test/CodeGen/X86/atomic_op.ll | 2 +- llvm/test/CodeGen/X86/avoid-loop-align-2.ll | 4 +- llvm/test/CodeGen/X86/avoid-loop-align.ll | 2 +- llvm/test/CodeGen/X86/avoid_complex_am.ll | 4 +- llvm/test/CodeGen/X86/avx-arith.ll | 6 +- llvm/test/CodeGen/X86/avx-basic.ll | 4 +- llvm/test/CodeGen/X86/avx-bitcast.ll | 2 +- llvm/test/CodeGen/X86/avx-cvt.ll | 10 +- llvm/test/CodeGen/X86/avx-intel-ocl.ll | 4 +- llvm/test/CodeGen/X86/avx-intrinsics-x86.ll | 14 +- llvm/test/CodeGen/X86/avx-load-store.ll | 16 +- llvm/test/CodeGen/X86/avx-logic.ll | 4 +- llvm/test/CodeGen/X86/avx-splat.ll | 2 +- llvm/test/CodeGen/X86/avx-unpack.ll | 16 +- llvm/test/CodeGen/X86/avx-varargs-x86_64.ll | 2 +- llvm/test/CodeGen/X86/avx-vbroadcast.ll | 30 +- llvm/test/CodeGen/X86/avx-vinsertf128.ll | 4 +- llvm/test/CodeGen/X86/avx-vperm2x128.ll | 4 +- llvm/test/CodeGen/X86/avx-vzeroupper.ll | 4 +- llvm/test/CodeGen/X86/avx.ll | 12 +- llvm/test/CodeGen/X86/avx1-logical-load-folding.ll | 8 +- llvm/test/CodeGen/X86/avx2-conversions.ll | 10 +- llvm/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll | 24 +- llvm/test/CodeGen/X86/avx2-shift.ll | 20 +- llvm/test/CodeGen/X86/avx2-vbroadcast.ll | 50 +- llvm/test/CodeGen/X86/avx512-arith.ll | 28 +- llvm/test/CodeGen/X86/avx512-build-vector.ll | 2 +- llvm/test/CodeGen/X86/avx512-cvt.ll | 12 +- .../CodeGen/X86/avx512-gather-scatter-intrin.ll | 8 +- llvm/test/CodeGen/X86/avx512-i1test.ll | 2 +- llvm/test/CodeGen/X86/avx512-insert-extract.ll | 10 +- llvm/test/CodeGen/X86/avx512-intel-ocl.ll | 4 +- llvm/test/CodeGen/X86/avx512-intrinsics.ll | 4 +- llvm/test/CodeGen/X86/avx512-logic.ll | 4 +- llvm/test/CodeGen/X86/avx512-mask-op.ll | 6 +- llvm/test/CodeGen/X86/avx512-mov.ll | 60 +- llvm/test/CodeGen/X86/avx512-round.ll | 2 +- llvm/test/CodeGen/X86/avx512-shift.ll | 8 +- llvm/test/CodeGen/X86/avx512-vbroadcast.ll | 12 +- llvm/test/CodeGen/X86/avx512-vec-cmp.ll | 20 +- llvm/test/CodeGen/X86/avx512bw-arith.ll | 8 +- llvm/test/CodeGen/X86/avx512bw-mask-op.ll | 4 +- llvm/test/CodeGen/X86/avx512bw-mov.ll | 12 +- llvm/test/CodeGen/X86/avx512bw-vec-cmp.ll | 12 +- llvm/test/CodeGen/X86/avx512bwvl-arith.ll | 16 +- llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll | 28 +- llvm/test/CodeGen/X86/avx512bwvl-mov.ll | 24 +- llvm/test/CodeGen/X86/avx512bwvl-vec-cmp.ll | 24 +- llvm/test/CodeGen/X86/avx512dq-mask-op.ll | 2 +- llvm/test/CodeGen/X86/avx512er-intrinsics.ll | 4 +- 
llvm/test/CodeGen/X86/avx512vl-arith.ll | 40 +- llvm/test/CodeGen/X86/avx512vl-intrinsics.ll | 4 +- llvm/test/CodeGen/X86/avx512vl-mov.ll | 96 +-- llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll | 40 +- llvm/test/CodeGen/X86/bitcast-mmx.ll | 4 +- llvm/test/CodeGen/X86/block-placement.ll | 186 ++--- llvm/test/CodeGen/X86/bmi.ll | 22 +- llvm/test/CodeGen/X86/break-anti-dependencies.ll | 4 +- llvm/test/CodeGen/X86/break-false-dep.ll | 24 +- llvm/test/CodeGen/X86/bswap.ll | 6 +- llvm/test/CodeGen/X86/byval-align.ll | 8 +- llvm/test/CodeGen/X86/byval.ll | 2 +- llvm/test/CodeGen/X86/call-push.ll | 2 +- llvm/test/CodeGen/X86/cas.ll | 24 +- llvm/test/CodeGen/X86/chain_order.ll | 8 +- llvm/test/CodeGen/X86/change-compare-stride-1.ll | 18 +- llvm/test/CodeGen/X86/clobber-fi0.ll | 6 +- llvm/test/CodeGen/X86/cmov-into-branch.ll | 8 +- llvm/test/CodeGen/X86/cmov.ll | 16 +- llvm/test/CodeGen/X86/cmp.ll | 8 +- llvm/test/CodeGen/X86/cmpxchg-clobber-flags.ll | 2 +- llvm/test/CodeGen/X86/cmpxchg-i1.ll | 2 +- llvm/test/CodeGen/X86/cmpxchg-i128-i1.ll | 2 +- llvm/test/CodeGen/X86/coalesce-esp.ll | 2 +- llvm/test/CodeGen/X86/coalesce-implicitdef.ll | 12 +- llvm/test/CodeGen/X86/coalescer-commute1.ll | 4 +- llvm/test/CodeGen/X86/coalescer-commute4.ll | 4 +- llvm/test/CodeGen/X86/coalescer-cross.ll | 4 +- llvm/test/CodeGen/X86/coalescer-dce2.ll | 16 +- llvm/test/CodeGen/X86/coalescer-identity.ll | 6 +- llvm/test/CodeGen/X86/code_placement.ll | 38 +- .../CodeGen/X86/codegen-prepare-addrmode-sext.ll | 72 +- llvm/test/CodeGen/X86/codegen-prepare-cast.ll | 4 +- llvm/test/CodeGen/X86/codegen-prepare-extload.ll | 44 +- llvm/test/CodeGen/X86/codegen-prepare.ll | 4 +- llvm/test/CodeGen/X86/codemodel.ll | 12 +- llvm/test/CodeGen/X86/combiner-aa-0.ll | 6 +- llvm/test/CodeGen/X86/combiner-aa-1.ll | 4 +- llvm/test/CodeGen/X86/commute-blend-avx2.ll | 16 +- llvm/test/CodeGen/X86/commute-blend-sse41.ll | 6 +- llvm/test/CodeGen/X86/commute-clmul.ll | 8 +- llvm/test/CodeGen/X86/commute-fcmp.ll | 48 +- llvm/test/CodeGen/X86/commute-intrinsic.ll | 2 +- llvm/test/CodeGen/X86/commute-xop.ll | 40 +- llvm/test/CodeGen/X86/compact-unwind.ll | 10 +- llvm/test/CodeGen/X86/complex-asm.ll | 4 +- llvm/test/CodeGen/X86/computeKnownBits_urem.ll | 2 +- llvm/test/CodeGen/X86/const-base-addr.ll | 6 +- llvm/test/CodeGen/X86/constant-combines.ll | 2 +- llvm/test/CodeGen/X86/constant-hoisting-optnone.ll | 4 +- .../X86/constant-hoisting-shift-immediate.ll | 4 +- .../CodeGen/X86/convert-2-addr-3-addr-inc64.ll | 2 +- llvm/test/CodeGen/X86/cppeh-catch-all.ll | 4 +- llvm/test/CodeGen/X86/cppeh-catch-scalar.ll | 18 +- llvm/test/CodeGen/X86/cppeh-frame-vars.ll | 62 +- llvm/test/CodeGen/X86/crash-O0.ll | 2 +- llvm/test/CodeGen/X86/crash-nosse.ll | 2 +- llvm/test/CodeGen/X86/crash.ll | 44 +- llvm/test/CodeGen/X86/critical-anti-dep-breaker.ll | 4 +- llvm/test/CodeGen/X86/cse-add-with-overflow.ll | 4 +- llvm/test/CodeGen/X86/cvt16.ll | 4 +- llvm/test/CodeGen/X86/dagcombine-buildvector.ll | 2 +- llvm/test/CodeGen/X86/dagcombine-cse.ll | 4 +- llvm/test/CodeGen/X86/darwin-quote.ll | 2 +- llvm/test/CodeGen/X86/dbg-changes-codegen.ll | 6 +- llvm/test/CodeGen/X86/dbg-combine.ll | 8 +- llvm/test/CodeGen/X86/discontiguous-loops.ll | 2 +- llvm/test/CodeGen/X86/div8.ll | 6 +- llvm/test/CodeGen/X86/dllimport-x86_64.ll | 6 +- llvm/test/CodeGen/X86/dllimport.ll | 6 +- llvm/test/CodeGen/X86/dollar-name.ll | 4 +- .../X86/dont-trunc-store-double-to-float.ll | 2 +- llvm/test/CodeGen/X86/dynamic-allocas-VLAs.ll | 22 +- llvm/test/CodeGen/X86/early-ifcvt.ll | 2 +- 
llvm/test/CodeGen/X86/emit-big-cst.ll | 2 +- llvm/test/CodeGen/X86/expand-opaque-const.ll | 6 +- llvm/test/CodeGen/X86/extend.ll | 4 +- llvm/test/CodeGen/X86/extract-extract.ll | 4 +- llvm/test/CodeGen/X86/extractelement-load.ll | 8 +- llvm/test/CodeGen/X86/extractps.ll | 4 +- llvm/test/CodeGen/X86/f16c-intrinsics.ll | 4 +- llvm/test/CodeGen/X86/fast-isel-args-fail.ll | 2 +- .../X86/fast-isel-avoid-unnecessary-pic-base.ll | 6 +- llvm/test/CodeGen/X86/fast-isel-call-bool.ll | 2 +- llvm/test/CodeGen/X86/fast-isel-fold-mem.ll | 2 +- llvm/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll | 4 +- llvm/test/CodeGen/X86/fast-isel-gep.ll | 18 +- llvm/test/CodeGen/X86/fast-isel-gv.ll | 6 +- llvm/test/CodeGen/X86/fast-isel-i1.ll | 2 +- .../CodeGen/X86/fast-isel-int-float-conversion.ll | 4 +- llvm/test/CodeGen/X86/fast-isel-mem.ll | 4 +- llvm/test/CodeGen/X86/fast-isel-tailcall.ll | 2 +- llvm/test/CodeGen/X86/fast-isel-tls.ll | 4 +- llvm/test/CodeGen/X86/fast-isel-x86-64.ll | 6 +- llvm/test/CodeGen/X86/fast-isel-x86.ll | 4 +- llvm/test/CodeGen/X86/fast-isel.ll | 16 +- llvm/test/CodeGen/X86/fastcc-byval.ll | 2 +- llvm/test/CodeGen/X86/fastcc-sret.ll | 2 +- llvm/test/CodeGen/X86/fastcc.ll | 8 +- .../CodeGen/X86/fastisel-gep-promote-before-add.ll | 10 +- llvm/test/CodeGen/X86/fma-do-not-commute.ll | 4 +- .../X86/fma4-intrinsics-x86_64-folded-load.ll | 24 +- llvm/test/CodeGen/X86/fma_patterns.ll | 4 +- llvm/test/CodeGen/X86/fmul-zero.ll | 2 +- llvm/test/CodeGen/X86/fold-add.ll | 4 +- llvm/test/CodeGen/X86/fold-and-shift.ll | 12 +- llvm/test/CodeGen/X86/fold-call-2.ll | 2 +- llvm/test/CodeGen/X86/fold-call-3.ll | 10 +- llvm/test/CodeGen/X86/fold-call-oper.ll | 6 +- llvm/test/CodeGen/X86/fold-call.ll | 2 +- llvm/test/CodeGen/X86/fold-load-unops.ll | 8 +- llvm/test/CodeGen/X86/fold-load-vec.ll | 18 +- llvm/test/CodeGen/X86/fold-load.ll | 8 +- llvm/test/CodeGen/X86/fold-mul-lohi.ll | 2 +- llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll | 4 +- llvm/test/CodeGen/X86/fold-sext-trunc.ll | 4 +- llvm/test/CodeGen/X86/fold-tied-op.ll | 12 +- llvm/test/CodeGen/X86/fold-vex.ll | 2 +- llvm/test/CodeGen/X86/fold-zext-trunc.ll | 4 +- llvm/test/CodeGen/X86/force-align-stack-alloca.ll | 2 +- llvm/test/CodeGen/X86/fp-double-rounding.ll | 2 +- llvm/test/CodeGen/X86/fp-load-trunc.ll | 8 +- llvm/test/CodeGen/X86/fp-stack-O0-crash.ll | 8 +- llvm/test/CodeGen/X86/fp-stack-compare-cmov.ll | 2 +- llvm/test/CodeGen/X86/fp-stack-compare.ll | 2 +- llvm/test/CodeGen/X86/fp-stack-ret.ll | 2 +- llvm/test/CodeGen/X86/fp-stack.ll | 6 +- llvm/test/CodeGen/X86/fp2sint.ll | 4 +- llvm/test/CodeGen/X86/fp_load_cast_fold.ll | 6 +- llvm/test/CodeGen/X86/fp_load_fold.ll | 12 +- llvm/test/CodeGen/X86/frameallocate.ll | 2 +- llvm/test/CodeGen/X86/full-lsr.ll | 8 +- llvm/test/CodeGen/X86/gather-addresses.ll | 16 +- llvm/test/CodeGen/X86/ghc-cc.ll | 8 +- llvm/test/CodeGen/X86/ghc-cc64.ll | 32 +- llvm/test/CodeGen/X86/gs-fold.ll | 4 +- llvm/test/CodeGen/X86/h-register-addressing-32.ll | 14 +- llvm/test/CodeGen/X86/h-register-addressing-64.ll | 14 +- llvm/test/CodeGen/X86/half.ll | 8 +- llvm/test/CodeGen/X86/hidden-vis-2.ll | 2 +- llvm/test/CodeGen/X86/hidden-vis-3.ll | 4 +- llvm/test/CodeGen/X86/hidden-vis-4.ll | 2 +- llvm/test/CodeGen/X86/hidden-vis-pic.ll | 2 +- llvm/test/CodeGen/X86/hipe-cc.ll | 12 +- llvm/test/CodeGen/X86/hipe-cc64.ll | 14 +- llvm/test/CodeGen/X86/hoist-invariant-load.ll | 2 +- llvm/test/CodeGen/X86/i128-mul.ll | 2 +- llvm/test/CodeGen/X86/i128-ret.ll | 2 +- llvm/test/CodeGen/X86/i1narrowfail.ll | 2 +- llvm/test/CodeGen/X86/i256-add.ll | 8 +- 
llvm/test/CodeGen/X86/i2k.ll | 4 +- llvm/test/CodeGen/X86/i486-fence-loop.ll | 4 +- llvm/test/CodeGen/X86/i64-mem-copy.ll | 2 +- llvm/test/CodeGen/X86/inline-asm-fpstack.ll | 6 +- llvm/test/CodeGen/X86/inline-asm-out-regs.ll | 4 +- llvm/test/CodeGen/X86/inline-asm-ptr-cast.ll | 6 +- llvm/test/CodeGen/X86/inline-asm-stack-realign.ll | 2 +- llvm/test/CodeGen/X86/inline-asm-stack-realign2.ll | 2 +- llvm/test/CodeGen/X86/inline-asm-stack-realign3.ll | 2 +- llvm/test/CodeGen/X86/inline-asm-tied.ll | 6 +- llvm/test/CodeGen/X86/ins_split_regalloc.ll | 2 +- llvm/test/CodeGen/X86/ins_subreg_coalesce-1.ll | 2 +- llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll | 16 +- llvm/test/CodeGen/X86/insertps-O0-bug.ll | 4 +- llvm/test/CodeGen/X86/invalid-shift-immediate.ll | 2 +- llvm/test/CodeGen/X86/isel-optnone.ll | 12 +- llvm/test/CodeGen/X86/isel-sink.ll | 2 +- llvm/test/CodeGen/X86/isel-sink2.ll | 4 +- llvm/test/CodeGen/X86/isel-sink3.ll | 4 +- llvm/test/CodeGen/X86/jump_sign.ll | 8 +- llvm/test/CodeGen/X86/large-constants.ll | 16 +- llvm/test/CodeGen/X86/ldzero.ll | 12 +- llvm/test/CodeGen/X86/lea-5.ll | 4 +- llvm/test/CodeGen/X86/lea-recursion.ll | 16 +- llvm/test/CodeGen/X86/legalize-shift-64.ll | 2 +- llvm/test/CodeGen/X86/licm-nested.ll | 4 +- llvm/test/CodeGen/X86/liveness-local-regalloc.ll | 2 +- llvm/test/CodeGen/X86/load-slice.ll | 10 +- llvm/test/CodeGen/X86/longlong-deadload.ll | 2 +- llvm/test/CodeGen/X86/loop-strength-reduce4.ll | 16 +- llvm/test/CodeGen/X86/loop-strength-reduce7.ll | 2 +- llvm/test/CodeGen/X86/loop-strength-reduce8.ll | 8 +- llvm/test/CodeGen/X86/lsr-delayed-fold.ll | 4 +- llvm/test/CodeGen/X86/lsr-i386.ll | 2 +- llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll | 40 +- llvm/test/CodeGen/X86/lsr-normalization.ll | 10 +- llvm/test/CodeGen/X86/lsr-redundant-addressing.ll | 8 +- llvm/test/CodeGen/X86/lsr-reuse-trunc.ll | 6 +- llvm/test/CodeGen/X86/lsr-reuse.ll | 86 +-- llvm/test/CodeGen/X86/lsr-static-addr.ll | 2 +- llvm/test/CodeGen/X86/lsr-wrap.ll | 2 +- llvm/test/CodeGen/X86/lzcnt-tzcnt.ll | 36 +- llvm/test/CodeGen/X86/machine-cse.ll | 2 +- llvm/test/CodeGen/X86/masked-iv-safe.ll | 48 +- llvm/test/CodeGen/X86/masked-iv-unsafe.ll | 78 +-- llvm/test/CodeGen/X86/mcinst-lowering.ll | 2 +- llvm/test/CodeGen/X86/mem-intrin-base-reg.ll | 12 +- llvm/test/CodeGen/X86/mem-promote-integers.ll | 70 +- llvm/test/CodeGen/X86/misaligned-memset.ll | 2 +- llvm/test/CodeGen/X86/misched-aa-colored.ll | 4 +- llvm/test/CodeGen/X86/misched-aa-mmos.ll | 4 +- llvm/test/CodeGen/X86/misched-balance.ll | 90 +-- .../X86/misched-code-difference-with-debug.ll | 8 +- llvm/test/CodeGen/X86/misched-crash.ll | 4 +- llvm/test/CodeGen/X86/misched-fusion.ll | 14 +- llvm/test/CodeGen/X86/misched-matmul.ll | 64 +- llvm/test/CodeGen/X86/misched-matrix.ll | 64 +- llvm/test/CodeGen/X86/misched-new.ll | 6 +- llvm/test/CodeGen/X86/mmx-arg-passing-x86-64.ll | 2 +- llvm/test/CodeGen/X86/mmx-arith.ll | 66 +- llvm/test/CodeGen/X86/mmx-bitcast.ll | 8 +- llvm/test/CodeGen/X86/mmx-copy-gprs.ll | 2 +- llvm/test/CodeGen/X86/mmx-fold-load.ll | 50 +- llvm/test/CodeGen/X86/movbe.ll | 6 +- llvm/test/CodeGen/X86/movfs.ll | 4 +- llvm/test/CodeGen/X86/movgs.ll | 16 +- llvm/test/CodeGen/X86/movmsk.ll | 4 +- llvm/test/CodeGen/X86/movtopush.ll | 6 +- llvm/test/CodeGen/X86/ms-inline-asm.ll | 6 +- llvm/test/CodeGen/X86/mul128_sext_loop.ll | 2 +- llvm/test/CodeGen/X86/muloti.ll | 10 +- llvm/test/CodeGen/X86/mult-alt-generic-i686.ll | 38 +- llvm/test/CodeGen/X86/mult-alt-generic-x86_64.ll | 38 +- llvm/test/CodeGen/X86/mult-alt-x86.ll | 48 +- 
llvm/test/CodeGen/X86/multiple-loop-post-inc.ll | 28 +- llvm/test/CodeGen/X86/mulx32.ll | 2 +- llvm/test/CodeGen/X86/mulx64.ll | 2 +- llvm/test/CodeGen/X86/musttail-indirect.ll | 30 +- llvm/test/CodeGen/X86/musttail-varargs.ll | 6 +- llvm/test/CodeGen/X86/nancvt.ll | 54 +- llvm/test/CodeGen/X86/narrow-shl-load.ll | 6 +- llvm/test/CodeGen/X86/narrow_op-1.ll | 4 +- llvm/test/CodeGen/X86/negate-add-zero.ll | 16 +- llvm/test/CodeGen/X86/no-cmov.ll | 2 +- llvm/test/CodeGen/X86/norex-subreg.ll | 6 +- llvm/test/CodeGen/X86/nosse-error1.ll | 8 +- llvm/test/CodeGen/X86/nosse-error2.ll | 8 +- llvm/test/CodeGen/X86/nosse-varargs.ll | 8 +- llvm/test/CodeGen/X86/object-size.ll | 16 +- llvm/test/CodeGen/X86/opt-ext-uses.ll | 2 +- llvm/test/CodeGen/X86/optimize-max-0.ll | 12 +- llvm/test/CodeGen/X86/optimize-max-2.ll | 2 +- llvm/test/CodeGen/X86/optimize-max-3.ll | 2 +- llvm/test/CodeGen/X86/packed_struct.ll | 10 +- llvm/test/CodeGen/X86/palignr-2.ll | 4 +- llvm/test/CodeGen/X86/patchpoint.ll | 6 +- llvm/test/CodeGen/X86/peep-test-0.ll | 2 +- llvm/test/CodeGen/X86/peep-test-1.ll | 2 +- llvm/test/CodeGen/X86/peephole-fold-movsd.ll | 4 +- llvm/test/CodeGen/X86/peephole-multiple-folds.ll | 4 +- llvm/test/CodeGen/X86/phi-bit-propagation.ll | 4 +- llvm/test/CodeGen/X86/phielim-split.ll | 2 +- llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll | 6 +- llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll | 2 +- llvm/test/CodeGen/X86/pic.ll | 8 +- llvm/test/CodeGen/X86/pic_jumptable.ll | 2 +- llvm/test/CodeGen/X86/pmovext.ll | 2 +- llvm/test/CodeGen/X86/pmovsx-inreg.ll | 24 +- llvm/test/CodeGen/X86/pmulld.ll | 2 +- llvm/test/CodeGen/X86/pointer-vector.ll | 24 +- llvm/test/CodeGen/X86/postra-licm.ll | 8 +- llvm/test/CodeGen/X86/pr10475.ll | 2 +- llvm/test/CodeGen/X86/pr10525.ll | 2 +- llvm/test/CodeGen/X86/pr11334.ll | 2 +- llvm/test/CodeGen/X86/pr12360.ll | 4 +- llvm/test/CodeGen/X86/pr12889.ll | 2 +- llvm/test/CodeGen/X86/pr13209.ll | 30 +- llvm/test/CodeGen/X86/pr13859.ll | 2 +- llvm/test/CodeGen/X86/pr13899.ll | 20 +- llvm/test/CodeGen/X86/pr14161.ll | 4 +- llvm/test/CodeGen/X86/pr14562.ll | 2 +- llvm/test/CodeGen/X86/pr1505b.ll | 4 +- llvm/test/CodeGen/X86/pr15267.ll | 8 +- llvm/test/CodeGen/X86/pr15309.ll | 2 +- llvm/test/CodeGen/X86/pr18023.ll | 8 +- llvm/test/CodeGen/X86/pr18162.ll | 6 +- llvm/test/CodeGen/X86/pr18846.ll | 24 +- llvm/test/CodeGen/X86/pr20020.ll | 8 +- llvm/test/CodeGen/X86/pr2177.ll | 4 +- llvm/test/CodeGen/X86/pr2182.ll | 8 +- llvm/test/CodeGen/X86/pr2326.ll | 8 +- llvm/test/CodeGen/X86/pr2656.ll | 4 +- llvm/test/CodeGen/X86/pr2849.ll | 8 +- llvm/test/CodeGen/X86/pr2924.ll | 8 +- llvm/test/CodeGen/X86/pr2982.ll | 6 +- llvm/test/CodeGen/X86/pr3216.ll | 2 +- llvm/test/CodeGen/X86/pr3241.ll | 2 +- llvm/test/CodeGen/X86/pr3244.ll | 4 +- llvm/test/CodeGen/X86/pr3317.ll | 8 +- llvm/test/CodeGen/X86/pr3366.ll | 2 +- llvm/test/CodeGen/X86/pr9127.ll | 2 +- llvm/test/CodeGen/X86/pre-ra-sched.ll | 14 +- llvm/test/CodeGen/X86/private-2.ll | 2 +- llvm/test/CodeGen/X86/private.ll | 2 +- llvm/test/CodeGen/X86/promote-assert-zext.ll | 2 +- llvm/test/CodeGen/X86/promote-trunc.ll | 4 +- llvm/test/CodeGen/X86/promote.ll | 4 +- llvm/test/CodeGen/X86/pshufb-mask-comments.ll | 4 +- llvm/test/CodeGen/X86/psubus.ll | 24 +- llvm/test/CodeGen/X86/ragreedy-bug.ll | 50 +- llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll | 4 +- .../CodeGen/X86/ragreedy-last-chance-recoloring.ll | 34 +- llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll | 30 +- .../CodeGen/X86/regalloc-reconcile-broken-hints.ll | 22 +- 
llvm/test/CodeGen/X86/regpressure.ll | 60 +- llvm/test/CodeGen/X86/remat-constant.ll | 2 +- llvm/test/CodeGen/X86/remat-fold-load.ll | 20 +- llvm/test/CodeGen/X86/remat-invalid-liveness.ll | 6 +- llvm/test/CodeGen/X86/remat-scalar-zero.ll | 34 +- llvm/test/CodeGen/X86/reverse_branches.ll | 2 +- llvm/test/CodeGen/X86/rip-rel-address.ll | 2 +- llvm/test/CodeGen/X86/rot32.ll | 4 +- llvm/test/CodeGen/X86/rot64.ll | 4 +- llvm/test/CodeGen/X86/rotate4.ll | 8 +- llvm/test/CodeGen/X86/sandybridge-loads.ll | 10 +- llvm/test/CodeGen/X86/scalar-extract.ll | 2 +- llvm/test/CodeGen/X86/scalar_widen_div.ll | 20 +- llvm/test/CodeGen/X86/scalarize-bitcast.ll | 2 +- llvm/test/CodeGen/X86/scev-interchange.ll | 2 +- llvm/test/CodeGen/X86/segmented-stacks.ll | 2 +- llvm/test/CodeGen/X86/seh-safe-div.ll | 14 +- llvm/test/CodeGen/X86/select-with-and-or.ll | 2 +- llvm/test/CodeGen/X86/select.ll | 10 +- llvm/test/CodeGen/X86/setcc-narrowing.ll | 2 +- llvm/test/CodeGen/X86/sext-load.ll | 2 +- llvm/test/CodeGen/X86/sha.ll | 14 +- llvm/test/CodeGen/X86/shift-and.ll | 4 +- llvm/test/CodeGen/X86/shift-bmi2.ll | 16 +- llvm/test/CodeGen/X86/shift-coalesce.ll | 2 +- llvm/test/CodeGen/X86/shift-codegen.ll | 4 +- llvm/test/CodeGen/X86/shift-combine.ll | 2 +- llvm/test/CodeGen/X86/shift-folding.ll | 4 +- llvm/test/CodeGen/X86/shift-one.ll | 2 +- llvm/test/CodeGen/X86/shift-parts.ll | 2 +- llvm/test/CodeGen/X86/shl-i64.ll | 4 +- llvm/test/CodeGen/X86/shl_undef.ll | 4 +- llvm/test/CodeGen/X86/shrink-compare.ll | 4 +- llvm/test/CodeGen/X86/shuffle-combine-crash.ll | 2 +- llvm/test/CodeGen/X86/sibcall-4.ll | 2 +- llvm/test/CodeGen/X86/sibcall-5.ll | 2 +- llvm/test/CodeGen/X86/sibcall.ll | 4 +- llvm/test/CodeGen/X86/simple-zext.ll | 2 +- llvm/test/CodeGen/X86/sink-hoist.ll | 10 +- llvm/test/CodeGen/X86/slow-incdec.ll | 4 +- llvm/test/CodeGen/X86/split-vector-bitcast.ll | 2 +- llvm/test/CodeGen/X86/sse-align-0.ll | 4 +- llvm/test/CodeGen/X86/sse-align-1.ll | 4 +- llvm/test/CodeGen/X86/sse-align-10.ll | 2 +- llvm/test/CodeGen/X86/sse-align-12.ll | 8 +- llvm/test/CodeGen/X86/sse-align-2.ll | 4 +- llvm/test/CodeGen/X86/sse-align-5.ll | 2 +- llvm/test/CodeGen/X86/sse-align-6.ll | 2 +- llvm/test/CodeGen/X86/sse-align-9.ll | 4 +- llvm/test/CodeGen/X86/sse-domains.ll | 2 +- llvm/test/CodeGen/X86/sse-intel-ocl.ll | 4 +- llvm/test/CodeGen/X86/sse-load-ret.ll | 2 +- llvm/test/CodeGen/X86/sse-unaligned-mem-feature.ll | 2 +- llvm/test/CodeGen/X86/sse2.ll | 38 +- llvm/test/CodeGen/X86/sse3-avx-addsub.ll | 8 +- llvm/test/CodeGen/X86/sse3.ll | 18 +- llvm/test/CodeGen/X86/sse41-pmovxrm-intrinsics.ll | 24 +- llvm/test/CodeGen/X86/sse41.ll | 30 +- llvm/test/CodeGen/X86/sse42-intrinsics-x86.ll | 12 +- llvm/test/CodeGen/X86/ssp-data-layout.ll | 38 +- llvm/test/CodeGen/X86/stack-align.ll | 4 +- llvm/test/CodeGen/X86/stack-protector-dbginfo.ll | 2 +- .../X86/stack-protector-vreg-to-vreg-copy.ll | 2 +- llvm/test/CodeGen/X86/stack-protector-weight.ll | 2 +- llvm/test/CodeGen/X86/stack-protector.ll | 132 ++-- llvm/test/CodeGen/X86/stackmap.ll | 2 +- llvm/test/CodeGen/X86/statepoint-forward.ll | 12 +- llvm/test/CodeGen/X86/store-narrow.ll | 22 +- llvm/test/CodeGen/X86/store_op_load_fold.ll | 4 +- llvm/test/CodeGen/X86/store_op_load_fold2.ll | 4 +- llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll | 2 +- llvm/test/CodeGen/X86/stride-reuse.ll | 2 +- llvm/test/CodeGen/X86/subreg-to-reg-0.ll | 2 +- llvm/test/CodeGen/X86/subreg-to-reg-2.ll | 6 +- llvm/test/CodeGen/X86/subreg-to-reg-4.ll | 16 +- llvm/test/CodeGen/X86/subreg-to-reg-6.ll | 2 +- 
llvm/test/CodeGen/X86/switch-bt.ll | 4 +- llvm/test/CodeGen/X86/switch-zextload.ll | 2 +- llvm/test/CodeGen/X86/tail-call-win64.ll | 2 +- llvm/test/CodeGen/X86/tail-dup-addr.ll | 2 +- llvm/test/CodeGen/X86/tail-opts.ll | 24 +- llvm/test/CodeGen/X86/tailcall-64.ll | 4 +- llvm/test/CodeGen/X86/tailcall-returndup-void.ll | 4 +- llvm/test/CodeGen/X86/tailcall-ri64.ll | 4 +- llvm/test/CodeGen/X86/tailcallbyval.ll | 2 +- llvm/test/CodeGen/X86/tailcallbyval64.ll | 2 +- llvm/test/CodeGen/X86/tbm-intrinsics-x86_64.ll | 4 +- llvm/test/CodeGen/X86/tbm_patterns.ll | 4 +- llvm/test/CodeGen/X86/test-shrink-bug.ll | 2 +- llvm/test/CodeGen/X86/testl-commute.ll | 12 +- .../test/CodeGen/X86/tls-addr-non-leaf-function.ll | 2 +- llvm/test/CodeGen/X86/tls-local-dynamic.ll | 4 +- llvm/test/CodeGen/X86/tls-pic.ll | 8 +- llvm/test/CodeGen/X86/tls-pie.ll | 4 +- llvm/test/CodeGen/X86/tls.ll | 18 +- llvm/test/CodeGen/X86/tlv-1.ll | 4 +- llvm/test/CodeGen/X86/trunc-ext-ld-st.ll | 12 +- llvm/test/CodeGen/X86/trunc-to-bool.ll | 2 +- llvm/test/CodeGen/X86/twoaddr-pass-sink.ll | 6 +- llvm/test/CodeGen/X86/unaligned-32-byte-memops.ll | 38 +- llvm/test/CodeGen/X86/unaligned-spill-folding.ll | 2 +- llvm/test/CodeGen/X86/unwindraise.ll | 40 +- llvm/test/CodeGen/X86/use-add-flags.ll | 2 +- llvm/test/CodeGen/X86/v4i32load-crash.ll | 8 +- llvm/test/CodeGen/X86/v8i1-masks.ll | 10 +- llvm/test/CodeGen/X86/vaargs.ll | 4 +- llvm/test/CodeGen/X86/vararg_tailcall.ll | 28 +- llvm/test/CodeGen/X86/vec-loadsingles-alignment.ll | 16 +- llvm/test/CodeGen/X86/vec-trunc-store.ll | 4 +- llvm/test/CodeGen/X86/vec_align.ll | 8 +- llvm/test/CodeGen/X86/vec_anyext.ll | 24 +- llvm/test/CodeGen/X86/vec_extract-mmx.ll | 6 +- llvm/test/CodeGen/X86/vec_extract-sse4.ll | 8 +- llvm/test/CodeGen/X86/vec_extract.ll | 6 +- llvm/test/CodeGen/X86/vec_fpext.ll | 6 +- llvm/test/CodeGen/X86/vec_i64.ll | 4 +- llvm/test/CodeGen/X86/vec_ins_extract.ll | 12 +- llvm/test/CodeGen/X86/vec_insert-5.ll | 6 +- llvm/test/CodeGen/X86/vec_insert-mmx.ll | 4 +- llvm/test/CodeGen/X86/vec_loadsingles.ll | 52 +- llvm/test/CodeGen/X86/vec_logical.ll | 2 +- llvm/test/CodeGen/X86/vec_set-7.ll | 2 +- llvm/test/CodeGen/X86/vec_set-F.ll | 2 +- llvm/test/CodeGen/X86/vec_setcc-2.ll | 4 +- llvm/test/CodeGen/X86/vec_ss_load_fold.ll | 4 +- llvm/test/CodeGen/X86/vec_trunc_sext.ll | 2 +- llvm/test/CodeGen/X86/vec_zero.ll | 4 +- llvm/test/CodeGen/X86/vector-gep.ll | 4 +- llvm/test/CodeGen/X86/vector-intrinsics.ll | 8 +- llvm/test/CodeGen/X86/vector-sext.ll | 18 +- llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll | 16 +- llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll | 14 +- llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll | 10 +- llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll | 14 +- llvm/test/CodeGen/X86/vector-shuffle-combining.ll | 20 +- llvm/test/CodeGen/X86/vector-shuffle-mmx.ll | 4 +- llvm/test/CodeGen/X86/vector-shuffle-sse1.ll | 8 +- llvm/test/CodeGen/X86/vector-variable-idx2.ll | 8 +- llvm/test/CodeGen/X86/vector-zext.ll | 6 +- llvm/test/CodeGen/X86/vector-zmov.ll | 4 +- llvm/test/CodeGen/X86/vector.ll | 42 +- llvm/test/CodeGen/X86/viabs.ll | 2 +- llvm/test/CodeGen/X86/visibility2.ll | 2 +- llvm/test/CodeGen/X86/volatile.ll | 6 +- llvm/test/CodeGen/X86/vselect-avx.ll | 2 +- llvm/test/CodeGen/X86/vselect-minmax.ll | 768 ++++++++++----------- llvm/test/CodeGen/X86/vshift-5.ll | 4 +- llvm/test/CodeGen/X86/vshift-6.ll | 2 +- llvm/test/CodeGen/X86/weak_def_can_be_hidden.ll | 4 +- llvm/test/CodeGen/X86/widen_arith-1.ll | 16 +- llvm/test/CodeGen/X86/widen_arith-2.ll | 24 +- 
llvm/test/CodeGen/X86/widen_arith-3.ll | 16 +- llvm/test/CodeGen/X86/widen_arith-4.ll | 16 +- llvm/test/CodeGen/X86/widen_arith-5.ll | 16 +- llvm/test/CodeGen/X86/widen_arith-6.ll | 18 +- llvm/test/CodeGen/X86/widen_cast-1.ll | 14 +- llvm/test/CodeGen/X86/widen_cast-2.ll | 14 +- llvm/test/CodeGen/X86/widen_cast-4.ll | 24 +- llvm/test/CodeGen/X86/widen_conversions.ll | 2 +- llvm/test/CodeGen/X86/widen_load-0.ll | 4 +- llvm/test/CodeGen/X86/widen_load-1.ll | 4 +- llvm/test/CodeGen/X86/widen_load-2.ll | 44 +- llvm/test/CodeGen/X86/win32_sret.ll | 4 +- llvm/test/CodeGen/X86/win64_eh.ll | 16 +- llvm/test/CodeGen/X86/win_eh_prepare.ll | 2 +- llvm/test/CodeGen/X86/x32-function_pointer-1.ll | 4 +- llvm/test/CodeGen/X86/x86-64-gv-offset.ll | 4 +- llvm/test/CodeGen/X86/x86-64-jumps.ll | 4 +- llvm/test/CodeGen/X86/x86-64-mem.ll | 2 +- llvm/test/CodeGen/X86/x86-64-pic-4.ll | 2 +- llvm/test/CodeGen/X86/x86-64-pic-5.ll | 2 +- llvm/test/CodeGen/X86/x86-64-pic-6.ll | 2 +- llvm/test/CodeGen/X86/x86-64-ptr-arg-simple.ll | 2 +- llvm/test/CodeGen/X86/x86-64-sret-return.ll | 18 +- llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll | 2 +- .../CodeGen/X86/x86-mixed-alignment-dagcombine.ll | 8 +- llvm/test/CodeGen/X86/xop-intrinsics-x86_64.ll | 40 +- llvm/test/CodeGen/X86/zext-extract_subreg.ll | 2 +- llvm/test/CodeGen/X86/zext-sext.ll | 10 +- llvm/test/CodeGen/X86/zlib-longest-match.ll | 74 +- llvm/test/CodeGen/XCore/2009-01-08-Crash.ll | 2 +- llvm/test/CodeGen/XCore/2010-02-25-LSR-Crash.ll | 2 +- .../test/CodeGen/XCore/2011-01-31-DAGCombineBug.ll | 2 +- llvm/test/CodeGen/XCore/atomic.ll | 18 +- llvm/test/CodeGen/XCore/codemodel.ll | 18 +- llvm/test/CodeGen/XCore/dwarf_debug.ll | 2 +- llvm/test/CodeGen/XCore/exception.ll | 2 +- llvm/test/CodeGen/XCore/indirectbr.ll | 4 +- llvm/test/CodeGen/XCore/llvm-intrinsics.ll | 2 +- llvm/test/CodeGen/XCore/load.ll | 10 +- llvm/test/CodeGen/XCore/private.ll | 2 +- llvm/test/CodeGen/XCore/scavenging.ll | 26 +- llvm/test/CodeGen/XCore/trampoline.ll | 2 +- llvm/test/CodeGen/XCore/unaligned_load.ll | 6 +- llvm/test/CodeGen/XCore/unaligned_store_combine.ll | 2 +- llvm/test/CodeGen/XCore/zextfree.ll | 2 +- 2572 files changed, 22304 insertions(+), 22304 deletions(-) (limited to 'llvm/test/CodeGen') diff --git a/llvm/test/CodeGen/AArch64/128bit_load_store.ll b/llvm/test/CodeGen/AArch64/128bit_load_store.ll index 20911e8d44d..94fd386e0ea 100644 --- a/llvm/test/CodeGen/AArch64/128bit_load_store.ll +++ b/llvm/test/CodeGen/AArch64/128bit_load_store.ll @@ -12,7 +12,7 @@ define fp128 @test_load_f128(fp128* readonly %ptr) #2 { ; CHECK-LABEL: test_load_f128 ; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}] entry: - %0 = load fp128* %ptr, align 16 + %0 = load fp128, fp128* %ptr, align 16 ret fp128 %0 } @@ -33,7 +33,7 @@ define i128 @test_vldrq_p128(i128* readonly %ptr) #2 { entry: %0 = bitcast i128* %ptr to fp128* - %1 = load fp128* %0, align 16 + %1 = load fp128, fp128* %0, align 16 %2 = bitcast fp128 %1 to i128 ret i128 %2 } @@ -44,7 +44,7 @@ define void @test_ld_st_p128(i128* nocapture %ptr) #0 { ; CHECK-NEXT: str {{q[0-9]+}}, [{{x[0-9]+}}, #16] entry: %0 = bitcast i128* %ptr to fp128* - %1 = load fp128* %0, align 16 + %1 = load fp128, fp128* %0, align 16 %add.ptr = getelementptr inbounds i128, i128* %ptr, i64 1 %2 = bitcast i128* %add.ptr to fp128* store fp128 %1, fp128* %2, align 16 diff --git a/llvm/test/CodeGen/AArch64/PBQP-chain.ll b/llvm/test/CodeGen/AArch64/PBQP-chain.ll index 47298423131..3e5fa741c24 100644 --- a/llvm/test/CodeGen/AArch64/PBQP-chain.ll +++ 
b/llvm/test/CodeGen/AArch64/PBQP-chain.ll @@ -22,79 +22,79 @@ target triple = "aarch64" ; CHECK-ODD: fmadd {{d[0-9]*[13579]}}, {{d[0-9]*}}, {{d[0-9]*}}, {{d[0-9]*[13579]}} define void @fir(double* nocapture %rx, double* nocapture %ry, double* nocapture %c, double* nocapture %x, double* nocapture %y) { entry: - %0 = load double* %c, align 8 - %1 = load double* %x, align 8 + %0 = load double, double* %c, align 8 + %1 = load double, double* %x, align 8 %mul = fmul fast double %1, %0 - %2 = load double* %y, align 8 + %2 = load double, double* %y, align 8 %mul7 = fmul fast double %2, %0 %arrayidx.1 = getelementptr inbounds double, double* %c, i64 1 - %3 = load double* %arrayidx.1, align 8 + %3 = load double, double* %arrayidx.1, align 8 %arrayidx2.1 = getelementptr inbounds double, double* %x, i64 1 - %4 = load double* %arrayidx2.1, align 8 + %4 = load double, double* %arrayidx2.1, align 8 %mul.1 = fmul fast double %4, %3 %add.1 = fadd fast double %mul.1, %mul %arrayidx6.1 = getelementptr inbounds double, double* %y, i64 1 - %5 = load double* %arrayidx6.1, align 8 + %5 = load double, double* %arrayidx6.1, align 8 %mul7.1 = fmul fast double %5, %3 %add8.1 = fadd fast double %mul7.1, %mul7 %arrayidx.2 = getelementptr inbounds double, double* %c, i64 2 - %6 = load double* %arrayidx.2, align 8 + %6 = load double, double* %arrayidx.2, align 8 %arrayidx2.2 = getelementptr inbounds double, double* %x, i64 2 - %7 = load double* %arrayidx2.2, align 8 + %7 = load double, double* %arrayidx2.2, align 8 %mul.2 = fmul fast double %7, %6 %add.2 = fadd fast double %mul.2, %add.1 %arrayidx6.2 = getelementptr inbounds double, double* %y, i64 2 - %8 = load double* %arrayidx6.2, align 8 + %8 = load double, double* %arrayidx6.2, align 8 %mul7.2 = fmul fast double %8, %6 %add8.2 = fadd fast double %mul7.2, %add8.1 %arrayidx.3 = getelementptr inbounds double, double* %c, i64 3 - %9 = load double* %arrayidx.3, align 8 + %9 = load double, double* %arrayidx.3, align 8 %arrayidx2.3 = getelementptr inbounds double, double* %x, i64 3 - %10 = load double* %arrayidx2.3, align 8 + %10 = load double, double* %arrayidx2.3, align 8 %mul.3 = fmul fast double %10, %9 %add.3 = fadd fast double %mul.3, %add.2 %arrayidx6.3 = getelementptr inbounds double, double* %y, i64 3 - %11 = load double* %arrayidx6.3, align 8 + %11 = load double, double* %arrayidx6.3, align 8 %mul7.3 = fmul fast double %11, %9 %add8.3 = fadd fast double %mul7.3, %add8.2 %arrayidx.4 = getelementptr inbounds double, double* %c, i64 4 - %12 = load double* %arrayidx.4, align 8 + %12 = load double, double* %arrayidx.4, align 8 %arrayidx2.4 = getelementptr inbounds double, double* %x, i64 4 - %13 = load double* %arrayidx2.4, align 8 + %13 = load double, double* %arrayidx2.4, align 8 %mul.4 = fmul fast double %13, %12 %add.4 = fadd fast double %mul.4, %add.3 %arrayidx6.4 = getelementptr inbounds double, double* %y, i64 4 - %14 = load double* %arrayidx6.4, align 8 + %14 = load double, double* %arrayidx6.4, align 8 %mul7.4 = fmul fast double %14, %12 %add8.4 = fadd fast double %mul7.4, %add8.3 %arrayidx.5 = getelementptr inbounds double, double* %c, i64 5 - %15 = load double* %arrayidx.5, align 8 + %15 = load double, double* %arrayidx.5, align 8 %arrayidx2.5 = getelementptr inbounds double, double* %x, i64 5 - %16 = load double* %arrayidx2.5, align 8 + %16 = load double, double* %arrayidx2.5, align 8 %mul.5 = fmul fast double %16, %15 %add.5 = fadd fast double %mul.5, %add.4 %arrayidx6.5 = getelementptr inbounds double, double* %y, i64 5 - %17 = load double* 
%arrayidx6.5, align 8 + %17 = load double, double* %arrayidx6.5, align 8 %mul7.5 = fmul fast double %17, %15 %add8.5 = fadd fast double %mul7.5, %add8.4 %arrayidx.6 = getelementptr inbounds double, double* %c, i64 6 - %18 = load double* %arrayidx.6, align 8 + %18 = load double, double* %arrayidx.6, align 8 %arrayidx2.6 = getelementptr inbounds double, double* %x, i64 6 - %19 = load double* %arrayidx2.6, align 8 + %19 = load double, double* %arrayidx2.6, align 8 %mul.6 = fmul fast double %19, %18 %add.6 = fadd fast double %mul.6, %add.5 %arrayidx6.6 = getelementptr inbounds double, double* %y, i64 6 - %20 = load double* %arrayidx6.6, align 8 + %20 = load double, double* %arrayidx6.6, align 8 %mul7.6 = fmul fast double %20, %18 %add8.6 = fadd fast double %mul7.6, %add8.5 %arrayidx.7 = getelementptr inbounds double, double* %c, i64 7 - %21 = load double* %arrayidx.7, align 8 + %21 = load double, double* %arrayidx.7, align 8 %arrayidx2.7 = getelementptr inbounds double, double* %x, i64 7 - %22 = load double* %arrayidx2.7, align 8 + %22 = load double, double* %arrayidx2.7, align 8 %mul.7 = fmul fast double %22, %21 %add.7 = fadd fast double %mul.7, %add.6 %arrayidx6.7 = getelementptr inbounds double, double* %y, i64 7 - %23 = load double* %arrayidx6.7, align 8 + %23 = load double, double* %arrayidx6.7, align 8 %mul7.7 = fmul fast double %23, %21 %add8.7 = fadd fast double %mul7.7, %add8.6 store double %add.7, double* %rx, align 8 diff --git a/llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll b/llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll index e5d2d1ce13a..bd50b2d84b7 100644 --- a/llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll +++ b/llvm/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll @@ -3,11 +3,11 @@ ; CHECK-LABEL: test: define i32 @test(i32 %acc, i32* nocapture readonly %c) { entry: - %0 = load i32* %c, align 4 + %0 = load i32, i32* %c, align 4 ; CHECK-NOT: mov w{{[0-9]*}}, w0 %add = add nsw i32 %0, %acc %arrayidx1 = getelementptr inbounds i32, i32* %c, i64 1 - %1 = load i32* %arrayidx1, align 4 + %1 = load i32, i32* %arrayidx1, align 4 %add2 = add nsw i32 %add, %1 ret i32 %add2 } diff --git a/llvm/test/CodeGen/AArch64/PBQP-csr.ll b/llvm/test/CodeGen/AArch64/PBQP-csr.ll index 644bc25f55f..16d7f8cb7a5 100644 --- a/llvm/test/CodeGen/AArch64/PBQP-csr.ll +++ b/llvm/test/CodeGen/AArch64/PBQP-csr.ll @@ -23,15 +23,15 @@ entry: %na = getelementptr inbounds %rs, %rs* %r, i64 0, i32 0 %0 = bitcast double* %x.i to i8* call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 72, i32 8, i1 false) - %1 = load i32* %na, align 4 + %1 = load i32, i32* %na, align 4 %cmp70 = icmp sgt i32 %1, 0 br i1 %cmp70, label %for.body.lr.ph, label %for.end for.body.lr.ph: ; preds = %entry %fn = getelementptr inbounds %rs, %rs* %r, i64 0, i32 4 - %2 = load %v** %fn, align 8 + %2 = load %v*, %v** %fn, align 8 %fs = getelementptr inbounds %rs, %rs* %r, i64 0, i32 5 - %3 = load %v** %fs, align 8 + %3 = load %v*, %v** %fs, align 8 %4 = sext i32 %1 to i64 br label %for.body @@ -46,27 +46,27 @@ for.body: ; preds = %for.body.lr.ph, %fo %x1.i = getelementptr inbounds %v, %v* %3, i64 %indvars.iv, i32 0 %y.i56 = getelementptr inbounds %v, %v* %2, i64 %indvars.iv, i32 1 %10 = bitcast double* %x.i54 to <2 x double>* - %11 = load <2 x double>* %10, align 8 + %11 = load <2 x double>, <2 x double>* %10, align 8 %y2.i = getelementptr inbounds %v, %v* %3, i64 %indvars.iv, i32 1 %12 = bitcast double* %x1.i to <2 x double>* - %13 = load <2 x double>* %12, align 8 + %13 = load <2 x double>, <2 x double>* %12, align 8 %14 = fadd fast <2 x 
double> %13, %11 %z.i57 = getelementptr inbounds %v, %v* %2, i64 %indvars.iv, i32 2 - %15 = load double* %z.i57, align 8 + %15 = load double, double* %z.i57, align 8 %z4.i = getelementptr inbounds %v, %v* %3, i64 %indvars.iv, i32 2 - %16 = load double* %z4.i, align 8 + %16 = load double, double* %z4.i, align 8 %add5.i = fadd fast double %16, %15 %17 = fadd fast <2 x double> %6, %11 %18 = bitcast double* %x.i to <2 x double>* store <2 x double> %17, <2 x double>* %18, align 8 - %19 = load double* %x1.i, align 8 + %19 = load double, double* %x1.i, align 8 %20 = insertelement <2 x double> undef, double %15, i32 0 %21 = insertelement <2 x double> %20, double %19, i32 1 %22 = fadd fast <2 x double> %7, %21 %23 = bitcast double* %z.i to <2 x double>* store <2 x double> %22, <2 x double>* %23, align 8 %24 = bitcast double* %y2.i to <2 x double>* - %25 = load <2 x double>* %24, align 8 + %25 = load <2 x double>, <2 x double>* %24, align 8 %26 = fadd fast <2 x double> %8, %25 %27 = bitcast double* %y.i62 to <2 x double>* store <2 x double> %26, <2 x double>* %27, align 8 diff --git a/llvm/test/CodeGen/AArch64/Redundantstore.ll b/llvm/test/CodeGen/AArch64/Redundantstore.ll index 40be61d0765..b2072682cd9 100644 --- a/llvm/test/CodeGen/AArch64/Redundantstore.ll +++ b/llvm/test/CodeGen/AArch64/Redundantstore.ll @@ -8,7 +8,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" ; CHECK-NOT: stur define i8* @test(i32 %size) { entry: - %0 = load i8** @end_of_array, align 8 + %0 = load i8*, i8** @end_of_array, align 8 %conv = sext i32 %size to i64 %and = and i64 %conv, -8 %conv2 = trunc i64 %and to i32 diff --git a/llvm/test/CodeGen/AArch64/a57-csel.ll b/llvm/test/CodeGen/AArch64/a57-csel.ll index 9d16d1a0f10..f5496f77776 100644 --- a/llvm/test/CodeGen/AArch64/a57-csel.ll +++ b/llvm/test/CodeGen/AArch64/a57-csel.ll @@ -3,7 +3,7 @@ ; Check that the select is expanded into a branch sequence. 
define i64 @f(i64 %a, i64 %b, i64* %c, i64 %d, i64 %e) { ; CHECK: cbz - %x0 = load i64* %c + %x0 = load i64, i64* %c %x1 = icmp eq i64 %x0, 0 %x2 = select i1 %x1, i64 %a, i64 %b %x3 = add i64 %x2, %d diff --git a/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll b/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll index 4b3e6babd50..b0b83330389 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll @@ -9,7 +9,7 @@ entry: for.body: ; preds = %for.body, %entry %arrayidx5 = getelementptr inbounds i32, i32* null, i64 1, !dbg !43 - %0 = load i32* null, align 4, !dbg !45, !tbaa !46 + %0 = load i32, i32* null, align 4, !dbg !45, !tbaa !46 %s1 = sub nsw i32 0, %0, !dbg !50 %n1 = sext i32 %s1 to i64, !dbg !50 %arrayidx21 = getelementptr inbounds i32, i32* null, i64 3, !dbg !51 diff --git a/llvm/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll b/llvm/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll index 45532516650..b2ee517f886 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll @@ -9,7 +9,7 @@ define void @foo() { entry: ;CHECK-LABEL: foo: ;CHECK: __floatsisf - %0 = load i32* @x, align 4 + %0 = load i32, i32* @x, align 4 %conv = sitofp i32 %0 to float store float %conv, float* bitcast (i32* @t to float*), align 4 ret void diff --git a/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll b/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll index dace22ef2b2..b0e9d4aa770 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll @@ -29,15 +29,15 @@ target triple = "aarch64" define void @f1(double* nocapture readonly %p, double* nocapture %q) #0 { entry: - %0 = load double* %p, align 8 + %0 = load double, double* %p, align 8 %arrayidx1 = getelementptr inbounds double, double* %p, i64 1 - %1 = load double* %arrayidx1, align 8 + %1 = load double, double* %arrayidx1, align 8 %arrayidx2 = getelementptr inbounds double, double* %p, i64 2 - %2 = load double* %arrayidx2, align 8 + %2 = load double, double* %arrayidx2, align 8 %arrayidx3 = getelementptr inbounds double, double* %p, i64 3 - %3 = load double* %arrayidx3, align 8 + %3 = load double, double* %arrayidx3, align 8 %arrayidx4 = getelementptr inbounds double, double* %p, i64 4 - %4 = load double* %arrayidx4, align 8 + %4 = load double, double* %arrayidx4, align 8 %mul = fmul fast double %0, %1 %add = fadd fast double %mul, %4 %mul5 = fmul fast double %1, %2 @@ -48,11 +48,11 @@ entry: %add9 = fadd fast double %mul8, %sub store double %add9, double* %q, align 8 %arrayidx11 = getelementptr inbounds double, double* %p, i64 5 - %5 = load double* %arrayidx11, align 8 + %5 = load double, double* %arrayidx11, align 8 %arrayidx12 = getelementptr inbounds double, double* %p, i64 6 - %6 = load double* %arrayidx12, align 8 + %6 = load double, double* %arrayidx12, align 8 %arrayidx13 = getelementptr inbounds double, double* %p, i64 7 - %7 = load double* %arrayidx13, align 8 + %7 = load double, double* %arrayidx13, align 8 %mul15 = fmul fast double %6, %7 %mul16 = fmul fast double %0, %5 %add17 = fadd fast double %mul16, %mul15 @@ -81,21 +81,21 @@ entry: define void @f2(double* nocapture readonly %p, double* nocapture %q) #0 { entry: - %0 = load double* %p, align 8 + %0 = load double, double* %p, align 8 %arrayidx1 = 
getelementptr inbounds double, double* %p, i64 1 - %1 = load double* %arrayidx1, align 8 + %1 = load double, double* %arrayidx1, align 8 %arrayidx2 = getelementptr inbounds double, double* %p, i64 2 - %2 = load double* %arrayidx2, align 8 + %2 = load double, double* %arrayidx2, align 8 %arrayidx3 = getelementptr inbounds double, double* %p, i64 3 - %3 = load double* %arrayidx3, align 8 + %3 = load double, double* %arrayidx3, align 8 %arrayidx4 = getelementptr inbounds double, double* %p, i64 4 - %4 = load double* %arrayidx4, align 8 + %4 = load double, double* %arrayidx4, align 8 %arrayidx5 = getelementptr inbounds double, double* %p, i64 5 - %5 = load double* %arrayidx5, align 8 + %5 = load double, double* %arrayidx5, align 8 %arrayidx6 = getelementptr inbounds double, double* %p, i64 6 - %6 = load double* %arrayidx6, align 8 + %6 = load double, double* %arrayidx6, align 8 %arrayidx7 = getelementptr inbounds double, double* %p, i64 7 - %7 = load double* %arrayidx7, align 8 + %7 = load double, double* %arrayidx7, align 8 %mul = fmul fast double %0, %1 %add = fadd fast double %mul, %7 %mul8 = fmul fast double %5, %6 @@ -127,15 +127,15 @@ entry: define void @f3(double* nocapture readonly %p, double* nocapture %q) #0 { entry: - %0 = load double* %p, align 8 + %0 = load double, double* %p, align 8 %arrayidx1 = getelementptr inbounds double, double* %p, i64 1 - %1 = load double* %arrayidx1, align 8 + %1 = load double, double* %arrayidx1, align 8 %arrayidx2 = getelementptr inbounds double, double* %p, i64 2 - %2 = load double* %arrayidx2, align 8 + %2 = load double, double* %arrayidx2, align 8 %arrayidx3 = getelementptr inbounds double, double* %p, i64 3 - %3 = load double* %arrayidx3, align 8 + %3 = load double, double* %arrayidx3, align 8 %arrayidx4 = getelementptr inbounds double, double* %p, i64 4 - %4 = load double* %arrayidx4, align 8 + %4 = load double, double* %arrayidx4, align 8 %mul = fmul fast double %0, %1 %add = fadd fast double %mul, %4 %mul5 = fmul fast double %1, %2 @@ -176,21 +176,21 @@ declare void @g(...) 
#1 define void @f4(float* nocapture readonly %p, float* nocapture %q) #0 { entry: - %0 = load float* %p, align 4 + %0 = load float, float* %p, align 4 %arrayidx1 = getelementptr inbounds float, float* %p, i64 1 - %1 = load float* %arrayidx1, align 4 + %1 = load float, float* %arrayidx1, align 4 %arrayidx2 = getelementptr inbounds float, float* %p, i64 2 - %2 = load float* %arrayidx2, align 4 + %2 = load float, float* %arrayidx2, align 4 %arrayidx3 = getelementptr inbounds float, float* %p, i64 3 - %3 = load float* %arrayidx3, align 4 + %3 = load float, float* %arrayidx3, align 4 %arrayidx4 = getelementptr inbounds float, float* %p, i64 4 - %4 = load float* %arrayidx4, align 4 + %4 = load float, float* %arrayidx4, align 4 %arrayidx5 = getelementptr inbounds float, float* %p, i64 5 - %5 = load float* %arrayidx5, align 4 + %5 = load float, float* %arrayidx5, align 4 %arrayidx6 = getelementptr inbounds float, float* %p, i64 6 - %6 = load float* %arrayidx6, align 4 + %6 = load float, float* %arrayidx6, align 4 %arrayidx7 = getelementptr inbounds float, float* %p, i64 7 - %7 = load float* %arrayidx7, align 4 + %7 = load float, float* %arrayidx7, align 4 %mul = fmul fast float %0, %1 %add = fadd fast float %mul, %7 %mul8 = fmul fast float %5, %6 @@ -222,15 +222,15 @@ entry: define void @f5(float* nocapture readonly %p, float* nocapture %q) #0 { entry: - %0 = load float* %p, align 4 + %0 = load float, float* %p, align 4 %arrayidx1 = getelementptr inbounds float, float* %p, i64 1 - %1 = load float* %arrayidx1, align 4 + %1 = load float, float* %arrayidx1, align 4 %arrayidx2 = getelementptr inbounds float, float* %p, i64 2 - %2 = load float* %arrayidx2, align 4 + %2 = load float, float* %arrayidx2, align 4 %arrayidx3 = getelementptr inbounds float, float* %p, i64 3 - %3 = load float* %arrayidx3, align 4 + %3 = load float, float* %arrayidx3, align 4 %arrayidx4 = getelementptr inbounds float, float* %p, i64 4 - %4 = load float* %arrayidx4, align 4 + %4 = load float, float* %arrayidx4, align 4 %mul = fmul fast float %0, %1 %add = fadd fast float %mul, %4 %mul5 = fmul fast float %1, %2 @@ -264,15 +264,15 @@ if.end: ; preds = %if.then, %entry define void @f6(double* nocapture readonly %p, double* nocapture %q) #0 { entry: - %0 = load double* %p, align 8 + %0 = load double, double* %p, align 8 %arrayidx1 = getelementptr inbounds double, double* %p, i64 1 - %1 = load double* %arrayidx1, align 8 + %1 = load double, double* %arrayidx1, align 8 %arrayidx2 = getelementptr inbounds double, double* %p, i64 2 - %2 = load double* %arrayidx2, align 8 + %2 = load double, double* %arrayidx2, align 8 %arrayidx3 = getelementptr inbounds double, double* %p, i64 3 - %3 = load double* %arrayidx3, align 8 + %3 = load double, double* %arrayidx3, align 8 %arrayidx4 = getelementptr inbounds double, double* %p, i64 4 - %4 = load double* %arrayidx4, align 8 + %4 = load double, double* %arrayidx4, align 8 %mul = fmul fast double %0, %1 %add = fadd fast double %mul, %4 %mul5 = fmul fast double %1, %2 @@ -299,15 +299,15 @@ declare double @hh(double) #1 define void @f7(double* nocapture readonly %p, double* nocapture %q) #0 { entry: - %0 = load double* %p, align 8 + %0 = load double, double* %p, align 8 %arrayidx1 = getelementptr inbounds double, double* %p, i64 1 - %1 = load double* %arrayidx1, align 8 + %1 = load double, double* %arrayidx1, align 8 %arrayidx2 = getelementptr inbounds double, double* %p, i64 2 - %2 = load double* %arrayidx2, align 8 + %2 = load double, double* %arrayidx2, align 8 %arrayidx3 = getelementptr 
inbounds double, double* %p, i64 3 - %3 = load double* %arrayidx3, align 8 + %3 = load double, double* %arrayidx3, align 8 %arrayidx4 = getelementptr inbounds double, double* %p, i64 4 - %4 = load double* %arrayidx4, align 8 + %4 = load double, double* %arrayidx4, align 8 %mul = fmul fast double %0, %1 %add = fadd fast double %mul, %4 %mul5 = fmul fast double %1, %2 diff --git a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll index 1ba54b275c7..0c6be21f890 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll @@ -11,7 +11,7 @@ invoke.cont145: br i1 %or.cond, label %if.then274, label %invoke.cont145 if.then274: - %0 = load i32* null, align 4 + %0 = load i32, i32* null, align 4 br i1 undef, label %invoke.cont291, label %if.else313 invoke.cont291: diff --git a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll index 7ac83b8928c..07e0ba654d2 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-address-type-promotion.ll @@ -15,11 +15,11 @@ entry: %add = add nsw i32 %i, 1 %idxprom = sext i32 %add to i64 %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom - %0 = load i32* %arrayidx, align 4 + %0 = load i32, i32* %arrayidx, align 4 %add1 = add nsw i32 %i, 2 %idxprom2 = sext i32 %add1 to i64 %arrayidx3 = getelementptr inbounds i32, i32* %a, i64 %idxprom2 - %1 = load i32* %arrayidx3, align 4 + %1 = load i32, i32* %arrayidx3, align 4 %add4 = add nsw i32 %1, %0 %idxprom5 = sext i32 %i to i64 %arrayidx6 = getelementptr inbounds i32, i32* %a, i64 %idxprom5 diff --git a/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll b/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll index 01642a4f3bf..fb41156c09d 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-be-bv.ll @@ -8,7 +8,7 @@ define i16 @movi_modimm_t1() nounwind { ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -20,7 +20,7 @@ define i16 @movi_modimm_t2() nounwind { ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, lsl #8 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -32,7 +32,7 @@ define i16 @movi_modimm_t3() nounwind { ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, lsl #16 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -44,7 +44,7 @@ define i16 @movi_modimm_t4() nounwind { ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, lsl #24 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -56,7 
+56,7 @@ define i16 @movi_modimm_t5() nounwind { ; CHECK-NEXT: movi v[[REG2:[0-9]+]].8h, #0x1 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -68,7 +68,7 @@ define i16 @movi_modimm_t6() nounwind { ; CHECK-NEXT: movi v[[REG2:[0-9]+]].8h, #0x1, lsl #8 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -80,7 +80,7 @@ define i16 @movi_modimm_t7() nounwind { ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, msl #8 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -92,7 +92,7 @@ define i16 @movi_modimm_t8() nounwind { ; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, msl #16 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -104,7 +104,7 @@ define i16 @movi_modimm_t9() nounwind { ; CHECK-NEXT: movi v[[REG2:[0-9]+]].16b, #0x1 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -116,7 +116,7 @@ define i16 @movi_modimm_t10() nounwind { ; CHECK-NEXT: movi v[[REG2:[0-9]+]].2d, #0x00ffff0000ffff ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -128,7 +128,7 @@ define i16 @fmov_modimm_t11() nounwind { ; CHECK-NEXT: fmov v[[REG2:[0-9]+]].4s, #3.00000000 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -140,7 +140,7 @@ define i16 @fmov_modimm_t12() nounwind { ; CHECK-NEXT: fmov v[[REG2:[0-9]+]].2d, #0.17968750 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -152,7 +152,7 @@ define i16 @mvni_modimm_t1() nounwind { ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -164,7 +164,7 @@ define i16 @mvni_modimm_t2() nounwind { ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, lsl #8 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; 
CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -176,7 +176,7 @@ define i16 @mvni_modimm_t3() nounwind { ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, lsl #16 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -188,7 +188,7 @@ define i16 @mvni_modimm_t4() nounwind { ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, lsl #24 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -200,7 +200,7 @@ define i16 @mvni_modimm_t5() nounwind { ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].8h, #0x1 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -212,7 +212,7 @@ define i16 @mvni_modimm_t6() nounwind { ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].8h, #0x1, lsl #8 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -224,7 +224,7 @@ define i16 @mvni_modimm_t7() nounwind { ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, msl #8 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -236,7 +236,7 @@ define i16 @mvni_modimm_t8() nounwind { ; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, msl #16 ; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = add <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -247,7 +247,7 @@ define i16 @bic_modimm_t1() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] ; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #0x1 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = and <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -258,7 +258,7 @@ define i16 @bic_modimm_t2() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] ; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #0x1, lsl #8 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = and <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -269,7 +269,7 @@ define i16 @bic_modimm_t3() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] ; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #0x1, lsl #16 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = and <8 x i16> %in, %el = extractelement <8 x i16> 
%rv, i32 0 ret i16 %el @@ -280,7 +280,7 @@ define i16 @bic_modimm_t4() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] ; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #0x1, lsl #24 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = and <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -291,7 +291,7 @@ define i16 @bic_modimm_t5() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] ; CHECK-NEXT: bic v[[REG2:[0-9]+]].8h, #0x1 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = and <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -302,7 +302,7 @@ define i16 @bic_modimm_t6() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] ; CHECK-NEXT: bic v[[REG2:[0-9]+]].8h, #0x1, lsl #8 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = and <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -313,7 +313,7 @@ define i16 @orr_modimm_t1() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] ; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #0x1 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = or <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -324,7 +324,7 @@ define i16 @orr_modimm_t2() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] ; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #0x1, lsl #8 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = or <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -335,7 +335,7 @@ define i16 @orr_modimm_t3() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] ; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #0x1, lsl #16 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = or <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -346,7 +346,7 @@ define i16 @orr_modimm_t4() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] ; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #0x1, lsl #24 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = or <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -357,7 +357,7 @@ define i16 @orr_modimm_t5() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] ; CHECK-NEXT: orr v[[REG2:[0-9]+]].8h, #0x1 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = or <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el @@ -368,7 +368,7 @@ define i16 @orr_modimm_t6() nounwind { ; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}] ; CHECK-NEXT: orr v[[REG2:[0-9]+]].8h, #0x1, lsl #8 ; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0] - %in = load <8 x i16>* @vec_v8i16 + %in = load <8 x i16>, <8 x i16>* @vec_v8i16 %rv = or <8 x i16> %in, %el = extractelement <8 x i16> %rv, i32 0 ret i16 %el diff --git a/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll b/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll index 64d91eea973..2170e4b902d 100644 --- 
a/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll @@ -22,7 +22,7 @@ target triple = "aarch64--linux-gnu" define i64 @f_load_madd_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 { entry: - %0 = load i64* %c, align 8 + %0 = load i64, i64* %c, align 8 %mul = mul nsw i64 %0, %b %add = add nsw i64 %mul, %a ret i64 %add @@ -41,7 +41,7 @@ entry: define i32 @f_load_madd_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 { entry: - %0 = load i32* %c, align 4 + %0 = load i32, i32* %c, align 4 %mul = mul nsw i32 %0, %b %add = add nsw i32 %mul, %a ret i32 %add @@ -56,7 +56,7 @@ entry: define i64 @f_load_msub_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 { entry: - %0 = load i64* %c, align 8 + %0 = load i64, i64* %c, align 8 %mul = mul nsw i64 %0, %b %sub = sub nsw i64 %a, %mul ret i64 %sub @@ -72,7 +72,7 @@ entry: define i32 @f_load_msub_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 { entry: - %0 = load i32* %c, align 4 + %0 = load i32, i32* %c, align 4 %mul = mul nsw i32 %0, %b %sub = sub nsw i32 %a, %mul ret i32 %sub @@ -87,7 +87,7 @@ entry: define i64 @f_load_mul_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 { entry: - %0 = load i64* %c, align 8 + %0 = load i64, i64* %c, align 8 %mul = mul nsw i64 %0, %b ret i64 %mul } @@ -101,7 +101,7 @@ entry: define i32 @f_load_mul_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 { entry: - %0 = load i32* %c, align 4 + %0 = load i32, i32* %c, align 4 %mul = mul nsw i32 %0, %b ret i32 %mul } @@ -115,7 +115,7 @@ entry: define i64 @f_load_mneg_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 { entry: - %0 = load i64* %c, align 8 + %0 = load i64, i64* %c, align 8 %mul = sub i64 0, %b %sub = mul i64 %0, %mul ret i64 %sub @@ -133,7 +133,7 @@ entry: define i32 @f_load_mneg_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 { entry: - %0 = load i32* %c, align 4 + %0 = load i32, i32* %c, align 4 %mul = sub i32 0, %b %sub = mul i32 %0, %mul ret i32 %sub @@ -154,7 +154,7 @@ entry: %conv1 = sext i32 %c to i64 %mul = mul nsw i64 %conv1, %conv %add = add nsw i64 %mul, %a - %0 = load i32* %d, align 4 + %0 = load i32, i32* %d, align 4 %conv2 = sext i32 %0 to i64 %add3 = add nsw i64 %add, %conv2 ret i64 %add3 @@ -174,7 +174,7 @@ entry: %conv1 = sext i32 %c to i64 %mul = mul nsw i64 %conv1, %conv %sub = sub i64 %a, %mul - %0 = load i32* %d, align 4 + %0 = load i32, i32* %d, align 4 %conv2 = sext i32 %0 to i64 %add = add nsw i64 %sub, %conv2 ret i64 %add @@ -193,7 +193,7 @@ entry: %conv = sext i32 %b to i64 %conv1 = sext i32 %c to i64 %mul = mul nsw i64 %conv1, %conv - %0 = load i32* %d, align 4 + %0 = load i32, i32* %d, align 4 %conv2 = sext i32 %0 to i64 %div = sdiv i64 %mul, %conv2 ret i64 %div @@ -212,7 +212,7 @@ entry: %conv1 = sext i32 %c to i64 %mul = sub nsw i64 0, %conv %sub = mul i64 %conv1, %mul - %0 = load i32* %d, align 4 + %0 = load i32, i32* %d, align 4 %conv2 = sext i32 %0 to i64 %div = sdiv i64 %sub, %conv2 ret i64 %div @@ -229,7 +229,7 @@ entry: %conv1 = zext i32 %c to i64 %mul = mul i64 %conv1, %conv %add = add i64 %mul, %a - %0 = load i32* %d, align 4 + %0 = load i32, i32* %d, align 4 %conv2 = zext i32 %0 to i64 %add3 = add i64 %add, %conv2 ret i64 %add3 @@ -249,7 +249,7 @@ entry: %conv1 = zext i32 %c to i64 %mul = mul i64 %conv1, %conv %sub = sub i64 %a, %mul - %0 = load i32* %d, align 4 + %0 = load i32, i32* %d, align 4 %conv2 = zext i32 %0 to i64 %add = add i64 %sub, %conv2 ret i64 %add @@ -268,7 +268,7 @@ entry: %conv = zext i32 %b to i64 %conv1 = zext i32 %c to i64 
%mul = mul i64 %conv1, %conv - %0 = load i32* %d, align 4 + %0 = load i32, i32* %d, align 4 %conv2 = zext i32 %0 to i64 %div = udiv i64 %mul, %conv2 ret i64 %div @@ -287,7 +287,7 @@ entry: %conv1 = zext i32 %c to i64 %mul = sub nsw i64 0, %conv %sub = mul i64 %conv1, %mul - %0 = load i32* %d, align 4 + %0 = load i32, i32* %d, align 4 %conv2 = zext i32 %0 to i64 %div = udiv i64 %sub, %conv2 ret i64 %div @@ -300,7 +300,7 @@ entry: define i64 @f_store_madd_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 { entry: - %0 = load i64* %cp, align 8 + %0 = load i64, i64* %cp, align 8 store i64 %a, i64* %e, align 8 %mul = mul nsw i64 %0, %b %add = add nsw i64 %mul, %a @@ -317,7 +317,7 @@ entry: define i32 @f_store_madd_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 { entry: - %0 = load i32* %cp, align 4 + %0 = load i32, i32* %cp, align 4 store i32 %a, i32* %e, align 4 %mul = mul nsw i32 %0, %b %add = add nsw i32 %mul, %a @@ -333,7 +333,7 @@ entry: define i64 @f_store_msub_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 { entry: - %0 = load i64* %cp, align 8 + %0 = load i64, i64* %cp, align 8 store i64 %a, i64* %e, align 8 %mul = mul nsw i64 %0, %b %sub = sub nsw i64 %a, %mul @@ -350,7 +350,7 @@ entry: define i32 @f_store_msub_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 { entry: - %0 = load i32* %cp, align 4 + %0 = load i32, i32* %cp, align 4 store i32 %a, i32* %e, align 4 %mul = mul nsw i32 %0, %b %sub = sub nsw i32 %a, %mul @@ -366,7 +366,7 @@ entry: define i64 @f_store_mul_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 { entry: - %0 = load i64* %cp, align 8 + %0 = load i64, i64* %cp, align 8 store i64 %a, i64* %e, align 8 %mul = mul nsw i64 %0, %b ret i64 %mul @@ -381,7 +381,7 @@ entry: define i32 @f_store_mul_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 { entry: - %0 = load i32* %cp, align 4 + %0 = load i32, i32* %cp, align 4 store i32 %a, i32* %e, align 4 %mul = mul nsw i32 %0, %b ret i32 %mul @@ -396,7 +396,7 @@ entry: define i64 @f_prefetch_madd_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 { entry: - %0 = load i64* %cp, align 8 + %0 = load i64, i64* %cp, align 8 %1 = bitcast i64* %e to i8* tail call void @llvm.prefetch(i8* %1, i32 0, i32 0, i32 1) %mul = mul nsw i64 %0, %b @@ -415,7 +415,7 @@ declare void @llvm.prefetch(i8* nocapture, i32, i32, i32) #2 define i32 @f_prefetch_madd_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 { entry: - %0 = load i32* %cp, align 4 + %0 = load i32, i32* %cp, align 4 %1 = bitcast i32* %e to i8* tail call void @llvm.prefetch(i8* %1, i32 1, i32 0, i32 1) %mul = mul nsw i32 %0, %b @@ -431,7 +431,7 @@ entry: define i64 @f_prefetch_msub_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 { entry: - %0 = load i64* %cp, align 8 + %0 = load i64, i64* %cp, align 8 %1 = bitcast i64* %e to i8* tail call void @llvm.prefetch(i8* %1, i32 0, i32 1, i32 1) %mul = mul nsw i64 %0, %b @@ -448,7 +448,7 @@ entry: define i32 @f_prefetch_msub_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 { entry: - %0 = load i32* %cp, align 4 + %0 = load i32, i32* %cp, align 4 %1 = bitcast i32* %e to i8* tail call void @llvm.prefetch(i8* %1, i32 1, i32 1, i32 1) %mul = mul nsw i32 %0, %b @@ -464,7 +464,7 @@ entry: define i64 @f_prefetch_mul_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 { entry: - %0 = load i64* %cp, align 8 + %0 = load i64, i64* %cp, 
align 8 %1 = bitcast i64* %e to i8* tail call void @llvm.prefetch(i8* %1, i32 0, i32 3, i32 1) %mul = mul nsw i64 %0, %b @@ -479,7 +479,7 @@ entry: define i32 @f_prefetch_mul_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 { entry: - %0 = load i32* %cp, align 4 + %0 = load i32, i32* %cp, align 4 %1 = bitcast i32* %e to i8* tail call void @llvm.prefetch(i8* %1, i32 1, i32 3, i32 1) %mul = mul nsw i32 %0, %b @@ -494,7 +494,7 @@ entry: define i64 @fall_through(i64 %a, i64 %b, i64* nocapture readonly %c) #0 { entry: - %0 = load i64* %c, align 8 + %0 = load i64, i64* %c, align 8 br label %block1 block1: diff --git a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll index 98d2edfd9db..ce6c8a06c64 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-gep-opt.ll @@ -15,13 +15,13 @@ target triple = "aarch64-linux-gnueabi" ; elimilate the common subexpression for the second use. define void @test_GEP_CSE([240 x %struct]* %string, i32* %adj, i32 %lib, i64 %idxprom) { %liberties = getelementptr [240 x %struct], [240 x %struct]* %string, i64 1, i64 %idxprom, i32 3 - %1 = load i32* %liberties, align 4 + %1 = load i32, i32* %liberties, align 4 %cmp = icmp eq i32 %1, %lib br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry %origin = getelementptr [240 x %struct], [240 x %struct]* %string, i64 1, i64 %idxprom, i32 2 - %2 = load i32* %origin, align 4 + %2 = load i32, i32* %origin, align 4 store i32 %2, i32* %adj, align 4 br label %if.end @@ -66,9 +66,9 @@ if.end: ; preds = %if.then, %entry ; use. define void @test_GEP_across_BB(%class.my* %this, i64 %idx) { %1 = getelementptr %class.my, %class.my* %this, i64 0, i32 3, i64 %idx, i32 1 - %2 = load i32* %1, align 4 + %2 = load i32, i32* %1, align 4 %3 = getelementptr %class.my, %class.my* %this, i64 0, i32 3, i64 %idx, i32 2 - %4 = load i32* %3, align 4 + %4 = load i32, i32* %3, align 4 %5 = icmp eq i32 %2, %4 br i1 %5, label %if.true, label %exit diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll index 92582d7d25e..ec0e2de92d0 100644 --- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll +++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll @@ -3,8 +3,8 @@ define <8 x i16> @smull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: smull_v8i8_v8i16: ; CHECK: smull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b - %tmp1 = load <8 x i8>* %A - %tmp2 = load <8 x i8>* %B + %tmp1 = load <8 x i8>, <8 x i8>* %A + %tmp2 = load <8 x i8>, <8 x i8>* %B %tmp3 = sext <8 x i8> %tmp1 to <8 x i16> %tmp4 = sext <8 x i8> %tmp2 to <8 x i16> %tmp5 = mul <8 x i16> %tmp3, %tmp4 @@ -14,8 +14,8 @@ define <8 x i16> @smull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <4 x i32> @smull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: smull_v4i16_v4i32: ; CHECK: smull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h - %tmp1 = load <4 x i16>* %A - %tmp2 = load <4 x i16>* %B + %tmp1 = load <4 x i16>, <4 x i16>* %A + %tmp2 = load <4 x i16>, <4 x i16>* %B %tmp3 = sext <4 x i16> %tmp1 to <4 x i32> %tmp4 = sext <4 x i16> %tmp2 to <4 x i32> %tmp5 = mul <4 x i32> %tmp3, %tmp4 @@ -25,8 +25,8 @@ define <4 x i32> @smull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <2 x i64> @smull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: smull_v2i32_v2i64: ; CHECK: smull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s - %tmp1 = load <2 x i32>* %A - %tmp2 = load <2 x i32>* %B + 
%tmp1 = load <2 x i32>, <2 x i32>* %A + %tmp2 = load <2 x i32>, <2 x i32>* %B %tmp3 = sext <2 x i32> %tmp1 to <2 x i64> %tmp4 = sext <2 x i32> %tmp2 to <2 x i64> %tmp5 = mul <2 x i64> %tmp3, %tmp4 @@ -36,8 +36,8 @@ define <2 x i64> @smull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind { define <8 x i16> @umull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind { ; CHECK-LABEL: umull_v8i8_v8i16: ; CHECK: umull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b - %tmp1 = load <8 x i8>* %A - %tmp2 = load <8 x i8>* %B + %tmp1 = load <8 x i8>, <8 x i8>* %A + %tmp2 = load <8 x i8>, <8 x i8>* %B %tmp3 = zext <8 x i8> %tmp1 to <8 x i16> %tmp4 = zext <8 x i8> %tmp2 to <8 x i16> %tmp5 = mul <8 x i16> %tmp3, %tmp4 @@ -47,8 +47,8 @@ define <8 x i16> @umull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <4 x i32> @umull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind { ; CHECK-LABEL: umull_v4i16_v4i32: ; CHECK: umull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h - %tmp1 = load <4 x i16>* %A - %tmp2 = load <4 x i16>* %B + %tmp1 = load <4 x i16>, <4 x i16>* %A + %tmp2 = load <4 x i16>, <4 x i16>* %B %tmp3 = zext <4 x i16> %tmp1 to <4 x i32> %tmp4 = zext <4 x i16> %tmp2 to <4 x i32> %tmp5 = mul <4 x i32> %tmp3, %tmp4 @@ -58,8 +58,8 @@ define <4 x i32> @umull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <2 x i64> @umull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind { ; CHECK-LABEL: umull_v2i32_v2i64: ; CHECK: umull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s - %tmp1 = load <2 x i32>* %A - %tmp2 = load <2 x i32>* %B + %tmp1 = load <2 x i32>, <2 x i32>* %A + %tmp2 = load <2 x i32>, <2 x i32>* %B %tmp3 = zext <2 x i32> %tmp1 to <2 x i64> %tmp4 = zext <2 x i32> %tmp2 to <2 x i64> %tmp5 = mul <2 x i64> %tmp3, %tmp4 @@ -69,9 +69,9 @@ define <2 x i64> @umull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind { define <8 x i16> @smlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind { ; CHECK-LABEL: smlal_v8i8_v8i16: ; CHECK: smlal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b - %tmp1 = load <8 x i16>* %A - %tmp2 = load <8 x i8>* %B - %tmp3 = load <8 x i8>* %C + %tmp1 = load <8 x i16>, <8 x i16>* %A + %tmp2 = load <8 x i8>, <8 x i8>* %B + %tmp3 = load <8 x i8>, <8 x i8>* %C %tmp4 = sext <8 x i8> %tmp2 to <8 x i16> %tmp5 = sext <8 x i8> %tmp3 to <8 x i16> %tmp6 = mul <8 x i16> %tmp4, %tmp5 @@ -82,9 +82,9 @@ define <8 x i16> @smlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no define <4 x i32> @smlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind { ; CHECK-LABEL: smlal_v4i16_v4i32: ; CHECK: smlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h - %tmp1 = load <4 x i32>* %A - %tmp2 = load <4 x i16>* %B - %tmp3 = load <4 x i16>* %C + %tmp1 = load <4 x i32>, <4 x i32>* %A + %tmp2 = load <4 x i16>, <4 x i16>* %B + %tmp3 = load <4 x i16>, <4 x i16>* %C %tmp4 = sext <4 x i16> %tmp2 to <4 x i32> %tmp5 = sext <4 x i16> %tmp3 to <4 x i32> %tmp6 = mul <4 x i32> %tmp4, %tmp5 @@ -95,9 +95,9 @@ define <4 x i32> @smlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) define <2 x i64> @smlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind { ; CHECK-LABEL: smlal_v2i32_v2i64: ; CHECK: smlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s - %tmp1 = load <2 x i64>* %A - %tmp2 = load <2 x i32>* %B - %tmp3 = load <2 x i32>* %C + %tmp1 = load <2 x i64>, <2 x i64>* %A + %tmp2 = load <2 x i32>, <2 x i32>* %B + %tmp3 = load <2 x i32>, <2 x i32>* %C %tmp4 = sext <2 x i32> %tmp2 to <2 x i64> %tmp5 = sext <2 x i32> %tmp3 to <2 x i64> %tmp6 = 
mul <2 x i64> %tmp4, %tmp5 @@ -108,9 +108,9 @@ define <2 x i64> @smlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) define <8 x i16> @umlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind { ; CHECK-LABEL: umlal_v8i8_v8i16: ; CHECK: umlal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b - %tmp1 = load <8 x i16>* %A - %tmp2 = load <8 x i8>* %B - %tmp3 = load <8 x i8>* %C + %tmp1 = load <8 x i16>, <8 x i16>* %A + %tmp2 = load <8 x i8>, <8 x i8>* %B + %tmp3 = load <8 x i8>, <8 x i8>* %C %tmp4 = zext <8 x i8> %tmp2 to <8 x i16> %tmp5 = zext <8 x i8> %tmp3 to <8 x i16> %tmp6 = mul <8 x i16> %tmp4, %tmp5 @@ -121,9 +121,9 @@ define <8 x i16> @umlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no define <4 x i32> @umlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind { ; CHECK-LABEL: umlal_v4i16_v4i32: ; CHECK: umlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h - %tmp1 = load <4 x i32>* %A - %tmp2 = load <4 x i16>* %B - %tmp3 = load <4 x i16>* %C + %tmp1 = load <4 x i32>, <4 x i32>* %A + %tmp2 = load <4 x i16>, <4 x i16>* %B + %tmp3 = load <4 x i16>, <4 x i16>* %C %tmp4 = zext <4 x i16> %tmp2 to <4 x i32> %tmp5 = zext <4 x i16> %tmp3 to <4 x i32> %tmp6 = mul <4 x i32> %tmp4, %tmp5 @@ -134,9 +134,9 @@ define <4 x i32> @umlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) define <2 x i64> @umlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind { ; CHECK-LABEL: umlal_v2i32_v2i64: ; CHECK: umlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s - %tmp1 = load <2 x i64>* %A - %tmp2 = load <2 x i32>* %B - %tmp3 = load <2 x i32>* %C + %tmp1 = load <2 x i64>, <2 x i64>* %A + %tmp2 = load <2 x i32>, <2 x i32>* %B + %tmp3 = load <2 x i32>, <2 x i32>* %C %tmp4 = zext <2 x i32> %tmp2 to <2 x i64> %tmp5 = zext <2 x i32> %tmp3 to <2 x i64> %tmp6 = mul <2 x i64> %tmp4, %tmp5 @@ -147,9 +147,9 @@ define <2 x i64> @umlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) define <8 x i16> @smlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind { ; CHECK-LABEL: smlsl_v8i8_v8i16: ; CHECK: smlsl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b - %tmp1 = load <8 x i16>* %A - %tmp2 = load <8 x i8>* %B - %tmp3 = load <8 x i8>* %C + %tmp1 = load <8 x i16>, <8 x i16>* %A + %tmp2 = load <8 x i8>, <8 x i8>* %B + %tmp3 = load <8 x i8>, <8 x i8>* %C %tmp4 = sext <8 x i8> %tmp2 to <8 x i16> %tmp5 = sext <8 x i8> %tmp3 to <8 x i16> %tmp6 = mul <8 x i16> %tmp4, %tmp5 @@ -160,9 +160,9 @@ define <8 x i16> @smlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no define <4 x i32> @smlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind { ; CHECK-LABEL: smlsl_v4i16_v4i32: ; CHECK: smlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h - %tmp1 = load <4 x i32>* %A - %tmp2 = load <4 x i16>* %B - %tmp3 = load <4 x i16>* %C + %tmp1 = load <4 x i32>, <4 x i32>* %A + %tmp2 = load <4 x i16>, <4 x i16>* %B + %tmp3 = load <4 x i16>, <4 x i16>* %C %tmp4 = sext <4 x i16> %tmp2 to <4 x i32> %tmp5 = sext <4 x i16> %tmp3 to <4 x i32> %tmp6 = mul <4 x i32> %tmp4, %tmp5 @@ -173,9 +173,9 @@ define <4 x i32> @smlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) define <2 x i64> @smlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind { ; CHECK-LABEL: smlsl_v2i32_v2i64: ; CHECK: smlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s - %tmp1 = load <2 x i64>* %A - %tmp2 = load <2 x i32>* %B - %tmp3 = load <2 x i32>* %C + %tmp1 = load <2 x i64>, <2 x i64>* %A + %tmp2 = load <2 x i32>, <2 x i32>* %B + %tmp3 = load <2 x 
i32>, <2 x i32>* %C %tmp4 = sext <2 x i32> %tmp2 to <2 x i64> %tmp5 = sext <2 x i32> %tmp3 to <2 x i64> %tmp6 = mul <2 x i64> %tmp4, %tmp5 @@ -186,9 +186,9 @@ define <2 x i64> @smlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) define <8 x i16> @umlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind { ; CHECK-LABEL: umlsl_v8i8_v8i16: ; CHECK: umlsl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b - %tmp1 = load <8 x i16>* %A - %tmp2 = load <8 x i8>* %B - %tmp3 = load <8 x i8>* %C + %tmp1 = load <8 x i16>, <8 x i16>* %A + %tmp2 = load <8 x i8>, <8 x i8>* %B + %tmp3 = load <8 x i8>, <8 x i8>* %C %tmp4 = zext <8 x i8> %tmp2 to <8 x i16> %tmp5 = zext <8 x i8> %tmp3 to <8 x i16> %tmp6 = mul <8 x i16> %tmp4, %tmp5 @@ -199,9 +199,9 @@ define <8 x i16> @umlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) no define <4 x i32> @umlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind { ; CHECK-LABEL: umlsl_v4i16_v4i32: ; CHECK: umlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h - %tmp1 = load <4 x i32>* %A - %tmp2 = load <4 x i16>* %B - %tmp3 = load <4 x i16>* %C + %tmp1 = load <4 x i32>, <4 x i32>* %A + %tmp2 = load <4 x i16>, <4 x i16>* %B + %tmp3 = load <4 x i16>, <4 x i16>* %C %tmp4 = zext <4 x i16> %tmp2 to <4 x i32> %tmp5 = zext <4 x i16> %tmp3 to <4 x i32> %tmp6 = mul <4 x i32> %tmp4, %tmp5 @@ -212,9 +212,9 @@ define <4 x i32> @umlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) define <2 x i64> @umlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind { ; CHECK-LABEL: umlsl_v2i32_v2i64: ; CHECK: umlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s - %tmp1 = load <2 x i64>* %A - %tmp2 = load <2 x i32>* %B - %tmp3 = load <2 x i32>* %C + %tmp1 = load <2 x i64>, <2 x i64>* %A + %tmp2 = load <2 x i32>, <2 x i32>* %B + %tmp3 = load <2 x i32>, <2 x i32>* %C %tmp4 = zext <2 x i32> %tmp2 to <2 x i64> %tmp5 = zext <2 x i32> %tmp3 to <2 x i64> %tmp6 = mul <2 x i64> %tmp4, %tmp5 diff --git a/llvm/test/CodeGen/AArch64/addsub-shifted.ll b/llvm/test/CodeGen/AArch64/addsub-shifted.ll index 1d963f41e99..7c7d6545993 100644 --- a/llvm/test/CodeGen/AArch64/addsub-shifted.ll +++ b/llvm/test/CodeGen/AArch64/addsub-shifted.ll @@ -6,63 +6,63 @@ define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) { ; CHECK-LABEL: test_lsl_arith: - %rhs1 = load volatile i32* @var32 + %rhs1 = load volatile i32, i32* @var32 %shift1 = shl i32 %rhs1, 18 %val1 = add i32 %lhs32, %shift1 store volatile i32 %val1, i32* @var32 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18 - %rhs2 = load volatile i32* @var32 + %rhs2 = load volatile i32, i32* @var32 %shift2 = shl i32 %rhs2, 31 %val2 = add i32 %shift2, %lhs32 store volatile i32 %val2, i32* @var32 ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31 - %rhs3 = load volatile i32* @var32 + %rhs3 = load volatile i32, i32* @var32 %shift3 = shl i32 %rhs3, 5 %val3 = sub i32 %lhs32, %shift3 store volatile i32 %val3, i32* @var32 ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5 ; Subtraction is not commutative! 
- %rhs4 = load volatile i32* @var32 + %rhs4 = load volatile i32, i32* @var32 %shift4 = shl i32 %rhs4, 19 %val4 = sub i32 %shift4, %lhs32 store volatile i32 %val4, i32* @var32 ; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19 - %lhs4a = load volatile i32* @var32 + %lhs4a = load volatile i32, i32* @var32 %shift4a = shl i32 %lhs4a, 15 %val4a = sub i32 0, %shift4a store volatile i32 %val4a, i32* @var32 ; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15 - %rhs5 = load volatile i64* @var64 + %rhs5 = load volatile i64, i64* @var64 %shift5 = shl i64 %rhs5, 18 %val5 = add i64 %lhs64, %shift5 store volatile i64 %val5, i64* @var64 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18 - %rhs6 = load volatile i64* @var64 + %rhs6 = load volatile i64, i64* @var64 %shift6 = shl i64 %rhs6, 31 %val6 = add i64 %shift6, %lhs64 store volatile i64 %val6, i64* @var64 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31 - %rhs7 = load volatile i64* @var64 + %rhs7 = load volatile i64, i64* @var64 %shift7 = shl i64 %rhs7, 5 %val7 = sub i64 %lhs64, %shift7 store volatile i64 %val7, i64* @var64 ; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5 ; Subtraction is not commutative! - %rhs8 = load volatile i64* @var64 + %rhs8 = load volatile i64, i64* @var64 %shift8 = shl i64 %rhs8, 19 %val8 = sub i64 %shift8, %lhs64 store volatile i64 %val8, i64* @var64 ; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19 - %lhs8a = load volatile i64* @var64 + %lhs8a = load volatile i64, i64* @var64 %shift8a = shl i64 %lhs8a, 60 %val8a = sub i64 0, %shift8a store volatile i64 %val8a, i64* @var64 diff --git a/llvm/test/CodeGen/AArch64/addsub.ll b/llvm/test/CodeGen/AArch64/addsub.ll index b85fdbb14ce..09b9f629973 100644 --- a/llvm/test/CodeGen/AArch64/addsub.ll +++ b/llvm/test/CodeGen/AArch64/addsub.ll @@ -12,12 +12,12 @@ define void @add_small() { ; CHECK-LABEL: add_small: ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #4095 - %val32 = load i32* @var_i32 + %val32 = load i32, i32* @var_i32 %newval32 = add i32 %val32, 4095 store i32 %newval32, i32* @var_i32 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #52 - %val64 = load i64* @var_i64 + %val64 = load i64, i64* @var_i64 %newval64 = add i64 %val64, 52 store i64 %newval64, i64* @var_i64 @@ -29,12 +29,12 @@ define void @add_med() { ; CHECK-LABEL: add_med: ; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{#3567, lsl #12|#14610432}} - %val32 = load i32* @var_i32 + %val32 = load i32, i32* @var_i32 %newval32 = add i32 %val32, 14610432 ; =0xdef000 store i32 %newval32, i32* @var_i32 ; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{#4095, lsl #12|#16773120}} - %val64 = load i64* @var_i64 + %val64 = load i64, i64* @var_i64 %newval64 = add i64 %val64, 16773120 ; =0xfff000 store i64 %newval64, i64* @var_i64 @@ -46,12 +46,12 @@ define void @sub_small() { ; CHECK-LABEL: sub_small: ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, #4095 - %val32 = load i32* @var_i32 + %val32 = load i32, i32* @var_i32 %newval32 = sub i32 %val32, 4095 store i32 %newval32, i32* @var_i32 ; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, #52 - %val64 = load i64* @var_i64 + %val64 = load i64, i64* @var_i64 %newval64 = sub i64 %val64, 52 store i64 %newval64, i64* @var_i64 @@ -63,12 +63,12 @@ define void @sub_med() { ; CHECK-LABEL: sub_med: ; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{#3567, lsl #12|#14610432}} - %val32 = load i32* @var_i32 + %val32 = load i32, i32* @var_i32 %newval32 = sub i32 %val32, 14610432 ; =0xdef000 store i32 %newval32, i32* @var_i32 ; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{#4095, lsl #12|#16773120}} - %val64 = load i64* 
@var_i64 + %val64 = load i64, i64* @var_i64 %newval64 = sub i64 %val64, 16773120 ; =0xfff000 store i64 %newval64, i64* @var_i64 @@ -77,7 +77,7 @@ define void @sub_med() { define void @testing() { ; CHECK-LABEL: testing: - %val = load i32* @var_i32 + %val = load i32, i32* @var_i32 ; CHECK: cmp {{w[0-9]+}}, #4095 ; CHECK: b.ne [[RET:.?LBB[0-9]+_[0-9]+]] diff --git a/llvm/test/CodeGen/AArch64/addsub_ext.ll b/llvm/test/CodeGen/AArch64/addsub_ext.ll index ceea8a08ece..f0c7572ebf1 100644 --- a/llvm/test/CodeGen/AArch64/addsub_ext.ll +++ b/llvm/test/CodeGen/AArch64/addsub_ext.ll @@ -7,9 +7,9 @@ define void @addsub_i8rhs() minsize { ; CHECK-LABEL: addsub_i8rhs: - %val8_tmp = load i8* @var8 - %lhs32 = load i32* @var32 - %lhs64 = load i64* @var64 + %val8_tmp = load i8, i8* @var8 + %lhs32 = load i32, i32* @var32 + %lhs64 = load i64, i64* @var64 ; Need this to prevent extension upon load and give a vanilla i8 operand. %val8 = add i8 %val8_tmp, 123 @@ -82,9 +82,9 @@ end: define void @addsub_i16rhs() minsize { ; CHECK-LABEL: addsub_i16rhs: - %val16_tmp = load i16* @var16 - %lhs32 = load i32* @var32 - %lhs64 = load i64* @var64 + %val16_tmp = load i16, i16* @var16 + %lhs32 = load i32, i32* @var32 + %lhs64 = load i64, i64* @var64 ; Need this to prevent extension upon load and give a vanilla i16 operand. %val16 = add i16 %val16_tmp, 123 @@ -160,8 +160,8 @@ end: ; in the face of "add/sub (shifted register)" so I don't intend to. define void @addsub_i32rhs() minsize { ; CHECK-LABEL: addsub_i32rhs: - %val32_tmp = load i32* @var32 - %lhs64 = load i64* @var64 + %val32_tmp = load i32, i32* @var32 + %lhs64 = load i64, i64* @var64 %val32 = add i32 %val32_tmp, 123 diff --git a/llvm/test/CodeGen/AArch64/alloca.ll b/llvm/test/CodeGen/AArch64/alloca.ll index f93efbc42e6..5b2278ce8a3 100644 --- a/llvm/test/CodeGen/AArch64/alloca.ll +++ b/llvm/test/CodeGen/AArch64/alloca.ll @@ -51,7 +51,7 @@ define i64 @test_alloca_with_local(i64 %n) { call void @use_addr_loc(i8* %buf, i64* %loc) ; CHECK: bl use_addr - %val = load i64* %loc + %val = load i64, i64* %loc ; CHECK: ldur x0, [x29, #-[[LOC_FROM_FP]]] diff --git a/llvm/test/CodeGen/AArch64/and-mask-removal.ll b/llvm/test/CodeGen/AArch64/and-mask-removal.ll index 6bbec17ad1c..8291516d81e 100644 --- a/llvm/test/CodeGen/AArch64/and-mask-removal.ll +++ b/llvm/test/CodeGen/AArch64/and-mask-removal.ll @@ -9,13 +9,13 @@ define void @new_position(i32 %pos) { entry: %idxprom = sext i32 %pos to i64 %arrayidx = getelementptr inbounds [400 x i8], [400 x i8]* @board, i64 0, i64 %idxprom - %tmp = load i8* %arrayidx, align 1 + %tmp = load i8, i8* %arrayidx, align 1 %.off = add i8 %tmp, -1 %switch = icmp ult i8 %.off, 2 br i1 %switch, label %if.then, label %if.end if.then: ; preds = %entry - %tmp1 = load i32* @next_string, align 4 + %tmp1 = load i32, i32* @next_string, align 4 %arrayidx8 = getelementptr inbounds [400 x i32], [400 x i32]* @string_number, i64 0, i64 %idxprom store i32 %tmp1, i32* %arrayidx8, align 4 br label %if.end diff --git a/llvm/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll b/llvm/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll index 88232fcc0b4..e2c39e0b623 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll @@ -8,7 +8,7 @@ entry: %cmp = icmp eq i32* null, undef %frombool = zext i1 %cmp to i8 store i8 %frombool, i8* undef, align 1 - %tmp4 = load i8* undef, align 1 + %tmp4 = load i8, i8* undef, align 1 %tobool = trunc i8 %tmp4 to i1 br i1 %tobool, label %land.lhs.true, label %if.end diff --git 
a/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll b/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll index a83f1646ef6..b69cd242116 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll @@ -19,7 +19,7 @@ for.body: %0 = shl nsw i64 %indvars.iv, 12 %add = add nsw i64 %0, 34628173824 %1 = inttoptr i64 %add to i32* - %2 = load volatile i32* %1, align 4096 + %2 = load volatile i32, i32* %1, align 4096 store volatile i32 %2, i32* @test_data, align 4 %indvars.iv.next = add i64 %indvars.iv, 1 %lftr.wideiv = trunc i64 %indvars.iv.next to i32 diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll b/llvm/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll index d47dbb28164..8d0b1b6f84c 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll @@ -13,7 +13,7 @@ lor.lhs.false: br i1 undef, label %return, label %if.end if.end: - %tmp.i = load i64* undef, align 8 + %tmp.i = load i64, i64* undef, align 8 %and.i.i.i = and i64 %tmp.i, -16 br i1 %IsArrow, label %if.else_crit_edge, label %if.end32 diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll index 52e173481f6..ef8d6f3b4ef 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll @@ -15,23 +15,23 @@ define hidden %struct.CGRect @t(%0* nocapture %self, i8* nocapture %_cmd) nounwi entry: ; CHECK-LABEL: t: ; CHECK: ldp d{{[0-9]+}}, d{{[0-9]+}} - %ivar = load i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4 + %ivar = load i64, i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4 %0 = bitcast %0* %self to i8* %add.ptr = getelementptr inbounds i8, i8* %0, i64 %ivar %add.ptr10.0 = bitcast i8* %add.ptr to double* - %tmp11 = load double* %add.ptr10.0, align 8 + %tmp11 = load double, double* %add.ptr10.0, align 8 %add.ptr.sum = add i64 %ivar, 8 %add.ptr10.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum %1 = bitcast i8* %add.ptr10.1 to double* - %tmp12 = load double* %1, align 8 + %tmp12 = load double, double* %1, align 8 %add.ptr.sum17 = add i64 %ivar, 16 %add.ptr4.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum17 %add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double* - %tmp = load double* %add.ptr4.1.0, align 8 + %tmp = load double, double* %add.ptr4.1.0, align 8 %add.ptr4.1.sum = add i64 %ivar, 24 %add.ptr4.1.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr4.1.sum %2 = bitcast i8* %add.ptr4.1.1 to double* - %tmp5 = load double* %2, align 8 + %tmp5 = load double, double* %2, align 8 %insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0 %insert16 = insertvalue %struct.CGPoint %insert14, double %tmp12, 1 %insert = insertvalue %struct.CGRect undef, %struct.CGPoint %insert16, 0 diff --git a/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll b/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll index 4db1f59a2c6..04364b01eb5 100644 --- a/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll +++ b/llvm/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll @@ -13,12 +13,12 @@ define void @testDouble(double %d) ssp { entry: %d.addr = alloca double, align 8 store double %d, double* %d.addr, align 8 - %0 = load double* %d.addr, align 8 - %1 = load double* %d.addr, align 8 + %0 = load double, double* %d.addr, align 8 + %1 = load double, double* %d.addr, align 
8 %conv = fptoui double %1 to i64 %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), double %0, i64 %conv) - %2 = load double* %d.addr, align 8 - %3 = load double* %d.addr, align 8 + %2 = load double, double* %d.addr, align 8 + %3 = load double, double* %d.addr, align 8 %conv1 = fptoui double %3 to i32 %call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str1, i32 0, i32 0), double %2, i32 %conv1) ret void @@ -33,14 +33,14 @@ define void @testFloat(float %f) ssp { entry: %f.addr = alloca float, align 4 store float %f, float* %f.addr, align 4 - %0 = load float* %f.addr, align 4 + %0 = load float, float* %f.addr, align 4 %conv = fpext float %0 to double - %1 = load float* %f.addr, align 4 + %1 = load float, float* %f.addr, align 4 %conv1 = fptoui float %1 to i64 %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str2, i32 0, i32 0), double %conv, i64 %conv1) - %2 = load float* %f.addr, align 4 + %2 = load float, float* %f.addr, align 4 %conv2 = fpext float %2 to double - %3 = load float* %f.addr, align 4 + %3 = load float, float* %f.addr, align 4 %conv3 = fptoui float %3 to i32 %call4 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str3, i32 0, i32 0), double %conv2, i32 %conv3) ret void diff --git a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll index 15c007739f2..b6826e1fa56 100644 --- a/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll +++ b/llvm/test/CodeGen/AArch64/arm64-abi-varargs.ll @@ -82,18 +82,18 @@ define i32 @main() nounwind ssp { store i32 10, i32* %a10, align 4 store i32 11, i32* %a11, align 4 store i32 12, i32* %a12, align 4 - %1 = load i32* %a1, align 4 - %2 = load i32* %a2, align 4 - %3 = load i32* %a3, align 4 - %4 = load i32* %a4, align 4 - %5 = load i32* %a5, align 4 - %6 = load i32* %a6, align 4 - %7 = load i32* %a7, align 4 - %8 = load i32* %a8, align 4 - %9 = load i32* %a9, align 4 - %10 = load i32* %a10, align 4 - %11 = load i32* %a11, align 4 - %12 = load i32* %a12, align 4 + %1 = load i32, i32* %a1, align 4 + %2 = load i32, i32* %a2, align 4 + %3 = load i32, i32* %a3, align 4 + %4 = load i32, i32* %a4, align 4 + %5 = load i32, i32* %a5, align 4 + %6 = load i32, i32* %a6, align 4 + %7 = load i32, i32* %a7, align 4 + %8 = load i32, i32* %a8, align 4 + %9 = load i32, i32* %a9, align 4 + %10 = load i32, i32* %a10, align 4 + %11 = load i32, i32* %a11, align 4 + %12 = load i32, i32* %a12, align 4 call void (i32, i32, i32, i32, i32, i32, i32, i32, i32, ...)* @fn9(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12) ret i32 0 } @@ -131,8 +131,8 @@ entry: %y.addr = alloca <4 x i32>, align 16 store i32 %x, i32* %x.addr, align 4 store <4 x i32> %y, <4 x i32>* %y.addr, align 16 - %0 = load i32* %x.addr, align 4 - %1 = load <4 x i32>* %y.addr, align 16 + %0 = load i32, i32* %x.addr, align 4 + %1 = load <4 x i32>, <4 x i32>* %y.addr, align 16 call void (i8*, ...)* @foo(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 %0, <4 x i32> %1) ret void } @@ -158,7 +158,7 @@ entry: call void @llvm.va_start(i8* %args1) %0 = va_arg i8** %args, i32 store i32 %0, i32* %vc, align 4 - %ap.cur = load i8** %args + %ap.cur = load i8*, i8** %args %1 = getelementptr i8, i8* %ap.cur, i32 15 %2 = ptrtoint i8* %1 to i64 %3 = and i64 %2, -16 @@ -183,9 +183,9 @@ entry: store i32 %x, i32* %x.addr, align 4 %0 = bitcast %struct.s41* %s41 to i128* store i128 %s41.coerce, i128* %0, 
align 1 - %1 = load i32* %x.addr, align 4 + %1 = load i32, i32* %x.addr, align 4 %2 = bitcast %struct.s41* %s41 to i128* - %3 = load i128* %2, align 1 + %3 = load i128, i128* %2, align 1 call void (i8*, ...)* @foo2(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 %1, i128 %3) ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-abi.ll b/llvm/test/CodeGen/AArch64/arm64-abi.ll index 8a6b64d6ff3..36a682242aa 100644 --- a/llvm/test/CodeGen/AArch64/arm64-abi.ll +++ b/llvm/test/CodeGen/AArch64/arm64-abi.ll @@ -79,7 +79,7 @@ entry: ; FAST: sub sp, sp ; FAST: mov x[[ADDR:[0-9]+]], sp ; FAST: str [[REG_1:q[0-9]+]], [x[[ADDR]], #16] - %0 = load <4 x i32>* %in, align 16 + %0 = load <4 x i32>, <4 x i32>* %in, align 16 %call = tail call double @args_vec_4i(double 3.000000e+00, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, double 3.000000e+00, <4 x i32> %0, i8 signext 3) ret double %call } @@ -133,7 +133,7 @@ entry: ; FAST: sub sp, sp, #32 ; FAST: mov x[[ADDR:[0-9]+]], sp ; FAST: str [[REG_1:d[0-9]+]], [x[[ADDR]], #8] - %0 = load <2 x i32>* %in, align 8 + %0 = load <2 x i32>, <2 x i32>* %in, align 8 %call = tail call double @args_vec_2i(double 3.000000e+00, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, float 3.000000e+00, <2 x i32> %0, i8 signext 3) @@ -148,7 +148,7 @@ entry: ; CHECK: str [[REG_1:d[0-9]+]], [sp, #8] ; CHECK: str [[REG_2:w[0-9]+]], [sp] ; CHECK: orr w0, wzr, #0x3 - %0 = load double* %in, align 8 + %0 = load double, double* %in, align 8 %call = tail call double @args_f64(double 3.000000e+00, double %0, double %0, double %0, double %0, double %0, double %0, double %0, float 3.000000e+00, double %0, i8 signext 3) @@ -163,7 +163,7 @@ entry: ; CHECK: strb [[REG_3:w[0-9]+]], [sp, #16] ; CHECK: str [[REG_1:x[0-9]+]], [sp, #8] ; CHECK: str [[REG_2:w[0-9]+]], [sp] - %0 = load i64* %in, align 8 + %0 = load i64, i64* %in, align 8 %call = tail call i64 @args_i64(i64 3, i64 %0, i64 %0, i64 %0, i64 %0, i64 %0, i64 %0, i64 %0, i32 3, i64 %0, i8 signext 3) ret i64 %call @@ -177,7 +177,7 @@ entry: ; CHECK: strb [[REG_2:w[0-9]+]], [sp, #8] ; CHECK: str [[REG_1:s[0-9]+]], [sp, #4] ; CHECK: strh [[REG_3:w[0-9]+]], [sp] - %0 = load float* %in, align 4 + %0 = load float, float* %in, align 4 %call = tail call i32 @args_f32(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, i16 signext 3, float %0, @@ -194,7 +194,7 @@ entry: ; CHECK: strb [[REG_2:w[0-9]+]], [sp, #8] ; CHECK: str [[REG_1:w[0-9]+]], [sp, #4] ; CHECK: strh [[REG_3:w[0-9]+]], [sp] - %0 = load i32* %in, align 4 + %0 = load i32, i32* %in, align 4 %call = tail call i32 @args_i32(i32 3, i32 %0, i32 %0, i32 %0, i32 %0, i32 %0, i32 %0, i32 %0, i16 signext 3, i32 %0, i8 signext 4) ret i32 %call diff --git a/llvm/test/CodeGen/AArch64/arm64-abi_align.ll b/llvm/test/CodeGen/AArch64/arm64-abi_align.ll index 6173e07bc1d..1c1b58b8b14 100644 --- a/llvm/test/CodeGen/AArch64/arm64-abi_align.ll +++ b/llvm/test/CodeGen/AArch64/arm64-abi_align.ll @@ -59,8 +59,8 @@ entry: ; CHECK-LABEL: caller38 ; CHECK: ldr x1, ; CHECK: ldr x2, - %0 = load i64* bitcast (%struct.s38* @g38 to i64*), align 4 - %1 = load i64* bitcast (%struct.s38* @g38_2 to i64*), align 4 + %0 = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4 + %1 = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4 %call = tail call i32 @f38(i32 3, i64 %0, i64 %1) #5 ret i32 %call } @@ 
-76,8 +76,8 @@ entry: ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8] ; CHECK: movz w[[C:[0-9]+]], #0x9 ; CHECK: str w[[C]], [sp] - %0 = load i64* bitcast (%struct.s38* @g38 to i64*), align 4 - %1 = load i64* bitcast (%struct.s38* @g38_2 to i64*), align 4 + %0 = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4 + %1 = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4 %call = tail call i32 @f38_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i64 %0, i64 %1) #5 ret i32 %call @@ -112,8 +112,8 @@ entry: ; CHECK-LABEL: caller39 ; CHECK: ldp x1, x2, ; CHECK: ldp x3, x4, - %0 = load i128* bitcast (%struct.s39* @g39 to i128*), align 16 - %1 = load i128* bitcast (%struct.s39* @g39_2 to i128*), align 16 + %0 = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16 + %1 = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16 %call = tail call i32 @f39(i32 3, i128 %0, i128 %1) #5 ret i32 %call } @@ -130,8 +130,8 @@ entry: ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16] ; CHECK: movz w[[C:[0-9]+]], #0x9 ; CHECK: str w[[C]], [sp] - %0 = load i128* bitcast (%struct.s39* @g39 to i128*), align 16 - %1 = load i128* bitcast (%struct.s39* @g39_2 to i128*), align 16 + %0 = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16 + %1 = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16 %call = tail call i32 @f39_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 %0, i128 %1) #5 ret i32 %call @@ -168,8 +168,8 @@ entry: ; CHECK-LABEL: caller40 ; CHECK: ldp x1, x2, ; CHECK: ldp x3, x4, - %0 = load [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4 - %1 = load [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4 + %0 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4 + %1 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4 %call = tail call i32 @f40(i32 3, [2 x i64] %0, [2 x i64] %1) #5 ret i32 %call } @@ -186,8 +186,8 @@ entry: ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8] ; CHECK: movz w[[C:[0-9]+]], #0x9 ; CHECK: str w[[C]], [sp] - %0 = load [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4 - %1 = load [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4 + %0 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4 + %1 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4 %call = tail call i32 @f40_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, [2 x i64] %0, [2 x i64] %1) #5 ret i32 %call @@ -222,8 +222,8 @@ entry: ; CHECK-LABEL: caller41 ; CHECK: ldp x1, x2, ; CHECK: ldp x3, x4, - %0 = load i128* bitcast (%struct.s41* @g41 to i128*), align 16 - %1 = load i128* bitcast (%struct.s41* @g41_2 to i128*), align 16 + %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16 + %1 = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16 %call = tail call i32 @f41(i32 3, i128 %0, i128 %1) #5 ret i32 %call } @@ -240,8 +240,8 @@ entry: ; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16] ; CHECK: movz w[[C:[0-9]+]], #0x9 ; CHECK: str w[[C]], [sp] - %0 = load i128* bitcast (%struct.s41* @g41 to i128*), align 16 - %1 = load i128* bitcast (%struct.s41* @g41_2 to i128*), align 16 + %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16 + %1 = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16 %call = tail call i32 @f41_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 %0, 
i128 %1) #5 ret i32 %call @@ -261,14 +261,14 @@ entry: ; FAST: add w[[C:[0-9]+]], w[[A]], w0 ; FAST: add {{w[0-9]+}}, w[[C]], w[[B]] %i1 = getelementptr inbounds %struct.s42, %struct.s42* %s1, i64 0, i32 0 - %0 = load i32* %i1, align 4, !tbaa !0 + %0 = load i32, i32* %i1, align 4, !tbaa !0 %i2 = getelementptr inbounds %struct.s42, %struct.s42* %s2, i64 0, i32 0 - %1 = load i32* %i2, align 4, !tbaa !0 + %1 = load i32, i32* %i2, align 4, !tbaa !0 %s = getelementptr inbounds %struct.s42, %struct.s42* %s1, i64 0, i32 1 - %2 = load i16* %s, align 2, !tbaa !3 + %2 = load i16, i16* %s, align 2, !tbaa !3 %conv = sext i16 %2 to i32 %s5 = getelementptr inbounds %struct.s42, %struct.s42* %s2, i64 0, i32 1 - %3 = load i16* %s5, align 2, !tbaa !3 + %3 = load i16, i16* %s5, align 2, !tbaa !3 %conv6 = sext i16 %3 to i32 %add = add i32 %0, %i %add3 = add i32 %add, %1 @@ -370,14 +370,14 @@ entry: ; FAST: add w[[C:[0-9]+]], w[[A]], w0 ; FAST: add {{w[0-9]+}}, w[[C]], w[[B]] %i1 = getelementptr inbounds %struct.s43, %struct.s43* %s1, i64 0, i32 0 - %0 = load i32* %i1, align 4, !tbaa !0 + %0 = load i32, i32* %i1, align 4, !tbaa !0 %i2 = getelementptr inbounds %struct.s43, %struct.s43* %s2, i64 0, i32 0 - %1 = load i32* %i2, align 4, !tbaa !0 + %1 = load i32, i32* %i2, align 4, !tbaa !0 %s = getelementptr inbounds %struct.s43, %struct.s43* %s1, i64 0, i32 1 - %2 = load i16* %s, align 2, !tbaa !3 + %2 = load i16, i16* %s, align 2, !tbaa !3 %conv = sext i16 %2 to i32 %s5 = getelementptr inbounds %struct.s43, %struct.s43* %s2, i64 0, i32 1 - %3 = load i16* %s5, align 2, !tbaa !3 + %3 = load i16, i16* %s5, align 2, !tbaa !3 %conv6 = sext i16 %3 to i32 %add = add i32 %0, %i %add3 = add i32 %add, %1 @@ -493,7 +493,7 @@ entry: ; Load/Store opt is disabled with -O0, so the i128 is split. 
; FAST: str {{x[0-9]+}}, [x[[ADDR]], #8] ; FAST: str {{x[0-9]+}}, [x[[ADDR]]] - %0 = load i128* bitcast (%struct.s41* @g41 to i128*), align 16 + %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16 %call = tail call i32 @callee_i128_split(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i128 %0, i32 8) #5 ret i32 %call @@ -514,7 +514,7 @@ entry: ; FAST: mov x[[R0:[0-9]+]], sp ; FAST: orr w[[R1:[0-9]+]], wzr, #0x8 ; FAST: str w[[R1]], {{\[}}x[[R0]]{{\]}} - %0 = load i64* bitcast (%struct.s41* @g41 to i64*), align 16 + %0 = load i64, i64* bitcast (%struct.s41* @g41 to i64*), align 16 %call = tail call i32 @callee_i64(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i64 %0, i32 8) #5 ret i32 %call diff --git a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll index 17676472c87..3197f5bd27e 100644 --- a/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll +++ b/llvm/test/CodeGen/AArch64/arm64-addr-mode-folding.ll @@ -11,12 +11,12 @@ define i32 @fct(i32 %i1, i32 %i2) { ; _CHECK-NOT_: , sxtw] entry: %idxprom = sext i32 %i1 to i64 - %0 = load i8** @block, align 8 + %0 = load i8*, i8** @block, align 8 %arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom - %1 = load i8* %arrayidx, align 1 + %1 = load i8, i8* %arrayidx, align 1 %idxprom1 = sext i32 %i2 to i64 %arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1 - %2 = load i8* %arrayidx2, align 1 + %2 = load i8, i8* %arrayidx2, align 1 %cmp = icmp eq i8 %1, %2 br i1 %cmp, label %if.end, label %if.then @@ -30,10 +30,10 @@ if.end: ; preds = %entry %inc9 = add nsw i32 %i2, 1 %idxprom10 = sext i32 %inc to i64 %arrayidx11 = getelementptr inbounds i8, i8* %0, i64 %idxprom10 - %3 = load i8* %arrayidx11, align 1 + %3 = load i8, i8* %arrayidx11, align 1 %idxprom12 = sext i32 %inc9 to i64 %arrayidx13 = getelementptr inbounds i8, i8* %0, i64 %idxprom12 - %4 = load i8* %arrayidx13, align 1 + %4 = load i8, i8* %arrayidx13, align 1 %cmp16 = icmp eq i8 %3, %4 br i1 %cmp16, label %if.end23, label %if.then18 @@ -47,10 +47,10 @@ if.end23: ; preds = %if.end %inc25 = add nsw i32 %i2, 2 %idxprom26 = sext i32 %inc24 to i64 %arrayidx27 = getelementptr inbounds i8, i8* %0, i64 %idxprom26 - %5 = load i8* %arrayidx27, align 1 + %5 = load i8, i8* %arrayidx27, align 1 %idxprom28 = sext i32 %inc25 to i64 %arrayidx29 = getelementptr inbounds i8, i8* %0, i64 %idxprom28 - %6 = load i8* %arrayidx29, align 1 + %6 = load i8, i8* %arrayidx29, align 1 %cmp32 = icmp eq i8 %5, %6 br i1 %cmp32, label %return, label %if.then34 @@ -71,12 +71,12 @@ define i32 @fct1(i32 %i1, i32 %i2) optsize { ; CHECK: , sxtw] entry: %idxprom = sext i32 %i1 to i64 - %0 = load i8** @block, align 8 + %0 = load i8*, i8** @block, align 8 %arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom - %1 = load i8* %arrayidx, align 1 + %1 = load i8, i8* %arrayidx, align 1 %idxprom1 = sext i32 %i2 to i64 %arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1 - %2 = load i8* %arrayidx2, align 1 + %2 = load i8, i8* %arrayidx2, align 1 %cmp = icmp eq i8 %1, %2 br i1 %cmp, label %if.end, label %if.then @@ -90,10 +90,10 @@ if.end: ; preds = %entry %inc9 = add nsw i32 %i2, 1 %idxprom10 = sext i32 %inc to i64 %arrayidx11 = getelementptr inbounds i8, i8* %0, i64 %idxprom10 - %3 = load i8* %arrayidx11, align 1 + %3 = load i8, i8* %arrayidx11, align 1 %idxprom12 = sext i32 %inc9 to i64 %arrayidx13 = getelementptr inbounds i8, i8* %0, i64 %idxprom12 - %4 = load i8* %arrayidx13, align 1 + %4 = load i8, i8* %arrayidx13, align 1 %cmp16 
= icmp eq i8 %3, %4 br i1 %cmp16, label %if.end23, label %if.then18 @@ -107,10 +107,10 @@ if.end23: ; preds = %if.end %inc25 = add nsw i32 %i2, 2 %idxprom26 = sext i32 %inc24 to i64 %arrayidx27 = getelementptr inbounds i8, i8* %0, i64 %idxprom26 - %5 = load i8* %arrayidx27, align 1 + %5 = load i8, i8* %arrayidx27, align 1 %idxprom28 = sext i32 %inc25 to i64 %arrayidx29 = getelementptr inbounds i8, i8* %0, i64 %idxprom28 - %6 = load i8* %arrayidx29, align 1 + %6 = load i8, i8* %arrayidx29, align 1 %cmp32 = icmp eq i8 %5, %6 br i1 %cmp32, label %return, label %if.then34 @@ -136,8 +136,8 @@ entry: if.then: ; preds = %entry %idxprom = zext i8 %c to i64 %arrayidx = getelementptr inbounds i32, i32* %array, i64 %idxprom - %0 = load volatile i32* %arrayidx, align 4 - %1 = load volatile i32* %arrayidx, align 4 + %0 = load volatile i32, i32* %arrayidx, align 4 + %1 = load volatile i32, i32* %arrayidx, align 4 %add3 = add nsw i32 %1, %0 br label %if.end @@ -160,8 +160,8 @@ entry: if.then: ; preds = %entry %idxprom = zext i8 %c to i64 %arrayidx = getelementptr inbounds i32, i32* %array, i64 %idxprom - %0 = load volatile i32* %arrayidx, align 4 - %1 = load volatile i32* %arrayidx, align 4 + %0 = load volatile i32, i32* %arrayidx, align 4 + %1 = load volatile i32, i32* %arrayidx, align 4 %add3 = add nsw i32 %1, %0 br label %if.end diff --git a/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll b/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll index 2bee1d5c74a..4703d25a601 100644 --- a/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll +++ b/llvm/test/CodeGen/AArch64/arm64-addr-type-promotion.ll @@ -28,12 +28,12 @@ define zeroext i8 @fullGtU(i32 %i1, i32 %i2) { ; CHECK-NEXT: cmp [[LOADEDVAL3]], [[LOADEDVAL4]] entry: %idxprom = sext i32 %i1 to i64 - %tmp = load i8** @block, align 8 + %tmp = load i8*, i8** @block, align 8 %arrayidx = getelementptr inbounds i8, i8* %tmp, i64 %idxprom - %tmp1 = load i8* %arrayidx, align 1 + %tmp1 = load i8, i8* %arrayidx, align 1 %idxprom1 = sext i32 %i2 to i64 %arrayidx2 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom1 - %tmp2 = load i8* %arrayidx2, align 1 + %tmp2 = load i8, i8* %arrayidx2, align 1 %cmp = icmp eq i8 %tmp1, %tmp2 br i1 %cmp, label %if.end, label %if.then @@ -47,10 +47,10 @@ if.end: ; preds = %entry %inc10 = add nsw i32 %i2, 1 %idxprom11 = sext i32 %inc to i64 %arrayidx12 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom11 - %tmp3 = load i8* %arrayidx12, align 1 + %tmp3 = load i8, i8* %arrayidx12, align 1 %idxprom13 = sext i32 %inc10 to i64 %arrayidx14 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom13 - %tmp4 = load i8* %arrayidx14, align 1 + %tmp4 = load i8, i8* %arrayidx14, align 1 %cmp17 = icmp eq i8 %tmp3, %tmp4 br i1 %cmp17, label %if.end25, label %if.then19 @@ -64,10 +64,10 @@ if.end25: ; preds = %if.end %inc27 = add nsw i32 %i2, 2 %idxprom28 = sext i32 %inc26 to i64 %arrayidx29 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom28 - %tmp5 = load i8* %arrayidx29, align 1 + %tmp5 = load i8, i8* %arrayidx29, align 1 %idxprom30 = sext i32 %inc27 to i64 %arrayidx31 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom30 - %tmp6 = load i8* %arrayidx31, align 1 + %tmp6 = load i8, i8* %arrayidx31, align 1 %cmp34 = icmp eq i8 %tmp5, %tmp6 br i1 %cmp34, label %return, label %if.then36 diff --git a/llvm/test/CodeGen/AArch64/arm64-addrmode.ll b/llvm/test/CodeGen/AArch64/arm64-addrmode.ll index d897a79b26e..c22d0312b24 100644 --- a/llvm/test/CodeGen/AArch64/arm64-addrmode.ll +++ b/llvm/test/CodeGen/AArch64/arm64-addrmode.ll @@ -9,7 +9,7 @@ 
; CHECK: ret define void @t1() { %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 1 - %tmp = load volatile i64* %incdec.ptr, align 8 + %tmp = load volatile i64, i64* %incdec.ptr, align 8 ret void } @@ -21,7 +21,7 @@ define void @t1() { ; CHECK: ret define void @t2() { %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 -33 - %tmp = load volatile i64* %incdec.ptr, align 8 + %tmp = load volatile i64, i64* %incdec.ptr, align 8 ret void } @@ -31,7 +31,7 @@ define void @t2() { ; CHECK: ret define void @t3() { %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 4095 - %tmp = load volatile i64* %incdec.ptr, align 8 + %tmp = load volatile i64, i64* %incdec.ptr, align 8 ret void } @@ -42,7 +42,7 @@ define void @t3() { ; CHECK: ret define void @t4() { %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 4096 - %tmp = load volatile i64* %incdec.ptr, align 8 + %tmp = load volatile i64, i64* %incdec.ptr, align 8 ret void } @@ -52,7 +52,7 @@ define void @t4() { ; CHECK: ret define void @t5(i64 %a) { %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 %a - %tmp = load volatile i64* %incdec.ptr, align 8 + %tmp = load volatile i64, i64* %incdec.ptr, align 8 ret void } @@ -65,7 +65,7 @@ define void @t5(i64 %a) { define void @t6(i64 %a) { %tmp1 = getelementptr inbounds i64, i64* @object, i64 %a %incdec.ptr = getelementptr inbounds i64, i64* %tmp1, i64 4096 - %tmp = load volatile i64* %incdec.ptr, align 8 + %tmp = load volatile i64, i64* %incdec.ptr, align 8 ret void } @@ -76,7 +76,7 @@ define void @t7(i64 %a) { ; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]] %1 = add i64 %a, 65535 ;0xffff %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64* %2, align 8 + %3 = load volatile i64, i64* %2, align 8 ret void } @@ -86,7 +86,7 @@ define void @t8(i64 %a) { ; CHECK-NEXT: ldr xzr, [x0, [[REG]]] %1 = sub i64 %a, 4662 ;-4662 is 0xffffffffffffedca %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64* %2, align 8 + %3 = load volatile i64, i64* %2, align 8 ret void } @@ -96,7 +96,7 @@ define void @t9(i64 %a) { ; CHECK-NEXT: ldr xzr, [x0, [[REG]]] %1 = add i64 -305463297, %a ;-305463297 is 0xffffffffedcaffff %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64* %2, align 8 + %3 = load volatile i64, i64* %2, align 8 ret void } @@ -106,7 +106,7 @@ define void @t10(i64 %a) { ; CHECK-NEXT: ldr xzr, [x0, [[REG]]] %1 = add i64 %a, 81909218222800896 ;0x123000000000000 %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64* %2, align 8 + %3 = load volatile i64, i64* %2, align 8 ret void } @@ -117,7 +117,7 @@ define void @t11(i64 %a) { ; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]] %1 = add i64 %a, 19088743 ;0x1234567 %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64* %2, align 8 + %3 = load volatile i64, i64* %2, align 8 ret void } @@ -128,7 +128,7 @@ define void @t12(i64 %a) { ; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]] %1 = add i64 %a, 4095 ;0xfff %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64* %2, align 8 + %3 = load volatile i64, i64* %2, align 8 ret void } @@ -138,7 +138,7 @@ define void @t13(i64 %a) { ; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]] %1 = add i64 %a, -4095 ;-0xfff %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64* %2, align 8 + %3 = load volatile i64, i64* %2, align 8 ret void } @@ -148,7 +148,7 @@ define void @t14(i64 %a) { ; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]] %1 = add i64 %a, 1191936 ;0x123000 %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64* %2, align 8 + %3 = load volatile i64, i64* %2, align 8 ret void } @@ -158,7 +158,7 @@ define void @t15(i64 %a) { ; CHECK-NEXT: ldr xzr, 
{{\[}}[[REG]]] %1 = add i64 %a, -1191936 ;0xFFFFFFFFFFEDD000 %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64* %2, align 8 + %3 = load volatile i64, i64* %2, align 8 ret void } @@ -167,7 +167,7 @@ define void @t16(i64 %a) { ; CHECK: ldr xzr, [x0, #28672] %1 = add i64 %a, 28672 ;0x7000 %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64* %2, align 8 + %3 = load volatile i64, i64* %2, align 8 ret void } @@ -176,6 +176,6 @@ define void @t17(i64 %a) { ; CHECK: ldur xzr, [x0, #-256] %1 = add i64 %a, -256 ;-0x100 %2 = inttoptr i64 %1 to i64* - %3 = load volatile i64* %2, align 8 + %3 = load volatile i64, i64* %2, align 8 ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll b/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll index f396bc99170..bf2d2cfa606 100644 --- a/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll +++ b/llvm/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll @@ -13,9 +13,9 @@ entry: ; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], {{\[}}[[BASE:x[0-9]+]], #32] ; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], {{\[}}[[BASE]]] %retval = alloca <16 x float>, align 16 - %0 = load <16 x float>* @T3_retval, align 16 + %0 = load <16 x float>, <16 x float>* @T3_retval, align 16 store <16 x float> %0, <16 x float>* %retval - %1 = load <16 x float>* %retval + %1 = load <16 x float>, <16 x float>* %retval store <16 x float> %1, <16 x float>* %agg.result, align 16 ret void } diff --git a/llvm/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll b/llvm/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll index 3750f31b373..eb0cd3547bd 100644 --- a/llvm/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll +++ b/llvm/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll @@ -13,17 +13,17 @@ define i32 @foo(i32 %a) nounwind { %arr2 = alloca [32 x i32], align 4 %j = alloca i32, align 4 store i32 %a, i32* %a.addr, align 4 - %tmp = load i32* %a.addr, align 4 + %tmp = load i32, i32* %a.addr, align 4 %tmp1 = zext i32 %tmp to i64 %v = mul i64 4, %tmp1 %vla = alloca i8, i64 %v, align 4 %tmp2 = bitcast i8* %vla to i32* - %tmp3 = load i32* %a.addr, align 4 + %tmp3 = load i32, i32* %a.addr, align 4 store i32 %tmp3, i32* %i, align 4 - %tmp4 = load i32* %a.addr, align 4 + %tmp4 = load i32, i32* %a.addr, align 4 store i32 %tmp4, i32* %j, align 4 - %tmp5 = load i32* %j, align 4 + %tmp5 = load i32, i32* %j, align 4 store i32 %tmp5, i32* %retval - %x = load i32* %retval + %x = load i32, i32* %retval ret i32 %x } diff --git a/llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll b/llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll index 419497722f4..71e64807f52 100644 --- a/llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll +++ b/llvm/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll @@ -29,7 +29,7 @@ _ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit: ; preds = %lor.rhs.i.i br i1 %cmp.i.i.i.i, label %if.then3, label %if.end5 if.then3: ; preds = %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit, %land.rhs.i - %tmp11 = load i8* %str14, align 8 + %tmp11 = load i8, i8* %str14, align 8 %tmp12 = and i8 %tmp11, 2 %tmp13 = icmp ne i8 %tmp12, 0 br label %return @@ -55,7 +55,7 @@ if.then7: ; preds = %_ZNK7WebCore4Node10 br i1 %isTextField, label %if.then9, label %if.end12 if.then9: ; preds = %if.then7 - %tmp23 = load i8* %str5, align 8 + %tmp23 = load i8, i8* %str5, align 8 %tmp24 = and i8 %tmp23, 2 %tmp25 = icmp ne i8 %tmp24, 0 br label %return diff --git a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll index 
642d72aac47..a76cf74a6d0 100644 --- a/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll +++ b/llvm/test/CodeGen/AArch64/arm64-atomic-128.ll @@ -169,7 +169,7 @@ define i128 @atomic_load_seq_cst(i128* %p) { ; CHECK-NOT: dmb ; CHECK-LABEL: ldaxp ; CHECK-NOT: dmb - %r = load atomic i128* %p seq_cst, align 16 + %r = load atomic i128, i128* %p seq_cst, align 16 ret i128 %r } @@ -178,7 +178,7 @@ define i128 @atomic_load_relaxed(i128* %p) { ; CHECK-NOT: dmb ; CHECK: ldxp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0] ; CHECK-NOT: dmb - %r = load atomic i128* %p monotonic, align 16 + %r = load atomic i128, i128* %p monotonic, align 16 ret i128 %r } diff --git a/llvm/test/CodeGen/AArch64/arm64-atomic.ll b/llvm/test/CodeGen/AArch64/arm64-atomic.ll index fc6e42f3fbe..81b555a42d1 100644 --- a/llvm/test/CodeGen/AArch64/arm64-atomic.ll +++ b/llvm/test/CodeGen/AArch64/arm64-atomic.ll @@ -107,7 +107,7 @@ define void @seq_cst_fence() { } define i32 @atomic_load(i32* %p) { - %r = load atomic i32* %p seq_cst, align 4 + %r = load atomic i32, i32* %p seq_cst, align 4 ret i32 %r ; CHECK-LABEL: atomic_load: ; CHECK: ldar @@ -116,21 +116,21 @@ define i32 @atomic_load(i32* %p) { define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) { ; CHECK-LABEL: atomic_load_relaxed_8: %ptr_unsigned = getelementptr i8, i8* %p, i32 4095 - %val_unsigned = load atomic i8* %ptr_unsigned monotonic, align 1 + %val_unsigned = load atomic i8, i8* %ptr_unsigned monotonic, align 1 ; CHECK: ldrb {{w[0-9]+}}, [x0, #4095] %ptr_regoff = getelementptr i8, i8* %p, i32 %off32 - %val_regoff = load atomic i8* %ptr_regoff unordered, align 1 + %val_regoff = load atomic i8, i8* %ptr_regoff unordered, align 1 %tot1 = add i8 %val_unsigned, %val_regoff ; CHECK: ldrb {{w[0-9]+}}, [x0, w1, sxtw] %ptr_unscaled = getelementptr i8, i8* %p, i32 -256 - %val_unscaled = load atomic i8* %ptr_unscaled monotonic, align 1 + %val_unscaled = load atomic i8, i8* %ptr_unscaled monotonic, align 1 %tot2 = add i8 %tot1, %val_unscaled ; CHECK: ldurb {{w[0-9]+}}, [x0, #-256] %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm) - %val_random = load atomic i8* %ptr_random unordered, align 1 + %val_random = load atomic i8, i8* %ptr_random unordered, align 1 %tot3 = add i8 %tot2, %val_random ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12 ; CHECK: ldrb {{w[0-9]+}}, [x[[ADDR]]] @@ -141,21 +141,21 @@ define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) { define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) { ; CHECK-LABEL: atomic_load_relaxed_16: %ptr_unsigned = getelementptr i16, i16* %p, i32 4095 - %val_unsigned = load atomic i16* %ptr_unsigned monotonic, align 2 + %val_unsigned = load atomic i16, i16* %ptr_unsigned monotonic, align 2 ; CHECK: ldrh {{w[0-9]+}}, [x0, #8190] %ptr_regoff = getelementptr i16, i16* %p, i32 %off32 - %val_regoff = load atomic i16* %ptr_regoff unordered, align 2 + %val_regoff = load atomic i16, i16* %ptr_regoff unordered, align 2 %tot1 = add i16 %val_unsigned, %val_regoff ; CHECK: ldrh {{w[0-9]+}}, [x0, w1, sxtw #1] %ptr_unscaled = getelementptr i16, i16* %p, i32 -128 - %val_unscaled = load atomic i16* %ptr_unscaled monotonic, align 2 + %val_unscaled = load atomic i16, i16* %ptr_unscaled monotonic, align 2 %tot2 = add i16 %tot1, %val_unscaled ; CHECK: ldurh {{w[0-9]+}}, [x0, #-256] %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. 
ADD imm) - %val_random = load atomic i16* %ptr_random unordered, align 2 + %val_random = load atomic i16, i16* %ptr_random unordered, align 2 %tot3 = add i16 %tot2, %val_random ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12 ; CHECK: ldrh {{w[0-9]+}}, [x[[ADDR]]] @@ -166,21 +166,21 @@ define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) { define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) { ; CHECK-LABEL: atomic_load_relaxed_32: %ptr_unsigned = getelementptr i32, i32* %p, i32 4095 - %val_unsigned = load atomic i32* %ptr_unsigned monotonic, align 4 + %val_unsigned = load atomic i32, i32* %ptr_unsigned monotonic, align 4 ; CHECK: ldr {{w[0-9]+}}, [x0, #16380] %ptr_regoff = getelementptr i32, i32* %p, i32 %off32 - %val_regoff = load atomic i32* %ptr_regoff unordered, align 4 + %val_regoff = load atomic i32, i32* %ptr_regoff unordered, align 4 %tot1 = add i32 %val_unsigned, %val_regoff ; CHECK: ldr {{w[0-9]+}}, [x0, w1, sxtw #2] %ptr_unscaled = getelementptr i32, i32* %p, i32 -64 - %val_unscaled = load atomic i32* %ptr_unscaled monotonic, align 4 + %val_unscaled = load atomic i32, i32* %ptr_unscaled monotonic, align 4 %tot2 = add i32 %tot1, %val_unscaled ; CHECK: ldur {{w[0-9]+}}, [x0, #-256] %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm) - %val_random = load atomic i32* %ptr_random unordered, align 4 + %val_random = load atomic i32, i32* %ptr_random unordered, align 4 %tot3 = add i32 %tot2, %val_random ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12 ; CHECK: ldr {{w[0-9]+}}, [x[[ADDR]]] @@ -191,21 +191,21 @@ define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) { define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) { ; CHECK-LABEL: atomic_load_relaxed_64: %ptr_unsigned = getelementptr i64, i64* %p, i32 4095 - %val_unsigned = load atomic i64* %ptr_unsigned monotonic, align 8 + %val_unsigned = load atomic i64, i64* %ptr_unsigned monotonic, align 8 ; CHECK: ldr {{x[0-9]+}}, [x0, #32760] %ptr_regoff = getelementptr i64, i64* %p, i32 %off32 - %val_regoff = load atomic i64* %ptr_regoff unordered, align 8 + %val_regoff = load atomic i64, i64* %ptr_regoff unordered, align 8 %tot1 = add i64 %val_unsigned, %val_regoff ; CHECK: ldr {{x[0-9]+}}, [x0, w1, sxtw #3] %ptr_unscaled = getelementptr i64, i64* %p, i32 -32 - %val_unscaled = load atomic i64* %ptr_unscaled monotonic, align 8 + %val_unscaled = load atomic i64, i64* %ptr_unscaled monotonic, align 8 %tot2 = add i64 %tot1, %val_unscaled ; CHECK: ldur {{x[0-9]+}}, [x0, #-256] %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. 
ADD imm) - %val_random = load atomic i64* %ptr_random unordered, align 8 + %val_random = load atomic i64, i64* %ptr_random unordered, align 8 %tot3 = add i64 %tot2, %val_random ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12 ; CHECK: ldr {{x[0-9]+}}, [x[[ADDR]]] diff --git a/llvm/test/CodeGen/AArch64/arm64-basic-pic.ll b/llvm/test/CodeGen/AArch64/arm64-basic-pic.ll index 9fdb1e91385..e11274e45ff 100644 --- a/llvm/test/CodeGen/AArch64/arm64-basic-pic.ll +++ b/llvm/test/CodeGen/AArch64/arm64-basic-pic.ll @@ -5,7 +5,7 @@ define i32 @get_globalvar() { ; CHECK-LABEL: get_globalvar: - %val = load i32* @var + %val = load i32, i32* @var ; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var ; CHECK: ldr x[[GOTLOC:[0-9]+]], [x[[GOTHI]], :got_lo12:var] ; CHECK: ldr w0, [x[[GOTLOC]]] @@ -16,7 +16,7 @@ define i32 @get_globalvar() { define i32* @get_globalvaraddr() { ; CHECK-LABEL: get_globalvaraddr: - %val = load i32* @var + %val = load i32, i32* @var ; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var ; CHECK: ldr x0, [x[[GOTHI]], :got_lo12:var] @@ -28,7 +28,7 @@ define i32* @get_globalvaraddr() { define i32 @get_hiddenvar() { ; CHECK-LABEL: get_hiddenvar: - %val = load i32* @hiddenvar + %val = load i32, i32* @hiddenvar ; CHECK: adrp x[[HI:[0-9]+]], hiddenvar ; CHECK: ldr w0, [x[[HI]], :lo12:hiddenvar] @@ -38,7 +38,7 @@ define i32 @get_hiddenvar() { define i32* @get_hiddenvaraddr() { ; CHECK-LABEL: get_hiddenvaraddr: - %val = load i32* @hiddenvar + %val = load i32, i32* @hiddenvar ; CHECK: adrp [[HI:x[0-9]+]], hiddenvar ; CHECK: add x0, [[HI]], :lo12:hiddenvar diff --git a/llvm/test/CodeGen/AArch64/arm64-bcc.ll b/llvm/test/CodeGen/AArch64/arm64-bcc.ll index 94e6b6bcbb1..66d2f52ab96 100644 --- a/llvm/test/CodeGen/AArch64/arm64-bcc.ll +++ b/llvm/test/CodeGen/AArch64/arm64-bcc.ll @@ -28,9 +28,9 @@ define { i64, i1 } @foo(i64* , %Sstruct* , i1, i64) { entry: %.sroa.0 = alloca i72, align 16 %.count.value = getelementptr inbounds %Sstruct, %Sstruct* %1, i64 0, i32 0, i32 0 - %4 = load i64* %.count.value, align 8 + %4 = load i64, i64* %.count.value, align 8 %.repeatedValue.value = getelementptr inbounds %Sstruct, %Sstruct* %1, i64 0, i32 1, i32 0 - %5 = load i32* %.repeatedValue.value, align 8 + %5 = load i32, i32* %.repeatedValue.value, align 8 %6 = icmp eq i64 %4, 0 br label %7 diff --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll index d2985f4dd66..876a69193b4 100644 --- a/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll +++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll @@ -5,7 +5,7 @@ define void @test_i64_f64(double* %p, i64* %q) { ; CHECK: ldr ; CHECK: str - %1 = load double* %p + %1 = load double, double* %p %2 = fadd double %1, %1 %3 = bitcast double %2 to i64 %4 = add i64 %3, %3 @@ -17,7 +17,7 @@ define void @test_i64_f64(double* %p, i64* %q) { define void @test_i64_v1i64(<1 x i64>* %p, i64* %q) { ; CHECK: ldr ; CHECK: str - %1 = load <1 x i64>* %p + %1 = load <1 x i64>, <1 x i64>* %p %2 = add <1 x i64> %1, %1 %3 = bitcast <1 x i64> %2 to i64 %4 = add i64 %3, %3 @@ -30,7 +30,7 @@ define void @test_i64_v2f32(<2 x float>* %p, i64* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: str - %1 = load <2 x float>* %p + %1 = load <2 x float>, <2 x float>* %p %2 = fadd <2 x float> %1, %1 %3 = bitcast <2 x float> %2 to i64 %4 = add i64 %3, %3 @@ -43,7 +43,7 @@ define void @test_i64_v2i32(<2 x i32>* %p, i64* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: str - %1 = load <2 x 
i32>* %p + %1 = load <2 x i32>, <2 x i32>* %p %2 = add <2 x i32> %1, %1 %3 = bitcast <2 x i32> %2 to i64 %4 = add i64 %3, %3 @@ -56,7 +56,7 @@ define void @test_i64_v4i16(<4 x i16>* %p, i64* %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: str - %1 = load <4 x i16>* %p + %1 = load <4 x i16>, <4 x i16>* %p %2 = add <4 x i16> %1, %1 %3 = bitcast <4 x i16> %2 to i64 %4 = add i64 %3, %3 @@ -69,7 +69,7 @@ define void @test_i64_v8i8(<8 x i8>* %p, i64* %q) { ; CHECK: ld1 { v{{[0-9]+}}.8b } ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: str - %1 = load <8 x i8>* %p + %1 = load <8 x i8>, <8 x i8>* %p %2 = add <8 x i8> %1, %1 %3 = bitcast <8 x i8> %2 to i64 %4 = add i64 %3, %3 @@ -81,7 +81,7 @@ define void @test_i64_v8i8(<8 x i8>* %p, i64* %q) { define void @test_f64_i64(i64* %p, double* %q) { ; CHECK: ldr ; CHECK: str - %1 = load i64* %p + %1 = load i64, i64* %p %2 = add i64 %1, %1 %3 = bitcast i64 %2 to double %4 = fadd double %3, %3 @@ -93,7 +93,7 @@ define void @test_f64_i64(i64* %p, double* %q) { define void @test_f64_v1i64(<1 x i64>* %p, double* %q) { ; CHECK: ldr ; CHECK: str - %1 = load <1 x i64>* %p + %1 = load <1 x i64>, <1 x i64>* %p %2 = add <1 x i64> %1, %1 %3 = bitcast <1 x i64> %2 to double %4 = fadd double %3, %3 @@ -106,7 +106,7 @@ define void @test_f64_v2f32(<2 x float>* %p, double* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: str - %1 = load <2 x float>* %p + %1 = load <2 x float>, <2 x float>* %p %2 = fadd <2 x float> %1, %1 %3 = bitcast <2 x float> %2 to double %4 = fadd double %3, %3 @@ -119,7 +119,7 @@ define void @test_f64_v2i32(<2 x i32>* %p, double* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: str - %1 = load <2 x i32>* %p + %1 = load <2 x i32>, <2 x i32>* %p %2 = add <2 x i32> %1, %1 %3 = bitcast <2 x i32> %2 to double %4 = fadd double %3, %3 @@ -132,7 +132,7 @@ define void @test_f64_v4i16(<4 x i16>* %p, double* %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: str - %1 = load <4 x i16>* %p + %1 = load <4 x i16>, <4 x i16>* %p %2 = add <4 x i16> %1, %1 %3 = bitcast <4 x i16> %2 to double %4 = fadd double %3, %3 @@ -145,7 +145,7 @@ define void @test_f64_v8i8(<8 x i8>* %p, double* %q) { ; CHECK: ld1 { v{{[0-9]+}}.8b } ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: str - %1 = load <8 x i8>* %p + %1 = load <8 x i8>, <8 x i8>* %p %2 = add <8 x i8> %1, %1 %3 = bitcast <8 x i8> %2 to double %4 = fadd double %3, %3 @@ -157,7 +157,7 @@ define void @test_f64_v8i8(<8 x i8>* %p, double* %q) { define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) { ; CHECK: ldr ; CHECK: str - %1 = load i64* %p + %1 = load i64, i64* %p %2 = add i64 %1, %1 %3 = bitcast i64 %2 to <1 x i64> %4 = add <1 x i64> %3, %3 @@ -169,7 +169,7 @@ define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) { define void @test_v1i64_f64(double* %p, <1 x i64>* %q) { ; CHECK: ldr ; CHECK: str - %1 = load double* %p + %1 = load double, double* %p %2 = fadd double %1, %1 %3 = bitcast double %2 to <1 x i64> %4 = add <1 x i64> %3, %3 @@ -182,7 +182,7 @@ define void @test_v1i64_v2f32(<2 x float>* %p, <1 x i64>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: str - %1 = load <2 x float>* %p + %1 = load <2 x float>, <2 x float>* %p %2 = fadd <2 x float> %1, %1 %3 = bitcast <2 x float> %2 to <1 x i64> %4 = add <1 x i64> %3, %3 @@ -195,7 +195,7 @@ define void @test_v1i64_v2i32(<2 x i32>* %p, <1 x i64>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: str - %1 = load <2 x i32>* %p + 
%1 = load <2 x i32>, <2 x i32>* %p %2 = add <2 x i32> %1, %1 %3 = bitcast <2 x i32> %2 to <1 x i64> %4 = add <1 x i64> %3, %3 @@ -208,7 +208,7 @@ define void @test_v1i64_v4i16(<4 x i16>* %p, <1 x i64>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: str - %1 = load <4 x i16>* %p + %1 = load <4 x i16>, <4 x i16>* %p %2 = add <4 x i16> %1, %1 %3 = bitcast <4 x i16> %2 to <1 x i64> %4 = add <1 x i64> %3, %3 @@ -221,7 +221,7 @@ define void @test_v1i64_v8i8(<8 x i8>* %p, <1 x i64>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.8b } ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: str - %1 = load <8 x i8>* %p + %1 = load <8 x i8>, <8 x i8>* %p %2 = add <8 x i8> %1, %1 %3 = bitcast <8 x i8> %2 to <1 x i64> %4 = add <1 x i64> %3, %3 @@ -234,7 +234,7 @@ define void @test_v2f32_i64(i64* %p, <2 x float>* %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load i64* %p + %1 = load i64, i64* %p %2 = add i64 %1, %1 %3 = bitcast i64 %2 to <2 x float> %4 = fadd <2 x float> %3, %3 @@ -247,7 +247,7 @@ define void @test_v2f32_f64(double* %p, <2 x float>* %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load double* %p + %1 = load double, double* %p %2 = fadd double %1, %1 %3 = bitcast double %2 to <2 x float> %4 = fadd <2 x float> %3, %3 @@ -260,7 +260,7 @@ define void @test_v2f32_v1i64(<1 x i64>* %p, <2 x float>* %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <1 x i64>* %p + %1 = load <1 x i64>, <1 x i64>* %p %2 = add <1 x i64> %1, %1 %3 = bitcast <1 x i64> %2 to <2 x float> %4 = fadd <2 x float> %3, %3 @@ -272,7 +272,7 @@ define void @test_v2f32_v1i64(<1 x i64>* %p, <2 x float>* %q) { define void @test_v2f32_v2i32(<2 x i32>* %p, <2 x float>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <2 x i32>* %p + %1 = load <2 x i32>, <2 x i32>* %p %2 = add <2 x i32> %1, %1 %3 = bitcast <2 x i32> %2 to <2 x float> %4 = fadd <2 x float> %3, %3 @@ -285,7 +285,7 @@ define void @test_v2f32_v4i16(<4 x i16>* %p, <2 x float>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK: rev32 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <4 x i16>* %p + %1 = load <4 x i16>, <4 x i16>* %p %2 = add <4 x i16> %1, %1 %3 = bitcast <4 x i16> %2 to <2 x float> %4 = fadd <2 x float> %3, %3 @@ -298,7 +298,7 @@ define void @test_v2f32_v8i8(<8 x i8>* %p, <2 x float>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.8b } ; CHECK: rev32 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <8 x i8>* %p + %1 = load <8 x i8>, <8 x i8>* %p %2 = add <8 x i8> %1, %1 %3 = bitcast <8 x i8> %2 to <2 x float> %4 = fadd <2 x float> %3, %3 @@ -311,7 +311,7 @@ define void @test_v2i32_i64(i64* %p, <2 x i32>* %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load i64* %p + %1 = load i64, i64* %p %2 = add i64 %1, %1 %3 = bitcast i64 %2 to <2 x i32> %4 = add <2 x i32> %3, %3 @@ -324,7 +324,7 @@ define void @test_v2i32_f64(double* %p, <2 x i32>* %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load double* %p + %1 = load double, double* %p %2 = fadd double %1, %1 %3 = bitcast double %2 to <2 x i32> %4 = add <2 x i32> %3, %3 @@ -337,7 +337,7 @@ define void @test_v2i32_v1i64(<1 x i64>* %p, <2 x i32>* %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <1 x i64>* %p + %1 = load <1 x i64>, <1 x i64>* %p %2 = add <1 x i64> %1, %1 %3 = bitcast <1 x i64> %2 to <2 x i32> %4 = add <2 x i32> %3, %3 @@ 
-349,7 +349,7 @@ define void @test_v2i32_v1i64(<1 x i64>* %p, <2 x i32>* %q) { define void @test_v2i32_v2f32(<2 x float>* %p, <2 x i32>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <2 x float>* %p + %1 = load <2 x float>, <2 x float>* %p %2 = fadd <2 x float> %1, %1 %3 = bitcast <2 x float> %2 to <2 x i32> %4 = add <2 x i32> %3, %3 @@ -362,7 +362,7 @@ define void @test_v2i32_v4i16(<4 x i16>* %p, <2 x i32>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK: rev32 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <4 x i16>* %p + %1 = load <4 x i16>, <4 x i16>* %p %2 = add <4 x i16> %1, %1 %3 = bitcast <4 x i16> %2 to <2 x i32> %4 = add <2 x i32> %3, %3 @@ -375,7 +375,7 @@ define void @test_v2i32_v8i8(<8 x i8>* %p, <2 x i32>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.8b } ; CHECK: rev32 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.2s } - %1 = load <8 x i8>* %p + %1 = load <8 x i8>, <8 x i8>* %p %2 = add <8 x i8> %1, %1 %3 = bitcast <8 x i8> %2 to <2 x i32> %4 = add <2 x i32> %3, %3 @@ -388,7 +388,7 @@ define void @test_v4i16_i64(i64* %p, <4 x i16>* %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load i64* %p + %1 = load i64, i64* %p %2 = add i64 %1, %1 %3 = bitcast i64 %2 to <4 x i16> %4 = add <4 x i16> %3, %3 @@ -401,7 +401,7 @@ define void @test_v4i16_f64(double* %p, <4 x i16>* %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load double* %p + %1 = load double, double* %p %2 = fadd double %1, %1 %3 = bitcast double %2 to <4 x i16> %4 = add <4 x i16> %3, %3 @@ -414,7 +414,7 @@ define void @test_v4i16_v1i64(<1 x i64>* %p, <4 x i16>* %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <1 x i64>* %p + %1 = load <1 x i64>, <1 x i64>* %p %2 = add <1 x i64> %1, %1 %3 = bitcast <1 x i64> %2 to <4 x i16> %4 = add <4 x i16> %3, %3 @@ -427,7 +427,7 @@ define void @test_v4i16_v2f32(<2 x float>* %p, <4 x i16>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev32 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <2 x float>* %p + %1 = load <2 x float>, <2 x float>* %p %2 = fadd <2 x float> %1, %1 %3 = bitcast <2 x float> %2 to <4 x i16> %4 = add <4 x i16> %3, %3 @@ -440,7 +440,7 @@ define void @test_v4i16_v2i32(<2 x i32>* %p, <4 x i16>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev32 v{{[0-9]+}}.4h ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <2 x i32>* %p + %1 = load <2 x i32>, <2 x i32>* %p %2 = add <2 x i32> %1, %1 %3 = bitcast <2 x i32> %2 to <4 x i16> %4 = add <4 x i16> %3, %3 @@ -453,7 +453,7 @@ define void @test_v4i16_v8i8(<8 x i8>* %p, <4 x i16>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.8b } ; CHECK: rev16 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.4h } - %1 = load <8 x i8>* %p + %1 = load <8 x i8>, <8 x i8>* %p %2 = add <8 x i8> %1, %1 %3 = bitcast <8 x i8> %2 to <4 x i16> %4 = add <4 x i16> %3, %3 @@ -466,7 +466,7 @@ define void @test_v8i8_i64(i64* %p, <8 x i8>* %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.8b } - %1 = load i64* %p + %1 = load i64, i64* %p %2 = add i64 %1, %1 %3 = bitcast i64 %2 to <8 x i8> %4 = add <8 x i8> %3, %3 @@ -479,7 +479,7 @@ define void @test_v8i8_f64(double* %p, <8 x i8>* %q) { ; CHECK: ldr ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.8b } - %1 = load double* %p + %1 = load double, double* %p %2 = fadd double %1, %1 %3 = bitcast double %2 to <8 x i8> %4 = add <8 x i8> %3, %3 @@ -492,7 +492,7 @@ define void @test_v8i8_v1i64(<1 x i64>* %p, <8 x i8>* %q) { ; CHECK: ldr ; 
CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.8b } - %1 = load <1 x i64>* %p + %1 = load <1 x i64>, <1 x i64>* %p %2 = add <1 x i64> %1, %1 %3 = bitcast <1 x i64> %2 to <8 x i8> %4 = add <8 x i8> %3, %3 @@ -505,7 +505,7 @@ define void @test_v8i8_v2f32(<2 x float>* %p, <8 x i8>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev32 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.8b } - %1 = load <2 x float>* %p + %1 = load <2 x float>, <2 x float>* %p %2 = fadd <2 x float> %1, %1 %3 = bitcast <2 x float> %2 to <8 x i8> %4 = add <8 x i8> %3, %3 @@ -518,7 +518,7 @@ define void @test_v8i8_v2i32(<2 x i32>* %p, <8 x i8>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2s } ; CHECK: rev32 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.8b } - %1 = load <2 x i32>* %p + %1 = load <2 x i32>, <2 x i32>* %p %2 = add <2 x i32> %1, %1 %3 = bitcast <2 x i32> %2 to <8 x i8> %4 = add <8 x i8> %3, %3 @@ -531,7 +531,7 @@ define void @test_v8i8_v4i16(<4 x i16>* %p, <8 x i8>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.4h } ; CHECK: rev16 v{{[0-9]+}}.8b ; CHECK: st1 { v{{[0-9]+}}.8b } - %1 = load <4 x i16>* %p + %1 = load <4 x i16>, <4 x i16>* %p %2 = add <4 x i16> %1, %1 %3 = bitcast <4 x i16> %2 to <8 x i8> %4 = add <8 x i8> %3, %3 @@ -544,7 +544,7 @@ define void @test_f128_v2f64(<2 x double>* %p, fp128* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: ext ; CHECK: str - %1 = load <2 x double>* %p + %1 = load <2 x double>, <2 x double>* %p %2 = fadd <2 x double> %1, %1 %3 = bitcast <2 x double> %2 to fp128 %4 = fadd fp128 %3, %3 @@ -557,7 +557,7 @@ define void @test_f128_v2i64(<2 x i64>* %p, fp128* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: ext ; CHECK: str - %1 = load <2 x i64>* %p + %1 = load <2 x i64>, <2 x i64>* %p %2 = add <2 x i64> %1, %1 %3 = bitcast <2 x i64> %2 to fp128 %4 = fadd fp128 %3, %3 @@ -572,7 +572,7 @@ define void @test_f128_v4f32(<4 x float>* %p, fp128* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: str q - %1 = load <4 x float>* %p + %1 = load <4 x float>, <4 x float>* %p %2 = fadd <4 x float> %1, %1 %3 = bitcast <4 x float> %2 to fp128 %4 = fadd fp128 %3, %3 @@ -586,7 +586,7 @@ define void @test_f128_v4i32(<4 x i32>* %p, fp128* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: str - %1 = load <4 x i32>* %p + %1 = load <4 x i32>, <4 x i32>* %p %2 = add <4 x i32> %1, %1 %3 = bitcast <4 x i32> %2 to fp128 %4 = fadd fp128 %3, %3 @@ -600,7 +600,7 @@ define void @test_f128_v8i16(<8 x i16>* %p, fp128* %q) { ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext ; CHECK: str - %1 = load <8 x i16>* %p + %1 = load <8 x i16>, <8 x i16>* %p %2 = add <8 x i16> %1, %1 %3 = bitcast <8 x i16> %2 to fp128 %4 = fadd fp128 %3, %3 @@ -613,7 +613,7 @@ define void @test_f128_v16i8(<16 x i8>* %p, fp128* %q) { ; CHECK: ld1 { v{{[0-9]+}}.16b } ; CHECK: ext ; CHECK: str q - %1 = load <16 x i8>* %p + %1 = load <16 x i8>, <16 x i8>* %p %2 = add <16 x i8> %1, %1 %3 = bitcast <16 x i8> %2 to fp128 %4 = fadd fp128 %3, %3 @@ -626,7 +626,7 @@ define void @test_v2f64_f128(fp128* %p, <2 x double>* %q) { ; CHECK: ldr ; CHECK: ext ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load fp128* %p + %1 = load fp128, fp128* %p %2 = fadd fp128 %1, %1 %3 = bitcast fp128 %2 to <2 x double> %4 = fadd <2 x double> %3, %3 @@ -638,7 +638,7 @@ define void @test_v2f64_f128(fp128* %p, <2 x double>* %q) { define void @test_v2f64_v2i64(<2 x i64>* %p, <2 x double>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <2 x i64>* %p + %1 = load <2 x i64>, <2 x i64>* %p %2 = add <2 x i64> %1, %1 %3 = bitcast <2 x i64> %2 to <2 x double> %4 = 
fadd <2 x double> %3, %3 @@ -652,7 +652,7 @@ define void @test_v2f64_v4f32(<4 x float>* %p, <2 x double>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <4 x float>* %p + %1 = load <4 x float>, <4 x float>* %p %2 = fadd <4 x float> %1, %1 %3 = bitcast <4 x float> %2 to <2 x double> %4 = fadd <2 x double> %3, %3 @@ -665,7 +665,7 @@ define void @test_v2f64_v4i32(<4 x i32>* %p, <2 x double>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <4 x i32>* %p + %1 = load <4 x i32>, <4 x i32>* %p %2 = add <4 x i32> %1, %1 %3 = bitcast <4 x i32> %2 to <2 x double> %4 = fadd <2 x double> %3, %3 @@ -678,7 +678,7 @@ define void @test_v2f64_v8i16(<8 x i16>* %p, <2 x double>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.8h } ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <8 x i16>* %p + %1 = load <8 x i16>, <8 x i16>* %p %2 = add <8 x i16> %1, %1 %3 = bitcast <8 x i16> %2 to <2 x double> %4 = fadd <2 x double> %3, %3 @@ -691,7 +691,7 @@ define void @test_v2f64_v16i8(<16 x i8>* %p, <2 x double>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.16b } ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <16 x i8>* %p + %1 = load <16 x i8>, <16 x i8>* %p %2 = add <16 x i8> %1, %1 %3 = bitcast <16 x i8> %2 to <2 x double> %4 = fadd <2 x double> %3, %3 @@ -704,7 +704,7 @@ define void @test_v2i64_f128(fp128* %p, <2 x i64>* %q) { ; CHECK: ldr ; CHECK: ext ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load fp128* %p + %1 = load fp128, fp128* %p %2 = fadd fp128 %1, %1 %3 = bitcast fp128 %2 to <2 x i64> %4 = add <2 x i64> %3, %3 @@ -716,7 +716,7 @@ define void @test_v2i64_f128(fp128* %p, <2 x i64>* %q) { define void @test_v2i64_v2f64(<2 x double>* %p, <2 x i64>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <2 x double>* %p + %1 = load <2 x double>, <2 x double>* %p %2 = fadd <2 x double> %1, %1 %3 = bitcast <2 x double> %2 to <2 x i64> %4 = add <2 x i64> %3, %3 @@ -730,7 +730,7 @@ define void @test_v2i64_v4f32(<4 x float>* %p, <2 x i64>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <4 x float>* %p + %1 = load <4 x float>, <4 x float>* %p %2 = fadd <4 x float> %1, %1 %3 = bitcast <4 x float> %2 to <2 x i64> %4 = add <2 x i64> %3, %3 @@ -743,7 +743,7 @@ define void @test_v2i64_v4i32(<4 x i32>* %p, <2 x i64>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <4 x i32>* %p + %1 = load <4 x i32>, <4 x i32>* %p %2 = add <4 x i32> %1, %1 %3 = bitcast <4 x i32> %2 to <2 x i64> %4 = add <2 x i64> %3, %3 @@ -756,7 +756,7 @@ define void @test_v2i64_v8i16(<8 x i16>* %p, <2 x i64>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.8h } ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <8 x i16>* %p + %1 = load <8 x i16>, <8 x i16>* %p %2 = add <8 x i16> %1, %1 %3 = bitcast <8 x i16> %2 to <2 x i64> %4 = add <2 x i64> %3, %3 @@ -769,7 +769,7 @@ define void @test_v2i64_v16i8(<16 x i8>* %p, <2 x i64>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.16b } ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <16 x i8>* %p + %1 = load <16 x i8>, <16 x i8>* %p %2 = add <16 x i8> %1, %1 %3 = bitcast <16 x i8> %2 to <2 x i64> %4 = add <2 x i64> %3, %3 @@ -784,7 +784,7 @@ define void @test_v4f32_f128(fp128* %p, <4 x float>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load fp128* %p + 
%1 = load fp128, fp128* %p %2 = fadd fp128 %1, %1 %3 = bitcast fp128 %2 to <4 x float> %4 = fadd <4 x float> %3, %3 @@ -798,7 +798,7 @@ define void @test_v4f32_v2f64(<2 x double>* %p, <4 x float>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <2 x double>* %p + %1 = load <2 x double>, <2 x double>* %p %2 = fadd <2 x double> %1, %1 %3 = bitcast <2 x double> %2 to <4 x float> %4 = fadd <4 x float> %3, %3 @@ -812,7 +812,7 @@ define void @test_v4f32_v2i64(<2 x i64>* %p, <4 x float>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <2 x i64>* %p + %1 = load <2 x i64>, <2 x i64>* %p %2 = add <2 x i64> %1, %1 %3 = bitcast <2 x i64> %2 to <4 x float> %4 = fadd <4 x float> %3, %3 @@ -825,7 +825,7 @@ define void @test_v4f32_v4i32(<4 x i32>* %p, <4 x float>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <4 x i32>* %p + %1 = load <4 x i32>, <4 x i32>* %p %2 = add <4 x i32> %1, %1 %3 = bitcast <4 x i32> %2 to <4 x float> %4 = fadd <4 x float> %3, %3 @@ -839,7 +839,7 @@ define void @test_v4f32_v8i16(<8 x i16>* %p, <4 x float>* %q) { ; CHECK: rev32 v{{[0-9]+}}.8h ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <8 x i16>* %p + %1 = load <8 x i16>, <8 x i16>* %p %2 = add <8 x i16> %1, %1 %3 = bitcast <8 x i16> %2 to <4 x float> %4 = fadd <4 x float> %3, %3 @@ -853,7 +853,7 @@ define void @test_v4f32_v16i8(<16 x i8>* %p, <4 x float>* %q) { ; CHECK: rev32 v{{[0-9]+}}.16b ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.2d } - %1 = load <16 x i8>* %p + %1 = load <16 x i8>, <16 x i8>* %p %2 = add <16 x i8> %1, %1 %3 = bitcast <16 x i8> %2 to <4 x float> %4 = fadd <4 x float> %3, %3 @@ -867,7 +867,7 @@ define void @test_v4i32_f128(fp128* %p, <4 x i32>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load fp128* %p + %1 = load fp128, fp128* %p %2 = fadd fp128 %1, %1 %3 = bitcast fp128 %2 to <4 x i32> %4 = add <4 x i32> %3, %3 @@ -880,7 +880,7 @@ define void @test_v4i32_v2f64(<2 x double>* %p, <4 x i32>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <2 x double>* %p + %1 = load <2 x double>, <2 x double>* %p %2 = fadd <2 x double> %1, %1 %3 = bitcast <2 x double> %2 to <4 x i32> %4 = add <4 x i32> %3, %3 @@ -893,7 +893,7 @@ define void @test_v4i32_v2i64(<2 x i64>* %p, <4 x i32>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <2 x i64>* %p + %1 = load <2 x i64>, <2 x i64>* %p %2 = add <2 x i64> %1, %1 %3 = bitcast <2 x i64> %2 to <4 x i32> %4 = add <4 x i32> %3, %3 @@ -906,7 +906,7 @@ define void @test_v4i32_v4f32(<4 x float>* %p, <4 x i32>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <4 x float>* %p + %1 = load <4 x float>, <4 x float>* %p %2 = fadd <4 x float> %1, %1 %3 = bitcast <4 x float> %2 to <4 x i32> %4 = add <4 x i32> %3, %3 @@ -919,7 +919,7 @@ define void @test_v4i32_v8i16(<8 x i16>* %p, <4 x i32>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.8h } ; CHECK: rev32 v{{[0-9]+}}.8h ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <8 x i16>* %p + %1 = load <8 x i16>, <8 x i16>* %p %2 = add <8 x i16> %1, %1 %3 = bitcast <8 x i16> %2 to <4 x i32> %4 = add <4 x i32> %3, %3 @@ -932,7 +932,7 @@ define void @test_v4i32_v16i8(<16 x i8>* %p, <4 x i32>* %q) { ; CHECK: ld1 { 
v{{[0-9]+}}.16b } ; CHECK: rev32 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.4s } - %1 = load <16 x i8>* %p + %1 = load <16 x i8>, <16 x i8>* %p %2 = add <16 x i8> %1, %1 %3 = bitcast <16 x i8> %2 to <4 x i32> %4 = add <4 x i32> %3, %3 @@ -946,7 +946,7 @@ define void @test_v8i16_f128(fp128* %p, <8 x i16>* %q) { ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext ; CHECK: st1 { v{{[0-9]+}}.8h } - %1 = load fp128* %p + %1 = load fp128, fp128* %p %2 = fadd fp128 %1, %1 %3 = bitcast fp128 %2 to <8 x i16> %4 = add <8 x i16> %3, %3 @@ -959,7 +959,7 @@ define void @test_v8i16_v2f64(<2 x double>* %p, <8 x i16>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: st1 { v{{[0-9]+}}.8h } - %1 = load <2 x double>* %p + %1 = load <2 x double>, <2 x double>* %p %2 = fadd <2 x double> %1, %1 %3 = bitcast <2 x double> %2 to <8 x i16> %4 = add <8 x i16> %3, %3 @@ -972,7 +972,7 @@ define void @test_v8i16_v2i64(<2 x i64>* %p, <8 x i16>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: st1 { v{{[0-9]+}}.8h } - %1 = load <2 x i64>* %p + %1 = load <2 x i64>, <2 x i64>* %p %2 = add <2 x i64> %1, %1 %3 = bitcast <2 x i64> %2 to <8 x i16> %4 = add <8 x i16> %3, %3 @@ -986,7 +986,7 @@ define void @test_v8i16_v4f32(<4 x float>* %p, <8 x i16>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: rev32 v{{[0-9]+}}.8h ; CHECK: st1 { v{{[0-9]+}}.8h } - %1 = load <4 x float>* %p + %1 = load <4 x float>, <4 x float>* %p %2 = fadd <4 x float> %1, %1 %3 = bitcast <4 x float> %2 to <8 x i16> %4 = add <8 x i16> %3, %3 @@ -999,7 +999,7 @@ define void @test_v8i16_v4i32(<4 x i32>* %p, <8 x i16>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK: rev32 v{{[0-9]+}}.8h ; CHECK: st1 { v{{[0-9]+}}.8h } - %1 = load <4 x i32>* %p + %1 = load <4 x i32>, <4 x i32>* %p %2 = add <4 x i32> %1, %1 %3 = bitcast <4 x i32> %2 to <8 x i16> %4 = add <8 x i16> %3, %3 @@ -1012,7 +1012,7 @@ define void @test_v8i16_v16i8(<16 x i8>* %p, <8 x i16>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.16b } ; CHECK: rev16 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.8h } - %1 = load <16 x i8>* %p + %1 = load <16 x i8>, <16 x i8>* %p %2 = add <16 x i8> %1, %1 %3 = bitcast <16 x i8> %2 to <8 x i16> %4 = add <8 x i16> %3, %3 @@ -1026,7 +1026,7 @@ define void @test_v16i8_f128(fp128* %p, <16 x i8>* %q) { ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext ; CHECK: st1 { v{{[0-9]+}}.16b } - %1 = load fp128* %p + %1 = load fp128, fp128* %p %2 = fadd fp128 %1, %1 %3 = bitcast fp128 %2 to <16 x i8> %4 = add <16 x i8> %3, %3 @@ -1039,7 +1039,7 @@ define void @test_v16i8_v2f64(<2 x double>* %p, <16 x i8>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.16b } - %1 = load <2 x double>* %p + %1 = load <2 x double>, <2 x double>* %p %2 = fadd <2 x double> %1, %1 %3 = bitcast <2 x double> %2 to <16 x i8> %4 = add <16 x i8> %3, %3 @@ -1052,7 +1052,7 @@ define void @test_v16i8_v2i64(<2 x i64>* %p, <16 x i8>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.2d } ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.16b } - %1 = load <2 x i64>* %p + %1 = load <2 x i64>, <2 x i64>* %p %2 = add <2 x i64> %1, %1 %3 = bitcast <2 x i64> %2 to <16 x i8> %4 = add <16 x i8> %3, %3 @@ -1066,7 +1066,7 @@ define void @test_v16i8_v4f32(<4 x float>* %p, <16 x i8>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: rev32 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.16b } - %1 = load <4 x float>* %p + %1 = load <4 x float>, <4 x float>* %p %2 = fadd <4 x float> %1, %1 %3 = bitcast <4 x float> %2 to <16 x i8> %4 = add <16 x i8> %3, %3 @@ -1079,7 +1079,7 @@ 
define void @test_v16i8_v4i32(<4 x i32>* %p, <16 x i8>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.4s } ; CHECK: rev32 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.16b } - %1 = load <4 x i32>* %p + %1 = load <4 x i32>, <4 x i32>* %p %2 = add <4 x i32> %1, %1 %3 = bitcast <4 x i32> %2 to <16 x i8> %4 = add <16 x i8> %3, %3 @@ -1092,7 +1092,7 @@ define void @test_v16i8_v8i16(<8 x i16>* %p, <16 x i8>* %q) { ; CHECK: ld1 { v{{[0-9]+}}.8h } ; CHECK: rev16 v{{[0-9]+}}.16b ; CHECK: st1 { v{{[0-9]+}}.16b } - %1 = load <8 x i16>* %p + %1 = load <8 x i16>, <8 x i16>* %p %2 = add <8 x i16> %1, %1 %3 = bitcast <8 x i16> %2 to <16 x i8> %4 = add <16 x i8> %3, %3 diff --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll index 296eb19fc8b..e5e16848a4b 100644 --- a/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll +++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-varargs.ll @@ -22,7 +22,7 @@ entry: %vl1 = bitcast %struct.__va_list* %vl to i8* call void @llvm.va_start(i8* %vl1) %vr_offs_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 4 - %vr_offs = load i32* %vr_offs_p, align 4 + %vr_offs = load i32, i32* %vr_offs_p, align 4 %0 = icmp sgt i32 %vr_offs, -1 br i1 %0, label %vaarg.on_stack, label %vaarg.maybe_reg @@ -34,7 +34,7 @@ vaarg.maybe_reg: ; preds = %entry vaarg.in_reg: ; preds = %vaarg.maybe_reg %reg_top_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 2 - %reg_top = load i8** %reg_top_p, align 8 + %reg_top = load i8*, i8** %reg_top_p, align 8 %1 = sext i32 %vr_offs to i64 %2 = getelementptr i8, i8* %reg_top, i64 %1 %3 = ptrtoint i8* %2 to i64 @@ -44,7 +44,7 @@ vaarg.in_reg: ; preds = %vaarg.maybe_reg vaarg.on_stack: ; preds = %vaarg.maybe_reg, %entry %stack_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 0 - %stack = load i8** %stack_p, align 8 + %stack = load i8*, i8** %stack_p, align 8 %new_stack = getelementptr i8, i8* %stack, i64 8 store i8* %new_stack, i8** %stack_p, align 8 br label %vaarg.end @@ -52,7 +52,7 @@ vaarg.on_stack: ; preds = %vaarg.maybe_reg, %e vaarg.end: ; preds = %vaarg.on_stack, %vaarg.in_reg %.sink = phi i8* [ %4, %vaarg.in_reg ], [ %stack, %vaarg.on_stack ] %5 = bitcast i8* %.sink to double* - %6 = load double* %5, align 8 + %6 = load double, double* %5, align 8 call void @llvm.va_end(i8* %vl1) ret double %6 } diff --git a/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll b/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll index d72d0a5db41..c280bef0977 100644 --- a/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll +++ b/llvm/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll @@ -5,7 +5,7 @@ declare i64 @test_i64_f64_helper(double %p) define void @test_i64_f64(double* %p, i64* %q) { ; CHECK-NOT: rev - %1 = load double* %p + %1 = load double, double* %p %2 = fadd double %1, %1 %3 = call i64 @test_i64_f64_helper(double %2) %4 = add i64 %3, %3 @@ -17,7 +17,7 @@ define void @test_i64_f64(double* %p, i64* %q) { declare i64 @test_i64_v1i64_helper(<1 x i64> %p) define void @test_i64_v1i64(<1 x i64>* %p, i64* %q) { ; CHECK-NOT: rev - %1 = load <1 x i64>* %p + %1 = load <1 x i64>, <1 x i64>* %p %2 = add <1 x i64> %1, %1 %3 = call i64 @test_i64_v1i64_helper(<1 x i64> %2) %4 = add i64 %3, %3 @@ -29,7 +29,7 @@ define void @test_i64_v1i64(<1 x i64>* %p, i64* %q) { declare i64 @test_i64_v2f32_helper(<2 x float> %p) define void @test_i64_v2f32(<2 x float>* %p, i64* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 
= load <2 x float>* %p + %1 = load <2 x float>, <2 x float>* %p %2 = fadd <2 x float> %1, %1 %3 = call i64 @test_i64_v2f32_helper(<2 x float> %2) %4 = add i64 %3, %3 @@ -41,7 +41,7 @@ define void @test_i64_v2f32(<2 x float>* %p, i64* %q) { declare i64 @test_i64_v2i32_helper(<2 x i32> %p) define void @test_i64_v2i32(<2 x i32>* %p, i64* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x i32>* %p + %1 = load <2 x i32>, <2 x i32>* %p %2 = add <2 x i32> %1, %1 %3 = call i64 @test_i64_v2i32_helper(<2 x i32> %2) %4 = add i64 %3, %3 @@ -53,7 +53,7 @@ define void @test_i64_v2i32(<2 x i32>* %p, i64* %q) { declare i64 @test_i64_v4i16_helper(<4 x i16> %p) define void @test_i64_v4i16(<4 x i16>* %p, i64* %q) { ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <4 x i16>* %p + %1 = load <4 x i16>, <4 x i16>* %p %2 = add <4 x i16> %1, %1 %3 = call i64 @test_i64_v4i16_helper(<4 x i16> %2) %4 = add i64 %3, %3 @@ -65,7 +65,7 @@ define void @test_i64_v4i16(<4 x i16>* %p, i64* %q) { declare i64 @test_i64_v8i8_helper(<8 x i8> %p) define void @test_i64_v8i8(<8 x i8>* %p, i64* %q) { ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <8 x i8>* %p + %1 = load <8 x i8>, <8 x i8>* %p %2 = add <8 x i8> %1, %1 %3 = call i64 @test_i64_v8i8_helper(<8 x i8> %2) %4 = add i64 %3, %3 @@ -77,7 +77,7 @@ define void @test_i64_v8i8(<8 x i8>* %p, i64* %q) { declare double @test_f64_i64_helper(i64 %p) define void @test_f64_i64(i64* %p, double* %q) { ; CHECK-NOT: rev - %1 = load i64* %p + %1 = load i64, i64* %p %2 = add i64 %1, %1 %3 = call double @test_f64_i64_helper(i64 %2) %4 = fadd double %3, %3 @@ -89,7 +89,7 @@ define void @test_f64_i64(i64* %p, double* %q) { declare double @test_f64_v1i64_helper(<1 x i64> %p) define void @test_f64_v1i64(<1 x i64>* %p, double* %q) { ; CHECK-NOT: rev - %1 = load <1 x i64>* %p + %1 = load <1 x i64>, <1 x i64>* %p %2 = add <1 x i64> %1, %1 %3 = call double @test_f64_v1i64_helper(<1 x i64> %2) %4 = fadd double %3, %3 @@ -101,7 +101,7 @@ define void @test_f64_v1i64(<1 x i64>* %p, double* %q) { declare double @test_f64_v2f32_helper(<2 x float> %p) define void @test_f64_v2f32(<2 x float>* %p, double* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x float>* %p + %1 = load <2 x float>, <2 x float>* %p %2 = fadd <2 x float> %1, %1 %3 = call double @test_f64_v2f32_helper(<2 x float> %2) %4 = fadd double %3, %3 @@ -113,7 +113,7 @@ define void @test_f64_v2f32(<2 x float>* %p, double* %q) { declare double @test_f64_v2i32_helper(<2 x i32> %p) define void @test_f64_v2i32(<2 x i32>* %p, double* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x i32>* %p + %1 = load <2 x i32>, <2 x i32>* %p %2 = add <2 x i32> %1, %1 %3 = call double @test_f64_v2i32_helper(<2 x i32> %2) %4 = fadd double %3, %3 @@ -125,7 +125,7 @@ define void @test_f64_v2i32(<2 x i32>* %p, double* %q) { declare double @test_f64_v4i16_helper(<4 x i16> %p) define void @test_f64_v4i16(<4 x i16>* %p, double* %q) { ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <4 x i16>* %p + %1 = load <4 x i16>, <4 x i16>* %p %2 = add <4 x i16> %1, %1 %3 = call double @test_f64_v4i16_helper(<4 x i16> %2) %4 = fadd double %3, %3 @@ -137,7 +137,7 @@ define void @test_f64_v4i16(<4 x i16>* %p, double* %q) { declare double @test_f64_v8i8_helper(<8 x i8> %p) define void @test_f64_v8i8(<8 x i8>* %p, double* %q) { ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <8 x i8>* %p + %1 = load <8 x i8>, <8 x i8>* %p %2 = add <8 x i8> %1, %1 %3 = call double @test_f64_v8i8_helper(<8 x i8> %2) %4 = fadd double %3, %3 @@ -149,7 +149,7 @@ define void @test_f64_v8i8(<8 x i8>* %p, double* %q) { declare <1 x 
i64> @test_v1i64_i64_helper(i64 %p) define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) { ; CHECK-NOT: rev - %1 = load i64* %p + %1 = load i64, i64* %p %2 = add i64 %1, %1 %3 = call <1 x i64> @test_v1i64_i64_helper(i64 %2) %4 = add <1 x i64> %3, %3 @@ -161,7 +161,7 @@ define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) { declare <1 x i64> @test_v1i64_f64_helper(double %p) define void @test_v1i64_f64(double* %p, <1 x i64>* %q) { ; CHECK-NOT: rev - %1 = load double* %p + %1 = load double, double* %p %2 = fadd double %1, %1 %3 = call <1 x i64> @test_v1i64_f64_helper(double %2) %4 = add <1 x i64> %3, %3 @@ -173,7 +173,7 @@ define void @test_v1i64_f64(double* %p, <1 x i64>* %q) { declare <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %p) define void @test_v1i64_v2f32(<2 x float>* %p, <1 x i64>* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x float>* %p + %1 = load <2 x float>, <2 x float>* %p %2 = fadd <2 x float> %1, %1 %3 = call <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %2) %4 = add <1 x i64> %3, %3 @@ -185,7 +185,7 @@ define void @test_v1i64_v2f32(<2 x float>* %p, <1 x i64>* %q) { declare <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %p) define void @test_v1i64_v2i32(<2 x i32>* %p, <1 x i64>* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x i32>* %p + %1 = load <2 x i32>, <2 x i32>* %p %2 = add <2 x i32> %1, %1 %3 = call <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %2) %4 = add <1 x i64> %3, %3 @@ -197,7 +197,7 @@ define void @test_v1i64_v2i32(<2 x i32>* %p, <1 x i64>* %q) { declare <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %p) define void @test_v1i64_v4i16(<4 x i16>* %p, <1 x i64>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <4 x i16>* %p + %1 = load <4 x i16>, <4 x i16>* %p %2 = add <4 x i16> %1, %1 %3 = call <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %2) %4 = add <1 x i64> %3, %3 @@ -209,7 +209,7 @@ define void @test_v1i64_v4i16(<4 x i16>* %p, <1 x i64>* %q) { declare <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %p) define void @test_v1i64_v8i8(<8 x i8>* %p, <1 x i64>* %q) { ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <8 x i8>* %p + %1 = load <8 x i8>, <8 x i8>* %p %2 = add <8 x i8> %1, %1 %3 = call <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %2) %4 = add <1 x i64> %3, %3 @@ -221,7 +221,7 @@ define void @test_v1i64_v8i8(<8 x i8>* %p, <1 x i64>* %q) { declare <2 x float> @test_v2f32_i64_helper(i64 %p) define void @test_v2f32_i64(i64* %p, <2 x float>* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load i64* %p + %1 = load i64, i64* %p %2 = add i64 %1, %1 %3 = call <2 x float> @test_v2f32_i64_helper(i64 %2) %4 = fadd <2 x float> %3, %3 @@ -233,7 +233,7 @@ define void @test_v2f32_i64(i64* %p, <2 x float>* %q) { declare <2 x float> @test_v2f32_f64_helper(double %p) define void @test_v2f32_f64(double* %p, <2 x float>* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load double* %p + %1 = load double, double* %p %2 = fadd double %1, %1 %3 = call <2 x float> @test_v2f32_f64_helper(double %2) %4 = fadd <2 x float> %3, %3 @@ -245,7 +245,7 @@ define void @test_v2f32_f64(double* %p, <2 x float>* %q) { declare <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %p) define void @test_v2f32_v1i64(<1 x i64>* %p, <2 x float>* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <1 x i64>* %p + %1 = load <1 x i64>, <1 x i64>* %p %2 = add <1 x i64> %1, %1 %3 = call <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %2) %4 = fadd <2 x float> %3, %3 @@ -258,7 +258,7 @@ declare <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %p) define void @test_v2f32_v2i32(<2 x i32>* %p, <2 x float>* %q) { ; CHECK: rev64 
v{{[0-9]+}}.2s ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x i32>* %p + %1 = load <2 x i32>, <2 x i32>* %p %2 = add <2 x i32> %1, %1 %3 = call <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %2) %4 = fadd <2 x float> %3, %3 @@ -271,7 +271,7 @@ declare <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %p) define void @test_v2f32_v4i16(<4 x i16>* %p, <2 x float>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <4 x i16>* %p + %1 = load <4 x i16>, <4 x i16>* %p %2 = add <4 x i16> %1, %1 %3 = call <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %2) %4 = fadd <2 x float> %3, %3 @@ -284,7 +284,7 @@ declare <2 x float> @test_v2f32_v8i8_helper(<8 x i8> %p) define void @test_v2f32_v8i8(<8 x i8>* %p, <2 x float>* %q) { ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <8 x i8>* %p + %1 = load <8 x i8>, <8 x i8>* %p %2 = add <8 x i8> %1, %1 %3 = call <2 x float> @test_v2f32_v8i8_helper(<8 x i8> %2) %4 = fadd <2 x float> %3, %3 @@ -296,7 +296,7 @@ define void @test_v2f32_v8i8(<8 x i8>* %p, <2 x float>* %q) { declare <2 x i32> @test_v2i32_i64_helper(i64 %p) define void @test_v2i32_i64(i64* %p, <2 x i32>* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load i64* %p + %1 = load i64, i64* %p %2 = add i64 %1, %1 %3 = call <2 x i32> @test_v2i32_i64_helper(i64 %2) %4 = add <2 x i32> %3, %3 @@ -308,7 +308,7 @@ define void @test_v2i32_i64(i64* %p, <2 x i32>* %q) { declare <2 x i32> @test_v2i32_f64_helper(double %p) define void @test_v2i32_f64(double* %p, <2 x i32>* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load double* %p + %1 = load double, double* %p %2 = fadd double %1, %1 %3 = call <2 x i32> @test_v2i32_f64_helper(double %2) %4 = add <2 x i32> %3, %3 @@ -320,7 +320,7 @@ define void @test_v2i32_f64(double* %p, <2 x i32>* %q) { declare <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %p) define void @test_v2i32_v1i64(<1 x i64>* %p, <2 x i32>* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <1 x i64>* %p + %1 = load <1 x i64>, <1 x i64>* %p %2 = add <1 x i64> %1, %1 %3 = call <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %2) %4 = add <2 x i32> %3, %3 @@ -333,7 +333,7 @@ declare <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %p) define void @test_v2i32_v2f32(<2 x float>* %p, <2 x i32>* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <2 x float>* %p + %1 = load <2 x float>, <2 x float>* %p %2 = fadd <2 x float> %1, %1 %3 = call <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %2) %4 = add <2 x i32> %3, %3 @@ -346,7 +346,7 @@ declare <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %p) define void @test_v2i32_v4i16(<4 x i16>* %p, <2 x i32>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <4 x i16>* %p + %1 = load <4 x i16>, <4 x i16>* %p %2 = add <4 x i16> %1, %1 %3 = call <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %2) %4 = add <2 x i32> %3, %3 @@ -359,7 +359,7 @@ declare <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %p) define void @test_v2i32_v8i8(<8 x i8>* %p, <2 x i32>* %q) { ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: rev64 v{{[0-9]+}}.2s - %1 = load <8 x i8>* %p + %1 = load <8 x i8>, <8 x i8>* %p %2 = add <8 x i8> %1, %1 %3 = call <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %2) %4 = add <2 x i32> %3, %3 @@ -371,7 +371,7 @@ define void @test_v2i32_v8i8(<8 x i8>* %p, <2 x i32>* %q) { declare <4 x i16> @test_v4i16_i64_helper(i64 %p) define void @test_v4i16_i64(i64* %p, <4 x i16>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load i64* %p + %1 = load i64, i64* %p %2 = add i64 %1, %1 %3 = call <4 x i16> @test_v4i16_i64_helper(i64 
%2) %4 = add <4 x i16> %3, %3 @@ -383,7 +383,7 @@ define void @test_v4i16_i64(i64* %p, <4 x i16>* %q) { declare <4 x i16> @test_v4i16_f64_helper(double %p) define void @test_v4i16_f64(double* %p, <4 x i16>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load double* %p + %1 = load double, double* %p %2 = fadd double %1, %1 %3 = call <4 x i16> @test_v4i16_f64_helper(double %2) %4 = add <4 x i16> %3, %3 @@ -395,7 +395,7 @@ define void @test_v4i16_f64(double* %p, <4 x i16>* %q) { declare <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %p) define void @test_v4i16_v1i64(<1 x i64>* %p, <4 x i16>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <1 x i64>* %p + %1 = load <1 x i64>, <1 x i64>* %p %2 = add <1 x i64> %1, %1 %3 = call <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %2) %4 = add <4 x i16> %3, %3 @@ -408,7 +408,7 @@ declare <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %p) define void @test_v4i16_v2f32(<2 x float>* %p, <4 x i16>* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <2 x float>* %p + %1 = load <2 x float>, <2 x float>* %p %2 = fadd <2 x float> %1, %1 %3 = call <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %2) %4 = add <4 x i16> %3, %3 @@ -421,7 +421,7 @@ declare <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %p) define void @test_v4i16_v2i32(<2 x i32>* %p, <4 x i16>* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <2 x i32>* %p + %1 = load <2 x i32>, <2 x i32>* %p %2 = add <2 x i32> %1, %1 %3 = call <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %2) %4 = add <4 x i16> %3, %3 @@ -434,7 +434,7 @@ declare <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %p) define void @test_v4i16_v8i8(<8 x i8>* %p, <4 x i16>* %q) { ; CHECK: rev64 v{{[0-9]+}}.8b ; CHECK: rev64 v{{[0-9]+}}.4h - %1 = load <8 x i8>* %p + %1 = load <8 x i8>, <8 x i8>* %p %2 = add <8 x i8> %1, %1 %3 = call <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %2) %4 = add <4 x i16> %3, %3 @@ -446,7 +446,7 @@ define void @test_v4i16_v8i8(<8 x i8>* %p, <4 x i16>* %q) { declare <8 x i8> @test_v8i8_i64_helper(i64 %p) define void @test_v8i8_i64(i64* %p, <8 x i8>* %q) { ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load i64* %p + %1 = load i64, i64* %p %2 = add i64 %1, %1 %3 = call <8 x i8> @test_v8i8_i64_helper(i64 %2) %4 = add <8 x i8> %3, %3 @@ -458,7 +458,7 @@ define void @test_v8i8_i64(i64* %p, <8 x i8>* %q) { declare <8 x i8> @test_v8i8_f64_helper(double %p) define void @test_v8i8_f64(double* %p, <8 x i8>* %q) { ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load double* %p + %1 = load double, double* %p %2 = fadd double %1, %1 %3 = call <8 x i8> @test_v8i8_f64_helper(double %2) %4 = add <8 x i8> %3, %3 @@ -470,7 +470,7 @@ define void @test_v8i8_f64(double* %p, <8 x i8>* %q) { declare <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %p) define void @test_v8i8_v1i64(<1 x i64>* %p, <8 x i8>* %q) { ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <1 x i64>* %p + %1 = load <1 x i64>, <1 x i64>* %p %2 = add <1 x i64> %1, %1 %3 = call <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %2) %4 = add <8 x i8> %3, %3 @@ -483,7 +483,7 @@ declare <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %p) define void @test_v8i8_v2f32(<2 x float>* %p, <8 x i8>* %q) { ; CHECK: rev64 v{{[0-9]+}}.2s ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <2 x float>* %p + %1 = load <2 x float>, <2 x float>* %p %2 = fadd <2 x float> %1, %1 %3 = call <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %2) %4 = add <8 x i8> %3, %3 @@ -496,7 +496,7 @@ declare <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %p) define void @test_v8i8_v2i32(<2 x i32>* %p, <8 x i8>* %q) { ; CHECK: 
rev64 v{{[0-9]+}}.2s ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <2 x i32>* %p + %1 = load <2 x i32>, <2 x i32>* %p %2 = add <2 x i32> %1, %1 %3 = call <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %2) %4 = add <8 x i8> %3, %3 @@ -509,7 +509,7 @@ declare <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %p) define void @test_v8i8_v4i16(<4 x i16>* %p, <8 x i8>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4h ; CHECK: rev64 v{{[0-9]+}}.8b - %1 = load <4 x i16>* %p + %1 = load <4 x i16>, <4 x i16>* %p %2 = add <4 x i16> %1, %1 %3 = call <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %2) %4 = add <8 x i8> %3, %3 @@ -521,7 +521,7 @@ define void @test_v8i8_v4i16(<4 x i16>* %p, <8 x i8>* %q) { declare fp128 @test_f128_v2f64_helper(<2 x double> %p) define void @test_f128_v2f64(<2 x double>* %p, fp128* %q) { ; CHECK: ext - %1 = load <2 x double>* %p + %1 = load <2 x double>, <2 x double>* %p %2 = fadd <2 x double> %1, %1 %3 = call fp128 @test_f128_v2f64_helper(<2 x double> %2) %4 = fadd fp128 %3, %3 @@ -533,7 +533,7 @@ define void @test_f128_v2f64(<2 x double>* %p, fp128* %q) { declare fp128 @test_f128_v2i64_helper(<2 x i64> %p) define void @test_f128_v2i64(<2 x i64>* %p, fp128* %q) { ; CHECK: ext - %1 = load <2 x i64>* %p + %1 = load <2 x i64>, <2 x i64>* %p %2 = add <2 x i64> %1, %1 %3 = call fp128 @test_f128_v2i64_helper(<2 x i64> %2) %4 = fadd fp128 %3, %3 @@ -546,7 +546,7 @@ declare fp128 @test_f128_v4f32_helper(<4 x float> %p) define void @test_f128_v4f32(<4 x float>* %p, fp128* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <4 x float>* %p + %1 = load <4 x float>, <4 x float>* %p %2 = fadd <4 x float> %1, %1 %3 = call fp128 @test_f128_v4f32_helper(<4 x float> %2) %4 = fadd fp128 %3, %3 @@ -559,7 +559,7 @@ declare fp128 @test_f128_v4i32_helper(<4 x i32> %p) define void @test_f128_v4i32(<4 x i32>* %p, fp128* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <4 x i32>* %p + %1 = load <4 x i32>, <4 x i32>* %p %2 = add <4 x i32> %1, %1 %3 = call fp128 @test_f128_v4i32_helper(<4 x i32> %2) %4 = fadd fp128 %3, %3 @@ -572,7 +572,7 @@ declare fp128 @test_f128_v8i16_helper(<8 x i16> %p) define void @test_f128_v8i16(<8 x i16>* %p, fp128* %q) { ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load <8 x i16>* %p + %1 = load <8 x i16>, <8 x i16>* %p %2 = add <8 x i16> %1, %1 %3 = call fp128 @test_f128_v8i16_helper(<8 x i16> %2) %4 = fadd fp128 %3, %3 @@ -585,7 +585,7 @@ declare fp128 @test_f128_v16i8_helper(<16 x i8> %p) define void @test_f128_v16i8(<16 x i8>* %p, fp128* %q) { ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext - %1 = load <16 x i8>* %p + %1 = load <16 x i8>, <16 x i8>* %p %2 = add <16 x i8> %1, %1 %3 = call fp128 @test_f128_v16i8_helper(<16 x i8> %2) %4 = fadd fp128 %3, %3 @@ -597,7 +597,7 @@ define void @test_f128_v16i8(<16 x i8>* %p, fp128* %q) { declare <2 x double> @test_v2f64_f128_helper(fp128 %p) define void @test_v2f64_f128(fp128* %p, <2 x double>* %q) { ; CHECK: ext - %1 = load fp128* %p + %1 = load fp128, fp128* %p %2 = fadd fp128 %1, %1 %3 = call <2 x double> @test_v2f64_f128_helper(fp128 %2) %4 = fadd <2 x double> %3, %3 @@ -610,7 +610,7 @@ declare <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %p) define void @test_v2f64_v2i64(<2 x i64>* %p, <2 x double>* %q) { ; CHECK: ext ; CHECK: ext - %1 = load <2 x i64>* %p + %1 = load <2 x i64>, <2 x i64>* %p %2 = add <2 x i64> %1, %1 %3 = call <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %2) %4 = fadd <2 x double> %3, %3 @@ -624,7 +624,7 @@ define void @test_v2f64_v4f32(<4 x float>* %p, <2 x double>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; 
CHECK: ext ; CHECK: ext - %1 = load <4 x float>* %p + %1 = load <4 x float>, <4 x float>* %p %2 = fadd <4 x float> %1, %1 %3 = call <2 x double> @test_v2f64_v4f32_helper(<4 x float> %2) %4 = fadd <2 x double> %3, %3 @@ -638,7 +638,7 @@ define void @test_v2f64_v4i32(<4 x i32>* %p, <2 x double>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: ext - %1 = load <4 x i32>* %p + %1 = load <4 x i32>, <4 x i32>* %p %2 = add <4 x i32> %1, %1 %3 = call <2 x double> @test_v2f64_v4i32_helper(<4 x i32> %2) %4 = fadd <2 x double> %3, %3 @@ -652,7 +652,7 @@ define void @test_v2f64_v8i16(<8 x i16>* %p, <2 x double>* %q) { ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext ; CHECK: ext - %1 = load <8 x i16>* %p + %1 = load <8 x i16>, <8 x i16>* %p %2 = add <8 x i16> %1, %1 %3 = call <2 x double> @test_v2f64_v8i16_helper(<8 x i16> %2) %4 = fadd <2 x double> %3, %3 @@ -666,7 +666,7 @@ define void @test_v2f64_v16i8(<16 x i8>* %p, <2 x double>* %q) { ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext ; CHECK: ext - %1 = load <16 x i8>* %p + %1 = load <16 x i8>, <16 x i8>* %p %2 = add <16 x i8> %1, %1 %3 = call <2 x double> @test_v2f64_v16i8_helper(<16 x i8> %2) %4 = fadd <2 x double> %3, %3 @@ -678,7 +678,7 @@ define void @test_v2f64_v16i8(<16 x i8>* %p, <2 x double>* %q) { declare <2 x i64> @test_v2i64_f128_helper(fp128 %p) define void @test_v2i64_f128(fp128* %p, <2 x i64>* %q) { ; CHECK: ext - %1 = load fp128* %p + %1 = load fp128, fp128* %p %2 = fadd fp128 %1, %1 %3 = call <2 x i64> @test_v2i64_f128_helper(fp128 %2) %4 = add <2 x i64> %3, %3 @@ -691,7 +691,7 @@ declare <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %p) define void @test_v2i64_v2f64(<2 x double>* %p, <2 x i64>* %q) { ; CHECK: ext ; CHECK: ext - %1 = load <2 x double>* %p + %1 = load <2 x double>, <2 x double>* %p %2 = fadd <2 x double> %1, %1 %3 = call <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %2) %4 = add <2 x i64> %3, %3 @@ -705,7 +705,7 @@ define void @test_v2i64_v4f32(<4 x float>* %p, <2 x i64>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: ext - %1 = load <4 x float>* %p + %1 = load <4 x float>, <4 x float>* %p %2 = fadd <4 x float> %1, %1 %3 = call <2 x i64> @test_v2i64_v4f32_helper(<4 x float> %2) %4 = add <2 x i64> %3, %3 @@ -719,7 +719,7 @@ define void @test_v2i64_v4i32(<4 x i32>* %p, <2 x i64>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext ; CHECK: ext - %1 = load <4 x i32>* %p + %1 = load <4 x i32>, <4 x i32>* %p %2 = add <4 x i32> %1, %1 %3 = call <2 x i64> @test_v2i64_v4i32_helper(<4 x i32> %2) %4 = add <2 x i64> %3, %3 @@ -733,7 +733,7 @@ define void @test_v2i64_v8i16(<8 x i16>* %p, <2 x i64>* %q) { ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext ; CHECK: ext - %1 = load <8 x i16>* %p + %1 = load <8 x i16>, <8 x i16>* %p %2 = add <8 x i16> %1, %1 %3 = call <2 x i64> @test_v2i64_v8i16_helper(<8 x i16> %2) %4 = add <2 x i64> %3, %3 @@ -747,7 +747,7 @@ define void @test_v2i64_v16i8(<16 x i8>* %p, <2 x i64>* %q) { ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext ; CHECK: ext - %1 = load <16 x i8>* %p + %1 = load <16 x i8>, <16 x i8>* %p %2 = add <16 x i8> %1, %1 %3 = call <2 x i64> @test_v2i64_v16i8_helper(<16 x i8> %2) %4 = add <2 x i64> %3, %3 @@ -760,7 +760,7 @@ declare <4 x float> @test_v4f32_f128_helper(fp128 %p) define void @test_v4f32_f128(fp128* %p, <4 x float>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load fp128* %p + %1 = load fp128, fp128* %p %2 = fadd fp128 %1, %1 %3 = call <4 x float> @test_v4f32_f128_helper(fp128 %2) %4 = fadd <4 x float> %3, %3 @@ -774,7 +774,7 @@ define void @test_v4f32_v2f64(<2 
x double>* %p, <4 x float>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <2 x double>* %p + %1 = load <2 x double>, <2 x double>* %p %2 = fadd <2 x double> %1, %1 %3 = call <4 x float> @test_v4f32_v2f64_helper(<2 x double> %2) %4 = fadd <4 x float> %3, %3 @@ -788,7 +788,7 @@ define void @test_v4f32_v2i64(<2 x i64>* %p, <4 x float>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <2 x i64>* %p + %1 = load <2 x i64>, <2 x i64>* %p %2 = add <2 x i64> %1, %1 %3 = call <4 x float> @test_v4f32_v2i64_helper(<2 x i64> %2) %4 = fadd <4 x float> %3, %3 @@ -803,7 +803,7 @@ define void @test_v4f32_v4i32(<4 x i32>* %p, <4 x float>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <4 x i32>* %p + %1 = load <4 x i32>, <4 x i32>* %p %2 = add <4 x i32> %1, %1 %3 = call <4 x float> @test_v4f32_v4i32_helper(<4 x i32> %2) %4 = fadd <4 x float> %3, %3 @@ -818,7 +818,7 @@ define void @test_v4f32_v8i16(<8 x i16>* %p, <4 x float>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <8 x i16>* %p + %1 = load <8 x i16>, <8 x i16>* %p %2 = add <8 x i16> %1, %1 %3 = call <4 x float> @test_v4f32_v8i16_helper(<8 x i16> %2) %4 = fadd <4 x float> %3, %3 @@ -833,7 +833,7 @@ define void @test_v4f32_v16i8(<16 x i8>* %p, <4 x float>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <16 x i8>* %p + %1 = load <16 x i8>, <16 x i8>* %p %2 = add <16 x i8> %1, %1 %3 = call <4 x float> @test_v4f32_v16i8_helper(<16 x i8> %2) %4 = fadd <4 x float> %3, %3 @@ -846,7 +846,7 @@ declare <4 x i32> @test_v4i32_f128_helper(fp128 %p) define void @test_v4i32_f128(fp128* %p, <4 x i32>* %q) { ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load fp128* %p + %1 = load fp128, fp128* %p %2 = fadd fp128 %1, %1 %3 = call <4 x i32> @test_v4i32_f128_helper(fp128 %2) %4 = add <4 x i32> %3, %3 @@ -860,7 +860,7 @@ define void @test_v4i32_v2f64(<2 x double>* %p, <4 x i32>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <2 x double>* %p + %1 = load <2 x double>, <2 x double>* %p %2 = fadd <2 x double> %1, %1 %3 = call <4 x i32> @test_v4i32_v2f64_helper(<2 x double> %2) %4 = add <4 x i32> %3, %3 @@ -874,7 +874,7 @@ define void @test_v4i32_v2i64(<2 x i64>* %p, <4 x i32>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <2 x i64>* %p + %1 = load <2 x i64>, <2 x i64>* %p %2 = add <2 x i64> %1, %1 %3 = call <4 x i32> @test_v4i32_v2i64_helper(<2 x i64> %2) %4 = add <4 x i32> %3, %3 @@ -889,7 +889,7 @@ define void @test_v4i32_v4f32(<4 x float>* %p, <4 x i32>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <4 x float>* %p + %1 = load <4 x float>, <4 x float>* %p %2 = fadd <4 x float> %1, %1 %3 = call <4 x i32> @test_v4i32_v4f32_helper(<4 x float> %2) %4 = add <4 x i32> %3, %3 @@ -904,7 +904,7 @@ define void @test_v4i32_v8i16(<8 x i16>* %p, <4 x i32>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <8 x i16>* %p + %1 = load <8 x i16>, <8 x i16>* %p %2 = add <8 x i16> %1, %1 %3 = call <4 x i32> @test_v4i32_v8i16_helper(<8 x i16> %2) %4 = add <4 x i32> %3, %3 @@ -919,7 +919,7 @@ define void @test_v4i32_v16i8(<16 x i8>* %p, <4 x i32>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.4s ; CHECK: ext - %1 = load <16 x i8>* %p + %1 = load <16 x i8>, <16 x i8>* %p %2 = add <16 x i8> %1, %1 %3 = call <4 x i32> @test_v4i32_v16i8_helper(<16 x i8> %2) %4 = add <4 x i32> %3, %3 @@ -932,7 +932,7 @@ declare <8 x i16> @test_v8i16_f128_helper(fp128 %p) 
define void @test_v8i16_f128(fp128* %p, <8 x i16>* %q) { ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load fp128* %p + %1 = load fp128, fp128* %p %2 = fadd fp128 %1, %1 %3 = call <8 x i16> @test_v8i16_f128_helper(fp128 %2) %4 = add <8 x i16> %3, %3 @@ -946,7 +946,7 @@ define void @test_v8i16_v2f64(<2 x double>* %p, <8 x i16>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load <2 x double>* %p + %1 = load <2 x double>, <2 x double>* %p %2 = fadd <2 x double> %1, %1 %3 = call <8 x i16> @test_v8i16_v2f64_helper(<2 x double> %2) %4 = add <8 x i16> %3, %3 @@ -960,7 +960,7 @@ define void @test_v8i16_v2i64(<2 x i64>* %p, <8 x i16>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load <2 x i64>* %p + %1 = load <2 x i64>, <2 x i64>* %p %2 = add <2 x i64> %1, %1 %3 = call <8 x i16> @test_v8i16_v2i64_helper(<2 x i64> %2) %4 = add <8 x i16> %3, %3 @@ -975,7 +975,7 @@ define void @test_v8i16_v4f32(<4 x float>* %p, <8 x i16>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load <4 x float>* %p + %1 = load <4 x float>, <4 x float>* %p %2 = fadd <4 x float> %1, %1 %3 = call <8 x i16> @test_v8i16_v4f32_helper(<4 x float> %2) %4 = add <8 x i16> %3, %3 @@ -990,7 +990,7 @@ define void @test_v8i16_v4i32(<4 x i32>* %p, <8 x i16>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load <4 x i32>* %p + %1 = load <4 x i32>, <4 x i32>* %p %2 = add <4 x i32> %1, %1 %3 = call <8 x i16> @test_v8i16_v4i32_helper(<4 x i32> %2) %4 = add <8 x i16> %3, %3 @@ -1005,7 +1005,7 @@ define void @test_v8i16_v16i8(<16 x i8>* %p, <8 x i16>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.8h ; CHECK: ext - %1 = load <16 x i8>* %p + %1 = load <16 x i8>, <16 x i8>* %p %2 = add <16 x i8> %1, %1 %3 = call <8 x i16> @test_v8i16_v16i8_helper(<16 x i8> %2) %4 = add <8 x i16> %3, %3 @@ -1018,7 +1018,7 @@ declare <16 x i8> @test_v16i8_f128_helper(fp128 %p) define void @test_v16i8_f128(fp128* %p, <16 x i8>* %q) { ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext - %1 = load fp128* %p + %1 = load fp128, fp128* %p %2 = fadd fp128 %1, %1 %3 = call <16 x i8> @test_v16i8_f128_helper(fp128 %2) %4 = add <16 x i8> %3, %3 @@ -1032,7 +1032,7 @@ define void @test_v16i8_v2f64(<2 x double>* %p, <16 x i8>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext - %1 = load <2 x double>* %p + %1 = load <2 x double>, <2 x double>* %p %2 = fadd <2 x double> %1, %1 %3 = call <16 x i8> @test_v16i8_v2f64_helper(<2 x double> %2) %4 = add <16 x i8> %3, %3 @@ -1046,7 +1046,7 @@ define void @test_v16i8_v2i64(<2 x i64>* %p, <16 x i8>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext - %1 = load <2 x i64>* %p + %1 = load <2 x i64>, <2 x i64>* %p %2 = add <2 x i64> %1, %1 %3 = call <16 x i8> @test_v16i8_v2i64_helper(<2 x i64> %2) %4 = add <16 x i8> %3, %3 @@ -1061,7 +1061,7 @@ define void @test_v16i8_v4f32(<4 x float>* %p, <16 x i8>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext - %1 = load <4 x float>* %p + %1 = load <4 x float>, <4 x float>* %p %2 = fadd <4 x float> %1, %1 %3 = call <16 x i8> @test_v16i8_v4f32_helper(<4 x float> %2) %4 = add <16 x i8> %3, %3 @@ -1076,7 +1076,7 @@ define void @test_v16i8_v4i32(<4 x i32>* %p, <16 x i8>* %q) { ; CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext - %1 = load <4 x i32>* %p + %1 = load <4 x i32>, <4 x i32>* %p %2 = add <4 x i32> %1, %1 %3 = call <16 x i8> @test_v16i8_v4i32_helper(<4 x i32> %2) %4 = add <16 x i8> %3, %3 @@ -1091,7 +1091,7 @@ define void @test_v16i8_v8i16(<8 x i16>* %p, <16 x i8>* %q) { ; 
CHECK: ext ; CHECK: rev64 v{{[0-9]+}}.16b ; CHECK: ext - %1 = load <8 x i16>* %p + %1 = load <8 x i16>, <8 x i16>* %p %2 = add <8 x i16> %1, %1 %3 = call <16 x i8> @test_v16i8_v8i16_helper(<8 x i16> %2) %4 = add <16 x i8> %3, %3 diff --git a/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll b/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll index d9d6b611972..5dca9294121 100644 --- a/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll +++ b/llvm/test/CodeGen/AArch64/arm64-bitfield-extract.ll @@ -12,7 +12,7 @@ define void @foo(%struct.X* nocapture %x, %struct.Y* nocapture %y) nounwind opts ; CHECK: ret %tmp = bitcast %struct.X* %x to i32* - %tmp1 = load i32* %tmp, align 4 + %tmp1 = load i32, i32* %tmp, align 4 %b = getelementptr inbounds %struct.Y, %struct.Y* %y, i64 0, i32 1 %bf.clear = lshr i32 %tmp1, 3 %bf.clear.lobit = and i32 %bf.clear, 1 @@ -46,7 +46,7 @@ define void @fct1(%struct.Z* nocapture %x, %struct.A* nocapture %y) nounwind opt ; CHECK: ret %tmp = bitcast %struct.Z* %x to i64* - %tmp1 = load i64* %tmp, align 4 + %tmp1 = load i64, i64* %tmp, align 4 %b = getelementptr inbounds %struct.A, %struct.A* %y, i64 0, i32 0 %bf.clear = lshr i64 %tmp1, 3 %bf.clear.lobit = and i64 %bf.clear, 1 @@ -77,7 +77,7 @@ entry: ; CHECK-NEXT: bfxil [[REG1]], x1, #16, #24 ; CHECK-NEXT: str [[REG1]], ; CHECK-NEXT: ret - %0 = load i64* %y, align 8 + %0 = load i64, i64* %y, align 8 %and = and i64 %0, -16777216 %shr = lshr i64 %x, 16 %and1 = and i64 %shr, 16777215 @@ -93,7 +93,7 @@ entry: ; CHECK-NEXT: bfxil [[REG1]], w1, #16, #3 ; CHECK-NEXT: str [[REG1]], ; CHECK-NEXT: ret - %0 = load i32* %y, align 8 + %0 = load i32, i32* %y, align 8 %and = and i32 %0, -8 %shr = lshr i32 %x, 16 %and1 = and i32 %shr, 7 @@ -112,7 +112,7 @@ entry: ; CHECK-NEXT: lsr [[REG2:w[0-9]+]], [[REG1]], #2 ; CHECK-NEXT: str [[REG2]], ; CHECK-NEXT: ret - %0 = load i32* %y, align 8 + %0 = load i32, i32* %y, align 8 %and = and i32 %0, -8 %shr = lshr i32 %x, 16 %and1 = and i32 %shr, 7 @@ -133,7 +133,7 @@ entry: ; CHECK-NEXT: lsl [[REG2:w[0-9]+]], [[REG1]], #2 ; CHECK-NEXT: str [[REG2]], ; CHECK-NEXT: ret - %0 = load i32* %y, align 8 + %0 = load i32, i32* %y, align 8 %and = and i32 %0, -8 %shr = lshr i32 %x, 16 %and1 = and i32 %shr, 7 @@ -155,7 +155,7 @@ entry: ; CHECK-NEXT: lsr [[REG2:x[0-9]+]], [[REG1]], #2 ; CHECK-NEXT: str [[REG2]], ; CHECK-NEXT: ret - %0 = load i64* %y, align 8 + %0 = load i64, i64* %y, align 8 %and = and i64 %0, -8 %shr = lshr i64 %x, 16 %and1 = and i64 %shr, 7 @@ -177,7 +177,7 @@ entry: ; CHECK-NEXT: lsl [[REG2:x[0-9]+]], [[REG1]], #2 ; CHECK-NEXT: str [[REG2]], ; CHECK-NEXT: ret - %0 = load i64* %y, align 8 + %0 = load i64, i64* %y, align 8 %and = and i64 %0, -8 %shr = lshr i64 %x, 16 %and1 = and i64 %shr, 7 @@ -198,7 +198,7 @@ entry: ; CHECK-NEXT: lsl [[REG2:w[0-9]+]], [[REG1]], #2 ; CHECK-NEXT: str [[REG2]], ; CHECK-NEXT: ret - %0 = load i32* %y, align 8 + %0 = load i32, i32* %y, align 8 %and = and i32 %0, -8 %and1 = and i32 %x, 7 %or = or i32 %and, %and1 @@ -218,7 +218,7 @@ entry: ; CHECK-NEXT: lsl [[REG2:x[0-9]+]], [[REG1]], #2 ; CHECK-NEXT: str [[REG2]], ; CHECK-NEXT: ret - %0 = load i64* %y, align 8 + %0 = load i64, i64* %y, align 8 %and = and i64 %0, -8 %and1 = and i64 %x, 7 %or = or i64 %and, %and1 @@ -247,7 +247,7 @@ entry: ; CHECK-NEXT: ubfx [[REG2:w[0-9]+]], [[REG1]], #2, #28 ; CHECK-NEXT: str [[REG2]], ; CHECK-NEXT: ret - %0 = load i32* %y, align 8 + %0 = load i32, i32* %y, align 8 %and = and i32 %0, -8 %shr = lshr i32 %x, 16 %and1 = and i32 %shr, 7 @@ -270,7 +270,7 @@ entry: ; CHECK-NEXT: ubfx 
[[REG2:x[0-9]+]], [[REG1]], #2, #60 ; CHECK-NEXT: str [[REG2]], ; CHECK-NEXT: ret - %0 = load i64* %y, align 8 + %0 = load i64, i64* %y, align 8 %and = and i64 %0, -8 %shr = lshr i64 %x, 16 %and1 = and i64 %shr, 7 @@ -296,7 +296,7 @@ entry: ; CHECK-NEXT: lsl [[REG3:w[0-9]+]], [[REG2]], #2 ; CHECK-NEXT: str [[REG3]], ; CHECK-NEXT: ret - %0 = load i32* %y, align 8 + %0 = load i32, i32* %y, align 8 %and = and i32 %0, -256 %shr = lshr i32 %x, 16 %and1 = and i32 %shr, 255 @@ -326,7 +326,7 @@ entry: ; CHECK-NEXT: lsl [[REG3:x[0-9]+]], [[REG2]], #2 ; CHECK-NEXT: str [[REG3]], ; CHECK-NEXT: ret - %0 = load i64* %y, align 8 + %0 = load i64, i64* %y, align 8 %and = and i64 %0, -256 %shr = lshr i64 %x, 16 %and1 = and i64 %shr, 255 @@ -357,7 +357,7 @@ entry: ; CHECK-NEXT: ubfx [[REG3:w[0-9]+]], [[REG2]], #2, #28 ; CHECK-NEXT: str [[REG3]], ; CHECK-NEXT: ret - %0 = load i32* %y, align 8 + %0 = load i32, i32* %y, align 8 %and = and i32 %0, 1737056 %shr = lshr i32 %x, 16 %and1 = and i32 %shr, 7 @@ -386,7 +386,7 @@ entry: ; CHECK-NEXT: ubfx [[REG3:x[0-9]+]], [[REG2]], #2, #60 ; CHECK-NEXT: str [[REG3]], ; CHECK-NEXT: ret - %0 = load i64* %y, align 8 + %0 = load i64, i64* %y, align 8 %and = and i64 %0, 1737056 %shr = lshr i64 %x, 16 %and1 = and i64 %shr, 7 @@ -422,7 +422,7 @@ entry: if.then: ; preds = %entry %arrayidx3 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %x.sroa.5.0.extract.shift - %0 = load i8* %arrayidx3, align 1 + %0 = load i8, i8* %arrayidx3, align 1 %conv = zext i8 %0 to i32 br label %return @@ -444,7 +444,7 @@ if.then7: ; preds = %if.end ; CHECK-NOT: ubfm %idxprom10 = and i64 %x.sroa.3.0.extract.shift, 65535 %arrayidx11 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %idxprom10 - %1 = load i8* %arrayidx11, align 1 + %1 = load i8, i8* %arrayidx11, align 1 %conv12 = zext i8 %1 to i32 %add = add nsw i32 %conv12, 16 br label %return @@ -467,7 +467,7 @@ if.then17: ; preds = %if.end13 ; CHECK-NOT: ubfm %idxprom20 = and i64 %x.sroa.1.0.extract.shift, 65535 %arrayidx21 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %idxprom20 - %2 = load i8* %arrayidx21, align 1 + %2 = load i8, i8* %arrayidx21, align 1 %conv22 = zext i8 %2 to i32 %add23 = add nsw i32 %conv22, 32 br label %return @@ -510,7 +510,7 @@ entry: %shr = lshr i64 %x, 4 %and = and i64 %shr, 15 %arrayidx = getelementptr inbounds [8 x [64 x i64]], [8 x [64 x i64]]* @arr, i64 0, i64 0, i64 %and - %0 = load i64* %arrayidx, align 8 + %0 = load i64, i64* %arrayidx, align 8 ret i64 %0 } diff --git a/llvm/test/CodeGen/AArch64/arm64-blockaddress.ll b/llvm/test/CodeGen/AArch64/arm64-blockaddress.ll index ac4f19e65df..5df84021635 100644 --- a/llvm/test/CodeGen/AArch64/arm64-blockaddress.ll +++ b/llvm/test/CodeGen/AArch64/arm64-blockaddress.ll @@ -25,6 +25,6 @@ entry: br label %mylabel mylabel: - %tmp = load volatile i64* %recover, align 8 + %tmp = load volatile i64, i64* %recover, align 8 ret i64 %tmp } diff --git a/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll b/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll index 487c1d9bec3..71d932787ce 100644 --- a/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll +++ b/llvm/test/CodeGen/AArch64/arm64-call-tailcalls.ll @@ -9,7 +9,7 @@ define void @t2() { ; CHECK: ldr x[[ADDR:[0-9]+]], [x[[GOTADDR]], _t@GOTPAGEOFF] ; CHECK: ldr x[[DEST:[0-9]+]], [x[[ADDR]]] ; CHECK: br x[[DEST]] - %tmp = load i32 ()** @t + %tmp = load i32 ()*, i32 ()** @t %tmp.upgrd.2 = tail call i32 %tmp() ret void } diff --git 
a/llvm/test/CodeGen/AArch64/arm64-cast-opt.ll b/llvm/test/CodeGen/AArch64/arm64-cast-opt.ll index c701db2dbd9..463add5688e 100644 --- a/llvm/test/CodeGen/AArch64/arm64-cast-opt.ll +++ b/llvm/test/CodeGen/AArch64/arm64-cast-opt.ll @@ -11,12 +11,12 @@ define zeroext i8 @foo(i32 %i1, i32 %i2) { ; CHECK-NOT: and entry: %idxprom = sext i32 %i1 to i64 - %0 = load i8** @block, align 8 + %0 = load i8*, i8** @block, align 8 %arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom - %1 = load i8* %arrayidx, align 1 + %1 = load i8, i8* %arrayidx, align 1 %idxprom1 = sext i32 %i2 to i64 %arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1 - %2 = load i8* %arrayidx2, align 1 + %2 = load i8, i8* %arrayidx2, align 1 %cmp = icmp eq i8 %1, %2 br i1 %cmp, label %return, label %if.then diff --git a/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll b/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll index 730aabf9ae2..4e47ab6c03f 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll @@ -21,7 +21,7 @@ target triple = "arm64-apple-ios7.0.0" ; CHECK-NEXT b.cc define i32 @Maze1() nounwind ssp { entry: - %0 = load i64* @channelColumns, align 8, !tbaa !0 + %0 = load i64, i64* @channelColumns, align 8, !tbaa !0 %cmp90 = icmp eq i64 %0, 0 br i1 %cmp90, label %for.end, label %for.body @@ -29,51 +29,51 @@ for.body: ; preds = %for.inc, %entry %1 = phi i64 [ %0, %entry ], [ %37, %for.inc ] %i.092 = phi i64 [ 1, %entry ], [ %inc53, %for.inc ] %numLeft.091 = phi i32 [ 0, %entry ], [ %numLeft.1, %for.inc ] - %2 = load i8** @mazeRoute, align 8, !tbaa !3 + %2 = load i8*, i8** @mazeRoute, align 8, !tbaa !3 %arrayidx = getelementptr inbounds i8, i8* %2, i64 %i.092 - %3 = load i8* %arrayidx, align 1, !tbaa !1 + %3 = load i8, i8* %arrayidx, align 1, !tbaa !1 %tobool = icmp eq i8 %3, 0 br i1 %tobool, label %for.inc, label %if.then if.then: ; preds = %for.body - %4 = load i64** @TOP, align 8, !tbaa !3 + %4 = load i64*, i64** @TOP, align 8, !tbaa !3 %arrayidx1 = getelementptr inbounds i64, i64* %4, i64 %i.092 - %5 = load i64* %arrayidx1, align 8, !tbaa !0 - %6 = load i64** @netsAssign, align 8, !tbaa !3 + %5 = load i64, i64* %arrayidx1, align 8, !tbaa !0 + %6 = load i64*, i64** @netsAssign, align 8, !tbaa !3 %arrayidx2 = getelementptr inbounds i64, i64* %6, i64 %5 - %7 = load i64* %arrayidx2, align 8, !tbaa !0 - %8 = load i64** @BOT, align 8, !tbaa !3 + %7 = load i64, i64* %arrayidx2, align 8, !tbaa !0 + %8 = load i64*, i64** @BOT, align 8, !tbaa !3 %arrayidx3 = getelementptr inbounds i64, i64* %8, i64 %i.092 - %9 = load i64* %arrayidx3, align 8, !tbaa !0 + %9 = load i64, i64* %arrayidx3, align 8, !tbaa !0 %arrayidx4 = getelementptr inbounds i64, i64* %6, i64 %9 - %10 = load i64* %arrayidx4, align 8, !tbaa !0 + %10 = load i64, i64* %arrayidx4, align 8, !tbaa !0 %cmp5 = icmp ugt i64 %i.092, 1 %cmp6 = icmp ugt i64 %10, 1 %or.cond = and i1 %cmp5, %cmp6 br i1 %or.cond, label %land.lhs.true7, label %if.else land.lhs.true7: ; preds = %if.then - %11 = load i64* @channelTracks, align 8, !tbaa !0 + %11 = load i64, i64* @channelTracks, align 8, !tbaa !0 %add = add i64 %11, 1 %call = tail call fastcc i32 @Maze1Mech(i64 %i.092, i64 %add, i64 %10, i64 0, i64 %7, i32 -1, i32 -1) %tobool8 = icmp eq i32 %call, 0 br i1 %tobool8, label %land.lhs.true7.if.else_crit_edge, label %if.then9 land.lhs.true7.if.else_crit_edge: ; preds = %land.lhs.true7 - %.pre = load i64* @channelColumns, align 8, !tbaa !0 + %.pre = load i64, i64* @channelColumns, align 8, !tbaa !0 br label 
%if.else if.then9: ; preds = %land.lhs.true7 - %12 = load i8** @mazeRoute, align 8, !tbaa !3 + %12 = load i8*, i8** @mazeRoute, align 8, !tbaa !3 %arrayidx10 = getelementptr inbounds i8, i8* %12, i64 %i.092 store i8 0, i8* %arrayidx10, align 1, !tbaa !1 - %13 = load i64** @TOP, align 8, !tbaa !3 + %13 = load i64*, i64** @TOP, align 8, !tbaa !3 %arrayidx11 = getelementptr inbounds i64, i64* %13, i64 %i.092 - %14 = load i64* %arrayidx11, align 8, !tbaa !0 + %14 = load i64, i64* %arrayidx11, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %14) - %15 = load i64** @BOT, align 8, !tbaa !3 + %15 = load i64*, i64** @BOT, align 8, !tbaa !3 %arrayidx12 = getelementptr inbounds i64, i64* %15, i64 %i.092 - %16 = load i64* %arrayidx12, align 8, !tbaa !0 + %16 = load i64, i64* %arrayidx12, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %16) br label %for.inc @@ -84,23 +84,23 @@ if.else: ; preds = %land.lhs.true7.if.e br i1 %or.cond89, label %land.lhs.true16, label %if.else24 land.lhs.true16: ; preds = %if.else - %18 = load i64* @channelTracks, align 8, !tbaa !0 + %18 = load i64, i64* @channelTracks, align 8, !tbaa !0 %add17 = add i64 %18, 1 %call18 = tail call fastcc i32 @Maze1Mech(i64 %i.092, i64 %add17, i64 %10, i64 0, i64 %7, i32 1, i32 -1) %tobool19 = icmp eq i32 %call18, 0 br i1 %tobool19, label %if.else24, label %if.then20 if.then20: ; preds = %land.lhs.true16 - %19 = load i8** @mazeRoute, align 8, !tbaa !3 + %19 = load i8*, i8** @mazeRoute, align 8, !tbaa !3 %arrayidx21 = getelementptr inbounds i8, i8* %19, i64 %i.092 store i8 0, i8* %arrayidx21, align 1, !tbaa !1 - %20 = load i64** @TOP, align 8, !tbaa !3 + %20 = load i64*, i64** @TOP, align 8, !tbaa !3 %arrayidx22 = getelementptr inbounds i64, i64* %20, i64 %i.092 - %21 = load i64* %arrayidx22, align 8, !tbaa !0 + %21 = load i64, i64* %arrayidx22, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %21) - %22 = load i64** @BOT, align 8, !tbaa !3 + %22 = load i64*, i64** @BOT, align 8, !tbaa !3 %arrayidx23 = getelementptr inbounds i64, i64* %22, i64 %i.092 - %23 = load i64* %arrayidx23, align 8, !tbaa !0 + %23 = load i64, i64* %arrayidx23, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %23) br label %for.inc @@ -108,7 +108,7 @@ if.else24: ; preds = %land.lhs.true16, %i br i1 %cmp5, label %land.lhs.true26, label %if.else36 land.lhs.true26: ; preds = %if.else24 - %24 = load i64* @channelTracks, align 8, !tbaa !0 + %24 = load i64, i64* @channelTracks, align 8, !tbaa !0 %cmp27 = icmp ult i64 %7, %24 br i1 %cmp27, label %land.lhs.true28, label %if.else36 @@ -119,26 +119,26 @@ land.lhs.true28: ; preds = %land.lhs.true26 br i1 %tobool31, label %if.else36, label %if.then32 if.then32: ; preds = %land.lhs.true28 - %25 = load i8** @mazeRoute, align 8, !tbaa !3 + %25 = load i8*, i8** @mazeRoute, align 8, !tbaa !3 %arrayidx33 = getelementptr inbounds i8, i8* %25, i64 %i.092 store i8 0, i8* %arrayidx33, align 1, !tbaa !1 - %26 = load i64** @TOP, align 8, !tbaa !3 + %26 = load i64*, i64** @TOP, align 8, !tbaa !3 %arrayidx34 = getelementptr inbounds i64, i64* %26, i64 %i.092 - %27 = load i64* %arrayidx34, align 8, !tbaa !0 + %27 = load i64, i64* %arrayidx34, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %27) - %28 = load i64** @BOT, align 8, !tbaa !3 + %28 = load i64*, i64** @BOT, align 8, !tbaa !3 %arrayidx35 = getelementptr inbounds i64, i64* %28, i64 %i.092 - %29 = load i64* %arrayidx35, align 8, !tbaa !0 + %29 = load i64, i64* %arrayidx35, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %29) br label %for.inc if.else36: ; 
preds = %land.lhs.true28, %land.lhs.true26, %if.else24 - %30 = load i64* @channelColumns, align 8, !tbaa !0 + %30 = load i64, i64* @channelColumns, align 8, !tbaa !0 %cmp37 = icmp ult i64 %i.092, %30 br i1 %cmp37, label %land.lhs.true38, label %if.else48 land.lhs.true38: ; preds = %if.else36 - %31 = load i64* @channelTracks, align 8, !tbaa !0 + %31 = load i64, i64* @channelTracks, align 8, !tbaa !0 %cmp39 = icmp ult i64 %7, %31 br i1 %cmp39, label %land.lhs.true40, label %if.else48 @@ -149,16 +149,16 @@ land.lhs.true40: ; preds = %land.lhs.true38 br i1 %tobool43, label %if.else48, label %if.then44 if.then44: ; preds = %land.lhs.true40 - %32 = load i8** @mazeRoute, align 8, !tbaa !3 + %32 = load i8*, i8** @mazeRoute, align 8, !tbaa !3 %arrayidx45 = getelementptr inbounds i8, i8* %32, i64 %i.092 store i8 0, i8* %arrayidx45, align 1, !tbaa !1 - %33 = load i64** @TOP, align 8, !tbaa !3 + %33 = load i64*, i64** @TOP, align 8, !tbaa !3 %arrayidx46 = getelementptr inbounds i64, i64* %33, i64 %i.092 - %34 = load i64* %arrayidx46, align 8, !tbaa !0 + %34 = load i64, i64* %arrayidx46, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %34) - %35 = load i64** @BOT, align 8, !tbaa !3 + %35 = load i64*, i64** @BOT, align 8, !tbaa !3 %arrayidx47 = getelementptr inbounds i64, i64* %35, i64 %i.092 - %36 = load i64* %arrayidx47, align 8, !tbaa !0 + %36 = load i64, i64* %arrayidx47, align 8, !tbaa !0 tail call fastcc void @CleanNet(i64 %36) br label %for.inc @@ -169,7 +169,7 @@ if.else48: ; preds = %land.lhs.true40, %l for.inc: ; preds = %if.else48, %if.then44, %if.then32, %if.then20, %if.then9, %for.body %numLeft.1 = phi i32 [ %numLeft.091, %if.then9 ], [ %numLeft.091, %if.then20 ], [ %numLeft.091, %if.then32 ], [ %numLeft.091, %if.then44 ], [ %inc, %if.else48 ], [ %numLeft.091, %for.body ] %inc53 = add i64 %i.092, 1 - %37 = load i64* @channelColumns, align 8, !tbaa !0 + %37 = load i64, i64* @channelColumns, align 8, !tbaa !0 %cmp = icmp ugt i64 %inc53, %37 br i1 %cmp, label %for.end, label %for.body diff --git a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll index 4c403134617..ff18f736433 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll @@ -283,7 +283,7 @@ sw.bb.i.i: %ref.tr.i.i = phi %str1* [ %0, %sw.bb.i.i ], [ undef, %entry ] %operands.i.i = getelementptr inbounds %str1, %str1* %ref.tr.i.i, i64 0, i32 0, i32 2 %arrayidx.i.i = bitcast i32* %operands.i.i to %str1** - %0 = load %str1** %arrayidx.i.i, align 8 + %0 = load %str1*, %str1** %arrayidx.i.i, align 8 %code1.i.i.phi.trans.insert = getelementptr inbounds %str1, %str1* %0, i64 0, i32 0, i32 0, i64 16 br label %sw.bb.i.i } diff --git a/llvm/test/CodeGen/AArch64/arm64-code-model-large-abs.ll b/llvm/test/CodeGen/AArch64/arm64-code-model-large-abs.ll index 264da2da25b..9f50fea370e 100644 --- a/llvm/test/CodeGen/AArch64/arm64-code-model-large-abs.ll +++ b/llvm/test/CodeGen/AArch64/arm64-code-model-large-abs.ll @@ -18,7 +18,7 @@ define i8* @global_addr() { define i8 @global_i8() { ; CHECK-LABEL: global_i8: - %val = load i8* @var8 + %val = load i8, i8* @var8 ret i8 %val ; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var8 ; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var8 @@ -29,7 +29,7 @@ define i8 @global_i8() { define i16 @global_i16() { ; CHECK-LABEL: global_i16: - %val = load i16* @var16 + %val = load i16, i16* @var16 ret i16 %val ; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var16 ; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var16 @@ -40,7 +40,7 @@ define i16 @global_i16() { 
define i32 @global_i32() { ; CHECK-LABEL: global_i32: - %val = load i32* @var32 + %val = load i32, i32* @var32 ret i32 %val ; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var32 ; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var32 @@ -51,7 +51,7 @@ define i32 @global_i32() { define i64 @global_i64() { ; CHECK-LABEL: global_i64: - %val = load i64* @var64 + %val = load i64, i64* @var64 ret i64 %val ; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var64 ; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var64 diff --git a/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll b/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll index 2a590f93b0d..e34ef39bcfe 100644 --- a/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll +++ b/llvm/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll @@ -22,13 +22,13 @@ define void @foo() { entry: br label %if.then83 if.then83: ; preds = %if.end81 - %tmp = load %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8 + %tmp = load %"class.H4ISP::H4ISPDevice"*, %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8 %call84 = call i32 @_ZN5H4ISP11H4ISPDevice32ISP_SelectBestMIPIFrequencyIndexEjPj(%"class.H4ISP::H4ISPDevice"* %tmp) #19 tail call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27}"() - %tmp2 = load %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8 + %tmp2 = load %"class.H4ISP::H4ISPDevice"*, %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8 tail call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x28}"() %pCameraManager.i268 = getelementptr inbounds %"class.H4ISP::H4ISPDevice", %"class.H4ISP::H4ISPDevice"* %tmp2, i64 0, i32 3 - %tmp3 = load %"class.H4ISP::H4ISPCameraManager"** %pCameraManager.i268, align 8 + %tmp3 = load %"class.H4ISP::H4ISPCameraManager"*, %"class.H4ISP::H4ISPCameraManager"** %pCameraManager.i268, align 8 %tobool.i269 = icmp eq %"class.H4ISP::H4ISPCameraManager"* %tmp3, null br i1 %tobool.i269, label %if.then83, label %end end: diff --git a/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll b/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll index 6d73daac620..c0aa63cc433 100644 --- a/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll +++ b/llvm/test/CodeGen/AArch64/arm64-collect-loh.ll @@ -12,7 +12,7 @@ ; Function Attrs: noinline nounwind ssp define void @foo(i32 %t) { entry: - %tmp = load i32* @a, align 4 + %tmp = load i32, i32* @a, align 4 %add = add nsw i32 %tmp, %t store i32 %add, i32* @a, align 4 ret void @@ -32,22 +32,22 @@ entry: br i1 %cmp, label %if.then, label %if.end4 if.then: ; preds = %entry - %tmp = load i32* @a, align 4 + %tmp = load i32, i32* @a, align 4 %add = add nsw i32 %tmp, %t %cmp1 = icmp sgt i32 %add, 12 br i1 %cmp1, label %if.then2, label %if.end4 if.then2: ; preds = %if.then tail call void @foo(i32 %add) - %tmp1 = load i32* @a, align 4 + %tmp1 = load i32, i32* @a, align 4 br label %if.end4 if.end4: ; preds = %if.then2, %if.then, %entry %t.addr.0 = phi i32 [ %tmp1, %if.then2 ], [ %t, %if.then ], [ %t, %entry ] - %tmp2 = load i32* @b, align 4 + %tmp2 = load i32, i32* @b, align 4 %add5 = add nsw i32 %tmp2, %t.addr.0 tail call void @foo(i32 %add5) - %tmp3 = load i32* @b, align 4 + %tmp3 = load i32, i32* @b, align 4 %add6 = add nsw i32 %tmp3, %t.addr.0 ret i32 %add6 } diff --git a/llvm/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll b/llvm/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll index bc66e1fc34a..938bc62808f 100644 --- a/llvm/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll +++ 
b/llvm/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll @@ -9,9 +9,9 @@ define void @store_combine() nounwind { %dst = alloca { double, double }, align 8 %src.realp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 0 - %src.real = load double* %src.realp + %src.real = load double, double* %src.realp %src.imagp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 1 - %src.imag = load double* %src.imagp + %src.imag = load double, double* %src.imagp %dst.realp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 0 %dst.imagp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 1 diff --git a/llvm/test/CodeGen/AArch64/arm64-const-addr.ll b/llvm/test/CodeGen/AArch64/arm64-const-addr.ll index 834e9be8cd0..ffc153344d3 100644 --- a/llvm/test/CodeGen/AArch64/arm64-const-addr.ll +++ b/llvm/test/CodeGen/AArch64/arm64-const-addr.ll @@ -11,12 +11,12 @@ define i32 @test1() nounwind { ; CHECK: ldr w8, [x8, #12] %at = inttoptr i64 68141056 to %T* %o1 = getelementptr %T, %T* %at, i32 0, i32 1 - %t1 = load i32* %o1 + %t1 = load i32, i32* %o1 %o2 = getelementptr %T, %T* %at, i32 0, i32 2 - %t2 = load i32* %o2 + %t2 = load i32, i32* %o2 %a1 = add i32 %t1, %t2 %o3 = getelementptr %T, %T* %at, i32 0, i32 3 - %t3 = load i32* %o3 + %t3 = load i32, i32* %o3 %a2 = add i32 %a1, %t3 ret i32 %a2 } diff --git a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll index 7123e5e0b23..c6b7d835780 100644 --- a/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll +++ b/llvm/test/CodeGen/AArch64/arm64-convert-v4f64.ll @@ -8,7 +8,7 @@ define <4 x i16> @fptosi_v4f64_to_v4i16(<4 x double>* %ptr) { ; CHECK-DAG: xtn v[[LHS_NA:[0-9]+]].2s, v[[LHS]].2d ; CHECK-DAG: xtn v[[RHS_NA:[0-9]+]].2s, v[[RHS]].2d ; CHECK: uzp1 v0.4h, v[[RHS_NA]].4h, v[[LHS_NA]].4h - %tmp1 = load <4 x double>* %ptr + %tmp1 = load <4 x double>, <4 x double>* %ptr %tmp2 = fptosi <4 x double> %tmp1 to <4 x i16> ret <4 x i16> %tmp2 } @@ -26,7 +26,7 @@ define <8 x i8> @fptosi_v4f64_to_v4i8(<8 x double>* %ptr) { ; CHECK-DAG: uzp1 v[[TMP1:[0-9]+]].4h, v[[CONV2]].4h, v[[CONV3]].4h ; CHECK-DAG: uzp1 v[[TMP2:[0-9]+]].4h, v[[CONV0]].4h, v[[CONV1]].4h ; CHECK: uzp1 v0.8b, v[[TMP2]].8b, v[[TMP1]].8b - %tmp1 = load <8 x double>* %ptr + %tmp1 = load <8 x double>, <8 x double>* %ptr %tmp2 = fptosi <8 x double> %tmp1 to <8 x i8> ret <8 x i8> %tmp2 } diff --git a/llvm/test/CodeGen/AArch64/arm64-cse.ll b/llvm/test/CodeGen/AArch64/arm64-cse.ll index cefdec86fae..8d4bf5dbeb7 100644 --- a/llvm/test/CodeGen/AArch64/arm64-cse.ll +++ b/llvm/test/CodeGen/AArch64/arm64-cse.ll @@ -15,7 +15,7 @@ entry: ; CHECK: sub ; CHECK-NOT: sub ; CHECK: ret - %0 = load i32* %offset, align 4 + %0 = load i32, i32* %offset, align 4 %cmp = icmp slt i32 %0, %size %s = sub nsw i32 %0, %size br i1 %cmp, label %return, label %if.end @@ -43,7 +43,7 @@ entry: ; CHECK: b.lt ; CHECK-NOT: sub ; CHECK: ret - %0 = load i32* %offset, align 4 + %0 = load i32, i32* %offset, align 4 %cmp = icmp slt i32 %0, 1 br i1 %cmp, label %return, label %if.end diff --git a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll index 424e1e7785d..37f3504be93 100644 --- a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll +++ b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll @@ -19,7 +19,7 @@ define void @test(%"struct.SU"* nocapture %su) { entry: %r1 = getelementptr 
inbounds %"struct.SU", %"struct.SU"* %su, i64 1, i32 5 %r2 = bitcast %"struct.BO"* %r1 to i48* - %r3 = load i48* %r2, align 8 + %r3 = load i48, i48* %r2, align 8 %r4 = and i48 %r3, -4294967296 %r5 = or i48 0, %r4 store i48 %r5, i48* %r2, align 8 diff --git a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll index 1109840541d..09483ea09bd 100644 --- a/llvm/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll +++ b/llvm/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll @@ -16,7 +16,7 @@ define void @test(%class.Complex* nocapture %out, i64 %out_start) { entry: %arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start %0 = bitcast %class.Complex* %arrayidx to i64* - %1 = load i64* %0, align 4 + %1 = load i64, i64* %0, align 4 %t0.sroa.0.0.extract.trunc = trunc i64 %1 to i32 %2 = bitcast i32 %t0.sroa.0.0.extract.trunc to float %t0.sroa.2.0.extract.shift = lshr i64 %1, 32 @@ -25,11 +25,11 @@ entry: %add = add i64 %out_start, 8 %arrayidx2 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add %i.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 0 - %4 = load float* %i.i, align 4 + %4 = load float, float* %i.i, align 4 %add.i = fadd float %4, %2 %retval.sroa.0.0.vec.insert.i = insertelement <2 x float> undef, float %add.i, i32 0 %r.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 1 - %5 = load float* %r.i, align 4 + %5 = load float, float* %r.i, align 4 %add5.i = fadd float %5, %3 %retval.sroa.0.4.vec.insert.i = insertelement <2 x float> %retval.sroa.0.0.vec.insert.i, float %add5.i, i32 1 %ref.tmp.sroa.0.0.cast = bitcast %class.Complex* %arrayidx to <2 x float>* @@ -48,7 +48,7 @@ define void @test_int(%class.Complex_int* nocapture %out, i64 %out_start) { entry: %arrayidx = getelementptr inbounds %class.Complex_int, %class.Complex_int* %out, i64 %out_start %0 = bitcast %class.Complex_int* %arrayidx to i64* - %1 = load i64* %0, align 4 + %1 = load i64, i64* %0, align 4 %t0.sroa.0.0.extract.trunc = trunc i64 %1 to i32 %2 = bitcast i32 %t0.sroa.0.0.extract.trunc to i32 %t0.sroa.2.0.extract.shift = lshr i64 %1, 32 @@ -57,11 +57,11 @@ entry: %add = add i64 %out_start, 8 %arrayidx2 = getelementptr inbounds %class.Complex_int, %class.Complex_int* %out, i64 %add %i.i = getelementptr inbounds %class.Complex_int, %class.Complex_int* %arrayidx2, i64 0, i32 0 - %4 = load i32* %i.i, align 4 + %4 = load i32, i32* %i.i, align 4 %add.i = add i32 %4, %2 %retval.sroa.0.0.vec.insert.i = insertelement <2 x i32> undef, i32 %add.i, i32 0 %r.i = getelementptr inbounds %class.Complex_int, %class.Complex_int* %arrayidx2, i64 0, i32 1 - %5 = load i32* %r.i, align 4 + %5 = load i32, i32* %r.i, align 4 %add5.i = add i32 %5, %3 %retval.sroa.0.4.vec.insert.i = insertelement <2 x i32> %retval.sroa.0.0.vec.insert.i, i32 %add5.i, i32 1 %ref.tmp.sroa.0.0.cast = bitcast %class.Complex_int* %arrayidx to <2 x i32>* @@ -80,7 +80,7 @@ define void @test_long(%class.Complex_long* nocapture %out, i64 %out_start) { entry: %arrayidx = getelementptr inbounds %class.Complex_long, %class.Complex_long* %out, i64 %out_start %0 = bitcast %class.Complex_long* %arrayidx to i128* - %1 = load i128* %0, align 4 + %1 = load i128, i128* %0, align 4 %t0.sroa.0.0.extract.trunc = trunc i128 %1 to i64 %2 = bitcast i64 %t0.sroa.0.0.extract.trunc to i64 %t0.sroa.2.0.extract.shift = lshr i128 %1, 64 @@ -89,11 +89,11 @@ entry: %add = add i64 %out_start, 8 %arrayidx2 = 
getelementptr inbounds %class.Complex_long, %class.Complex_long* %out, i64 %add %i.i = getelementptr inbounds %class.Complex_long, %class.Complex_long* %arrayidx2, i32 0, i32 0 - %4 = load i64* %i.i, align 4 + %4 = load i64, i64* %i.i, align 4 %add.i = add i64 %4, %2 %retval.sroa.0.0.vec.insert.i = insertelement <2 x i64> undef, i64 %add.i, i32 0 %r.i = getelementptr inbounds %class.Complex_long, %class.Complex_long* %arrayidx2, i32 0, i32 1 - %5 = load i64* %r.i, align 4 + %5 = load i64, i64* %r.i, align 4 %add5.i = add i64 %5, %3 %retval.sroa.0.4.vec.insert.i = insertelement <2 x i64> %retval.sroa.0.0.vec.insert.i, i64 %add5.i, i32 1 %ref.tmp.sroa.0.0.cast = bitcast %class.Complex_long* %arrayidx to <2 x i64>* diff --git a/llvm/test/CodeGen/AArch64/arm64-dup.ll b/llvm/test/CodeGen/AArch64/arm64-dup.ll index 0c56b46c417..849e227116f 100644 --- a/llvm/test/CodeGen/AArch64/arm64-dup.ll +++ b/llvm/test/CodeGen/AArch64/arm64-dup.ll @@ -165,7 +165,7 @@ define <4 x float> @v_shuffledupQfloat(float %A) nounwind { define <8 x i8> @vduplane8(<8 x i8>* %A) nounwind { ;CHECK-LABEL: vduplane8: ;CHECK: dup.8b - %tmp1 = load <8 x i8>* %A + %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 > ret <8 x i8> %tmp2 } @@ -173,7 +173,7 @@ define <8 x i8> @vduplane8(<8 x i8>* %A) nounwind { define <4 x i16> @vduplane16(<4 x i16>* %A) nounwind { ;CHECK-LABEL: vduplane16: ;CHECK: dup.4h - %tmp1 = load <4 x i16>* %A + %tmp1 = load <4 x i16>, <4 x i16>* %A %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ret <4 x i16> %tmp2 } @@ -181,7 +181,7 @@ define <4 x i16> @vduplane16(<4 x i16>* %A) nounwind { define <2 x i32> @vduplane32(<2 x i32>* %A) nounwind { ;CHECK-LABEL: vduplane32: ;CHECK: dup.2s - %tmp1 = load <2 x i32>* %A + %tmp1 = load <2 x i32>, <2 x i32>* %A %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> < i32 1, i32 1 > ret <2 x i32> %tmp2 } @@ -189,7 +189,7 @@ define <2 x i32> @vduplane32(<2 x i32>* %A) nounwind { define <2 x float> @vduplanefloat(<2 x float>* %A) nounwind { ;CHECK-LABEL: vduplanefloat: ;CHECK: dup.2s - %tmp1 = load <2 x float>* %A + %tmp1 = load <2 x float>, <2 x float>* %A %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> < i32 1, i32 1 > ret <2 x float> %tmp2 } @@ -197,7 +197,7 @@ define <2 x float> @vduplanefloat(<2 x float>* %A) nounwind { define <16 x i8> @vduplaneQ8(<8 x i8>* %A) nounwind { ;CHECK-LABEL: vduplaneQ8: ;CHECK: dup.16b - %tmp1 = load <8 x i8>* %A + %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <16 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 > ret <16 x i8> %tmp2 } @@ -205,7 +205,7 @@ define <16 x i8> @vduplaneQ8(<8 x i8>* %A) nounwind { define <8 x i16> @vduplaneQ16(<4 x i16>* %A) nounwind { ;CHECK-LABEL: vduplaneQ16: ;CHECK: dup.8h - %tmp1 = load <4 x i16>* %A + %tmp1 = load <4 x i16>, <4 x i16>* %A %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 > ret <8 x i16> %tmp2 } @@ -213,7 +213,7 @@ define <8 x i16> @vduplaneQ16(<4 x i16>* %A) nounwind { define <4 x i32> @vduplaneQ32(<2 x i32>* %A) nounwind { ;CHECK-LABEL: vduplaneQ32: ;CHECK: dup.4s - %tmp1 = load <2 x i32>* %A + %tmp1 = load <2 x i32>, <2 x i32>* %A %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> < i32 1, i32 1, 
i32 1, i32 1 > ret <4 x i32> %tmp2 } @@ -221,7 +221,7 @@ define <4 x i32> @vduplaneQ32(<2 x i32>* %A) nounwind { define <4 x float> @vduplaneQfloat(<2 x float>* %A) nounwind { ;CHECK-LABEL: vduplaneQfloat: ;CHECK: dup.4s - %tmp1 = load <2 x float>* %A + %tmp1 = load <2 x float>, <2 x float>* %A %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 > ret <4 x float> %tmp2 } diff --git a/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll b/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll index 795ad700898..8164f46664b 100644 --- a/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll +++ b/llvm/test/CodeGen/AArch64/arm64-early-ifcvt.ll @@ -15,7 +15,7 @@ do.body: %n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.cond ] %p.addr.0 = phi i32* [ %p, %entry ], [ %incdec.ptr, %do.cond ] %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 1 - %0 = load i32* %p.addr.0, align 4 + %0 = load i32, i32* %p.addr.0, align 4 %cmp = icmp sgt i32 %0, %max.0 br i1 %cmp, label %do.cond, label %if.else @@ -400,7 +400,7 @@ entry: br label %for.body for.body: - %x0 = load i32* undef, align 4 + %x0 = load i32, i32* undef, align 4 br i1 undef, label %if.then.i146, label %is_sbox.exit155 if.then.i146: @@ -413,7 +413,7 @@ is_sbox.exit155: ; preds = %if.then.i146, %for. %seg_offset.0.i151 = phi i32 [ %add9.i145, %if.then.i146 ], [ undef, %for.body ] %idxprom15.i152 = sext i32 %seg_offset.0.i151 to i64 %arrayidx18.i154 = getelementptr inbounds i32, i32* null, i64 %idxprom15.i152 - %x1 = load i32* %arrayidx18.i154, align 4 + %x1 = load i32, i32* %arrayidx18.i154, align 4 br i1 undef, label %for.body51, label %for.body for.body51: ; preds = %is_sbox.exit155 diff --git a/llvm/test/CodeGen/AArch64/arm64-elf-globals.ll b/llvm/test/CodeGen/AArch64/arm64-elf-globals.ll index 025aea11446..b1d5524aee8 100644 --- a/llvm/test/CodeGen/AArch64/arm64-elf-globals.ll +++ b/llvm/test/CodeGen/AArch64/arm64-elf-globals.ll @@ -9,7 +9,7 @@ @var64 = external global i64, align 8 define i8 @test_i8(i8 %new) { - %val = load i8* @var8, align 1 + %val = load i8, i8* @var8, align 1 store i8 %new, i8* @var8 ret i8 %val ; CHECK-LABEL: test_i8: @@ -31,7 +31,7 @@ define i8 @test_i8(i8 %new) { } define i16 @test_i16(i16 %new) { - %val = load i16* @var16, align 2 + %val = load i16, i16* @var16, align 2 store i16 %new, i16* @var16 ret i16 %val ; CHECK-LABEL: test_i16: @@ -44,7 +44,7 @@ define i16 @test_i16(i16 %new) { } define i32 @test_i32(i32 %new) { - %val = load i32* @var32, align 4 + %val = load i32, i32* @var32, align 4 store i32 %new, i32* @var32 ret i32 %val ; CHECK-LABEL: test_i32: @@ -57,7 +57,7 @@ define i32 @test_i32(i32 %new) { } define i64 @test_i64(i64 %new) { - %val = load i64* @var64, align 8 + %val = load i64, i64* @var64, align 8 store i64 %new, i64* @var64 ret i64 %val ; CHECK-LABEL: test_i64: @@ -83,8 +83,8 @@ define i64* @test_addr() { @protectedvar = protected global i32 0, align 4 define i32 @test_vis() { - %lhs = load i32* @hiddenvar, align 4 - %rhs = load i32* @protectedvar, align 4 + %lhs = load i32, i32* @hiddenvar, align 4 + %rhs = load i32, i32* @protectedvar, align 4 %ret = add i32 %lhs, %rhs ret i32 %ret ; CHECK-PIC: adrp {{x[0-9]+}}, hiddenvar @@ -97,7 +97,7 @@ define i32 @test_vis() { define i32 @test_default_align() { %addr = getelementptr [2 x i32], [2 x i32]* @var_default, i32 0, i32 0 - %val = load i32* %addr + %val = load i32, i32* %addr ret i32 %val ; CHECK-LABEL: test_default_align: ; CHECK: adrp x[[HIREG:[0-9]+]], var_default @@ -106,7 +106,7 @@ define i32 
@test_default_align() { define i64 @test_default_unaligned() { %addr = bitcast [2 x i32]* @var_default to i64* - %val = load i64* %addr + %val = load i64, i64* %addr ret i64 %val ; CHECK-LABEL: test_default_unaligned: ; CHECK: adrp [[HIREG:x[0-9]+]], var_default diff --git a/llvm/test/CodeGen/AArch64/arm64-ext.ll b/llvm/test/CodeGen/AArch64/arm64-ext.ll index 67860de51b0..8315ffcfb07 100644 --- a/llvm/test/CodeGen/AArch64/arm64-ext.ll +++ b/llvm/test/CodeGen/AArch64/arm64-ext.ll @@ -3,8 +3,8 @@ define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: test_vextd: ;CHECK: {{ext.8b.*#3}} - %tmp1 = load <8 x i8>* %A - %tmp2 = load <8 x i8>* %B + %tmp1 = load <8 x i8>, <8 x i8>* %A + %tmp2 = load <8 x i8>, <8 x i8>* %B %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> ret <8 x i8> %tmp3 } @@ -12,8 +12,8 @@ define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: test_vextRd: ;CHECK: {{ext.8b.*#5}} - %tmp1 = load <8 x i8>* %A - %tmp2 = load <8 x i8>* %B + %tmp1 = load <8 x i8>, <8 x i8>* %A + %tmp2 = load <8 x i8>, <8 x i8>* %B %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> ret <8 x i8> %tmp3 } @@ -21,8 +21,8 @@ define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind { ;CHECK-LABEL: test_vextq: ;CHECK: {{ext.16b.*3}} - %tmp1 = load <16 x i8>* %A - %tmp2 = load <16 x i8>* %B + %tmp1 = load <16 x i8>, <16 x i8>* %A + %tmp2 = load <16 x i8>, <16 x i8>* %B %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> ret <16 x i8> %tmp3 } @@ -30,8 +30,8 @@ define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind { ;CHECK-LABEL: test_vextRq: ;CHECK: {{ext.16b.*7}} - %tmp1 = load <16 x i8>* %A - %tmp2 = load <16 x i8>* %B + %tmp1 = load <16 x i8>, <16 x i8>* %A + %tmp2 = load <16 x i8>, <16 x i8>* %B %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> ret <16 x i8> %tmp3 } @@ -39,8 +39,8 @@ define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind { ;CHECK-LABEL: test_vextd16: ;CHECK: {{ext.8b.*#6}} - %tmp1 = load <4 x i16>* %A - %tmp2 = load <4 x i16>* %B + %tmp1 = load <4 x i16>, <4 x i16>* %A + %tmp2 = load <4 x i16>, <4 x i16>* %B %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> ret <4 x i16> %tmp3 } @@ -48,8 +48,8 @@ define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind { define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind { ;CHECK-LABEL: test_vextq32: ;CHECK: {{ext.16b.*12}} - %tmp1 = load <4 x i32>* %A - %tmp2 = load <4 x i32>* %B + %tmp1 = load <4 x i32>, <4 x i32>* %A + %tmp2 = load <4 x i32>, <4 x i32>* %B %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> ret <4 x i32> %tmp3 } @@ -59,8 +59,8 @@ define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind { define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: test_vextd_undef: ;CHECK: {{ext.8b.*}} - %tmp1 = load <8 x i8>* %A - %tmp2 = load <8 x i8>* %B + %tmp1 = load <8 x i8>, <8 x i8>* %A + %tmp2 = load <8 x i8>, <8 x i8>* %B %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> ret <8 x i8> %tmp3 } @@ -68,8 +68,8 @@ define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <8 x i8> 
@test_vextd_undef2(<8 x i8>* %A, <8 x i8>* %B) nounwind { ;CHECK-LABEL: test_vextd_undef2: ;CHECK: {{ext.8b.*#6}} - %tmp1 = load <8 x i8>* %A - %tmp2 = load <8 x i8>* %B + %tmp1 = load <8 x i8>, <8 x i8>* %A + %tmp2 = load <8 x i8>, <8 x i8>* %B %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> ret <8 x i8> %tmp3 } @@ -77,8 +77,8 @@ define <8 x i8> @test_vextd_undef2(<8 x i8>* %A, <8 x i8>* %B) nounwind { define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind { ;CHECK-LABEL: test_vextRq_undef: ;CHECK: {{ext.16b.*#7}} - %tmp1 = load <16 x i8>* %A - %tmp2 = load <16 x i8>* %B + %tmp1 = load <16 x i8>, <16 x i8>* %A + %tmp2 = load <16 x i8>, <16 x i8>* %B %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> ret <16 x i8> %tmp3 } @@ -86,7 +86,7 @@ define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind { define <8 x i16> @test_vextRq_undef2(<8 x i16>* %A) nounwind { ;CHECK-LABEL: test_vextRq_undef2: ;CHECK: {{ext.16b.*#10}} - %tmp1 = load <8 x i16>* %A + %tmp1 = load <8 x i16>, <8 x i16>* %A %vext = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> ret <8 x i16> %vext; } @@ -101,8 +101,8 @@ define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind { ;CHECK-LABEL: test_interleaved: ;CHECK: ext.8b ;CHECK: zip1.4h - %tmp1 = load <8 x i16>* %A - %tmp2 = load <8 x i16>* %B + %tmp1 = load <8 x i16>, <8 x i16>* %A + %tmp2 = load <8 x i16>, <8 x i16>* %B %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> ret <4 x i16> %tmp3 } @@ -111,8 +111,8 @@ define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind { define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind { ;CHECK-LABEL: test_undef: ;CHECK: zip1.4h - %tmp1 = load <8 x i16>* %A - %tmp2 = load <8 x i16>* %B + %tmp1 = load <8 x i16>, <8 x i16>* %A + %tmp2 = load <8 x i16>, <8 x i16>* %B %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> ret <4 x i16> %tmp3 } diff --git a/llvm/test/CodeGen/AArch64/arm64-extend.ll b/llvm/test/CodeGen/AArch64/arm64-extend.ll index ce3f0013011..0ef68f8a530 100644 --- a/llvm/test/CodeGen/AArch64/arm64-extend.ll +++ b/llvm/test/CodeGen/AArch64/arm64-extend.ll @@ -9,7 +9,7 @@ define i64 @foo(i32 %i) { ; CHECK: ret %idxprom = sext i32 %i to i64 %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @array, i64 0, i64 %idxprom - %tmp1 = load i32* %arrayidx, align 4 + %tmp1 = load i32, i32* %arrayidx, align 4 %conv = sext i32 %tmp1 to i64 ret i64 %conv } diff --git a/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll b/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll index 14e5fd310d7..642af876423 100644 --- a/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll +++ b/llvm/test/CodeGen/AArch64/arm64-extload-knownzero.ll @@ -9,7 +9,7 @@ entry: bb1: ; CHECK: %bb1 ; CHECK: ldrh [[REG:w[0-9]+]] - %tmp2 = load i16* %ptr, align 2 + %tmp2 = load i16, i16* %ptr, align 2 br label %bb2 bb2: ; CHECK: %bb2 diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll index 2e6f24fa077..3a14c7e58d8 100644 --- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll +++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll @@ -13,7 +13,7 @@ entry: ; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]] ; CHECK: ldr w0, [x[[REG3]]] ; CHECK: ret - %0 = load i32* getelementptr inbounds ([5001 x i32]* @sortlist, i32 0, i64 5000), align 4 + %0 = load i32, i32* getelementptr inbounds ([5001 x i32]* @sortlist, i32 0, i64 
5000), align 4 ret i32 %0 } @@ -26,7 +26,7 @@ entry: ; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]] ; CHECK: ldr x0, [x[[REG3]]] ; CHECK: ret - %0 = load i64* getelementptr inbounds ([5001 x i64]* @sortlist2, i32 0, i64 5000), align 4 + %0 = load i64, i64* getelementptr inbounds ([5001 x i64]* @sortlist2, i32 0, i64 5000), align 4 ret i64 %0 } @@ -40,8 +40,8 @@ entry: ; CHECK: movz x[[REG:[0-9]+]], #0xb3a, lsl #32 ; CHECK: movk x[[REG]], #0x73ce, lsl #16 ; CHECK: movk x[[REG]], #0x2ff2 - %0 = load i8** @pd2, align 8 + %0 = load i8*, i8** @pd2, align 8 %arrayidx = getelementptr inbounds i8, i8* %0, i64 12345678901234 - %1 = load i8* %arrayidx, align 1 + %1 = load i8, i8* %arrayidx, align 1 ret i8 %1 } diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll index 8c23c2a589d..0ef7b143df8 100644 --- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll +++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-br.ll @@ -3,7 +3,7 @@ define void @branch1() nounwind uwtable ssp { %x = alloca i32, align 4 store i32 0, i32* %x, align 4 - %1 = load i32* %x, align 4 + %1 = load i32, i32* %x, align 4 %2 = icmp ne i32 %1, 0 br i1 %2, label %3, label %4 @@ -23,7 +23,7 @@ define void @branch2() nounwind uwtable ssp { store i32 1, i32* %y, align 4 store i32 1, i32* %x, align 4 store i32 0, i32* %z, align 4 - %2 = load i32* %x, align 4 + %2 = load i32, i32* %x, align 4 %3 = icmp ne i32 %2, 0 br i1 %3, label %4, label %5 @@ -32,12 +32,12 @@ define void @branch2() nounwind uwtable ssp { br label %14 ;