summaryrefslogtreecommitdiffstats
path: root/llvm/test/Transforms
diff options
context:
space:
mode:
authorDavid Blaikie <dblaikie@gmail.com>2015-02-27 21:17:42 +0000
committerDavid Blaikie <dblaikie@gmail.com>2015-02-27 21:17:42 +0000
commita79ac14fa68297f9888bc70a10df5ed9b8864e38 (patch)
tree8d8217a8928e3ee599bdde405e2e178b3a55b645 /llvm/test/Transforms
parent83687fb9e654c9d0086e7f6b728c26fa0b729e71 (diff)
downloadbcm5719-llvm-a79ac14fa68297f9888bc70a10df5ed9b8864e38.tar.gz
bcm5719-llvm-a79ac14fa68297f9888bc70a10df5ed9b8864e38.zip
[opaque pointer type] Add textual IR support for explicit type parameter to load instruction
Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few
more test case improvements/changes were required this time around:
(r229269-r229278)

import fileinput
import sys
import re
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")
for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
Diffstat (limited to 'llvm/test/Transforms')
-rw-r--r--llvm/test/Transforms/ADCE/2002-05-23-ZeroArgPHITest.ll4
-rw-r--r--llvm/test/Transforms/ADCE/2002-05-28-Crash.ll6
-rw-r--r--llvm/test/Transforms/ADCE/2002-07-17-AssertionFailure.ll2
-rw-r--r--llvm/test/Transforms/ADCE/2002-07-17-PHIAssertion.ll6
-rw-r--r--llvm/test/Transforms/ADCE/2003-06-11-InvalidCFG.ll2
-rw-r--r--llvm/test/Transforms/ADCE/2003-06-24-BadSuccessor.ll4
-rw-r--r--llvm/test/Transforms/ADCE/2003-06-24-BasicFunctionality.ll4
-rw-r--r--llvm/test/Transforms/ADCE/basictest1.ll14
-rw-r--r--llvm/test/Transforms/ADCE/basictest2.ll14
-rw-r--r--llvm/test/Transforms/AddDiscriminators/basic.ll6
-rw-r--r--llvm/test/Transforms/AddDiscriminators/first-only.ll8
-rw-r--r--llvm/test/Transforms/AddDiscriminators/multiple.ll10
-rw-r--r--llvm/test/Transforms/AddDiscriminators/no-discriminators.ll6
-rw-r--r--llvm/test/Transforms/AlignmentFromAssumptions/simple.ll32
-rw-r--r--llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll32
-rw-r--r--llvm/test/Transforms/AlignmentFromAssumptions/start-unk.ll4
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/2008-02-01-ReturnAttrs.ll2
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/2008-07-02-array-indexing.ll4
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/aggregate-promote.ll4
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/attrs.ll2
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/basictest.ll4
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/byval-2.ll2
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/byval.ll2
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/chained.ll4
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/control-flow.ll2
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/control-flow2.ll4
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/crash.ll4
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/dbg.ll4
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/fp80.ll6
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/inalloca.ll4
-rw-r--r--llvm/test/Transforms/ArgumentPromotion/reserve-tbaa.ll12
-rw-r--r--llvm/test/Transforms/BBVectorize/X86/loop1.ll4
-rw-r--r--llvm/test/Transforms/BBVectorize/X86/sh-rec2.ll16
-rw-r--r--llvm/test/Transforms/BBVectorize/X86/sh-rec3.ll30
-rw-r--r--llvm/test/Transforms/BBVectorize/X86/simple-ldstr.ll12
-rw-r--r--llvm/test/Transforms/BBVectorize/X86/wr-aliases.ll38
-rw-r--r--llvm/test/Transforms/BBVectorize/func-alias.ll114
-rw-r--r--llvm/test/Transforms/BBVectorize/ld1.ll18
-rw-r--r--llvm/test/Transforms/BBVectorize/loop1.ll12
-rw-r--r--llvm/test/Transforms/BBVectorize/mem-op-depth.ll10
-rw-r--r--llvm/test/Transforms/BBVectorize/metadata.ll16
-rw-r--r--llvm/test/Transforms/BBVectorize/no-ldstr-conn.ll4
-rw-r--r--llvm/test/Transforms/BBVectorize/simple-ldstr-ptrs.ll64
-rw-r--r--llvm/test/Transforms/BBVectorize/simple-ldstr.ll64
-rw-r--r--llvm/test/Transforms/CodeExtractor/2004-03-14-DominanceProblem.ll2
-rw-r--r--llvm/test/Transforms/CodeGenPrepare/X86/extend-sink-hoist.ll8
-rw-r--r--llvm/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll2
-rw-r--r--llvm/test/Transforms/CodeGenPrepare/statepoint-relocate.ll12
-rw-r--r--llvm/test/Transforms/ConstProp/loads.ll38
-rw-r--r--llvm/test/Transforms/ConstantHoisting/AArch64/const-addr.ll6
-rw-r--r--llvm/test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll6
-rw-r--r--llvm/test/Transforms/ConstantHoisting/PowerPC/masks.ll4
-rw-r--r--llvm/test/Transforms/ConstantHoisting/X86/cast-inst.ll12
-rw-r--r--llvm/test/Transforms/ConstantHoisting/X86/const-base-addr.ll6
-rw-r--r--llvm/test/Transforms/CorrelatedValuePropagation/basic.ll4
-rw-r--r--llvm/test/Transforms/CorrelatedValuePropagation/non-null.ll2
-rw-r--r--llvm/test/Transforms/DeadArgElim/aggregates.ll2
-rw-r--r--llvm/test/Transforms/DeadArgElim/deadexternal.ll4
-rw-r--r--llvm/test/Transforms/DeadArgElim/deadretval2.ll2
-rw-r--r--llvm/test/Transforms/DeadArgElim/keepalive.ll2
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/2011-03-25-DSEMiscompile.ll2
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll2
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/2011-09-06-MemCpy.ll4
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/PartialStore.ll2
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/atomic.ll16
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/const-pointers.ll4
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/crash.ll4
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/free.ll2
-rw-r--r--llvm/test/Transforms/DeadStoreElimination/simple.ll26
-rw-r--r--llvm/test/Transforms/EarlyCSE/basic.ll34
-rw-r--r--llvm/test/Transforms/FunctionAttrs/2008-09-03-ReadNone.ll2
-rw-r--r--llvm/test/Transforms/FunctionAttrs/2008-09-13-VolatileRead.ll2
-rw-r--r--llvm/test/Transforms/FunctionAttrs/2008-12-29-Constant.ll2
-rw-r--r--llvm/test/Transforms/FunctionAttrs/2009-01-02-LocalStores.ll2
-rw-r--r--llvm/test/Transforms/FunctionAttrs/2010-10-30-volatile.ll2
-rw-r--r--llvm/test/Transforms/FunctionAttrs/atomic.ll4
-rw-r--r--llvm/test/Transforms/FunctionAttrs/nocapture.ll8
-rw-r--r--llvm/test/Transforms/FunctionAttrs/optnone-simple.ll24
-rw-r--r--llvm/test/Transforms/GCOVProfiling/linezero.ll16
-rw-r--r--llvm/test/Transforms/GCOVProfiling/return-block.ll2
-rw-r--r--llvm/test/Transforms/GVN/2007-07-25-DominatedLoop.ll4
-rw-r--r--llvm/test/Transforms/GVN/2007-07-25-InfiniteLoop.ll2
-rw-r--r--llvm/test/Transforms/GVN/2007-07-25-Loop.ll2
-rw-r--r--llvm/test/Transforms/GVN/2007-07-25-NestedLoop.ll6
-rw-r--r--llvm/test/Transforms/GVN/2007-07-25-SinglePredecessor.ll2
-rw-r--r--llvm/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll6
-rw-r--r--llvm/test/Transforms/GVN/2007-07-26-NonRedundant.ll2
-rw-r--r--llvm/test/Transforms/GVN/2007-07-26-PhiErasure.ll4
-rw-r--r--llvm/test/Transforms/GVN/2007-07-30-PredIDom.ll2
-rw-r--r--llvm/test/Transforms/GVN/2007-07-31-NoDomInherit.ll66
-rw-r--r--llvm/test/Transforms/GVN/2007-07-31-RedundantPhi.ll2
-rw-r--r--llvm/test/Transforms/GVN/2008-02-12-UndefLoad.ll2
-rw-r--r--llvm/test/Transforms/GVN/2008-02-13-NewPHI.ll4
-rw-r--r--llvm/test/Transforms/GVN/2008-07-02-Unreachable.ll4
-rw-r--r--llvm/test/Transforms/GVN/2008-12-09-SelfRemove.ll2
-rw-r--r--llvm/test/Transforms/GVN/2008-12-12-RLE-Crash.ll4
-rw-r--r--llvm/test/Transforms/GVN/2008-12-14-rle-reanalyze.ll2
-rw-r--r--llvm/test/Transforms/GVN/2008-12-15-CacheVisited.ll4
-rw-r--r--llvm/test/Transforms/GVN/2009-01-21-SortInvalidation.ll2
-rw-r--r--llvm/test/Transforms/GVN/2009-01-22-SortInvalidation.ll4
-rw-r--r--llvm/test/Transforms/GVN/2009-02-17-LoadPRECrash.ll18
-rw-r--r--llvm/test/Transforms/GVN/2009-06-17-InvalidPRE.ll8
-rw-r--r--llvm/test/Transforms/GVN/2009-07-13-MemDepSortFail.ll4
-rw-r--r--llvm/test/Transforms/GVN/2009-11-12-MemDepMallocBitCast.ll4
-rw-r--r--llvm/test/Transforms/GVN/2010-03-31-RedundantPHIs.ll4
-rw-r--r--llvm/test/Transforms/GVN/2010-05-08-OneBit.ll2
-rw-r--r--llvm/test/Transforms/GVN/2011-04-27-phioperands.ll2
-rw-r--r--llvm/test/Transforms/GVN/2011-06-01-NonLocalMemdepMiscompile.ll14
-rw-r--r--llvm/test/Transforms/GVN/MemdepMiscompile.ll8
-rw-r--r--llvm/test/Transforms/GVN/atomic.ll56
-rw-r--r--llvm/test/Transforms/GVN/calloc-load-removal.ll4
-rw-r--r--llvm/test/Transforms/GVN/cond_br.ll8
-rw-r--r--llvm/test/Transforms/GVN/cond_br2.ll12
-rw-r--r--llvm/test/Transforms/GVN/condprop.ll14
-rw-r--r--llvm/test/Transforms/GVN/crash-no-aa.ll2
-rw-r--r--llvm/test/Transforms/GVN/crash.ll32
-rw-r--r--llvm/test/Transforms/GVN/invariant-load.ll18
-rw-r--r--llvm/test/Transforms/GVN/lifetime-simple.ll4
-rw-r--r--llvm/test/Transforms/GVN/load-constant-mem.ll4
-rw-r--r--llvm/test/Transforms/GVN/load-from-unreachable-predecessor.ll6
-rw-r--r--llvm/test/Transforms/GVN/load-pre-align.ll4
-rw-r--r--llvm/test/Transforms/GVN/load-pre-licm.ll4
-rw-r--r--llvm/test/Transforms/GVN/load-pre-nonlocal.ll18
-rw-r--r--llvm/test/Transforms/GVN/lpre-call-wrap-2.ll6
-rw-r--r--llvm/test/Transforms/GVN/lpre-call-wrap.ll6
-rw-r--r--llvm/test/Transforms/GVN/malloc-load-removal.ll4
-rw-r--r--llvm/test/Transforms/GVN/noalias.ll18
-rw-r--r--llvm/test/Transforms/GVN/non-local-offset.ll8
-rw-r--r--llvm/test/Transforms/GVN/nonescaping-malloc.ll12
-rw-r--r--llvm/test/Transforms/GVN/null-aliases-nothing.ll4
-rw-r--r--llvm/test/Transforms/GVN/phi-translate-partial-alias.ll8
-rw-r--r--llvm/test/Transforms/GVN/phi-translate.ll4
-rw-r--r--llvm/test/Transforms/GVN/pr10820.ll2
-rw-r--r--llvm/test/Transforms/GVN/pr14166.ll6
-rw-r--r--llvm/test/Transforms/GVN/pr17732.ll4
-rw-r--r--llvm/test/Transforms/GVN/pr17852.ll16
-rw-r--r--llvm/test/Transforms/GVN/pre-basic-add.ll2
-rw-r--r--llvm/test/Transforms/GVN/pre-gep-load.ll8
-rw-r--r--llvm/test/Transforms/GVN/pre-load.ll76
-rw-r--r--llvm/test/Transforms/GVN/pre-single-pred.ll6
-rw-r--r--llvm/test/Transforms/GVN/preserve-tbaa.ll4
-rw-r--r--llvm/test/Transforms/GVN/range.ll48
-rw-r--r--llvm/test/Transforms/GVN/readattrs.ll2
-rw-r--r--llvm/test/Transforms/GVN/rle-must-alias.ll6
-rw-r--r--llvm/test/Transforms/GVN/rle-no-phi-translate.ll2
-rw-r--r--llvm/test/Transforms/GVN/rle-nonlocal.ll10
-rw-r--r--llvm/test/Transforms/GVN/rle-phi-translate.ll18
-rw-r--r--llvm/test/Transforms/GVN/rle-semidominated.ll4
-rw-r--r--llvm/test/Transforms/GVN/rle.ll110
-rw-r--r--llvm/test/Transforms/GVN/tbaa.ll8
-rw-r--r--llvm/test/Transforms/GVN/volatile.ll80
-rw-r--r--llvm/test/Transforms/GlobalDCE/2002-08-17-FunctionDGE.ll2
-rw-r--r--llvm/test/Transforms/GlobalDCE/2002-09-12-Redeletion.ll2
-rw-r--r--llvm/test/Transforms/GlobalDCE/complex-constantexpr.ll8
-rw-r--r--llvm/test/Transforms/GlobalDCE/global_ctors_integration.ll8
-rw-r--r--llvm/test/Transforms/GlobalDCE/indirectbr.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2004-10-10-CastStoreOnce.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/2005-06-15-LocalizeConstExprCrash.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2006-07-07-InlineAsmCrash.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/2006-11-01-ShrinkGlobalPhiCrash.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2007-04-05-Crash.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2007-05-13-Crash.ll16
-rw-r--r--llvm/test/Transforms/GlobalOpt/2007-11-09-GEP-GEP-Crash.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2008-01-03-Crash.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2008-01-13-OutOfRangeSROA.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll6
-rw-r--r--llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash-2.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2009-01-13-phi-user.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2009-03-05-dbg.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2009-03-07-PromotePtrToBool.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2009-06-01-RecursivePHI.ll6
-rw-r--r--llvm/test/Transforms/GlobalOpt/2009-11-16-BrokenPerformHeapAllocSRoA.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/2009-11-16-MallocSingleStoreToGlobalVar.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/2010-02-25-MallocPromote.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/2010-02-26-MallocSROA.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/array-elem-refs.ll6
-rw-r--r--llvm/test/Transforms/GlobalOpt/atomic.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/basictest.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/constantfold-initializers.ll6
-rw-r--r--llvm/test/Transforms/GlobalOpt/crash-2.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/crash.ll6
-rw-r--r--llvm/test/Transforms/GlobalOpt/ctor-list-opt-inbounds.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/ctor-list-opt.ll10
-rw-r--r--llvm/test/Transforms/GlobalOpt/deadfunction.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll6
-rw-r--r--llvm/test/Transforms/GlobalOpt/fastcc.ll8
-rw-r--r--llvm/test/Transforms/GlobalOpt/globalsra-partial.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/globalsra-unknown-index.ll12
-rw-r--r--llvm/test/Transforms/GlobalOpt/globalsra.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/heap-sra-1.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/heap-sra-2.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/heap-sra-3.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/heap-sra-4.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/heap-sra-phi.ll8
-rw-r--r--llvm/test/Transforms/GlobalOpt/integer-bool.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/iterate.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/load-store-global.ll6
-rw-r--r--llvm/test/Transforms/GlobalOpt/malloc-promote-1.ll6
-rw-r--r--llvm/test/Transforms/GlobalOpt/malloc-promote-2.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/malloc-promote-3.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/memset-null.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/phi-select.ll4
-rw-r--r--llvm/test/Transforms/GlobalOpt/storepointer-compare.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/storepointer.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/tls.ll6
-rw-r--r--llvm/test/Transforms/GlobalOpt/trivialstore.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/undef-init.ll2
-rw-r--r--llvm/test/Transforms/GlobalOpt/unnamed-addr.ll8
-rw-r--r--llvm/test/Transforms/GlobalOpt/zeroinitializer-gep-load.ll2
-rw-r--r--llvm/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll8
-rw-r--r--llvm/test/Transforms/IPConstantProp/dangling-block-address.ll4
-rw-r--r--llvm/test/Transforms/IPConstantProp/global.ll4
-rw-r--r--llvm/test/Transforms/IPConstantProp/return-argument.ll2
-rw-r--r--llvm/test/Transforms/IRCE/decrementing-loop.ll2
-rw-r--r--llvm/test/Transforms/IRCE/low-becount.ll2
-rw-r--r--llvm/test/Transforms/IRCE/multiple-access-no-preloop.ll4
-rw-r--r--llvm/test/Transforms/IRCE/not-likely-taken.ll4
-rw-r--r--llvm/test/Transforms/IRCE/single-access-no-preloop.ll4
-rw-r--r--llvm/test/Transforms/IRCE/single-access-with-preloop.ll2
-rw-r--r--llvm/test/Transforms/IRCE/unhandled.ll2
-rw-r--r--llvm/test/Transforms/IRCE/with-parent-loops.ll18
-rw-r--r--llvm/test/Transforms/IndVarSimplify/2005-02-17-TruncateExprCrash.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/2006-06-16-Indvar-LCSSA-Crash.ll4
-rw-r--r--llvm/test/Transforms/IndVarSimplify/2007-01-06-TripCount.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/2008-09-02-IVType.ll6
-rw-r--r--llvm/test/Transforms/IndVarSimplify/2008-10-03-CouldNotCompute.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/2009-04-14-shorten_iv_vars.ll60
-rw-r--r--llvm/test/Transforms/IndVarSimplify/2009-04-15-shorten-iv-vars-2.ll66
-rw-r--r--llvm/test/Transforms/IndVarSimplify/2011-09-27-hoistsext.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/2011-11-01-lftrptr.ll8
-rw-r--r--llvm/test/Transforms/IndVarSimplify/2011-11-15-multiexit.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/2014-06-21-congruent-constant.ll12
-rw-r--r--llvm/test/Transforms/IndVarSimplify/ashr-tripcount.ll20
-rw-r--r--llvm/test/Transforms/IndVarSimplify/avoid-i0.ll28
-rw-r--r--llvm/test/Transforms/IndVarSimplify/eliminate-comparison.ll18
-rw-r--r--llvm/test/Transforms/IndVarSimplify/eliminate-rem.ll6
-rw-r--r--llvm/test/Transforms/IndVarSimplify/indirectbr.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/iv-fold.ll8
-rw-r--r--llvm/test/Transforms/IndVarSimplify/iv-sext.ll14
-rw-r--r--llvm/test/Transforms/IndVarSimplify/iv-widen.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/iv-zext.ll6
-rw-r--r--llvm/test/Transforms/IndVarSimplify/lftr-promote.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/lftr-reuse.ll8
-rw-r--r--llvm/test/Transforms/IndVarSimplify/lftr-zext.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/loop_evaluate7.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/loop_evaluate8.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/no-iv-rewrite.ll8
-rw-r--r--llvm/test/Transforms/IndVarSimplify/overflowcheck.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/phi-uses-value-multiple-times.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/polynomial-expand.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/pr18223.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/pr20680.ll12
-rw-r--r--llvm/test/Transforms/IndVarSimplify/pr22222.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/promote-iv-to-eliminate-casts.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/sharpen-range.ll4
-rw-r--r--llvm/test/Transforms/IndVarSimplify/single-element-range.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/sink-alloca.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/udiv.ll4
-rw-r--r--llvm/test/Transforms/IndVarSimplify/uglygep.ll4
-rw-r--r--llvm/test/Transforms/IndVarSimplify/ult-sub-to-eq.ll4
-rw-r--r--llvm/test/Transforms/IndVarSimplify/use-range-metadata.ll2
-rw-r--r--llvm/test/Transforms/IndVarSimplify/variable-stride-ivs-0.ll6
-rw-r--r--llvm/test/Transforms/IndVarSimplify/verify-scev.ll4
-rw-r--r--llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll18
-rw-r--r--llvm/test/Transforms/IndVarSimplify/widen-nsw.ll2
-rw-r--r--llvm/test/Transforms/Inline/2006-07-12-InlinePruneCGUpdate.ll2
-rw-r--r--llvm/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll8
-rw-r--r--llvm/test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll42
-rw-r--r--llvm/test/Transforms/Inline/align.ll22
-rw-r--r--llvm/test/Transforms/Inline/alloca-bonus.ll10
-rw-r--r--llvm/test/Transforms/Inline/alloca-dbgdeclare.ll2
-rw-r--r--llvm/test/Transforms/Inline/alloca-merge-align-nodl.ll12
-rw-r--r--llvm/test/Transforms/Inline/alloca-merge-align.ll16
-rw-r--r--llvm/test/Transforms/Inline/basictest.ll2
-rw-r--r--llvm/test/Transforms/Inline/byval-tail-call.ll2
-rw-r--r--llvm/test/Transforms/Inline/byval.ll10
-rw-r--r--llvm/test/Transforms/Inline/byval_lifetime.ll2
-rw-r--r--llvm/test/Transforms/Inline/crash2.ll2
-rw-r--r--llvm/test/Transforms/Inline/devirtualize-3.ll14
-rw-r--r--llvm/test/Transforms/Inline/devirtualize.ll6
-rw-r--r--llvm/test/Transforms/Inline/ephemeral.ll2
-rw-r--r--llvm/test/Transforms/Inline/gvn-inline-iteration.ll2
-rw-r--r--llvm/test/Transforms/Inline/inline-byval-bonus.ll32
-rw-r--r--llvm/test/Transforms/Inline/inline-cold.ll120
-rw-r--r--llvm/test/Transforms/Inline/inline-fast-math-flags.ll6
-rw-r--r--llvm/test/Transforms/Inline/inline-fp.ll24
-rw-r--r--llvm/test/Transforms/Inline/inline-invoke-tail.ll2
-rw-r--r--llvm/test/Transforms/Inline/inline-optsize.ll10
-rw-r--r--llvm/test/Transforms/Inline/inline_constprop.ll4
-rw-r--r--llvm/test/Transforms/Inline/inline_dbg_declare.ll8
-rw-r--r--llvm/test/Transforms/Inline/inline_minisize.ll96
-rw-r--r--llvm/test/Transforms/Inline/invoke-combine-clauses.ll2
-rw-r--r--llvm/test/Transforms/Inline/noalias-cs.ll12
-rw-r--r--llvm/test/Transforms/Inline/noalias.ll16
-rw-r--r--llvm/test/Transforms/Inline/noalias2.ll20
-rw-r--r--llvm/test/Transforms/Inline/optimization-remarks.ll16
-rw-r--r--llvm/test/Transforms/Inline/ptr-diff.ll16
-rw-r--r--llvm/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2006-09-15-CastToBool.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/2006-12-15-Range-Test.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/2007-02-07-PointerCast.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2007-10-31-RangeCrash.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2007-10-31-StringCrash.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2007-11-07-OpaqueAlignCrash.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/2007-12-28-IcmpSub2.ll16
-rw-r--r--llvm/test/Transforms/InstCombine/2008-03-13-IntToPtr.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/2008-05-09-SinkOfInvoke.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2008-05-17-InfLoop.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/2008-05-23-CompareFold.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2008-06-19-UncondLoad.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/2008-08-05-And.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2009-01-19-fmod-constant-float-specials.ll64
-rw-r--r--llvm/test/Transforms/InstCombine/2009-01-19-fmod-constant-float.ll16
-rw-r--r--llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll92
-rw-r--r--llvm/test/Transforms/InstCombine/2009-02-21-LoadCST.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2009-02-25-CrashZeroSizeArray.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/2009-03-18-vector-ashr-crash.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/2009-05-23-FCmpToICmp.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2010-03-03-ExtElim.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2011-05-02-VectorBoolean.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/2011-05-28-swapmulsub.ll14
-rw-r--r--llvm/test/Transforms/InstCombine/2011-06-13-nsw-alloca.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/2011-10-07-AlignPromotion.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2012-05-27-Negative-Shift-Crash.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/2012-05-28-select-hang.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/2012-06-06-LoadOfPHIs.ll50
-rw-r--r--llvm/test/Transforms/InstCombine/2012-07-25-LoadPart.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2012-10-25-vector-of-pointers.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2012-12-14-simp-vgep.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/2013-03-05-Combine-BitcastTy-Into-Alloca.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/CPP_min_max.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/add3.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/addrspacecast.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/alias-recursion.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/align-addr.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/align-attr.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/align-external.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/aligned-altivec.ll16
-rw-r--r--llvm/test/Transforms/InstCombine/aligned-qpx.ll20
-rw-r--r--llvm/test/Transforms/InstCombine/alloca.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/assume-loop-align.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/assume-redundant.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/assume.ll16
-rw-r--r--llvm/test/Transforms/InstCombine/atomic.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/bitcast-alias-function.ll34
-rw-r--r--llvm/test/Transforms/InstCombine/bitcast.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/bittest.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/call2.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/cast.ll72
-rw-r--r--llvm/test/Transforms/InstCombine/cast_ptr.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll34
-rw-r--r--llvm/test/Transforms/InstCombine/crash.ll20
-rw-r--r--llvm/test/Transforms/InstCombine/debuginfo.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/descale-zero.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/div-shift-crash.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/err-rep-cold.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/extractvalue.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/fmul.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/fold-vector-zero.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/fpextend.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/gc.relocate.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/gep-addrspace.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/gep-sext.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/gepphigep.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/getelementptr.ll38
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-range.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/invariant.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/known_align.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/load-cmp.ll48
-rw-r--r--llvm/test/Transforms/InstCombine/load-select.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/load.ll46
-rw-r--r--llvm/test/Transforms/InstCombine/load3.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/loadstore-alignment.ll16
-rw-r--r--llvm/test/Transforms/InstCombine/loadstore-metadata.ll28
-rw-r--r--llvm/test/Transforms/InstCombine/lshr-phi.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/malloc-free-delete.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/mem-gep-zidx.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/memcmp-1.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/memcpy-from-global.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/merge-icmp.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/mul.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/multi-size-address-space-pointer.ll16
-rw-r--r--llvm/test/Transforms/InstCombine/no-negzero.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/obfuscated_splat.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/objsize.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/odr-linkage.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/or.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/phi-merge-gep.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/phi.ll26
-rw-r--r--llvm/test/Transforms/InstCombine/pr12251.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/pr2645-0.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/pr2645-1.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/select-cmp-br.ll24
-rw-r--r--llvm/test/Transforms/InstCombine/select-load-call.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/select.ll86
-rw-r--r--llvm/test/Transforms/InstCombine/shufflemask-undef.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/signed-comparison.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/simplify-demanded-bits-pointer.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/simplify-libcalls.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/sincospi.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/sqrt.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/store.ll10
-rw-r--r--llvm/test/Transforms/InstCombine/strcmp-1.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/strncmp-1.ll8
-rw-r--r--llvm/test/Transforms/InstCombine/struct-assign-tbaa.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/vec_demanded_elts.ll6
-rw-r--r--llvm/test/Transforms/InstCombine/vec_extract_var_elt.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/vec_shuffle.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/volatile_store.ll2
-rw-r--r--llvm/test/Transforms/InstCombine/vsx-unaligned.ll12
-rw-r--r--llvm/test/Transforms/InstCombine/zext-or-icmp.ll8
-rw-r--r--llvm/test/Transforms/InstMerge/ld_hoist1.ll10
-rw-r--r--llvm/test/Transforms/InstMerge/ld_hoist_st_sink.ll36
-rw-r--r--llvm/test/Transforms/InstMerge/st_sink_barrier_call.ll8
-rw-r--r--llvm/test/Transforms/InstMerge/st_sink_bugfix_22613.ll20
-rw-r--r--llvm/test/Transforms/InstMerge/st_sink_no_barrier_call.ll8
-rw-r--r--llvm/test/Transforms/InstMerge/st_sink_no_barrier_load.ll12
-rw-r--r--llvm/test/Transforms/InstMerge/st_sink_no_barrier_store.ll8
-rw-r--r--llvm/test/Transforms/InstMerge/st_sink_two_stores.ll8
-rw-r--r--llvm/test/Transforms/InstMerge/st_sink_with_barrier.ll12
-rw-r--r--llvm/test/Transforms/InstSimplify/call-callconv.ll2
-rw-r--r--llvm/test/Transforms/InstSimplify/compare.ll6
-rw-r--r--llvm/test/Transforms/InstSimplify/load.ll4
-rw-r--r--llvm/test/Transforms/InstSimplify/vector_ptr_bitcast.ll4
-rw-r--r--llvm/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll2
-rw-r--r--llvm/test/Transforms/JumpThreading/2010-08-26-and.ll2
-rw-r--r--llvm/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll2
-rw-r--r--llvm/test/Transforms/JumpThreading/crash.ll2
-rw-r--r--llvm/test/Transforms/JumpThreading/landing-pad.ll26
-rw-r--r--llvm/test/Transforms/JumpThreading/lvi-load.ll2
-rw-r--r--llvm/test/Transforms/JumpThreading/or-undef.ll8
-rw-r--r--llvm/test/Transforms/JumpThreading/phi-eq.ll28
-rw-r--r--llvm/test/Transforms/JumpThreading/select.ll4
-rw-r--r--llvm/test/Transforms/JumpThreading/thread-loads.ll10
-rw-r--r--llvm/test/Transforms/LCSSA/2006-06-03-IncorrectIDFPhis.ll2
-rw-r--r--llvm/test/Transforms/LCSSA/2006-07-09-NoDominator.ll2
-rw-r--r--llvm/test/Transforms/LCSSA/2007-07-12-LICM-2.ll2
-rw-r--r--llvm/test/Transforms/LCSSA/2007-07-12-LICM-3.ll2
-rw-r--r--llvm/test/Transforms/LCSSA/2007-07-12-LICM.ll2
-rw-r--r--llvm/test/Transforms/LCSSA/unreachable-use.ll4
-rw-r--r--llvm/test/Transforms/LICM/2003-05-02-LoadHoist.ll4
-rw-r--r--llvm/test/Transforms/LICM/2004-09-14-AliasAnalysisInvalidate.ll4
-rw-r--r--llvm/test/Transforms/LICM/2007-05-22-VolatileSink.ll4
-rw-r--r--llvm/test/Transforms/LICM/2007-07-30-AliasSet.ll2
-rw-r--r--llvm/test/Transforms/LICM/2008-07-22-LoadGlobalConstant.ll4
-rw-r--r--llvm/test/Transforms/LICM/2009-12-10-LICM-Indbr-Crash.ll2
-rw-r--r--llvm/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll4
-rw-r--r--llvm/test/Transforms/LICM/2011-04-06-PromoteResultOfPromotion.ll6
-rw-r--r--llvm/test/Transforms/LICM/2011-04-09-RAUW-AST.ll4
-rw-r--r--llvm/test/Transforms/LICM/PR21582.ll2
-rw-r--r--llvm/test/Transforms/LICM/atomics.ll14
-rw-r--r--llvm/test/Transforms/LICM/constexpr.ll6
-rw-r--r--llvm/test/Transforms/LICM/crash.ll6
-rw-r--r--llvm/test/Transforms/LICM/hoist-bitcast-load.ll48
-rw-r--r--llvm/test/Transforms/LICM/hoist-deref-load.ll32
-rw-r--r--llvm/test/Transforms/LICM/hoist-invariant-load.ll8
-rw-r--r--llvm/test/Transforms/LICM/hoisting.ll8
-rw-r--r--llvm/test/Transforms/LICM/lcssa-ssa-promoter.ll2
-rw-r--r--llvm/test/Transforms/LICM/scalar-promote-memmodel.ll4
-rw-r--r--llvm/test/Transforms/LICM/scalar_promote.ll26
-rw-r--r--llvm/test/Transforms/LICM/sinking.ll20
-rw-r--r--llvm/test/Transforms/LICM/speculate.ll10
-rw-r--r--llvm/test/Transforms/LICM/volatile-alias.ll22
-rw-r--r--llvm/test/Transforms/LoadCombine/load-combine-aa.ll14
-rw-r--r--llvm/test/Transforms/LoadCombine/load-combine-assume.ll12
-rw-r--r--llvm/test/Transforms/LoadCombine/load-combine.ll78
-rw-r--r--llvm/test/Transforms/LoopDeletion/2008-05-06-Phi.ll10
-rw-r--r--llvm/test/Transforms/LoopIdiom/basic-address-space.ll4
-rw-r--r--llvm/test/Transforms/LoopIdiom/basic.ll10
-rw-r--r--llvm/test/Transforms/LoopIdiom/scev-invalidation.ll2
-rw-r--r--llvm/test/Transforms/LoopReroll/basic.ll60
-rw-r--r--llvm/test/Transforms/LoopReroll/nonconst_lb.ll30
-rw-r--r--llvm/test/Transforms/LoopReroll/reduction.ll28
-rw-r--r--llvm/test/Transforms/LoopRotate/PhiRename-1.ll28
-rw-r--r--llvm/test/Transforms/LoopRotate/alloca.ll2
-rw-r--r--llvm/test/Transforms/LoopRotate/dbgvalue.ll6
-rw-r--r--llvm/test/Transforms/LoopRotate/indirectbr.ll2
-rw-r--r--llvm/test/Transforms/LoopRotate/multiple-exits.ll2
-rw-r--r--llvm/test/Transforms/LoopRotate/nosimplifylatch.ll4
-rw-r--r--llvm/test/Transforms/LoopRotate/phi-duplicate.ll4
-rw-r--r--llvm/test/Transforms/LoopRotate/simplifylatch.ll8
-rw-r--r--llvm/test/Transforms/LoopSimplify/2003-08-15-PreheadersFail.ll14
-rw-r--r--llvm/test/Transforms/LoopSimplify/2003-12-10-ExitBlocksProblem.ll8
-rw-r--r--llvm/test/Transforms/LoopSimplify/ashr-crash.ll6
-rw-r--r--llvm/test/Transforms/LoopSimplify/merge-exits.ll6
-rw-r--r--llvm/test/Transforms/LoopSimplify/phi-node-simplify.ll8
-rw-r--r--llvm/test/Transforms/LoopSimplify/preserve-scev.ll4
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2005-08-15-AddRecIV.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2005-08-17-OutOfLoopVariant.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2008-09-09-Overflow.ll4
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2009-01-13-nonconstant-stride-outside-loop.ll4
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2009-04-28-no-reduce-mul.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2011-07-19-CritEdgeBreakCrash.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll4
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2012-03-15-nopreheader.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2012-03-26-constexpr.ll8
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll4
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2012-07-18-LimitReassociate.ll10
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-memcpy.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll10
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll42
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/X86/2011-12-04-loserreg.ll20
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll8
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll44
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-stress-X86.ll16
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/X86/no_superflous_induction_vars.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll4
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/addrec-gep-address-space.ll4
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/addrec-gep.ll4
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/address-space-loop.ll4
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/dont_reverse.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll10
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/pr12691.ll8
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/pr18165.ll10
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/pr2570.ll40
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/pr3086.ll4
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/pr3399.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/pr3571.ll2
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/share_code_in_preheader.ll4
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/uglygep-address-space.ll4
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/uglygep.ll4
-rw-r--r--llvm/test/Transforms/LoopUnroll/2011-08-08-PhiUpdate.ll4
-rw-r--r--llvm/test/Transforms/LoopUnroll/2011-08-09-IVSimplify.ll2
-rw-r--r--llvm/test/Transforms/LoopUnroll/2011-10-01-NoopTrunc.ll2
-rw-r--r--llvm/test/Transforms/LoopUnroll/PowerPC/a2-unrolling.ll2
-rw-r--r--llvm/test/Transforms/LoopUnroll/X86/partial.ll12
-rw-r--r--llvm/test/Transforms/LoopUnroll/ephemeral.ll2
-rw-r--r--llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll4
-rw-r--r--llvm/test/Transforms/LoopUnroll/runtime-loop.ll6
-rw-r--r--llvm/test/Transforms/LoopUnroll/runtime-loop1.ll2
-rw-r--r--llvm/test/Transforms/LoopUnroll/runtime-loop2.ll2
-rw-r--r--llvm/test/Transforms/LoopUnroll/runtime-loop3.ll2
-rw-r--r--llvm/test/Transforms/LoopUnroll/scevunroll.ll10
-rw-r--r--llvm/test/Transforms/LoopUnroll/shifted-tripcount.ll4
-rw-r--r--llvm/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll10
-rw-r--r--llvm/test/Transforms/LoopUnroll/unroll-pragmas.ll18
-rw-r--r--llvm/test/Transforms/LoopUnswitch/2008-06-17-DomFrontier.ll2
-rw-r--r--llvm/test/Transforms/LoopUnswitch/2010-11-18-LCSSA.ll2
-rw-r--r--llvm/test/Transforms/LoopUnswitch/2011-09-26-EHCrash.ll2
-rw-r--r--llvm/test/Transforms/LoopUnswitch/2011-11-18-SimpleSwitch.ll10
-rw-r--r--llvm/test/Transforms/LoopUnswitch/2011-11-18-TwoSwitches-Threshold.ll6
-rw-r--r--llvm/test/Transforms/LoopUnswitch/2011-11-18-TwoSwitches.ll14
-rw-r--r--llvm/test/Transforms/LoopUnswitch/2012-04-30-LoopUnswitch-LPad-Crash.ll4
-rw-r--r--llvm/test/Transforms/LoopUnswitch/2012-05-20-Phi.ll4
-rw-r--r--llvm/test/Transforms/LoopUnswitch/basictest.ll8
-rw-r--r--llvm/test/Transforms/LoopUnswitch/preserve-analyses.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/12-12-11-if-conv.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll36
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/gather-cost.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sdiv-pow2.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/arm-unroll.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/gather-cost.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/gcc-examples.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll24
-rw-r--r--llvm/test/Transforms/LoopVectorize/ARM/width-detect.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll10
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/already-vectorized.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/assume.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/avx1.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/cost-model.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/fp32_to_uint32-cost-model.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/fp_to_sint8-cost-model.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/gather-cost.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/gcc-examples.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll150
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/no-vector.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll18
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/powof2div.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/reduction-crash.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/small-size.ll16
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/tripcount.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/uint64_to_fp64-cost-model.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/unroll-pm.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll10
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/unroll_selection.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/vector-scalar-select-cost.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/align.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/bzip_reverse_loops.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/calloc.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/conditional-assignment.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/control-flow.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/cpp-new-array.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/dbg.value.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/debugloc.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/duplicated-metadata.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/ee-crash.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/exact.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/flags.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/float-reduction.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/funcall.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/gcc-examples.ll96
-rw-r--r--llvm/test/Transforms/LoopVectorize/global_alias.ll362
-rw-r--r--llvm/test/Transforms/LoopVectorize/hoist-loads.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/i8-induction.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/if-conversion-edgemasks.ll10
-rw-r--r--llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/if-conversion-reduction.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/if-conversion.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/if-pred-stores.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/increment.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/induction.ll10
-rw-r--r--llvm/test/Transforms/LoopVectorize/intrinsic.ll122
-rw-r--r--llvm/test/Transforms/LoopVectorize/lifetime.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/loop-vect-memdep.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/memdep.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/metadata-unroll.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/metadata.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/minmax_reduction.ll68
-rw-r--r--llvm/test/Transforms/LoopVectorize/multi-use-reduction-bug.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/no_array_bounds.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/no_idiv_reduction.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/no_int_induction.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/no_outside_user.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/no_switch.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/non-const-n.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/opt.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/ptr_loops.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/read-only.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction.ll46
-rw-r--r--llvm/test/Transforms/LoopVectorize/reverse_induction.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/reverse_iter.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll20
-rw-r--r--llvm/test/Transforms/LoopVectorize/runtime-check-readonly.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/runtime-check.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/runtime-limit.ll30
-rw-r--r--llvm/test/Transforms/LoopVectorize/safegep.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/same-base-access.ll24
-rw-r--r--llvm/test/Transforms/LoopVectorize/scalar-select.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll10
-rw-r--r--llvm/test/Transforms/LoopVectorize/simple-unroll.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/small-loop.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/start-non-zero.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/store-shuffle-bug.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/struct_access.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/tbaa-nodep.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/unroll_novec.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/value-ptr-bug.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/vect.omp.persistence.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/vect.stats.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/vectorize-once.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/version-mem-access.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/write-only.ll2
-rw-r--r--llvm/test/Transforms/LowerAtomic/atomic-swap.ll2
-rw-r--r--llvm/test/Transforms/LowerBitSets/simple.ll4
-rw-r--r--llvm/test/Transforms/LowerExpectIntrinsic/basic.ll38
-rw-r--r--llvm/test/Transforms/LowerSwitch/2014-06-10-SwitchContiguousOpt.ll2
-rw-r--r--llvm/test/Transforms/LowerSwitch/2014-06-11-SwitchDefaultUnreachableOpt.ll4
-rw-r--r--llvm/test/Transforms/Mem2Reg/2002-03-28-UninitializedVal.ll2
-rw-r--r--llvm/test/Transforms/Mem2Reg/2003-04-24-MultipleIdenticalSuccessors.ll2
-rw-r--r--llvm/test/Transforms/Mem2Reg/2003-06-26-IterativePromote.ll6
-rw-r--r--llvm/test/Transforms/Mem2Reg/2003-10-05-DeadPHIInsertion.ll4
-rw-r--r--llvm/test/Transforms/Mem2Reg/2005-06-30-ReadBeforeWrite.ll16
-rw-r--r--llvm/test/Transforms/Mem2Reg/2005-11-28-Crash.ll2
-rw-r--r--llvm/test/Transforms/Mem2Reg/2007-08-27-VolatileLoadsStores.ll6
-rw-r--r--llvm/test/Transforms/Mem2Reg/ConvertDebugInfo.ll8
-rw-r--r--llvm/test/Transforms/Mem2Reg/ConvertDebugInfo2.ll8
-rw-r--r--llvm/test/Transforms/Mem2Reg/PromoteMemToRegister.ll6
-rw-r--r--llvm/test/Transforms/Mem2Reg/UndefValuesMerge.ll2
-rw-r--r--llvm/test/Transforms/Mem2Reg/atomic.ll2
-rw-r--r--llvm/test/Transforms/Mem2Reg/crash.ll6
-rw-r--r--llvm/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll2
-rw-r--r--llvm/test/Transforms/MemCpyOpt/2011-06-02-CallSlotOverwritten.ll6
-rw-r--r--llvm/test/Transforms/MemCpyOpt/atomic.ll2
-rw-r--r--llvm/test/Transforms/MemCpyOpt/loadstore-sret.ll2
-rw-r--r--llvm/test/Transforms/MemCpyOpt/memcpy.ll2
-rw-r--r--llvm/test/Transforms/MemCpyOpt/sret.ll4
-rw-r--r--llvm/test/Transforms/MergeFunc/2011-02-08-RemoveEqual.ll68
-rw-r--r--llvm/test/Transforms/MergeFunc/address-spaces.ll6
-rw-r--r--llvm/test/Transforms/MergeFunc/crash.ll4
-rw-r--r--llvm/test/Transforms/MergeFunc/inttoptr-address-space.ll4
-rw-r--r--llvm/test/Transforms/MergeFunc/inttoptr.ll4
-rw-r--r--llvm/test/Transforms/MergeFunc/mergefunc-struct-return.ll4
-rw-r--r--llvm/test/Transforms/MergeFunc/ranges.ll24
-rw-r--r--llvm/test/Transforms/MergeFunc/vector.ll4
-rw-r--r--llvm/test/Transforms/MetaRenamer/metarenamer.ll18
-rw-r--r--llvm/test/Transforms/ObjCARC/allocas.ll30
-rw-r--r--llvm/test/Transforms/ObjCARC/basic.ll142
-rw-r--r--llvm/test/Transforms/ObjCARC/cfg-hazards.ll2
-rw-r--r--llvm/test/Transforms/ObjCARC/contract-storestrong-ivar.ll4
-rw-r--r--llvm/test/Transforms/ObjCARC/contract-storestrong.ll36
-rw-r--r--llvm/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll8
-rw-r--r--llvm/test/Transforms/ObjCARC/escape.ll8
-rw-r--r--llvm/test/Transforms/ObjCARC/gvn.ll10
-rw-r--r--llvm/test/Transforms/ObjCARC/intrinsic-use.ll16
-rw-r--r--llvm/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll68
-rw-r--r--llvm/test/Transforms/ObjCARC/move-and-merge-autorelease.ll14
-rw-r--r--llvm/test/Transforms/ObjCARC/nested.ll166
-rw-r--r--llvm/test/Transforms/ObjCARC/provenance.ll18
-rw-r--r--llvm/test/Transforms/ObjCARC/retain-block-side-effects.ll6
-rw-r--r--llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll34
-rw-r--r--llvm/test/Transforms/PhaseOrdering/PR6627.ll20
-rw-r--r--llvm/test/Transforms/PhaseOrdering/basic.ll8
-rw-r--r--llvm/test/Transforms/PhaseOrdering/gdce.ll16
-rw-r--r--llvm/test/Transforms/Reassociate/2011-01-26-UseAfterFree.ll2
-rw-r--r--llvm/test/Transforms/Reassociate/basictest.ll24
-rw-r--r--llvm/test/Transforms/Reassociate/crash.ll6
-rw-r--r--llvm/test/Transforms/Reassociate/fast-basictest.ll18
-rw-r--r--llvm/test/Transforms/Reassociate/pr12245.ll20
-rw-r--r--llvm/test/Transforms/Reassociate/pr21205.ll2
-rw-r--r--llvm/test/Transforms/RewriteStatepointsForGC/basics.ll8
-rw-r--r--llvm/test/Transforms/SCCP/2003-06-24-OverdefinedPHIValue.ll2
-rw-r--r--llvm/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll6
-rw-r--r--llvm/test/Transforms/SCCP/2006-12-04-PackedType.ll4
-rw-r--r--llvm/test/Transforms/SCCP/apint-array.ll4
-rw-r--r--llvm/test/Transforms/SCCP/apint-bigarray.ll4
-rw-r--r--llvm/test/Transforms/SCCP/apint-bigint2.ll4
-rw-r--r--llvm/test/Transforms/SCCP/apint-ipsccp3.ll4
-rw-r--r--llvm/test/Transforms/SCCP/apint-ipsccp4.ll4
-rw-r--r--llvm/test/Transforms/SCCP/apint-load.ll6
-rw-r--r--llvm/test/Transforms/SCCP/atomic-load-store.ll4
-rw-r--r--llvm/test/Transforms/SCCP/ipsccp-basic.ll6
-rw-r--r--llvm/test/Transforms/SCCP/loadtest.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/commute.ll12
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/load-store-q.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll16
-rw-r--r--llvm/test/Transforms/SLPVectorizer/ARM/memory.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/R600/simplebb.ll36
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/addsub.ll180
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/align.ll18
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/call.ll40
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/cast.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/cmp_sel.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll40
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll12
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll2
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/crash_bullet.ll2
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll2
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll6
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/crash_gep.ll2
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/crash_lencod.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/cse.ll42
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/cycle_dup.ll10
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/debug_info.ll6
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/diamond.ll24
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/external_user.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/extract.ll6
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll18
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/flag.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/gep.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/hoist.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll100
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/implicitfloat.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/intrinsic.ll136
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/long_chains.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/loopinvariant.ll16
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/metadata.ll16
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/multi_block.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/multi_user.ll10
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/odd_store.ll6
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll96
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/opt.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/ordering.ll2
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/phi.ll40
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/phi3.ll2
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll16
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/powof2div.ll18
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/pr16628.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/pr16899.ll6
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/pr19657.ll58
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll104
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/reduction.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll4
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/return.ll24
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/rgb_phi.ll12
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/saxpy.ll20
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll16
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/simple-loop.ll16
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/simplebb.ll32
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/tiny-tree.ll24
-rw-r--r--llvm/test/Transforms/SLPVectorizer/X86/unreachable.ll8
-rw-r--r--llvm/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll8
-rw-r--r--llvm/test/Transforms/SROA/address-spaces.ll6
-rw-r--r--llvm/test/Transforms/SROA/alignment.ll32
-rw-r--r--llvm/test/Transforms/SROA/basictest.ll180
-rw-r--r--llvm/test/Transforms/SROA/big-endian.ll10
-rw-r--r--llvm/test/Transforms/SROA/fca.ll6
-rw-r--r--llvm/test/Transforms/SROA/phi-and-select.ll66
-rw-r--r--llvm/test/Transforms/SROA/slice-order-independence.ll8
-rw-r--r--llvm/test/Transforms/SROA/slice-width.ll14
-rw-r--r--llvm/test/Transforms/SROA/vector-conversion.ll6
-rw-r--r--llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll2
-rw-r--r--llvm/test/Transforms/SROA/vector-promotion.ll90
-rw-r--r--llvm/test/Transforms/SROA/vectors-of-pointers.ll2
-rw-r--r--llvm/test/Transforms/SampleProfile/branch.ll2
-rw-r--r--llvm/test/Transforms/SampleProfile/calls.ll14
-rw-r--r--llvm/test/Transforms/SampleProfile/discriminator.ll10
-rw-r--r--llvm/test/Transforms/SampleProfile/propagate.ll58
-rw-r--r--llvm/test/Transforms/ScalarRepl/2003-05-29-ArrayFail.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/2006-11-07-InvalidArrayPromote.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/2007-05-29-MemcpyPreserve.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll8
-rw-r--r--llvm/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/2008-02-28-SubElementExtractCrash.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll4
-rw-r--r--llvm/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/2009-02-05-LoadFCA.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll10
-rw-r--r--llvm/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll10
-rw-r--r--llvm/test/Transforms/ScalarRepl/2011-06-17-VectorPartialMemset.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/2011-09-22-PHISpeculateInvoke.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/2011-11-11-EmptyStruct.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/AggregatePromote.ll12
-rw-r--r--llvm/test/Transforms/ScalarRepl/DifferingTypes.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/address-space.ll4
-rw-r--r--llvm/test/Transforms/ScalarRepl/arraytest.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/badarray.ll4
-rw-r--r--llvm/test/Transforms/ScalarRepl/basictest.ll4
-rw-r--r--llvm/test/Transforms/ScalarRepl/bitfield-sroa.ll4
-rw-r--r--llvm/test/Transforms/ScalarRepl/copy-aggregate.ll12
-rw-r--r--llvm/test/Transforms/ScalarRepl/crash.ll16
-rw-r--r--llvm/test/Transforms/ScalarRepl/debuginfo-preserved.ll14
-rw-r--r--llvm/test/Transforms/ScalarRepl/inline-vector.ll6
-rw-r--r--llvm/test/Transforms/ScalarRepl/lifetime.ll8
-rw-r--r--llvm/test/Transforms/ScalarRepl/load-store-aggregate.ll6
-rw-r--r--llvm/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/memset-aggregate.ll8
-rw-r--r--llvm/test/Transforms/ScalarRepl/nonzero-first-index.ll8
-rw-r--r--llvm/test/Transforms/ScalarRepl/not-a-vector.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/phi-cycle.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/phi-select.ll16
-rw-r--r--llvm/test/Transforms/ScalarRepl/phinodepromote.ll6
-rw-r--r--llvm/test/Transforms/ScalarRepl/select_promote.ll6
-rw-r--r--llvm/test/Transforms/ScalarRepl/sroa-fca.ll4
-rw-r--r--llvm/test/Transforms/ScalarRepl/sroa_two.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/union-fp-int.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/union-packed.ll2
-rw-r--r--llvm/test/Transforms/ScalarRepl/union-pointer.ll12
-rw-r--r--llvm/test/Transforms/ScalarRepl/vector_memcpy.ll4
-rw-r--r--llvm/test/Transforms/ScalarRepl/vector_promote.ll34
-rw-r--r--llvm/test/Transforms/ScalarRepl/vectors-with-mismatched-elements.ll4
-rw-r--r--llvm/test/Transforms/ScalarRepl/volatile.ll2
-rw-r--r--llvm/test/Transforms/Scalarizer/basic.ll82
-rw-r--r--llvm/test/Transforms/Scalarizer/dbginfo.ll20
-rw-r--r--llvm/test/Transforms/Scalarizer/no-data-layout.ll4
-rw-r--r--llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll32
-rw-r--r--llvm/test/Transforms/SimplifyCFG/2005-06-16-PHICrash.ll4
-rw-r--r--llvm/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll36
-rw-r--r--llvm/test/Transforms/SimplifyCFG/2006-08-03-Crash.ll28
-rw-r--r--llvm/test/Transforms/SimplifyCFG/2006-12-08-Ptr-ICmp-Branch.ll30
-rw-r--r--llvm/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/2008-07-13-InfLoopMiscompile.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/2008-09-08-MultiplePred.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/AArch64/prefer-fma.ll20
-rw-r--r--llvm/test/Transforms/SimplifyCFG/EmptyBlockMerge.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/PR17073.ll6
-rw-r--r--llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll4
-rw-r--r--llvm/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll10
-rw-r--r--llvm/test/Transforms/SimplifyCFG/basictest.ll4
-rw-r--r--llvm/test/Transforms/SimplifyCFG/branch-fold-threshold.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/branch-phi-thread.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/dbginfo.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/hoist-common-code.ll4
-rw-r--r--llvm/test/Transforms/SimplifyCFG/hoist-with-range.ll6
-rw-r--r--llvm/test/Transforms/SimplifyCFG/indirectbr.ll6
-rw-r--r--llvm/test/Transforms/SimplifyCFG/iterative-simplify.ll18
-rw-r--r--llvm/test/Transforms/SimplifyCFG/multiple-phis.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/no_speculative_loads_with_tsan.ll6
-rw-r--r--llvm/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll8
-rw-r--r--llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/speculate-store.ll16
-rw-r--r--llvm/test/Transforms/SimplifyCFG/speculate-with-offset.ll8
-rw-r--r--llvm/test/Transforms/SimplifyCFG/switch-to-select-multiple-edge-per-block-phi.ll4
-rw-r--r--llvm/test/Transforms/SimplifyCFG/switch_create.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll2
-rw-r--r--llvm/test/Transforms/SimplifyCFG/unreachable-blocks.ll2
-rw-r--r--llvm/test/Transforms/Sink/basic.ll12
-rw-r--r--llvm/test/Transforms/StripSymbols/strip-dead-debug-info.ll2
-rw-r--r--llvm/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll8
-rw-r--r--llvm/test/Transforms/TailCallElim/basic.ll2
-rw-r--r--llvm/test/Transforms/TailCallElim/dont_reorder_load.ll6
-rw-r--r--llvm/test/Transforms/TailCallElim/reorder_load.ll20
925 files changed, 5340 insertions, 5340 deletions
diff --git a/llvm/test/Transforms/ADCE/2002-05-23-ZeroArgPHITest.ll b/llvm/test/Transforms/ADCE/2002-05-23-ZeroArgPHITest.ll
index 8f12bef8d5b..8d1beec8ed3 100644
--- a/llvm/test/Transforms/ADCE/2002-05-23-ZeroArgPHITest.ll
+++ b/llvm/test/Transforms/ADCE/2002-05-23-ZeroArgPHITest.ll
@@ -15,14 +15,14 @@ bb0:
br label %bb1
bb1: ; preds = %bb0
- %reg107 = load %node_t** %nodelist.upgrd.1 ; <%node_t*> [#uses=2]
+ %reg107 = load %node_t*, %node_t** %nodelist.upgrd.1 ; <%node_t*> [#uses=2]
%cond211 = icmp eq %node_t* %reg107, null ; <i1> [#uses=1]
br i1 %cond211, label %bb3, label %bb2
bb2: ; preds = %bb2, %bb1
%reg109 = phi %node_t* [ %reg110, %bb2 ], [ %reg107, %bb1 ] ; <%node_t*> [#uses=1]
%reg212 = getelementptr %node_t, %node_t* %reg109, i64 0, i32 1 ; <%node_t**> [#uses=1]
- %reg110 = load %node_t** %reg212 ; <%node_t*> [#uses=2]
+ %reg110 = load %node_t*, %node_t** %reg212 ; <%node_t*> [#uses=2]
%cond213 = icmp ne %node_t* %reg110, null ; <i1> [#uses=1]
br i1 %cond213, label %bb2, label %bb3
diff --git a/llvm/test/Transforms/ADCE/2002-05-28-Crash.ll b/llvm/test/Transforms/ADCE/2002-05-28-Crash.ll
index 359c2504afe..d88580afad0 100644
--- a/llvm/test/Transforms/ADCE/2002-05-28-Crash.ll
+++ b/llvm/test/Transforms/ADCE/2002-05-28-Crash.ll
@@ -15,7 +15,7 @@
define i32 @rx_bitset_empty(i32 %size, i32* %set) {
bb1:
- %reg110 = load i32* %set ; <i32> [#uses=2]
+ %reg110 = load i32, i32* %set ; <i32> [#uses=2]
store i32 1, i32* %set
%cast112 = sext i32 %size to i64 ; <i64> [#uses=1]
%reg113 = add i64 %cast112, 31 ; <i64> [#uses=1]
@@ -26,7 +26,7 @@ bb1:
%reg114-idxcast-offset = add i32 %reg114-idxcast, 1073741823 ; <i32> [#uses=1]
%reg114-idxcast-offset.upgrd.1 = zext i32 %reg114-idxcast-offset to i64 ; <i64> [#uses=1]
%reg124 = getelementptr i32, i32* %set, i64 %reg114-idxcast-offset.upgrd.1 ; <i32*> [#uses=1]
- %reg125 = load i32* %reg124 ; <i32> [#uses=1]
+ %reg125 = load i32, i32* %reg124 ; <i32> [#uses=1]
%cond232 = icmp ne i32 %reg125, 0 ; <i1> [#uses=1]
br i1 %cond232, label %bb3, label %bb2
@@ -39,7 +39,7 @@ bb2: ; preds = %bb2, %bb1
%reg130-idxcast-offset = add i32 %reg130-idxcast, 1073741823 ; <i32> [#uses=1]
%reg130-idxcast-offset.upgrd.2 = zext i32 %reg130-idxcast-offset to i64 ; <i64> [#uses=1]
%reg118 = getelementptr i32, i32* %set, i64 %reg130-idxcast-offset.upgrd.2 ; <i32*> [#uses=1]
- %reg119 = load i32* %reg118 ; <i32> [#uses=1]
+ %reg119 = load i32, i32* %reg118 ; <i32> [#uses=1]
%cond233 = icmp eq i32 %reg119, 0 ; <i1> [#uses=1]
br i1 %cond233, label %bb2, label %bb3
diff --git a/llvm/test/Transforms/ADCE/2002-07-17-AssertionFailure.ll b/llvm/test/Transforms/ADCE/2002-07-17-AssertionFailure.ll
index 8f8dadf7332..ff8bdb3fd3b 100644
--- a/llvm/test/Transforms/ADCE/2002-07-17-AssertionFailure.ll
+++ b/llvm/test/Transforms/ADCE/2002-07-17-AssertionFailure.ll
@@ -7,7 +7,7 @@
define void @foo(i8* %reg5481) {
%cast611 = bitcast i8* %reg5481 to i8** ; <i8**> [#uses=1]
- %reg162 = load i8** %cast611 ; <i8*> [#uses=1]
+ %reg162 = load i8*, i8** %cast611 ; <i8*> [#uses=1]
ptrtoint i8* %reg162 to i32 ; <i32>:1 [#uses=0]
ret void
}
diff --git a/llvm/test/Transforms/ADCE/2002-07-17-PHIAssertion.ll b/llvm/test/Transforms/ADCE/2002-07-17-PHIAssertion.ll
index 2f0df670d6a..1bf79e8ec6c 100644
--- a/llvm/test/Transforms/ADCE/2002-07-17-PHIAssertion.ll
+++ b/llvm/test/Transforms/ADCE/2002-07-17-PHIAssertion.ll
@@ -17,7 +17,7 @@ bb3: ; preds = %bb2
br label %UnifiedExitNode
bb4: ; preds = %bb2
- %reg117 = load i32* @hufts ; <i32> [#uses=2]
+ %reg117 = load i32, i32* @hufts ; <i32> [#uses=2]
%cond241 = icmp ule i32 %reg117, %reg128 ; <i1> [#uses=1]
br i1 %cond241, label %bb6, label %bb5
@@ -29,12 +29,12 @@ bb6: ; preds = %bb5, %bb4
br i1 false, label %bb2, label %bb7
bb7: ; preds = %bb6
- %reg126 = load i32* @bk ; <i32> [#uses=1]
+ %reg126 = load i32, i32* @bk ; <i32> [#uses=1]
%cond247 = icmp ule i32 %reg126, 7 ; <i1> [#uses=1]
br i1 %cond247, label %bb9, label %bb8
bb8: ; preds = %bb8, %bb7
- %reg119 = load i32* @bk ; <i32> [#uses=1]
+ %reg119 = load i32, i32* @bk ; <i32> [#uses=1]
%cond256 = icmp ugt i32 %reg119, 7 ; <i1> [#uses=1]
br i1 %cond256, label %bb8, label %bb9
diff --git a/llvm/test/Transforms/ADCE/2003-06-11-InvalidCFG.ll b/llvm/test/Transforms/ADCE/2003-06-11-InvalidCFG.ll
index 5206b243e37..7c7e238f9d9 100644
--- a/llvm/test/Transforms/ADCE/2003-06-11-InvalidCFG.ll
+++ b/llvm/test/Transforms/ADCE/2003-06-11-InvalidCFG.ll
@@ -16,7 +16,7 @@ then.66: ; preds = %shortcirc_done.12
br label %endif.42
endif.65: ; preds = %endif.42
- %tmp.2846 = load i32** @G ; <i32*> [#uses=1]
+ %tmp.2846 = load i32*, i32** @G ; <i32*> [#uses=1]
br i1 false, label %shortcirc_next.12, label %shortcirc_done.12
shortcirc_next.12: ; preds = %endif.65
diff --git a/llvm/test/Transforms/ADCE/2003-06-24-BadSuccessor.ll b/llvm/test/Transforms/ADCE/2003-06-24-BadSuccessor.ll
index bf3506fb1d2..707e14aad0d 100644
--- a/llvm/test/Transforms/ADCE/2003-06-24-BadSuccessor.ll
+++ b/llvm/test/Transforms/ADCE/2003-06-24-BadSuccessor.ll
@@ -49,7 +49,7 @@ shortcirc_next.4: ; preds = %then.44
no_exit.2: ; preds = %shortcirc_next.4
%tmp.897 = getelementptr i32, i32* %SubArrays.10, i64 0 ; <i32*> [#uses=1]
- %tmp.899 = load i32* %tmp.897 ; <i32> [#uses=1]
+ %tmp.899 = load i32, i32* %tmp.897 ; <i32> [#uses=1]
store i32 %tmp.899, i32* null
ret i32 0
@@ -79,7 +79,7 @@ shortcirc_next.8: ; preds = %shortcirc_next.7
then.53: ; preds = %shortcirc_next.7, %label.17
%SubArrays.8 = phi i32* [ %SubArrays.10, %shortcirc_next.7 ], [ %SubArrays.10, %label.17 ] ; <i32*> [#uses=1]
- %tmp.1023 = load i32* null ; <i32> [#uses=1]
+ %tmp.1023 = load i32, i32* null ; <i32> [#uses=1]
switch i32 %tmp.1023, label %loopentry.1 [
]
diff --git a/llvm/test/Transforms/ADCE/2003-06-24-BasicFunctionality.ll b/llvm/test/Transforms/ADCE/2003-06-24-BasicFunctionality.ll
index 559b652717c..f0de4316436 100644
--- a/llvm/test/Transforms/ADCE/2003-06-24-BasicFunctionality.ll
+++ b/llvm/test/Transforms/ADCE/2003-06-24-BasicFunctionality.ll
@@ -2,7 +2,7 @@
define void @dead_test8(i32* %data.1, i32 %idx.1) {
entry:
- %tmp.1 = load i32* %data.1 ; <i32> [#uses=2]
+ %tmp.1 = load i32, i32* %data.1 ; <i32> [#uses=2]
%tmp.41 = icmp sgt i32 %tmp.1, 0 ; <i1> [#uses=1]
br i1 %tmp.41, label %no_exit.preheader, label %return
@@ -15,7 +15,7 @@ no_exit.preheader: ; preds = %entry
no_exit: ; preds = %endif, %no_exit.preheader
%k.1 = phi i32 [ %k.0, %endif ], [ 0, %no_exit.preheader ] ; <i32> [#uses=3]
%i.0 = phi i32 [ %inc.1, %endif ], [ 0, %no_exit.preheader ] ; <i32> [#uses=1]
- %tmp.12 = load i32* %tmp.11 ; <i32> [#uses=1]
+ %tmp.12 = load i32, i32* %tmp.11 ; <i32> [#uses=1]
%tmp.14 = sub i32 0, %tmp.12 ; <i32> [#uses=1]
%tmp.161 = icmp ne i32 %k.1, %tmp.14 ; <i1> [#uses=1]
br i1 %tmp.161, label %then, label %else
diff --git a/llvm/test/Transforms/ADCE/basictest1.ll b/llvm/test/Transforms/ADCE/basictest1.ll
index 7e2a786e6a7..4d0d386384b 100644
--- a/llvm/test/Transforms/ADCE/basictest1.ll
+++ b/llvm/test/Transforms/ADCE/basictest1.ll
@@ -22,7 +22,7 @@ declare i32 @fwrite(i8*, i32, i32, %FILE*)
declare void @perror(i8*)
define i32 @spec_getc(i32 %fd) {
- %reg109 = load i32* @dbglvl ; <i32> [#uses=1]
+ %reg109 = load i32, i32* @dbglvl ; <i32> [#uses=1]
%cond266 = icmp sle i32 %reg109, 4 ; <i1> [#uses=1]
br i1 %cond266, label %bb3, label %bb2
@@ -46,14 +46,14 @@ bb5: ; preds = %bb3
%reg107-idxcast2 = sext i32 %fd to i64 ; <i64> [#uses=1]
%reg1311 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast2 ; <%spec_fd_t*> [#uses=1]
%idx1 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast1, i32 2 ; <i32*> [#uses=1]
- %reg1321 = load i32* %idx1 ; <i32> [#uses=3]
+ %reg1321 = load i32, i32* %idx1 ; <i32> [#uses=3]
%idx2 = getelementptr %spec_fd_t, %spec_fd_t* %reg1311, i64 0, i32 1 ; <i32*> [#uses=1]
- %reg1331 = load i32* %idx2 ; <i32> [#uses=1]
+ %reg1331 = load i32, i32* %idx2 ; <i32> [#uses=1]
%cond270 = icmp slt i32 %reg1321, %reg1331 ; <i1> [#uses=1]
br i1 %cond270, label %bb9, label %bb6
bb6: ; preds = %bb5
- %reg134 = load i32* @dbglvl ; <i32> [#uses=1]
+ %reg134 = load i32, i32* @dbglvl ; <i32> [#uses=1]
%cond271 = icmp sle i32 %reg134, 4 ; <i1> [#uses=1]
br i1 %cond271, label %bb8, label %bb7
@@ -67,15 +67,15 @@ bb8: ; preds = %bb7, %bb6
bb9: ; preds = %bb5
%reg107-idxcast3 = sext i32 %fd to i64 ; <i64> [#uses=1]
%idx3 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast3, i32 3 ; <i8**> [#uses=1]
- %reg1601 = load i8** %idx3 ; <i8*> [#uses=1]
+ %reg1601 = load i8*, i8** %idx3 ; <i8*> [#uses=1]
%reg132-idxcast1 = sext i32 %reg1321 to i64 ; <i64> [#uses=1]
%idx4 = getelementptr i8, i8* %reg1601, i64 %reg132-idxcast1 ; <i8*> [#uses=1]
- %reg1621 = load i8* %idx4 ; <i8> [#uses=2]
+ %reg1621 = load i8, i8* %idx4 ; <i8> [#uses=2]
%cast108 = zext i8 %reg1621 to i64 ; <i64> [#uses=0]
%reg157 = add i32 %reg1321, 1 ; <i32> [#uses=1]
%idx5 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast1, i32 2 ; <i32*> [#uses=1]
store i32 %reg157, i32* %idx5
- %reg163 = load i32* @dbglvl ; <i32> [#uses=1]
+ %reg163 = load i32, i32* @dbglvl ; <i32> [#uses=1]
%cond272 = icmp sle i32 %reg163, 4 ; <i1> [#uses=1]
br i1 %cond272, label %bb11, label %bb10
diff --git a/llvm/test/Transforms/ADCE/basictest2.ll b/llvm/test/Transforms/ADCE/basictest2.ll
index a2d5e738751..26b2e85cccb 100644
--- a/llvm/test/Transforms/ADCE/basictest2.ll
+++ b/llvm/test/Transforms/ADCE/basictest2.ll
@@ -22,7 +22,7 @@ declare i32 @fwrite(i8*, i32, i32, %FILE*)
declare void @perror(i8*)
define i32 @spec_getc(i32 %fd) {
- %reg109 = load i32* @dbglvl ; <i32> [#uses=1]
+ %reg109 = load i32, i32* @dbglvl ; <i32> [#uses=1]
%cond266 = icmp sle i32 %reg109, 4 ; <i1> [#uses=1]
br i1 %cond266, label %bb3, label %bb2
@@ -46,14 +46,14 @@ bb5: ; preds = %bb3
%reg107-idxcast2 = sext i32 %fd to i64 ; <i64> [#uses=1]
%reg1311 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast2 ; <%spec_fd_t*> [#uses=1]
%idx1 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast1, i32 2 ; <i32*> [#uses=1]
- %reg1321 = load i32* %idx1 ; <i32> [#uses=3]
+ %reg1321 = load i32, i32* %idx1 ; <i32> [#uses=3]
%idx2 = getelementptr %spec_fd_t, %spec_fd_t* %reg1311, i64 0, i32 1 ; <i32*> [#uses=1]
- %reg1331 = load i32* %idx2 ; <i32> [#uses=1]
+ %reg1331 = load i32, i32* %idx2 ; <i32> [#uses=1]
%cond270 = icmp slt i32 %reg1321, %reg1331 ; <i1> [#uses=1]
br i1 %cond270, label %bb9, label %bb6
bb6: ; preds = %bb5
- %reg134 = load i32* @dbglvl ; <i32> [#uses=1]
+ %reg134 = load i32, i32* @dbglvl ; <i32> [#uses=1]
%cond271 = icmp sle i32 %reg134, 4 ; <i1> [#uses=1]
br i1 %cond271, label %bb8, label %bb7
@@ -67,15 +67,15 @@ bb8: ; preds = %bb7, %bb6
bb9: ; preds = %bb5
%reg107-idxcast3 = sext i32 %fd to i64 ; <i64> [#uses=1]
%idx3 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast3, i32 3 ; <i8**> [#uses=1]
- %reg1601 = load i8** %idx3 ; <i8*> [#uses=1]
+ %reg1601 = load i8*, i8** %idx3 ; <i8*> [#uses=1]
%reg132-idxcast1 = sext i32 %reg1321 to i64 ; <i64> [#uses=1]
%idx4 = getelementptr i8, i8* %reg1601, i64 %reg132-idxcast1 ; <i8*> [#uses=1]
- %reg1621 = load i8* %idx4 ; <i8> [#uses=2]
+ %reg1621 = load i8, i8* %idx4 ; <i8> [#uses=2]
%cast108 = zext i8 %reg1621 to i64 ; <i64> [#uses=0]
%reg157 = add i32 %reg1321, 1 ; <i32> [#uses=1]
%idx5 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast1, i32 2 ; <i32*> [#uses=1]
store i32 %reg157, i32* %idx5
- %reg163 = load i32* @dbglvl ; <i32> [#uses=1]
+ %reg163 = load i32, i32* @dbglvl ; <i32> [#uses=1]
%cond272 = icmp sle i32 %reg163, 4 ; <i1> [#uses=1]
br i1 %cond272, label %bb11, label %bb10
diff --git a/llvm/test/Transforms/AddDiscriminators/basic.ll b/llvm/test/Transforms/AddDiscriminators/basic.ll
index 7c8b3d3a7ce..464e7e7a13d 100644
--- a/llvm/test/Transforms/AddDiscriminators/basic.ll
+++ b/llvm/test/Transforms/AddDiscriminators/basic.ll
@@ -16,13 +16,13 @@ entry:
%i.addr = alloca i32, align 4
%x = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
- %0 = load i32* %i.addr, align 4, !dbg !10
+ %0 = load i32, i32* %i.addr, align 4, !dbg !10
%cmp = icmp slt i32 %0, 10, !dbg !10
br i1 %cmp, label %if.then, label %if.end, !dbg !10
if.then: ; preds = %entry
- %1 = load i32* %i.addr, align 4, !dbg !10
-; CHECK: %1 = load i32* %i.addr, align 4, !dbg !12
+ %1 = load i32, i32* %i.addr, align 4, !dbg !10
+; CHECK: %1 = load i32, i32* %i.addr, align 4, !dbg !12
store i32 %1, i32* %x, align 4, !dbg !10
; CHECK: store i32 %1, i32* %x, align 4, !dbg !12
diff --git a/llvm/test/Transforms/AddDiscriminators/first-only.ll b/llvm/test/Transforms/AddDiscriminators/first-only.ll
index 153cfc8a44c..f0fff8adff6 100644
--- a/llvm/test/Transforms/AddDiscriminators/first-only.ll
+++ b/llvm/test/Transforms/AddDiscriminators/first-only.ll
@@ -19,16 +19,16 @@ entry:
%x = alloca i32, align 4
%y = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
- %0 = load i32* %i.addr, align 4, !dbg !10
+ %0 = load i32, i32* %i.addr, align 4, !dbg !10
%cmp = icmp slt i32 %0, 10, !dbg !10
br i1 %cmp, label %if.then, label %if.end, !dbg !10
if.then: ; preds = %entry
- %1 = load i32* %i.addr, align 4, !dbg !12
+ %1 = load i32, i32* %i.addr, align 4, !dbg !12
store i32 %1, i32* %x, align 4, !dbg !12
- %2 = load i32* %i.addr, align 4, !dbg !14
-; CHECK: %2 = load i32* %i.addr, align 4, !dbg !15
+ %2 = load i32, i32* %i.addr, align 4, !dbg !14
+; CHECK: %2 = load i32, i32* %i.addr, align 4, !dbg !15
%sub = sub nsw i32 0, %2, !dbg !14
; CHECK: %sub = sub nsw i32 0, %2, !dbg !15
diff --git a/llvm/test/Transforms/AddDiscriminators/multiple.ll b/llvm/test/Transforms/AddDiscriminators/multiple.ll
index 5e552a87bbb..464318806cc 100644
--- a/llvm/test/Transforms/AddDiscriminators/multiple.ll
+++ b/llvm/test/Transforms/AddDiscriminators/multiple.ll
@@ -15,13 +15,13 @@ entry:
%i.addr = alloca i32, align 4
%x = alloca i32, align 4
store i32 %i, i32* %i.addr, align 4
- %0 = load i32* %i.addr, align 4, !dbg !10
+ %0 = load i32, i32* %i.addr, align 4, !dbg !10
%cmp = icmp slt i32 %0, 10, !dbg !10
br i1 %cmp, label %if.then, label %if.else, !dbg !10
if.then: ; preds = %entry
- %1 = load i32* %i.addr, align 4, !dbg !10
-; CHECK: %1 = load i32* %i.addr, align 4, !dbg !12
+ %1 = load i32, i32* %i.addr, align 4, !dbg !10
+; CHECK: %1 = load i32, i32* %i.addr, align 4, !dbg !12
store i32 %1, i32* %x, align 4, !dbg !10
; CHECK: store i32 %1, i32* %x, align 4, !dbg !12
@@ -30,8 +30,8 @@ if.then: ; preds = %entry
; CHECK: br label %if.end, !dbg !12
if.else: ; preds = %entry
- %2 = load i32* %i.addr, align 4, !dbg !10
-; CHECK: %2 = load i32* %i.addr, align 4, !dbg !14
+ %2 = load i32, i32* %i.addr, align 4, !dbg !10
+; CHECK: %2 = load i32, i32* %i.addr, align 4, !dbg !14
%sub = sub nsw i32 0, %2, !dbg !10
; CHECK: %sub = sub nsw i32 0, %2, !dbg !14
diff --git a/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll b/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll
index dd7faf0a71e..0010498c153 100644
--- a/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll
+++ b/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll
@@ -18,8 +18,8 @@ entry:
%i.addr = alloca i64, align 8
store i64 %i, i64* %i.addr, align 8
call void @llvm.dbg.declare(metadata i64* %i.addr, metadata !13, metadata !{}), !dbg !14
- %0 = load i64* %i.addr, align 8, !dbg !15
-; CHECK: %0 = load i64* %i.addr, align 8, !dbg !15
+ %0 = load i64, i64* %i.addr, align 8, !dbg !15
+; CHECK: %0 = load i64, i64* %i.addr, align 8, !dbg !15
%cmp = icmp slt i64 %0, 5, !dbg !15
; CHECK: %cmp = icmp slt i64 %0, 5, !dbg !15
br i1 %cmp, label %if.then, label %if.else, !dbg !15
@@ -34,7 +34,7 @@ if.else: ; preds = %entry
br label %return, !dbg !15
return: ; preds = %if.else, %if.then
- %1 = load i32* %retval, !dbg !17
+ %1 = load i32, i32* %retval, !dbg !17
ret i32 %1, !dbg !17
}
diff --git a/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll b/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll
index 887126ddb3e..851e6dc3ccc 100644
--- a/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll
+++ b/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll
@@ -7,11 +7,11 @@ entry:
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
ret i32 %0
; CHECK-LABEL: @foo
-; CHECK: load i32* {{[^,]+}}, align 32
+; CHECK: load i32, i32* {{[^,]+}}, align 32
; CHECK: ret i32
}
@@ -23,11 +23,11 @@ entry:
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
%arrayidx = getelementptr inbounds i32, i32* %a, i64 2
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
ret i32 %0
; CHECK-LABEL: @foo2
-; CHECK: load i32* {{[^,]+}}, align 16
+; CHECK: load i32, i32* {{[^,]+}}, align 16
; CHECK: ret i32
}
@@ -39,11 +39,11 @@ entry:
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
%arrayidx = getelementptr inbounds i32, i32* %a, i64 -1
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
ret i32 %0
; CHECK-LABEL: @foo2a
-; CHECK: load i32* {{[^,]+}}, align 32
+; CHECK: load i32, i32* {{[^,]+}}, align 32
; CHECK: ret i32
}
@@ -53,11 +53,11 @@ entry:
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
ret i32 %0
; CHECK-LABEL: @goo
-; CHECK: load i32* {{[^,]+}}, align 32
+; CHECK: load i32, i32* {{[^,]+}}, align 32
; CHECK: ret i32
}
@@ -73,7 +73,7 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 8
%1 = trunc i64 %indvars.iv.next to i32
@@ -85,7 +85,7 @@ for.end: ; preds = %for.body
ret i32 %add.lcssa
; CHECK-LABEL: @hoo
-; CHECK: load i32* %arrayidx, align 32
+; CHECK: load i32, i32* %arrayidx, align 32
; CHECK: ret i32 %add.lcssa
}
@@ -101,7 +101,7 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 4, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 8
%1 = trunc i64 %indvars.iv.next to i32
@@ -113,7 +113,7 @@ for.end: ; preds = %for.body
ret i32 %add.lcssa
; CHECK-LABEL: @joo
-; CHECK: load i32* %arrayidx, align 16
+; CHECK: load i32, i32* %arrayidx, align 16
; CHECK: ret i32 %add.lcssa
}
@@ -129,7 +129,7 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 4
%1 = trunc i64 %indvars.iv.next to i32
@@ -141,7 +141,7 @@ for.end: ; preds = %for.body
ret i32 %add.lcssa
; CHECK-LABEL: @koo
-; CHECK: load i32* %arrayidx, align 16
+; CHECK: load i32, i32* %arrayidx, align 16
; CHECK: ret i32 %add.lcssa
}
@@ -157,7 +157,7 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ -4, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 4
%1 = trunc i64 %indvars.iv.next to i32
@@ -169,7 +169,7 @@ for.end: ; preds = %for.body
ret i32 %add.lcssa
; CHECK-LABEL: @koo2
-; CHECK: load i32* %arrayidx, align 16
+; CHECK: load i32, i32* %arrayidx, align 16
; CHECK: ret i32 %add.lcssa
}
diff --git a/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll b/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll
index 7a0a6b601f1..2edc2e95f41 100644
--- a/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll
+++ b/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll
@@ -7,11 +7,11 @@ entry:
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
ret i32 %0
; CHECK-LABEL: @foo
-; CHECK: load i32* {{[^,]+}}, align 32
+; CHECK: load i32, i32* {{[^,]+}}, align 32
; CHECK: ret i32
}
@@ -23,11 +23,11 @@ entry:
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
%arrayidx = getelementptr inbounds i32, i32* %a, i64 2
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
ret i32 %0
; CHECK-LABEL: @foo2
-; CHECK: load i32* {{[^,]+}}, align 16
+; CHECK: load i32, i32* {{[^,]+}}, align 16
; CHECK: ret i32
}
@@ -39,11 +39,11 @@ entry:
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
%arrayidx = getelementptr inbounds i32, i32* %a, i64 -1
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
ret i32 %0
; CHECK-LABEL: @foo2a
-; CHECK: load i32* {{[^,]+}}, align 32
+; CHECK: load i32, i32* {{[^,]+}}, align 32
; CHECK: ret i32
}
@@ -53,11 +53,11 @@ entry:
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
ret i32 %0
; CHECK-LABEL: @goo
-; CHECK: load i32* {{[^,]+}}, align 32
+; CHECK: load i32, i32* {{[^,]+}}, align 32
; CHECK: ret i32
}
@@ -73,7 +73,7 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 8
%1 = trunc i64 %indvars.iv.next to i32
@@ -85,7 +85,7 @@ for.end: ; preds = %for.body
ret i32 %add.lcssa
; CHECK-LABEL: @hoo
-; CHECK: load i32* %arrayidx, align 32
+; CHECK: load i32, i32* %arrayidx, align 32
; CHECK: ret i32 %add.lcssa
}
@@ -101,7 +101,7 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 4, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 8
%1 = trunc i64 %indvars.iv.next to i32
@@ -113,7 +113,7 @@ for.end: ; preds = %for.body
ret i32 %add.lcssa
; CHECK-LABEL: @joo
-; CHECK: load i32* %arrayidx, align 16
+; CHECK: load i32, i32* %arrayidx, align 16
; CHECK: ret i32 %add.lcssa
}
@@ -129,7 +129,7 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 4
%1 = trunc i64 %indvars.iv.next to i32
@@ -141,7 +141,7 @@ for.end: ; preds = %for.body
ret i32 %add.lcssa
; CHECK-LABEL: @koo
-; CHECK: load i32* %arrayidx, align 16
+; CHECK: load i32, i32* %arrayidx, align 16
; CHECK: ret i32 %add.lcssa
}
@@ -157,7 +157,7 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ -4, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 4
%1 = trunc i64 %indvars.iv.next to i32
@@ -169,7 +169,7 @@ for.end: ; preds = %for.body
ret i32 %add.lcssa
; CHECK-LABEL: @koo2
-; CHECK: load i32* %arrayidx, align 16
+; CHECK: load i32, i32* %arrayidx, align 16
; CHECK: ret i32 %add.lcssa
}
diff --git a/llvm/test/Transforms/AlignmentFromAssumptions/start-unk.ll b/llvm/test/Transforms/AlignmentFromAssumptions/start-unk.ll
index 754deecb707..99533cf6ccb 100644
--- a/llvm/test/Transforms/AlignmentFromAssumptions/start-unk.ll
+++ b/llvm/test/Transforms/AlignmentFromAssumptions/start-unk.ll
@@ -79,7 +79,7 @@ if.then126: ; preds = %if.end123
%maskcond.i.i187 = icmp eq i64 %maskedptr.i.i186, 0
tail call void @llvm.assume(i1 %maskcond.i.i187) #0
%ret.0..sroa_cast.i.i188 = bitcast %type1* undef to i32*
- %ret.0.copyload.i.i189 = load i32* %ret.0..sroa_cast.i.i188, align 2
+ %ret.0.copyload.i.i189 = load i32, i32* %ret.0..sroa_cast.i.i188, align 2
; CHECK: load {{.*}} align 2
@@ -117,7 +117,7 @@ for.body137.if.end146_crit_edge: ; preds = %for.body137
br i1 undef, label %cond.false.i70, label %cond.end.i
if.then140: ; preds = %for.body137
- %ret.0.copyload.i.i102 = load i32* %ret.0..sroa_cast.i.i106, align 2
+ %ret.0.copyload.i.i102 = load i32, i32* %ret.0..sroa_cast.i.i106, align 2
; CHECK: load {{.*}} align 2
diff --git a/llvm/test/Transforms/ArgumentPromotion/2008-02-01-ReturnAttrs.ll b/llvm/test/Transforms/ArgumentPromotion/2008-02-01-ReturnAttrs.ll
index 1226b98a998..c988774da8a 100644
--- a/llvm/test/Transforms/ArgumentPromotion/2008-02-01-ReturnAttrs.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/2008-02-01-ReturnAttrs.ll
@@ -3,7 +3,7 @@
; CHECK: define internal i32 @deref(i32 %x.val) #0 {
define internal i32 @deref(i32* %x) nounwind {
entry:
- %tmp2 = load i32* %x, align 4
+ %tmp2 = load i32, i32* %x, align 4
ret i32 %tmp2
}
diff --git a/llvm/test/Transforms/ArgumentPromotion/2008-07-02-array-indexing.ll b/llvm/test/Transforms/ArgumentPromotion/2008-07-02-array-indexing.ll
index 08a1b805cf6..267a6c04597 100644
--- a/llvm/test/Transforms/ArgumentPromotion/2008-07-02-array-indexing.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/2008-07-02-array-indexing.ll
@@ -7,14 +7,14 @@
define internal i32 @callee(i1 %C, i32* %A) {
entry:
; Unconditonally load the element at %A
- %A.0 = load i32* %A
+ %A.0 = load i32, i32* %A
br i1 %C, label %T, label %F
T:
ret i32 %A.0
F:
; Load the element at offset two from %A. This should not be promoted!
%A.2 = getelementptr i32, i32* %A, i32 2
- %R = load i32* %A.2
+ %R = load i32, i32* %A.2
ret i32 %R
}
diff --git a/llvm/test/Transforms/ArgumentPromotion/aggregate-promote.ll b/llvm/test/Transforms/ArgumentPromotion/aggregate-promote.ll
index 23ea2e0cf60..3f521bace7f 100644
--- a/llvm/test/Transforms/ArgumentPromotion/aggregate-promote.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/aggregate-promote.ll
@@ -11,8 +11,8 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
define internal i32 @test(%QuadTy* %P) {
%A = getelementptr %QuadTy, %QuadTy* %P, i64 0, i32 3 ; <i32*> [#uses=1]
%B = getelementptr %QuadTy, %QuadTy* %P, i64 0, i32 2 ; <i32*> [#uses=1]
- %a = load i32* %A ; <i32> [#uses=1]
- %b = load i32* %B ; <i32> [#uses=1]
+ %a = load i32, i32* %A ; <i32> [#uses=1]
+ %b = load i32, i32* %B ; <i32> [#uses=1]
%V = add i32 %a, %b ; <i32> [#uses=1]
ret i32 %V
}
diff --git a/llvm/test/Transforms/ArgumentPromotion/attrs.ll b/llvm/test/Transforms/ArgumentPromotion/attrs.ll
index 6213645bae1..46128f93c24 100644
--- a/llvm/test/Transforms/ArgumentPromotion/attrs.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/attrs.ll
@@ -5,7 +5,7 @@
define internal void @f(%struct.ss* byval %b, i32* byval %X, i32 %i) nounwind {
entry:
%tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0
- %tmp1 = load i32* %tmp, align 4
+ %tmp1 = load i32, i32* %tmp, align 4
%tmp2 = add i32 %tmp1, 1
store i32 %tmp2, i32* %tmp, align 4
diff --git a/llvm/test/Transforms/ArgumentPromotion/basictest.ll b/llvm/test/Transforms/ArgumentPromotion/basictest.ll
index 8f78b98437b..89888bbc98a 100644
--- a/llvm/test/Transforms/ArgumentPromotion/basictest.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/basictest.ll
@@ -3,8 +3,8 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
define internal i32 @test(i32* %X, i32* %Y) {
; CHECK-LABEL: define internal i32 @test(i32 %X.val, i32 %Y.val)
- %A = load i32* %X
- %B = load i32* %Y
+ %A = load i32, i32* %X
+ %B = load i32, i32* %Y
%C = add i32 %A, %B
ret i32 %C
}
diff --git a/llvm/test/Transforms/ArgumentPromotion/byval-2.ll b/llvm/test/Transforms/ArgumentPromotion/byval-2.ll
index 17845b8eda6..6c0288f5f98 100644
--- a/llvm/test/Transforms/ArgumentPromotion/byval-2.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/byval-2.ll
@@ -9,7 +9,7 @@ define internal void @f(%struct.ss* byval %b, i32* byval %X) nounwind {
; CHECK-LABEL: define internal void @f(i32 %b.0, i64 %b.1, i32* byval %X)
entry:
%tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0
- %tmp1 = load i32* %tmp, align 4
+ %tmp1 = load i32, i32* %tmp, align 4
%tmp2 = add i32 %tmp1, 1
store i32 %tmp2, i32* %tmp, align 4
diff --git a/llvm/test/Transforms/ArgumentPromotion/byval.ll b/llvm/test/Transforms/ArgumentPromotion/byval.ll
index f99050b2d7f..b091b09a359 100644
--- a/llvm/test/Transforms/ArgumentPromotion/byval.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/byval.ll
@@ -8,7 +8,7 @@ define internal void @f(%struct.ss* byval %b) nounwind {
; CHECK-LABEL: define internal void @f(i32 %b.0, i64 %b.1)
entry:
%tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
- %tmp1 = load i32* %tmp, align 4 ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
%tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
store i32 %tmp2, i32* %tmp, align 4
ret void
diff --git a/llvm/test/Transforms/ArgumentPromotion/chained.ll b/llvm/test/Transforms/ArgumentPromotion/chained.ll
index c9a453899d7..6ba2e8d4869 100644
--- a/llvm/test/Transforms/ArgumentPromotion/chained.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/chained.ll
@@ -5,8 +5,8 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
@G2 = constant i32* @G1 ; <i32**> [#uses=1]
define internal i32 @test(i32** %X) {
- %Y = load i32** %X ; <i32*> [#uses=1]
- %X.upgrd.1 = load i32* %Y ; <i32> [#uses=1]
+ %Y = load i32*, i32** %X ; <i32*> [#uses=1]
+ %X.upgrd.1 = load i32, i32* %Y ; <i32> [#uses=1]
ret i32 %X.upgrd.1
}
diff --git a/llvm/test/Transforms/ArgumentPromotion/control-flow.ll b/llvm/test/Transforms/ArgumentPromotion/control-flow.ll
index e4a61da45cf..cdff36eb83c 100644
--- a/llvm/test/Transforms/ArgumentPromotion/control-flow.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/control-flow.ll
@@ -8,7 +8,7 @@ T: ; preds = %0
ret i32 17
F: ; preds = %0
- %X = load i32* %P ; <i32> [#uses=1]
+ %X = load i32, i32* %P ; <i32> [#uses=1]
ret i32 %X
}
diff --git a/llvm/test/Transforms/ArgumentPromotion/control-flow2.ll b/llvm/test/Transforms/ArgumentPromotion/control-flow2.ll
index db63584a610..7413f46a860 100644
--- a/llvm/test/Transforms/ArgumentPromotion/control-flow2.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/control-flow2.ll
@@ -1,6 +1,6 @@
; RUN: opt < %s -argpromotion -S | FileCheck %s
-; CHECK: load i32* %A
+; CHECK: load i32, i32* %A
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
define internal i32 @callee(i1 %C, i32* %P) {
@@ -10,7 +10,7 @@ T: ; preds = %0
ret i32 17
F: ; preds = %0
- %X = load i32* %P ; <i32> [#uses=1]
+ %X = load i32, i32* %P ; <i32> [#uses=1]
ret i32 %X
}
diff --git a/llvm/test/Transforms/ArgumentPromotion/crash.ll b/llvm/test/Transforms/ArgumentPromotion/crash.ll
index 353d318dd70..dbd343ae920 100644
--- a/llvm/test/Transforms/ArgumentPromotion/crash.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/crash.ll
@@ -51,9 +51,9 @@ init:
define internal i32 @"clay_assign(Chain, Chain)"(%0* %c, %0* %d) {
init:
%0 = getelementptr %0, %0* %d, i32 0, i32 0
- %1 = load %0** %0
+ %1 = load %0*, %0** %0
%2 = getelementptr %0, %0* %c, i32 0, i32 0
- %3 = load %0** %2
+ %3 = load %0*, %0** %2
%4 = call i32 @"clay_assign(Chain, Chain)"(%0* %3, %0* %1)
ret i32 0
}
diff --git a/llvm/test/Transforms/ArgumentPromotion/dbg.ll b/llvm/test/Transforms/ArgumentPromotion/dbg.ll
index 65cf3678bd9..79d4b167ab8 100644
--- a/llvm/test/Transforms/ArgumentPromotion/dbg.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/dbg.ll
@@ -5,8 +5,8 @@
declare void @sink(i32)
define internal void @test(i32** %X) {
- %1 = load i32** %X, align 8
- %2 = load i32* %1, align 8
+ %1 = load i32*, i32** %X, align 8
+ %2 = load i32, i32* %1, align 8
call void @sink(i32 %2)
ret void
}
diff --git a/llvm/test/Transforms/ArgumentPromotion/fp80.ll b/llvm/test/Transforms/ArgumentPromotion/fp80.ll
index e08c3f78238..84ef603de82 100644
--- a/llvm/test/Transforms/ArgumentPromotion/fp80.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/fp80.ll
@@ -25,21 +25,21 @@ define internal i8 @UseLongDoubleUnsafely(%union.u* byval align 16 %arg) {
entry:
%bitcast = bitcast %union.u* %arg to %struct.s*
%gep = getelementptr inbounds %struct.s, %struct.s* %bitcast, i64 0, i32 2
- %result = load i8* %gep
+ %result = load i8, i8* %gep
ret i8 %result
}
; CHECK: internal x86_fp80 @UseLongDoubleSafely(x86_fp80 {{%.*}}) {
define internal x86_fp80 @UseLongDoubleSafely(%union.u* byval align 16 %arg) {
%gep = getelementptr inbounds %union.u, %union.u* %arg, i64 0, i32 0
- %fp80 = load x86_fp80* %gep
+ %fp80 = load x86_fp80, x86_fp80* %gep
ret x86_fp80 %fp80
}
; CHECK: define internal i64 @AccessPaddingOfStruct(%struct.Foo* byval %a) {
define internal i64 @AccessPaddingOfStruct(%struct.Foo* byval %a) {
%p = bitcast %struct.Foo* %a to i64*
- %v = load i64* %p
+ %v = load i64, i64* %p
ret i64 %v
}
diff --git a/llvm/test/Transforms/ArgumentPromotion/inalloca.ll b/llvm/test/Transforms/ArgumentPromotion/inalloca.ll
index 5160994eefc..80bd6fdbc40 100644
--- a/llvm/test/Transforms/ArgumentPromotion/inalloca.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/inalloca.ll
@@ -9,8 +9,8 @@ define internal i32 @f(%struct.ss* inalloca %s) {
entry:
%f0 = getelementptr %struct.ss, %struct.ss* %s, i32 0, i32 0
%f1 = getelementptr %struct.ss, %struct.ss* %s, i32 0, i32 1
- %a = load i32* %f0, align 4
- %b = load i32* %f1, align 4
+ %a = load i32, i32* %f0, align 4
+ %b = load i32, i32* %f1, align 4
%r = add i32 %a, %b
ret i32 %r
}
diff --git a/llvm/test/Transforms/ArgumentPromotion/reserve-tbaa.ll b/llvm/test/Transforms/ArgumentPromotion/reserve-tbaa.ll
index db9d70d337a..3c8ed79eeb2 100644
--- a/llvm/test/Transforms/ArgumentPromotion/reserve-tbaa.ll
+++ b/llvm/test/Transforms/ArgumentPromotion/reserve-tbaa.ll
@@ -14,9 +14,9 @@
define internal fastcc void @fn(i32* nocapture readonly %p1, i64* nocapture readonly %p2) {
entry:
- %0 = load i64* %p2, align 8, !tbaa !1
+ %0 = load i64, i64* %p2, align 8, !tbaa !1
%conv = trunc i64 %0 to i32
- %1 = load i32* %p1, align 4, !tbaa !5
+ %1 = load i32, i32* %p1, align 4, !tbaa !5
%conv1 = trunc i32 %1 to i8
store i8 %conv1, i8* @d, align 1, !tbaa !7
ret void
@@ -26,11 +26,11 @@ define i32 @main() {
entry:
; CHECK-LABEL: main
; CHECK: store i32 1, i32* %{{.*}}, align 4, !tbaa ![[I32:[0-9]+]]
-; CHECK: %g.val = load i32* @g, align 4, !tbaa ![[I32]]
-; CHECK: %c.val = load i64* @c, align 8, !tbaa ![[LONG:[0-9]+]]
- %0 = load i32*** @e, align 8, !tbaa !8
+; CHECK: %g.val = load i32, i32* @g, align 4, !tbaa ![[I32]]
+; CHECK: %c.val = load i64, i64* @c, align 8, !tbaa ![[LONG:[0-9]+]]
+ %0 = load i32**, i32*** @e, align 8, !tbaa !8
store i32* @g, i32** %0, align 8, !tbaa !8
- %1 = load i32** @a, align 8, !tbaa !8
+ %1 = load i32*, i32** @a, align 8, !tbaa !8
store i32 1, i32* %1, align 4, !tbaa !5
call fastcc void @fn(i32* @g, i64* @c)
diff --git a/llvm/test/Transforms/BBVectorize/X86/loop1.ll b/llvm/test/Transforms/BBVectorize/X86/loop1.ll
index 34dc19f3a7e..c3c30454ce3 100644
--- a/llvm/test/Transforms/BBVectorize/X86/loop1.ll
+++ b/llvm/test/Transforms/BBVectorize/X86/loop1.ll
@@ -13,9 +13,9 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
- %1 = load double* %arrayidx2, align 8
+ %1 = load double, double* %arrayidx2, align 8
%mul = fmul double %0, %0
%mul3 = fmul double %0, %1
%add = fadd double %mul, %mul3
diff --git a/llvm/test/Transforms/BBVectorize/X86/sh-rec2.ll b/llvm/test/Transforms/BBVectorize/X86/sh-rec2.ll
index 29b33a5cc3b..d7a004c2138 100644
--- a/llvm/test/Transforms/BBVectorize/X86/sh-rec2.ll
+++ b/llvm/test/Transforms/BBVectorize/X86/sh-rec2.ll
@@ -13,13 +13,13 @@ entry:
%incdec.ptr157 = getelementptr inbounds i8, i8* %c, i64 11
store i8 0, i8* %incdec.ptr136, align 1
%arrayidx162 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 11
- %0 = load i16* %arrayidx162, align 2
+ %0 = load i16, i16* %arrayidx162, align 2
%conv1631 = trunc i16 %0 to i8
%and164 = shl i8 %conv1631, 3
%shl165 = and i8 %and164, 56
%incdec.ptr172 = getelementptr inbounds i8, i8* %c, i64 12
store i8 %shl165, i8* %incdec.ptr157, align 1
- %1 = load i16* inttoptr (i64 2 to i16*), align 2
+ %1 = load i16, i16* inttoptr (i64 2 to i16*), align 2
%conv1742 = trunc i16 %1 to i8
%and175 = shl i8 %conv1742, 1
%incdec.ptr183 = getelementptr inbounds i8, i8* %c, i64 13
@@ -29,13 +29,13 @@ entry:
%arrayidx214 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 15
%incdec.ptr220 = getelementptr inbounds i8, i8* %c, i64 15
store i8 0, i8* %incdec.ptr199, align 1
- %2 = load i16* %arrayidx214, align 2
+ %2 = load i16, i16* %arrayidx214, align 2
%conv2223 = trunc i16 %2 to i8
%and223 = shl i8 %conv2223, 6
%incdec.ptr235 = getelementptr inbounds i8, i8* %c, i64 16
store i8 %and223, i8* %incdec.ptr220, align 1
%arrayidx240 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 19
- %3 = load i16* %arrayidx240, align 2
+ %3 = load i16, i16* %arrayidx240, align 2
%conv2414 = trunc i16 %3 to i8
%and242 = shl i8 %conv2414, 2
%shl243 = and i8 %and242, 28
@@ -44,7 +44,7 @@ entry:
%incdec.ptr272 = getelementptr inbounds i8, i8* %c, i64 18
store i8 0, i8* %incdec.ptr251, align 1
%arrayidx282 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 25
- %4 = load i16* %arrayidx282, align 2
+ %4 = load i16, i16* %arrayidx282, align 2
%conv2835 = trunc i16 %4 to i8
%and284 = and i8 %conv2835, 7
%incdec.ptr287 = getelementptr inbounds i8, i8* %c, i64 19
@@ -54,14 +54,14 @@ entry:
%incdec.ptr314 = getelementptr inbounds i8, i8* %c, i64 21
store i8 0, i8* %incdec.ptr298, align 1
%arrayidx319 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 26
- %5 = load i16* %arrayidx319, align 4
+ %5 = load i16, i16* %arrayidx319, align 4
%conv3206 = trunc i16 %5 to i8
%and321 = shl i8 %conv3206, 4
%shl322 = and i8 %and321, 112
%incdec.ptr335 = getelementptr inbounds i8, i8* %c, i64 22
store i8 %shl322, i8* %incdec.ptr314, align 1
%arrayidx340 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 29
- %6 = load i16* %arrayidx340, align 2
+ %6 = load i16, i16* %arrayidx340, align 2
%conv3417 = trunc i16 %6 to i8
%and342 = shl i8 %conv3417, 3
%shl343 = and i8 %and342, 56
@@ -72,7 +72,7 @@ entry:
%arrayidx381 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 36
%incdec.ptr387 = getelementptr inbounds i8, i8* %c, i64 25
store i8 0, i8* %incdec.ptr366, align 1
- %7 = load i16* %arrayidx381, align 8
+ %7 = load i16, i16* %arrayidx381, align 8
%conv3898 = trunc i16 %7 to i8
%and390 = shl i8 %conv3898, 6
store i8 %and390, i8* %incdec.ptr387, align 1
diff --git a/llvm/test/Transforms/BBVectorize/X86/sh-rec3.ll b/llvm/test/Transforms/BBVectorize/X86/sh-rec3.ll
index 61df3369414..2096deb08a9 100644
--- a/llvm/test/Transforms/BBVectorize/X86/sh-rec3.ll
+++ b/llvm/test/Transforms/BBVectorize/X86/sh-rec3.ll
@@ -19,7 +19,7 @@ entry:
%arraydecay3 = getelementptr inbounds [4 x i16], [4 x i16]* %Mc, i64 0, i64 0
%arraydecay5 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 0
call void @Gsm_Coder(%struct.gsm_state.2.8.39.44.45.55.56.57.58.59.62.63.64.65.74.75.76.77.80.87.92.93.94.95.96.97.110.111.112.113.114.128.130.135.136.137.138.139.140.141.142.143.144.145.148.149.150.151.152.169.170.177.178.179.184.185.186.187.188.201.208.209.219.220.221.223.224.225.230.231.232.233.235.236.237.238.245.246.248.249.272.274.279.280.281.282.283.286.293.298.299.314.315.316.317.318.319.320.321.322.323.324.325.326.327.328.329.330.331.332.333.334.335.336.337.338.339.340.341.342.343.344.345.346.347.348.349.350.351.352.353.565* %s, i16* %source, i16* %arraydecay, i16* %arraydecay1, i16* %arraydecay2, i16* %arraydecay3, i16* undef, i16* %arraydecay5) nounwind
- %0 = load i64* %LARc28.sub, align 16
+ %0 = load i64, i64* %LARc28.sub, align 16
%1 = trunc i64 %0 to i32
%conv1 = lshr i32 %1, 2
%and = and i32 %conv1, 15
@@ -39,21 +39,21 @@ entry:
%incdec.ptr42 = getelementptr inbounds i8, i8* %c, i64 4
store i8 0, i8* %incdec.ptr26, align 1
%arrayidx52 = getelementptr inbounds [8 x i16], [8 x i16]* %tmpcast, i64 0, i64 7
- %3 = load i16* %arrayidx52, align 2
+ %3 = load i16, i16* %arrayidx52, align 2
%conv537 = trunc i16 %3 to i8
%and54 = and i8 %conv537, 7
%incdec.ptr57 = getelementptr inbounds i8, i8* %c, i64 5
store i8 %and54, i8* %incdec.ptr42, align 1
%incdec.ptr68 = getelementptr inbounds i8, i8* %c, i64 6
store i8 0, i8* %incdec.ptr57, align 1
- %4 = load i16* %arraydecay3, align 2
+ %4 = load i16, i16* %arraydecay3, align 2
%conv748 = trunc i16 %4 to i8
%and75 = shl i8 %conv748, 5
%shl76 = and i8 %and75, 96
%incdec.ptr84 = getelementptr inbounds i8, i8* %c, i64 7
store i8 %shl76, i8* %incdec.ptr68, align 1
%arrayidx94 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 1
- %5 = load i16* %arrayidx94, align 2
+ %5 = load i16, i16* %arrayidx94, align 2
%conv959 = trunc i16 %5 to i8
%and96 = shl i8 %conv959, 1
%shl97 = and i8 %and96, 14
@@ -62,7 +62,7 @@ entry:
store i8 %or103, i8* %incdec.ptr84, align 1
%arrayidx115 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 4
%6 = bitcast i16* %arrayidx115 to i32*
- %7 = load i32* %6, align 8
+ %7 = load i32, i32* %6, align 8
%conv11610 = trunc i32 %7 to i8
%and117 = and i8 %conv11610, 7
%incdec.ptr120 = getelementptr inbounds i8, i8* %c, i64 9
@@ -77,11 +77,11 @@ entry:
%incdec.ptr172 = getelementptr inbounds i8, i8* %c, i64 12
store i8 0, i8* %incdec.ptr157, align 1
%arrayidx173 = getelementptr inbounds [4 x i16], [4 x i16]* %Nc, i64 0, i64 1
- %9 = load i16* %arrayidx173, align 2
+ %9 = load i16, i16* %arrayidx173, align 2
%conv17412 = zext i16 %9 to i32
%and175 = shl nuw nsw i32 %conv17412, 1
%arrayidx177 = getelementptr inbounds [4 x i16], [4 x i16]* %bc, i64 0, i64 1
- %10 = load i16* %arrayidx177, align 2
+ %10 = load i16, i16* %arrayidx177, align 2
%conv17826 = zext i16 %10 to i32
%shr17913 = lshr i32 %conv17826, 1
%and180 = and i32 %shr17913, 1
@@ -90,14 +90,14 @@ entry:
%incdec.ptr183 = getelementptr inbounds i8, i8* %c, i64 13
store i8 %conv182, i8* %incdec.ptr172, align 1
%arrayidx188 = getelementptr inbounds [4 x i16], [4 x i16]* %Mc, i64 0, i64 1
- %11 = load i16* %arrayidx188, align 2
+ %11 = load i16, i16* %arrayidx188, align 2
%conv18914 = trunc i16 %11 to i8
%and190 = shl i8 %conv18914, 5
%shl191 = and i8 %and190, 96
%incdec.ptr199 = getelementptr inbounds i8, i8* %c, i64 14
store i8 %shl191, i8* %incdec.ptr183, align 1
%arrayidx209 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 14
- %12 = load i16* %arrayidx209, align 4
+ %12 = load i16, i16* %arrayidx209, align 4
%conv21015 = trunc i16 %12 to i8
%and211 = shl i8 %conv21015, 1
%shl212 = and i8 %and211, 14
@@ -106,7 +106,7 @@ entry:
store i8 %or218, i8* %incdec.ptr199, align 1
%arrayidx225 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 16
%13 = bitcast i16* %arrayidx225 to i64*
- %14 = load i64* %13, align 16
+ %14 = load i64, i64* %13, align 16
%conv22616 = trunc i64 %14 to i8
%and227 = shl i8 %conv22616, 3
%shl228 = and i8 %and227, 56
@@ -120,17 +120,17 @@ entry:
%arrayidx266 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 23
%incdec.ptr272 = getelementptr inbounds i8, i8* %c, i64 18
store i8 0, i8* %incdec.ptr251, align 1
- %16 = load i16* %arrayidx266, align 2
+ %16 = load i16, i16* %arrayidx266, align 2
%conv27418 = trunc i16 %16 to i8
%and275 = shl i8 %conv27418, 6
%incdec.ptr287 = getelementptr inbounds i8, i8* %c, i64 19
store i8 %and275, i8* %incdec.ptr272, align 1
%arrayidx288 = getelementptr inbounds [4 x i16], [4 x i16]* %Nc, i64 0, i64 2
- %17 = load i16* %arrayidx288, align 2
+ %17 = load i16, i16* %arrayidx288, align 2
%conv28919 = zext i16 %17 to i32
%and290 = shl nuw nsw i32 %conv28919, 1
%arrayidx292 = getelementptr inbounds [4 x i16], [4 x i16]* %bc, i64 0, i64 2
- %18 = load i16* %arrayidx292, align 2
+ %18 = load i16, i16* %arrayidx292, align 2
%conv29327 = zext i16 %18 to i32
%shr29420 = lshr i32 %conv29327, 1
%and295 = and i32 %shr29420, 1
@@ -145,7 +145,7 @@ entry:
%incdec.ptr335 = getelementptr inbounds i8, i8* %c, i64 22
store i8 0, i8* %incdec.ptr314, align 1
%arrayidx340 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 29
- %19 = load i16* %arrayidx340, align 2
+ %19 = load i16, i16* %arrayidx340, align 2
%conv34122 = trunc i16 %19 to i8
%and342 = shl i8 %conv34122, 3
%shl343 = and i8 %and342, 56
@@ -153,7 +153,7 @@ entry:
store i8 %shl343, i8* %incdec.ptr335, align 1
%arrayidx355 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 32
%20 = bitcast i16* %arrayidx355 to i32*
- %21 = load i32* %20, align 16
+ %21 = load i32, i32* %20, align 16
%conv35623 = shl i32 %21, 2
%shl358 = and i32 %conv35623, 28
%22 = lshr i32 %21, 17
diff --git a/llvm/test/Transforms/BBVectorize/X86/simple-ldstr.ll b/llvm/test/Transforms/BBVectorize/X86/simple-ldstr.ll
index 214ab9ff62f..2c05f30d081 100644
--- a/llvm/test/Transforms/BBVectorize/X86/simple-ldstr.ll
+++ b/llvm/test/Transforms/BBVectorize/X86/simple-ldstr.ll
@@ -4,13 +4,13 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; Simple 3-pair chain with loads and stores
define void @test1(double* %a, double* %b, double* %c) nounwind uwtable readonly {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
@@ -19,8 +19,8 @@ entry:
; CHECK-LABEL: @test1(
; CHECK: %i0.v.i0 = bitcast double* %a to <2 x double>*
; CHECK: %i1.v.i0 = bitcast double* %b to <2 x double>*
-; CHECK: %i0 = load <2 x double>* %i0.v.i0, align 8
-; CHECK: %i1 = load <2 x double>* %i1.v.i0, align 8
+; CHECK: %i0 = load <2 x double>, <2 x double>* %i0.v.i0, align 8
+; CHECK: %i1 = load <2 x double>, <2 x double>* %i1.v.i0, align 8
; CHECK: %mul = fmul <2 x double> %i0, %i1
; CHECK: %0 = bitcast double* %c to <2 x double>*
; CHECK: store <2 x double> %mul, <2 x double>* %0, align 8
diff --git a/llvm/test/Transforms/BBVectorize/X86/wr-aliases.ll b/llvm/test/Transforms/BBVectorize/X86/wr-aliases.ll
index 57f8c928f60..56448c0e547 100644
--- a/llvm/test/Transforms/BBVectorize/X86/wr-aliases.ll
+++ b/llvm/test/Transforms/BBVectorize/X86/wr-aliases.ll
@@ -27,7 +27,7 @@ arrayctor.cont.ret.exitStub: ; preds = %arrayctor.cont
; CHECK: <2 x double>
; CHECK: @_ZL12printQBezier7QBezier
; CHECK: store double %mul8.i, double* %x3.i, align 16
-; CHECK: load double* %x3.i, align 16
+; CHECK: load double, double* %x3.i, align 16
; CHECK: ret
arrayctor.cont: ; preds = %newFuncRoot
@@ -58,72 +58,72 @@ arrayctor.cont: ; preds = %newFuncRoot
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %v0, i8* %v3, i64 64, i32 8, i1 false)
call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval align 8 %agg.tmp.i)
%x2.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 2
- %v4 = load double* %x2.i, align 16
+ %v4 = load double, double* %x2.i, align 16
%x3.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 4
- %v5 = load double* %x3.i, align 16
+ %v5 = load double, double* %x3.i, align 16
%add.i = fadd double %v4, %v5
%mul.i = fmul double 5.000000e-01, %add.i
%x1.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 0
- %v6 = load double* %x1.i, align 16
+ %v6 = load double, double* %x1.i, align 16
%add3.i = fadd double %v4, %v6
%mul4.i = fmul double 5.000000e-01, %add3.i
%x25.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 2
store double %mul4.i, double* %x25.i, align 16
- %v7 = load double* %x3.i, align 16
+ %v7 = load double, double* %x3.i, align 16
%x4.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 6
- %v8 = load double* %x4.i, align 16
+ %v8 = load double, double* %x4.i, align 16
%add7.i = fadd double %v7, %v8
%mul8.i = fmul double 5.000000e-01, %add7.i
store double %mul8.i, double* %x3.i, align 16
- %v9 = load double* %x1.i, align 16
+ %v9 = load double, double* %x1.i, align 16
%x111.i = getelementptr inbounds %class.QBezier.15, %class.QBezier.15* %add.ptr, i64 0, i32 0
store double %v9, double* %x111.i, align 16
- %v10 = load double* %x25.i, align 16
+ %v10 = load double, double* %x25.i, align 16
%add15.i = fadd double %mul.i, %v10
%mul16.i = fmul double 5.000000e-01, %add15.i
%x317.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 4
store double %mul16.i, double* %x317.i, align 16
- %v11 = load double* %x3.i, align 16
+ %v11 = load double, double* %x3.i, align 16
%add19.i = fadd double %mul.i, %v11
%mul20.i = fmul double 5.000000e-01, %add19.i
store double %mul20.i, double* %x2.i, align 16
- %v12 = load double* %x317.i, align 16
+ %v12 = load double, double* %x317.i, align 16
%add24.i = fadd double %v12, %mul20.i
%mul25.i = fmul double 5.000000e-01, %add24.i
store double %mul25.i, double* %x1.i, align 16
%x427.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 6
store double %mul25.i, double* %x427.i, align 16
%y2.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 3
- %v13 = load double* %y2.i, align 8
+ %v13 = load double, double* %y2.i, align 8
%y3.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 5
- %v14 = load double* %y3.i, align 8
+ %v14 = load double, double* %y3.i, align 8
%add28.i = fadd double %v13, %v14
%div.i = fmul double 5.000000e-01, %add28.i
%y1.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 1
- %v15 = load double* %y1.i, align 8
+ %v15 = load double, double* %y1.i, align 8
%add30.i = fadd double %v13, %v15
%mul31.i = fmul double 5.000000e-01, %add30.i
%y232.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 3
store double %mul31.i, double* %y232.i, align 8
- %v16 = load double* %y3.i, align 8
+ %v16 = load double, double* %y3.i, align 8
%y4.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 7
- %v17 = load double* %y4.i, align 8
+ %v17 = load double, double* %y4.i, align 8
%add34.i = fadd double %v16, %v17
%mul35.i = fmul double 5.000000e-01, %add34.i
store double %mul35.i, double* %y3.i, align 8
- %v18 = load double* %y1.i, align 8
+ %v18 = load double, double* %y1.i, align 8
%y138.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 1
store double %v18, double* %y138.i, align 8
- %v19 = load double* %y232.i, align 8
+ %v19 = load double, double* %y232.i, align 8
%add42.i = fadd double %div.i, %v19
%mul43.i = fmul double 5.000000e-01, %add42.i
%y344.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 5
store double %mul43.i, double* %y344.i, align 8
- %v20 = load double* %y3.i, align 8
+ %v20 = load double, double* %y3.i, align 8
%add46.i = fadd double %div.i, %v20
%mul47.i = fmul double 5.000000e-01, %add46.i
store double %mul47.i, double* %y2.i, align 8
- %v21 = load double* %y344.i, align 8
+ %v21 = load double, double* %y344.i, align 8
%add51.i = fadd double %v21, %mul47.i
%mul52.i = fmul double 5.000000e-01, %add51.i
store double %mul52.i, double* %y1.i, align 8
diff --git a/llvm/test/Transforms/BBVectorize/func-alias.ll b/llvm/test/Transforms/BBVectorize/func-alias.ll
index cb1ea95a0b3..408edcab077 100644
--- a/llvm/test/Transforms/BBVectorize/func-alias.ll
+++ b/llvm/test/Transforms/BBVectorize/func-alias.ll
@@ -60,82 +60,82 @@ codeRepl80.exitStub: ; preds = %"<bb 34>"
%tmp134 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
%tmp135 = getelementptr inbounds %struct.__st_parameter_common, %struct.__st_parameter_common* %tmp134, i32 0, i32 0
store i32 4096, i32* %tmp135, align 4
- %iounit.8748_288 = load i32* @__main1_MOD_iounit, align 4
+ %iounit.8748_288 = load i32, i32* @__main1_MOD_iounit, align 4
%tmp136 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
%tmp137 = getelementptr inbounds %struct.__st_parameter_common, %struct.__st_parameter_common* %tmp136, i32 0, i32 1
store i32 %iounit.8748_288, i32* %tmp137, align 4
call void @_gfortran_st_write(%struct.__st_parameter_dt* %memtmp3) nounwind
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_integer_write to void (%struct.__st_parameter_dt*, i32*, i32)*)(%struct.__st_parameter_dt* %memtmp3, i32* @j.4580, i32 4) nounwind
; CHECK: @_gfortran_transfer_integer_write
- %D.75807_289 = load i8** getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 0), align 8
- %j.8758_290 = load i32* @j.4580, align 4
+ %D.75807_289 = load i8*, i8** getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 0), align 8
+ %j.8758_290 = load i32, i32* @j.4580, align 4
%D.75760_291 = sext i32 %j.8758_290 to i64
- %iave.8736_292 = load i32* @__main1_MOD_iave, align 4
+ %iave.8736_292 = load i32, i32* @__main1_MOD_iave, align 4
%D.75620_293 = sext i32 %iave.8736_292 to i64
- %D.75808_294 = load i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 2, i32 0), align 8
+ %D.75808_294 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 2, i32 0), align 8
%D.75809_295 = mul nsw i64 %D.75620_293, %D.75808_294
- %igrp.8737_296 = load i32* @__main1_MOD_igrp, align 4
+ %igrp.8737_296 = load i32, i32* @__main1_MOD_igrp, align 4
%D.75635_297 = sext i32 %igrp.8737_296 to i64
- %D.75810_298 = load i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 1, i32 0), align 8
+ %D.75810_298 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 1, i32 0), align 8
%D.75811_299 = mul nsw i64 %D.75635_297, %D.75810_298
%D.75812_300 = add nsw i64 %D.75809_295, %D.75811_299
%D.75813_301 = add nsw i64 %D.75760_291, %D.75812_300
- %ityp.8750_302 = load i32* @__main1_MOD_ityp, align 4
+ %ityp.8750_302 = load i32, i32* @__main1_MOD_ityp, align 4
%D.75704_303 = sext i32 %ityp.8750_302 to i64
- %D.75814_304 = load i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 3, i32 0), align 8
+ %D.75814_304 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 3, i32 0), align 8
%D.75815_305 = mul nsw i64 %D.75704_303, %D.75814_304
%D.75816_306 = add nsw i64 %D.75813_301, %D.75815_305
- %D.75817_307 = load i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 1), align 8
+ %D.75817_307 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 1), align 8
%D.75818_308 = add nsw i64 %D.75816_306, %D.75817_307
%tmp138 = bitcast i8* %D.75807_289 to [0 x float]*
%tmp139 = bitcast [0 x float]* %tmp138 to float*
%D.75819_309 = getelementptr inbounds float, float* %tmp139, i64 %D.75818_308
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_real_write to void (%struct.__st_parameter_dt*, float*, i32)*)(%struct.__st_parameter_dt* %memtmp3, float* %D.75819_309, i32 4) nounwind
; CHECK: @_gfortran_transfer_real_write
- %D.75820_310 = load i8** getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 0), align 8
- %j.8758_311 = load i32* @j.4580, align 4
+ %D.75820_310 = load i8*, i8** getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 0), align 8
+ %j.8758_311 = load i32, i32* @j.4580, align 4
%D.75760_312 = sext i32 %j.8758_311 to i64
- %iave.8736_313 = load i32* @__main1_MOD_iave, align 4
+ %iave.8736_313 = load i32, i32* @__main1_MOD_iave, align 4
%D.75620_314 = sext i32 %iave.8736_313 to i64
- %D.75821_315 = load i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 2, i32 0), align 8
+ %D.75821_315 = load i64, i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 2, i32 0), align 8
%D.75822_316 = mul nsw i64 %D.75620_314, %D.75821_315
- %igrp.8737_317 = load i32* @__main1_MOD_igrp, align 4
+ %igrp.8737_317 = load i32, i32* @__main1_MOD_igrp, align 4
%D.75635_318 = sext i32 %igrp.8737_317 to i64
- %D.75823_319 = load i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 1, i32 0), align 8
+ %D.75823_319 = load i64, i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 1, i32 0), align 8
%D.75824_320 = mul nsw i64 %D.75635_318, %D.75823_319
%D.75825_321 = add nsw i64 %D.75822_316, %D.75824_320
%D.75826_322 = add nsw i64 %D.75760_312, %D.75825_321
- %ityp.8750_323 = load i32* @__main1_MOD_ityp, align 4
+ %ityp.8750_323 = load i32, i32* @__main1_MOD_ityp, align 4
%D.75704_324 = sext i32 %ityp.8750_323 to i64
- %D.75827_325 = load i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 3, i32 0), align 8
+ %D.75827_325 = load i64, i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 3, i32 0), align 8
%D.75828_326 = mul nsw i64 %D.75704_324, %D.75827_325
%D.75829_327 = add nsw i64 %D.75826_322, %D.75828_326
- %D.75830_328 = load i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 1), align 8
+ %D.75830_328 = load i64, i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 1), align 8
%D.75831_329 = add nsw i64 %D.75829_327, %D.75830_328
%tmp140 = bitcast i8* %D.75820_310 to [0 x [1 x i8]]*
%tmp141 = bitcast [0 x [1 x i8]]* %tmp140 to [1 x i8]*
%D.75832_330 = getelementptr inbounds [1 x i8], [1 x i8]* %tmp141, i64 %D.75831_329
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_character_write to void (%struct.__st_parameter_dt*, [1 x i8]*, i32)*)(%struct.__st_parameter_dt* %memtmp3, [1 x i8]* %D.75832_330, i32 1) nounwind
; CHECK: @_gfortran_transfer_character_write
- %D.75833_331 = load i8** getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 0), align 8
- %j.8758_332 = load i32* @j.4580, align 4
+ %D.75833_331 = load i8*, i8** getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 0), align 8
+ %j.8758_332 = load i32, i32* @j.4580, align 4
%D.75760_333 = sext i32 %j.8758_332 to i64
- %iave.8736_334 = load i32* @__main1_MOD_iave, align 4
+ %iave.8736_334 = load i32, i32* @__main1_MOD_iave, align 4
%D.75620_335 = sext i32 %iave.8736_334 to i64
- %D.75834_336 = load i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 2, i32 0), align 8
+ %D.75834_336 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 2, i32 0), align 8
%D.75835_337 = mul nsw i64 %D.75620_335, %D.75834_336
- %igrp.8737_338 = load i32* @__main1_MOD_igrp, align 4
+ %igrp.8737_338 = load i32, i32* @__main1_MOD_igrp, align 4
%D.75635_339 = sext i32 %igrp.8737_338 to i64
- %D.75836_340 = load i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 1, i32 0), align 8
+ %D.75836_340 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 1, i32 0), align 8
%D.75837_341 = mul nsw i64 %D.75635_339, %D.75836_340
%D.75838_342 = add nsw i64 %D.75835_337, %D.75837_341
%D.75839_343 = add nsw i64 %D.75760_333, %D.75838_342
- %ityp.8750_344 = load i32* @__main1_MOD_ityp, align 4
+ %ityp.8750_344 = load i32, i32* @__main1_MOD_ityp, align 4
%D.75704_345 = sext i32 %ityp.8750_344 to i64
- %D.75840_346 = load i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 3, i32 0), align 8
+ %D.75840_346 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 3, i32 0), align 8
%D.75841_347 = mul nsw i64 %D.75704_345, %D.75840_346
%D.75842_348 = add nsw i64 %D.75839_343, %D.75841_347
- %D.75843_349 = load i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 1), align 8
+ %D.75843_349 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 1), align 8
%D.75844_350 = add nsw i64 %D.75842_348, %D.75843_349
%tmp142 = bitcast i8* %D.75833_331 to [0 x i32]*
%tmp143 = bitcast [0 x i32]* %tmp142 to i32*
@@ -150,75 +150,75 @@ codeRepl80.exitStub: ; preds = %"<bb 34>"
; CHECK: @_gfortran_transfer_character_write
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_integer_write to void (%struct.__st_parameter_dt*, i32*, i32)*)(%struct.__st_parameter_dt* %memtmp3, i32* @j1.4581, i32 4) nounwind
; CHECK: @_gfortran_transfer_integer_write
- %D.75807_352 = load i8** getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 0), align 8
- %j1.8760_353 = load i32* @j1.4581, align 4
+ %D.75807_352 = load i8*, i8** getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 0), align 8
+ %j1.8760_353 = load i32, i32* @j1.4581, align 4
%D.75773_354 = sext i32 %j1.8760_353 to i64
- %iave.8736_355 = load i32* @__main1_MOD_iave, align 4
+ %iave.8736_355 = load i32, i32* @__main1_MOD_iave, align 4
%D.75620_356 = sext i32 %iave.8736_355 to i64
- %D.75808_357 = load i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 2, i32 0), align 8
+ %D.75808_357 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 2, i32 0), align 8
%D.75809_358 = mul nsw i64 %D.75620_356, %D.75808_357
- %igrp.8737_359 = load i32* @__main1_MOD_igrp, align 4
+ %igrp.8737_359 = load i32, i32* @__main1_MOD_igrp, align 4
%D.75635_360 = sext i32 %igrp.8737_359 to i64
- %D.75810_361 = load i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 1, i32 0), align 8
+ %D.75810_361 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 1, i32 0), align 8
%D.75811_362 = mul nsw i64 %D.75635_360, %D.75810_361
%D.75812_363 = add nsw i64 %D.75809_358, %D.75811_362
%D.75846_364 = add nsw i64 %D.75773_354, %D.75812_363
- %ityp.8750_365 = load i32* @__main1_MOD_ityp, align 4
+ %ityp.8750_365 = load i32, i32* @__main1_MOD_ityp, align 4
%D.75704_366 = sext i32 %ityp.8750_365 to i64
- %D.75814_367 = load i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 3, i32 0), align 8
+ %D.75814_367 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 3, i64 3, i32 0), align 8
%D.75815_368 = mul nsw i64 %D.75704_366, %D.75814_367
%D.75847_369 = add nsw i64 %D.75846_364, %D.75815_368
- %D.75817_370 = load i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 1), align 8
+ %D.75817_370 = load i64, i64* getelementptr inbounds (%"struct.array4_real(kind=4)"* @__main1_MOD_rmxval, i64 0, i32 1), align 8
%D.75848_371 = add nsw i64 %D.75847_369, %D.75817_370
%tmp144 = bitcast i8* %D.75807_352 to [0 x float]*
%tmp145 = bitcast [0 x float]* %tmp144 to float*
%D.75849_372 = getelementptr inbounds float, float* %tmp145, i64 %D.75848_371
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_real_write to void (%struct.__st_parameter_dt*, float*, i32)*)(%struct.__st_parameter_dt* %memtmp3, float* %D.75849_372, i32 4) nounwind
; CHECK: @_gfortran_transfer_real_write
- %D.75820_373 = load i8** getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 0), align 8
- %j1.8760_374 = load i32* @j1.4581, align 4
+ %D.75820_373 = load i8*, i8** getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 0), align 8
+ %j1.8760_374 = load i32, i32* @j1.4581, align 4
%D.75773_375 = sext i32 %j1.8760_374 to i64
- %iave.8736_376 = load i32* @__main1_MOD_iave, align 4
+ %iave.8736_376 = load i32, i32* @__main1_MOD_iave, align 4
%D.75620_377 = sext i32 %iave.8736_376 to i64
- %D.75821_378 = load i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 2, i32 0), align 8
+ %D.75821_378 = load i64, i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 2, i32 0), align 8
%D.75822_379 = mul nsw i64 %D.75620_377, %D.75821_378
- %igrp.8737_380 = load i32* @__main1_MOD_igrp, align 4
+ %igrp.8737_380 = load i32, i32* @__main1_MOD_igrp, align 4
%D.75635_381 = sext i32 %igrp.8737_380 to i64
- %D.75823_382 = load i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 1, i32 0), align 8
+ %D.75823_382 = load i64, i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 1, i32 0), align 8
%D.75824_383 = mul nsw i64 %D.75635_381, %D.75823_382
%D.75825_384 = add nsw i64 %D.75822_379, %D.75824_383
%D.75850_385 = add nsw i64 %D.75773_375, %D.75825_384
- %ityp.8750_386 = load i32* @__main1_MOD_ityp, align 4
+ %ityp.8750_386 = load i32, i32* @__main1_MOD_ityp, align 4
%D.75704_387 = sext i32 %ityp.8750_386 to i64
- %D.75827_388 = load i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 3, i32 0), align 8
+ %D.75827_388 = load i64, i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 3, i64 3, i32 0), align 8
%D.75828_389 = mul nsw i64 %D.75704_387, %D.75827_388
%D.75851_390 = add nsw i64 %D.75850_385, %D.75828_389
- %D.75830_391 = load i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 1), align 8
+ %D.75830_391 = load i64, i64* getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 1), align 8
%D.75852_392 = add nsw i64 %D.75851_390, %D.75830_391
%tmp146 = bitcast i8* %D.75820_373 to [0 x [1 x i8]]*
%tmp147 = bitcast [0 x [1 x i8]]* %tmp146 to [1 x i8]*
%D.75853_393 = getelementptr inbounds [1 x i8], [1 x i8]* %tmp147, i64 %D.75852_392
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_character_write to void (%struct.__st_parameter_dt*, [1 x i8]*, i32)*)(%struct.__st_parameter_dt* %memtmp3, [1 x i8]* %D.75853_393, i32 1) nounwind
; CHECK: @_gfortran_transfer_character_write
- %D.75833_394 = load i8** getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 0), align 8
- %j1.8760_395 = load i32* @j1.4581, align 4
+ %D.75833_394 = load i8*, i8** getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 0), align 8
+ %j1.8760_395 = load i32, i32* @j1.4581, align 4
%D.75773_396 = sext i32 %j1.8760_395 to i64
- %iave.8736_397 = load i32* @__main1_MOD_iave, align 4
+ %iave.8736_397 = load i32, i32* @__main1_MOD_iave, align 4
%D.75620_398 = sext i32 %iave.8736_397 to i64
- %D.75834_399 = load i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 2, i32 0), align 8
+ %D.75834_399 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 2, i32 0), align 8
%D.75835_400 = mul nsw i64 %D.75620_398, %D.75834_399
- %igrp.8737_401 = load i32* @__main1_MOD_igrp, align 4
+ %igrp.8737_401 = load i32, i32* @__main1_MOD_igrp, align 4
%D.75635_402 = sext i32 %igrp.8737_401 to i64
- %D.75836_403 = load i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 1, i32 0), align 8
+ %D.75836_403 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 1, i32 0), align 8
%D.75837_404 = mul nsw i64 %D.75635_402, %D.75836_403
%D.75838_405 = add nsw i64 %D.75835_400, %D.75837_404
%D.75854_406 = add nsw i64 %D.75773_396, %D.75838_405
- %ityp.8750_407 = load i32* @__main1_MOD_ityp, align 4
+ %ityp.8750_407 = load i32, i32* @__main1_MOD_ityp, align 4
%D.75704_408 = sext i32 %ityp.8750_407 to i64
- %D.75840_409 = load i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 3, i32 0), align 8
+ %D.75840_409 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 3, i64 3, i32 0), align 8
%D.75841_410 = mul nsw i64 %D.75704_408, %D.75840_409
%D.75855_411 = add nsw i64 %D.75854_406, %D.75841_410
- %D.75843_412 = load i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 1), align 8
+ %D.75843_412 = load i64, i64* getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 1), align 8
%D.75856_413 = add nsw i64 %D.75855_411, %D.75843_412
%tmp148 = bitcast i8* %D.75833_394 to [0 x i32]*
%tmp149 = bitcast [0 x i32]* %tmp148 to i32*
@@ -233,9 +233,9 @@ codeRepl80.exitStub: ; preds = %"<bb 34>"
; CHECK: @_gfortran_transfer_character_write
call void @_gfortran_st_write_done(%struct.__st_parameter_dt* %memtmp3) nounwind
; CHECK: @_gfortran_st_write_done
- %j.8758_415 = load i32* @j.4580, align 4
+ %j.8758_415 = load i32, i32* @j.4580, align 4
%D.4634_416 = icmp eq i32 %j.8758_415, %D.4627_188.reload
- %j.8758_417 = load i32* @j.4580, align 4
+ %j.8758_417 = load i32, i32* @j.4580, align 4
%j.8770_418 = add nsw i32 %j.8758_417, 1
store i32 %j.8770_418, i32* @j.4580, align 4
%tmp150 = icmp ne i1 %D.4634_416, false
diff --git a/llvm/test/Transforms/BBVectorize/ld1.ll b/llvm/test/Transforms/BBVectorize/ld1.ll
index 65fa49af3e0..368c38aa5ce 100644
--- a/llvm/test/Transforms/BBVectorize/ld1.ll
+++ b/llvm/test/Transforms/BBVectorize/ld1.ll
@@ -3,18 +3,18 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define double @test1(double* %a, double* %b, double* %c) nounwind uwtable readonly {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
- %i2 = load double* %c, align 8
+ %i2 = load double, double* %c, align 8
%add = fadd double %mul, %i2
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%arrayidx6 = getelementptr inbounds double, double* %c, i64 1
- %i5 = load double* %arrayidx6, align 8
+ %i5 = load double, double* %arrayidx6, align 8
%add7 = fadd double %mul5, %i5
%mul9 = fmul double %add, %i1
%add11 = fadd double %mul9, %i2
@@ -26,10 +26,10 @@ entry:
; CHECK: %i0.v.i0 = bitcast double* %a to <2 x double>*
; CHECK: %i1.v.i0 = bitcast double* %b to <2 x double>*
; CHECK: %i2.v.i0 = bitcast double* %c to <2 x double>*
-; CHECK: %i0 = load <2 x double>* %i0.v.i0, align 8
-; CHECK: %i1 = load <2 x double>* %i1.v.i0, align 8
+; CHECK: %i0 = load <2 x double>, <2 x double>* %i0.v.i0, align 8
+; CHECK: %i1 = load <2 x double>, <2 x double>* %i1.v.i0, align 8
; CHECK: %mul = fmul <2 x double> %i0, %i1
-; CHECK: %i2 = load <2 x double>* %i2.v.i0, align 8
+; CHECK: %i2 = load <2 x double>, <2 x double>* %i2.v.i0, align 8
; CHECK: %add = fadd <2 x double> %mul, %i2
; CHECK: %mul9 = fmul <2 x double> %add, %i1
; CHECK: %add11 = fadd <2 x double> %mul9, %i2
diff --git a/llvm/test/Transforms/BBVectorize/loop1.ll b/llvm/test/Transforms/BBVectorize/loop1.ll
index 45e14df4c1a..70a5def4222 100644
--- a/llvm/test/Transforms/BBVectorize/loop1.ll
+++ b/llvm/test/Transforms/BBVectorize/loop1.ll
@@ -13,9 +13,9 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
- %1 = load double* %arrayidx2, align 8
+ %1 = load double, double* %arrayidx2, align 8
%mul = fmul double %0, %0
%mul3 = fmul double %0, %1
%add = fadd double %mul, %mul3
@@ -36,9 +36,9 @@ for.body: ; preds = %for.body, %entry
br i1 %exitcond, label %for.end, label %for.body
; CHECK: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
; CHECK: %arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
-; CHECK: %0 = load double* %arrayidx, align 8
+; CHECK: %0 = load double, double* %arrayidx, align 8
; CHECK: %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
-; CHECK: %1 = load double* %arrayidx2, align 8
+; CHECK: %1 = load double, double* %arrayidx2, align 8
; CHECK: %mul = fmul double %0, %0
; CHECK: %mul3 = fmul double %0, %1
; CHECK: %add = fadd double %mul, %mul3
@@ -67,8 +67,8 @@ for.body: ; preds = %for.body, %entry
; CHECK-UNRL: %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
; CHECK-UNRL: %1 = bitcast double* %arrayidx2 to <2 x double>*
; CHECK-UNRL: %arrayidx14 = getelementptr inbounds double, double* %out, i64 %indvars.iv
-; CHECK-UNRL: %2 = load <2 x double>* %0, align 8
-; CHECK-UNRL: %3 = load <2 x double>* %1, align 8
+; CHECK-UNRL: %2 = load <2 x double>, <2 x double>* %0, align 8
+; CHECK-UNRL: %3 = load <2 x double>, <2 x double>* %1, align 8
; CHECK-UNRL: %mul = fmul <2 x double> %2, %2
; CHECK-UNRL: %mul3 = fmul <2 x double> %2, %3
; CHECK-UNRL: %add = fadd <2 x double> %mul, %mul3
diff --git a/llvm/test/Transforms/BBVectorize/mem-op-depth.ll b/llvm/test/Transforms/BBVectorize/mem-op-depth.ll
index c31d4521183..c181c680e8e 100644
--- a/llvm/test/Transforms/BBVectorize/mem-op-depth.ll
+++ b/llvm/test/Transforms/BBVectorize/mem-op-depth.ll
@@ -7,11 +7,11 @@ target triple = "x86_64-unknown-linux-gnu"
define i32 @test1() nounwind {
; CHECK-LABEL: @test1(
- %V1 = load float* getelementptr inbounds ([1024 x float]* @A, i64 0, i64 0), align 16
- %V2 = load float* getelementptr inbounds ([1024 x float]* @A, i64 0, i64 1), align 4
- %V3= load float* getelementptr inbounds ([1024 x float]* @A, i64 0, i64 2), align 8
- %V4 = load float* getelementptr inbounds ([1024 x float]* @A, i64 0, i64 3), align 4
-; CHECK: %V1 = load <4 x float>* bitcast ([1024 x float]* @A to <4 x float>*), align 16
+ %V1 = load float, float* getelementptr inbounds ([1024 x float]* @A, i64 0, i64 0), align 16
+ %V2 = load float, float* getelementptr inbounds ([1024 x float]* @A, i64 0, i64 1), align 4
+ %V3= load float, float* getelementptr inbounds ([1024 x float]* @A, i64 0, i64 2), align 8
+ %V4 = load float, float* getelementptr inbounds ([1024 x float]* @A, i64 0, i64 3), align 4
+; CHECK: %V1 = load <4 x float>, <4 x float>* bitcast ([1024 x float]* @A to <4 x float>*), align 16
store float %V1, float* getelementptr inbounds ([1024 x float]* @B, i64 0, i64 0), align 16
store float %V2, float* getelementptr inbounds ([1024 x float]* @B, i64 0, i64 1), align 4
store float %V3, float* getelementptr inbounds ([1024 x float]* @B, i64 0, i64 2), align 8
diff --git a/llvm/test/Transforms/BBVectorize/metadata.ll b/llvm/test/Transforms/BBVectorize/metadata.ll
index fade4033089..f5580a88861 100644
--- a/llvm/test/Transforms/BBVectorize/metadata.ll
+++ b/llvm/test/Transforms/BBVectorize/metadata.ll
@@ -4,13 +4,13 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; Simple 3-pair chain with loads and stores (with fpmath)
define void @test1(double* %a, double* %b, double* %c) nounwind uwtable readonly {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1, !fpmath !2
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4, !fpmath !3
store double %mul, double* %c, align 8
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
@@ -24,13 +24,13 @@ entry:
; Simple 3-pair chain with loads and stores (ints with range)
define void @test2(i64* %a, i64* %b, i64* %c) nounwind uwtable readonly {
entry:
- %i0 = load i64* %a, align 8, !range !0
- %i1 = load i64* %b, align 8
+ %i0 = load i64, i64* %a, align 8, !range !0
+ %i1 = load i64, i64* %b, align 8
%mul = mul i64 %i0, %i1
%arrayidx3 = getelementptr inbounds i64, i64* %a, i64 1
- %i3 = load i64* %arrayidx3, align 8, !range !1
+ %i3 = load i64, i64* %arrayidx3, align 8, !range !1
%arrayidx4 = getelementptr inbounds i64, i64* %b, i64 1
- %i4 = load i64* %arrayidx4, align 8
+ %i4 = load i64, i64* %arrayidx4, align 8
%mul5 = mul i64 %i3, %i4
store i64 %mul, i64* %c, align 8
%arrayidx5 = getelementptr inbounds i64, i64* %c, i64 1
diff --git a/llvm/test/Transforms/BBVectorize/no-ldstr-conn.ll b/llvm/test/Transforms/BBVectorize/no-ldstr-conn.ll
index b864fe1da3a..a84cd658560 100644
--- a/llvm/test/Transforms/BBVectorize/no-ldstr-conn.ll
+++ b/llvm/test/Transforms/BBVectorize/no-ldstr-conn.ll
@@ -9,8 +9,8 @@ entry:
%a1 = inttoptr i64 %a to i64*
%a2 = getelementptr i64, i64* %a1, i64 1
%a3 = getelementptr i64, i64* %a1, i64 2
- %v2 = load i64* %a2, align 8
- %v3 = load i64* %a3, align 8
+ %v2 = load i64, i64* %a2, align 8
+ %v3 = load i64, i64* %a3, align 8
%v2a = add i64 %v2, 5
%v3a = add i64 %v3, 7
store i64 %v2a, i64* %a2, align 8
diff --git a/llvm/test/Transforms/BBVectorize/simple-ldstr-ptrs.ll b/llvm/test/Transforms/BBVectorize/simple-ldstr-ptrs.ll
index ff812b10d49..fcc0236bae9 100644
--- a/llvm/test/Transforms/BBVectorize/simple-ldstr-ptrs.ll
+++ b/llvm/test/Transforms/BBVectorize/simple-ldstr-ptrs.ll
@@ -8,20 +8,20 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; Simple 3-pair chain also with loads and stores (using ptrs and gep)
define double @test1(i64* %a, i64* %b, i64* %c) nounwind uwtable readonly {
entry:
- %i0 = load i64* %a, align 8
- %i1 = load i64* %b, align 8
+ %i0 = load i64, i64* %a, align 8
+ %i1 = load i64, i64* %b, align 8
%mul = mul i64 %i0, %i1
%arrayidx3 = getelementptr inbounds i64, i64* %a, i64 1
- %i3 = load i64* %arrayidx3, align 8
+ %i3 = load i64, i64* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds i64, i64* %b, i64 1
- %i4 = load i64* %arrayidx4, align 8
+ %i4 = load i64, i64* %arrayidx4, align 8
%mul5 = mul i64 %i3, %i4
%ptr = inttoptr i64 %mul to double*
%ptr5 = inttoptr i64 %mul5 to double*
%aptr = getelementptr inbounds double, double* %ptr, i64 2
%aptr5 = getelementptr inbounds double, double* %ptr5, i64 3
- %av = load double* %aptr, align 16
- %av5 = load double* %aptr5, align 16
+ %av = load double, double* %aptr, align 16
+ %av5 = load double, double* %aptr5, align 16
%r = fmul double %av, %av5
store i64 %mul, i64* %c, align 8
%arrayidx5 = getelementptr inbounds i64, i64* %c, i64 1
@@ -30,15 +30,15 @@ entry:
; CHECK-LABEL: @test1(
; CHECK: %i0.v.i0 = bitcast i64* %a to <2 x i64>*
; CHECK: %i1.v.i0 = bitcast i64* %b to <2 x i64>*
-; CHECK: %i0 = load <2 x i64>* %i0.v.i0, align 8
-; CHECK: %i1 = load <2 x i64>* %i1.v.i0, align 8
+; CHECK: %i0 = load <2 x i64>, <2 x i64>* %i0.v.i0, align 8
+; CHECK: %i1 = load <2 x i64>, <2 x i64>* %i1.v.i0, align 8
; CHECK: %mul = mul <2 x i64> %i0, %i1
; CHECK: %ptr = inttoptr <2 x i64> %mul to <2 x double*>
; CHECK: %aptr = getelementptr inbounds double, <2 x double*> %ptr, <2 x i64> <i64 2, i64 3>
; CHECK: %aptr.v.r1 = extractelement <2 x double*> %aptr, i32 0
; CHECK: %aptr.v.r2 = extractelement <2 x double*> %aptr, i32 1
-; CHECK: %av = load double* %aptr.v.r1, align 16
-; CHECK: %av5 = load double* %aptr.v.r2, align 16
+; CHECK: %av = load double, double* %aptr.v.r1, align 16
+; CHECK: %av5 = load double, double* %aptr.v.r2, align 16
; CHECK: %r = fmul double %av, %av5
; CHECK: %0 = bitcast i64* %c to <2 x i64>*
; CHECK: store <2 x i64> %mul, <2 x i64>* %0, align 8
@@ -50,14 +50,14 @@ entry:
; Simple 3-pair chain with loads and stores (using ptrs and gep)
define void @test2(i64** %a, i64** %b, i64** %c) nounwind uwtable readonly {
entry:
- %i0 = load i64** %a, align 8
- %i1 = load i64** %b, align 8
+ %i0 = load i64*, i64** %a, align 8
+ %i1 = load i64*, i64** %b, align 8
%arrayidx3 = getelementptr inbounds i64*, i64** %a, i64 1
- %i3 = load i64** %arrayidx3, align 8
+ %i3 = load i64*, i64** %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds i64*, i64** %b, i64 1
- %i4 = load i64** %arrayidx4, align 8
- %o1 = load i64* %i1, align 8
- %o4 = load i64* %i4, align 8
+ %i4 = load i64*, i64** %arrayidx4, align 8
+ %o1 = load i64, i64* %i1, align 8
+ %o4 = load i64, i64* %i4, align 8
%ptr0 = getelementptr inbounds i64, i64* %i0, i64 %o1
%ptr3 = getelementptr inbounds i64, i64* %i3, i64 %o4
store i64* %ptr0, i64** %c, align 8
@@ -66,12 +66,12 @@ entry:
ret void
; CHECK-LABEL: @test2(
; CHECK: %i0.v.i0 = bitcast i64** %a to <2 x i64*>*
-; CHECK: %i1 = load i64** %b, align 8
-; CHECK: %i0 = load <2 x i64*>* %i0.v.i0, align 8
+; CHECK: %i1 = load i64*, i64** %b, align 8
+; CHECK: %i0 = load <2 x i64*>, <2 x i64*>* %i0.v.i0, align 8
; CHECK: %arrayidx4 = getelementptr inbounds i64*, i64** %b, i64 1
-; CHECK: %i4 = load i64** %arrayidx4, align 8
-; CHECK: %o1 = load i64* %i1, align 8
-; CHECK: %o4 = load i64* %i4, align 8
+; CHECK: %i4 = load i64*, i64** %arrayidx4, align 8
+; CHECK: %o1 = load i64, i64* %i1, align 8
+; CHECK: %o4 = load i64, i64* %i4, align 8
; CHECK: %ptr0.v.i1.1 = insertelement <2 x i64> undef, i64 %o1, i32 0
; CHECK: %ptr0.v.i1.2 = insertelement <2 x i64> %ptr0.v.i1.1, i64 %o4, i32 1
; CHECK: %ptr0 = getelementptr inbounds i64, <2 x i64*> %i0, <2 x i64> %ptr0.v.i1.2
@@ -86,16 +86,16 @@ entry:
; using pointer vectors.
define void @test3(<2 x i64*>* %a, <2 x i64*>* %b, <2 x i64*>* %c) nounwind uwtable readonly {
entry:
- %i0 = load <2 x i64*>* %a, align 8
- %i1 = load <2 x i64*>* %b, align 8
+ %i0 = load <2 x i64*>, <2 x i64*>* %a, align 8
+ %i1 = load <2 x i64*>, <2 x i64*>* %b, align 8
%arrayidx3 = getelementptr inbounds <2 x i64*>, <2 x i64*>* %a, i64 1
- %i3 = load <2 x i64*>* %arrayidx3, align 8
+ %i3 = load <2 x i64*>, <2 x i64*>* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds <2 x i64*>, <2 x i64*>* %b, i64 1
- %i4 = load <2 x i64*>* %arrayidx4, align 8
+ %i4 = load <2 x i64*>, <2 x i64*>* %arrayidx4, align 8
%j1 = extractelement <2 x i64*> %i1, i32 0
%j4 = extractelement <2 x i64*> %i4, i32 0
- %o1 = load i64* %j1, align 8
- %o4 = load i64* %j4, align 8
+ %o1 = load i64, i64* %j1, align 8
+ %o4 = load i64, i64* %j4, align 8
%j0 = extractelement <2 x i64*> %i0, i32 0
%j3 = extractelement <2 x i64*> %i3, i32 0
%ptr0 = getelementptr inbounds i64, i64* %j0, i64 %o1
@@ -110,14 +110,14 @@ entry:
ret void
; CHECK-LABEL: @test3(
; CHECK: %i0.v.i0 = bitcast <2 x i64*>* %a to <4 x i64*>*
-; CHECK: %i1 = load <2 x i64*>* %b, align 8
-; CHECK: %i0 = load <4 x i64*>* %i0.v.i0, align 8
+; CHECK: %i1 = load <2 x i64*>, <2 x i64*>* %b, align 8
+; CHECK: %i0 = load <4 x i64*>, <4 x i64*>* %i0.v.i0, align 8
; CHECK: %arrayidx4 = getelementptr inbounds <2 x i64*>, <2 x i64*>* %b, i64 1
-; CHECK: %i4 = load <2 x i64*>* %arrayidx4, align 8
+; CHECK: %i4 = load <2 x i64*>, <2 x i64*>* %arrayidx4, align 8
; CHECK: %j1 = extractelement <2 x i64*> %i1, i32 0
; CHECK: %j4 = extractelement <2 x i64*> %i4, i32 0
-; CHECK: %o1 = load i64* %j1, align 8
-; CHECK: %o4 = load i64* %j4, align 8
+; CHECK: %o1 = load i64, i64* %j1, align 8
+; CHECK: %o4 = load i64, i64* %j4, align 8
; CHECK: %ptr0.v.i1.1 = insertelement <2 x i64> undef, i64 %o1, i32 0
; CHECK: %ptr0.v.i1.2 = insertelement <2 x i64> %ptr0.v.i1.1, i64 %o4, i32 1
; CHECK: %ptr0.v.i0 = shufflevector <4 x i64*> %i0, <4 x i64*> undef, <2 x i32> <i32 0, i32 2>
diff --git a/llvm/test/Transforms/BBVectorize/simple-ldstr.ll b/llvm/test/Transforms/BBVectorize/simple-ldstr.ll
index d0858173b72..56c1a06b42e 100644
--- a/llvm/test/Transforms/BBVectorize/simple-ldstr.ll
+++ b/llvm/test/Transforms/BBVectorize/simple-ldstr.ll
@@ -5,13 +5,13 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; Simple 3-pair chain with loads and stores
define void @test1(double* %a, double* %b, double* %c) nounwind uwtable readonly {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
@@ -20,8 +20,8 @@ entry:
; CHECK-LABEL: @test1(
; CHECK: %i0.v.i0 = bitcast double* %a to <2 x double>*
; CHECK: %i1.v.i0 = bitcast double* %b to <2 x double>*
-; CHECK: %i0 = load <2 x double>* %i0.v.i0, align 8
-; CHECK: %i1 = load <2 x double>* %i1.v.i0, align 8
+; CHECK: %i0 = load <2 x double>, <2 x double>* %i0.v.i0, align 8
+; CHECK: %i1 = load <2 x double>, <2 x double>* %i1.v.i0, align 8
; CHECK: %mul = fmul <2 x double> %i0, %i1
; CHECK: %0 = bitcast double* %c to <2 x double>*
; CHECK: store <2 x double> %mul, <2 x double>* %0, align 8
@@ -33,16 +33,16 @@ entry:
; Simple chain with extending loads and stores
define void @test2(float* %a, float* %b, double* %c) nounwind uwtable readonly {
entry:
- %i0f = load float* %a, align 4
+ %i0f = load float, float* %a, align 4
%i0 = fpext float %i0f to double
- %i1f = load float* %b, align 4
+ %i1f = load float, float* %b, align 4
%i1 = fpext float %i1f to double
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds float, float* %a, i64 1
- %i3f = load float* %arrayidx3, align 4
+ %i3f = load float, float* %arrayidx3, align 4
%i3 = fpext float %i3f to double
%arrayidx4 = getelementptr inbounds float, float* %b, i64 1
- %i4f = load float* %arrayidx4, align 4
+ %i4f = load float, float* %arrayidx4, align 4
%i4 = fpext float %i4f to double
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
@@ -52,9 +52,9 @@ entry:
; CHECK-LABEL: @test2(
; CHECK: %i0f.v.i0 = bitcast float* %a to <2 x float>*
; CHECK: %i1f.v.i0 = bitcast float* %b to <2 x float>*
-; CHECK: %i0f = load <2 x float>* %i0f.v.i0, align 4
+; CHECK: %i0f = load <2 x float>, <2 x float>* %i0f.v.i0, align 4
; CHECK: %i0 = fpext <2 x float> %i0f to <2 x double>
-; CHECK: %i1f = load <2 x float>* %i1f.v.i0, align 4
+; CHECK: %i1f = load <2 x float>, <2 x float>* %i1f.v.i0, align 4
; CHECK: %i1 = fpext <2 x float> %i1f to <2 x double>
; CHECK: %mul = fmul <2 x double> %i0, %i1
; CHECK: %0 = bitcast double* %c to <2 x double>*
@@ -67,14 +67,14 @@ entry:
; Simple chain with loads and truncating stores
define void @test3(double* %a, double* %b, float* %c) nounwind uwtable readonly {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%mulf = fptrunc double %mul to float
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%mul5f = fptrunc double %mul5 to float
store float %mulf, float* %c, align 8
@@ -84,20 +84,20 @@ entry:
; CHECK-LABEL: @test3(
; CHECK: %i0.v.i0 = bitcast double* %a to <2 x double>*
; CHECK: %i1.v.i0 = bitcast double* %b to <2 x double>*
-; CHECK: %i0 = load <2 x double>* %i0.v.i0, align 8
-; CHECK: %i1 = load <2 x double>* %i1.v.i0, align 8
+; CHECK: %i0 = load <2 x double>, <2 x double>* %i0.v.i0, align 8
+; CHECK: %i1 = load <2 x double>, <2 x double>* %i1.v.i0, align 8
; CHECK: %mul = fmul <2 x double> %i0, %i1
; CHECK: %mulf = fptrunc <2 x double> %mul to <2 x float>
; CHECK: %0 = bitcast float* %c to <2 x float>*
; CHECK: store <2 x float> %mulf, <2 x float>* %0, align 8
; CHECK: ret void
; CHECK-AO-LABEL: @test3(
-; CHECK-AO: %i0 = load double* %a, align 8
-; CHECK-AO: %i1 = load double* %b, align 8
+; CHECK-AO: %i0 = load double, double* %a, align 8
+; CHECK-AO: %i1 = load double, double* %b, align 8
; CHECK-AO: %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
-; CHECK-AO: %i3 = load double* %arrayidx3, align 8
+; CHECK-AO: %i3 = load double, double* %arrayidx3, align 8
; CHECK-AO: %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
-; CHECK-AO: %i4 = load double* %arrayidx4, align 8
+; CHECK-AO: %i4 = load double, double* %arrayidx4, align 8
; CHECK-AO: %mul.v.i1.1 = insertelement <2 x double> undef, double %i1, i32 0
; CHECK-AO: %mul.v.i1.2 = insertelement <2 x double> %mul.v.i1.1, double %i4, i32 1
; CHECK-AO: %mul.v.i0.1 = insertelement <2 x double> undef, double %i0, i32 0
@@ -119,13 +119,13 @@ if.then1:
br label %if.then
if.then:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
@@ -143,13 +143,13 @@ if.end:
; Simple 3-pair chain with loads and stores
define void @test5(double* %a, double* %b, double* %c) nounwind uwtable readonly {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
@@ -158,8 +158,8 @@ entry:
; CHECK-LABEL: @test5(
; CHECK: %i0.v.i0 = bitcast double* %a to <2 x double>*
; CHECK: %i1.v.i0 = bitcast double* %b to <2 x double>*
-; CHECK: %i0 = load <2 x double>* %i0.v.i0, align 8
-; CHECK: %i1 = load <2 x double>* %i1.v.i0, align 8
+; CHECK: %i0 = load <2 x double>, <2 x double>* %i0.v.i0, align 8
+; CHECK: %i1 = load <2 x double>, <2 x double>* %i1.v.i0, align 8
; CHECK: %mul = fmul <2 x double> %i0, %i1
; CHECK: %0 = bitcast double* %c to <2 x double>*
; CHECK: store <2 x double> %mul, <2 x double>* %0, align 4
diff --git a/llvm/test/Transforms/CodeExtractor/2004-03-14-DominanceProblem.ll b/llvm/test/Transforms/CodeExtractor/2004-03-14-DominanceProblem.ll
index a6ee63ec45a..2f9c0c7371c 100644
--- a/llvm/test/Transforms/CodeExtractor/2004-03-14-DominanceProblem.ll
+++ b/llvm/test/Transforms/CodeExtractor/2004-03-14-DominanceProblem.ll
@@ -13,7 +13,7 @@ loopentry.1: ; preds = %no_exit.1, %loopentry.1.preheader
br i1 false, label %no_exit.1, label %loopexit.0.loopexit1
no_exit.1: ; preds = %loopentry.1
- %tmp.53 = load i32* null ; <i32> [#uses=1]
+ %tmp.53 = load i32, i32* null ; <i32> [#uses=1]
br i1 false, label %shortcirc_next.2, label %loopentry.1
shortcirc_next.2: ; preds = %no_exit.1
diff --git a/llvm/test/Transforms/CodeGenPrepare/X86/extend-sink-hoist.ll b/llvm/test/Transforms/CodeGenPrepare/X86/extend-sink-hoist.ll
index 430b99299d8..519e1ee2ce6 100644
--- a/llvm/test/Transforms/CodeGenPrepare/X86/extend-sink-hoist.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/extend-sink-hoist.ll
@@ -9,7 +9,7 @@ define i128 @sink(i64* %mem1, i64* %mem2) {
; CHECK-LABEL: block1:
; CHECK-NEXT: load
block1:
- %l1 = load i64* %mem1
+ %l1 = load i64, i64* %mem1
%s1 = sext i64 %l1 to i128
br label %block2
@@ -18,7 +18,7 @@ block1:
; CHECK-NEXT: load
; CHECK-NEXT: sext
block2:
- %l2 = load i64* %mem2
+ %l2 = load i64, i64* %mem2
%s2 = sext i64 %l2 to i128
%res = mul i128 %s1, %s2
ret i128 %res
@@ -31,7 +31,7 @@ define i64 @hoist(i32* %mem1, i32* %mem2) {
; CHECK-NEXT: load
; CHECK-NEXT: sext
block1:
- %l1 = load i32* %mem1
+ %l1 = load i32, i32* %mem1
br label %block2
; CHECK-LABEL: block2:
@@ -39,7 +39,7 @@ block1:
; CHECK-NEXT: sext
block2:
%s1 = sext i32 %l1 to i64
- %l2 = load i32* %mem2
+ %l2 = load i32, i32* %mem2
%s2 = sext i32 %l2 to i64
%res = mul i64 %s1, %s2
ret i64 %res
diff --git a/llvm/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll b/llvm/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll
index 097e1f75538..c9f49b5d4f8 100644
--- a/llvm/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll
@@ -13,7 +13,7 @@ entry:
br i1 %cond, label %if.then, label %fallthrough
if.then:
- %v = load i32 addrspace(1)* %casted, align 4
+ %v = load i32, i32 addrspace(1)* %casted, align 4
br label %fallthrough
fallthrough:
diff --git a/llvm/test/Transforms/CodeGenPrepare/statepoint-relocate.ll b/llvm/test/Transforms/CodeGenPrepare/statepoint-relocate.ll
index 939e813ae70..7aa526fdc5e 100644
--- a/llvm/test/Transforms/CodeGenPrepare/statepoint-relocate.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/statepoint-relocate.ll
@@ -13,7 +13,7 @@ entry:
%tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32* %base, i32* %ptr)
%base-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 4)
%ptr-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 5)
- %ret = load i32* %ptr-new
+ %ret = load i32, i32* %ptr-new
ret i32 %ret
}
@@ -29,7 +29,7 @@ entry:
%base-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 4)
%ptr-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 5)
%ptr2-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 6)
- %ret = load i32* %ptr-new
+ %ret = load i32, i32* %ptr-new
ret i32 %ret
}
@@ -41,7 +41,7 @@ entry:
%tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32* %base, i32* %ptr)
%ptr-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 5)
%base-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 4)
- %ret = load i32* %ptr-new
+ %ret = load i32, i32* %ptr-new
ret i32 %ret
}
@@ -53,7 +53,7 @@ entry:
%tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, [3 x i32]* %base, i32* %ptr)
%base-new = call [3 x i32]* @llvm.experimental.gc.relocate.p0a3i32(i32 %tok, i32 4, i32 4)
%ptr-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 5)
- %ret = load i32* %ptr-new
+ %ret = load i32, i32* %ptr-new
ret i32 %ret
}
@@ -65,7 +65,7 @@ entry:
%tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, [3 x i32]* %base, i32* %ptr)
%base-new = call [3 x i32]* @llvm.experimental.gc.relocate.p0a3i32(i32 %tok, i32 4, i32 4)
%ptr-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 5)
- %ret = load i32* %ptr-new
+ %ret = load i32, i32* %ptr-new
ret i32 %ret
}
@@ -79,7 +79,7 @@ entry:
%tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32* %base, i32* %ptr, i32* %ptr2)
%ptr-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 5)
%ptr2-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 6)
- %ret = load i32* %ptr-new
+ %ret = load i32, i32* %ptr-new
ret i32 %ret
}
diff --git a/llvm/test/Transforms/ConstProp/loads.ll b/llvm/test/Transforms/ConstProp/loads.ll
index 5a23dad87f3..dbfd9921790 100644
--- a/llvm/test/Transforms/ConstProp/loads.ll
+++ b/llvm/test/Transforms/ConstProp/loads.ll
@@ -9,7 +9,7 @@
; Simple load
define i32 @test1() {
- %r = load i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0)
+ %r = load i32, i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0)
ret i32 %r
; 0xDEADBEEF
@@ -24,7 +24,7 @@ define i32 @test1() {
; PR3152
; Load of first 16 bits of 32-bit value.
define i16 @test2() {
- %r = load i16* bitcast(i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16*)
+ %r = load i16, i16* bitcast(i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16*)
ret i16 %r
; 0xBEEF
@@ -37,7 +37,7 @@ define i16 @test2() {
}
define i16 @test2_addrspacecast() {
- %r = load i16 addrspace(1)* addrspacecast(i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16 addrspace(1)*)
+ %r = load i16, i16 addrspace(1)* addrspacecast(i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16 addrspace(1)*)
ret i16 %r
; 0xBEEF
@@ -51,7 +51,7 @@ define i16 @test2_addrspacecast() {
; Load of second 16 bits of 32-bit value.
define i16 @test3() {
- %r = load i16* getelementptr(i16* bitcast(i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16*), i32 1)
+ %r = load i16, i16* getelementptr(i16* bitcast(i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16*), i32 1)
ret i16 %r
; 0xDEAD
@@ -65,7 +65,7 @@ define i16 @test3() {
; Load of 8 bit field + tail padding.
define i16 @test4() {
- %r = load i16* getelementptr(i16* bitcast(i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16*), i32 2)
+ %r = load i16, i16* getelementptr(i16* bitcast(i32* getelementptr ({{i32,i8},i32}* @g1, i32 0, i32 0, i32 0) to i16*), i32 2)
ret i16 %r
; 0x00BA
@@ -79,7 +79,7 @@ define i16 @test4() {
; Load of double bits.
define i64 @test6() {
- %r = load i64* bitcast(double* @g2 to i64*)
+ %r = load i64, i64* bitcast(double* @g2 to i64*)
ret i64 %r
; 0x3FF_0000000000000
@@ -93,7 +93,7 @@ define i64 @test6() {
; Load of double bits.
define i16 @test7() {
- %r = load i16* bitcast(double* @g2 to i16*)
+ %r = load i16, i16* bitcast(double* @g2 to i16*)
ret i16 %r
; 0x0000
@@ -107,7 +107,7 @@ define i16 @test7() {
; Double load.
define double @test8() {
- %r = load double* bitcast({{i32,i8},i32}* @g1 to double*)
+ %r = load double, double* bitcast({{i32,i8},i32}* @g1 to double*)
ret double %r
; LE-LABEL: @test8(
@@ -120,7 +120,7 @@ define double @test8() {
; i128 load.
define i128 @test9() {
- %r = load i128* bitcast({i64, i64}* @g3 to i128*)
+ %r = load i128, i128* bitcast({i64, i64}* @g3 to i128*)
ret i128 %r
; 0x00000000_06B1BFF8_00000000_0000007B
@@ -134,7 +134,7 @@ define i128 @test9() {
; vector load.
define <2 x i64> @test10() {
- %r = load <2 x i64>* bitcast({i64, i64}* @g3 to <2 x i64>*)
+ %r = load <2 x i64>, <2 x i64>* bitcast({i64, i64}* @g3 to <2 x i64>*)
ret <2 x i64> %r
; LE-LABEL: @test10(
@@ -151,7 +151,7 @@ define <2 x i64> @test10() {
define i16 @test11() nounwind {
entry:
- %a = load i16* bitcast ({ i8, i8 }* @g4 to i16*)
+ %a = load i16, i16* bitcast ({ i8, i8 }* @g4 to i16*)
ret i16 %a
; 0x08A1
@@ -168,7 +168,7 @@ entry:
@test12g = private constant [6 x i8] c"a\00b\00\00\00"
define i16 @test12() {
- %a = load i16* getelementptr inbounds ([3 x i16]* bitcast ([6 x i8]* @test12g to [3 x i16]*), i32 0, i64 1)
+ %a = load i16, i16* getelementptr inbounds ([3 x i16]* bitcast ([6 x i8]* @test12g to [3 x i16]*), i32 0, i64 1)
ret i16 %a
; 0x0062
@@ -184,7 +184,7 @@ define i16 @test12() {
; PR5978
@g5 = constant i8 4
define i1 @test13() {
- %A = load i1* bitcast (i8* @g5 to i1*)
+ %A = load i1, i1* bitcast (i8* @g5 to i1*)
ret i1 %A
; LE-LABEL: @test13(
@@ -197,7 +197,7 @@ define i1 @test13() {
@g6 = constant [2 x i8*] [i8* inttoptr (i64 1 to i8*), i8* inttoptr (i64 2 to i8*)]
define i64 @test14() nounwind {
entry:
- %tmp = load i64* bitcast ([2 x i8*]* @g6 to i64*)
+ %tmp = load i64, i64* bitcast ([2 x i8*]* @g6 to i64*)
ret i64 %tmp
; LE-LABEL: @test14(
@@ -211,7 +211,7 @@ entry:
@g6_as1 = constant [2 x i8 addrspace(1)*] [i8 addrspace(1)* inttoptr (i16 1 to i8 addrspace(1)*), i8 addrspace(1)* inttoptr (i16 2 to i8 addrspace(1)*)]
define i16 @test14_as1() nounwind {
entry:
- %tmp = load i16* bitcast ([2 x i8 addrspace(1)*]* @g6_as1 to i16*)
+ %tmp = load i16, i16* bitcast ([2 x i8 addrspace(1)*]* @g6_as1 to i16*)
ret i16 %tmp
; LE: @test14_as1
@@ -223,7 +223,7 @@ entry:
define i64 @test15() nounwind {
entry:
- %tmp = load i64* bitcast (i8** getelementptr inbounds ([2 x i8*]* @g6, i32 0, i64 1) to i64*)
+ %tmp = load i64, i64* bitcast (i8** getelementptr inbounds ([2 x i8*]* @g6, i32 0, i64 1) to i64*)
ret i64 %tmp
; LE-LABEL: @test15(
@@ -235,7 +235,7 @@ entry:
@gv7 = constant [4 x i8*] [i8* null, i8* inttoptr (i64 -14 to i8*), i8* null, i8* null]
define i64 @test16.1() {
- %v = load i64* bitcast ([4 x i8*]* @gv7 to i64*), align 8
+ %v = load i64, i64* bitcast ([4 x i8*]* @gv7 to i64*), align 8
ret i64 %v
; LE-LABEL: @test16.1(
@@ -246,7 +246,7 @@ define i64 @test16.1() {
}
define i64 @test16.2() {
- %v = load i64* bitcast (i8** getelementptr inbounds ([4 x i8*]* @gv7, i64 0, i64 1) to i64*), align 8
+ %v = load i64, i64* bitcast (i8** getelementptr inbounds ([4 x i8*]* @gv7, i64 0, i64 1) to i64*), align 8
ret i64 %v
; LE-LABEL: @test16.2(
@@ -257,7 +257,7 @@ define i64 @test16.2() {
}
define i64 @test16.3() {
- %v = load i64* bitcast (i8** getelementptr inbounds ([4 x i8*]* @gv7, i64 0, i64 2) to i64*), align 8
+ %v = load i64, i64* bitcast (i8** getelementptr inbounds ([4 x i8*]* @gv7, i64 0, i64 2) to i64*), align 8
ret i64 %v
; LE-LABEL: @test16.3(
diff --git a/llvm/test/Transforms/ConstantHoisting/AArch64/const-addr.ll b/llvm/test/Transforms/ConstantHoisting/AArch64/const-addr.ll
index a317e5ccaf7..4c36d20dc6b 100644
--- a/llvm/test/Transforms/ConstantHoisting/AArch64/const-addr.ll
+++ b/llvm/test/Transforms/ConstantHoisting/AArch64/const-addr.ll
@@ -11,12 +11,12 @@ define i32 @test1() nounwind {
; CHECK: %o3 = getelementptr %T, %T* %1, i32 0, i32 3
%at = inttoptr i64 68141056 to %T*
%o1 = getelementptr %T, %T* %at, i32 0, i32 1
- %t1 = load i32* %o1
+ %t1 = load i32, i32* %o1
%o2 = getelementptr %T, %T* %at, i32 0, i32 2
- %t2 = load i32* %o2
+ %t2 = load i32, i32* %o2
%a1 = add i32 %t1, %t2
%o3 = getelementptr %T, %T* %at, i32 0, i32 3
- %t3 = load i32* %o3
+ %t3 = load i32, i32* %o3
%a2 = add i32 %a1, %t3
ret i32 %a2
}
diff --git a/llvm/test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll b/llvm/test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll
index 82ee04bf466..69b13cf9c9a 100644
--- a/llvm/test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll
+++ b/llvm/test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll
@@ -11,11 +11,11 @@ define i32 @test1() nounwind {
; CHECK: %1 = inttoptr i32 %const to %T*
; CHECK: %addr1 = getelementptr %T, %T* %1, i32 0, i32 1
%addr1 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 1
- %tmp1 = load i32* %addr1
+ %tmp1 = load i32, i32* %addr1
%addr2 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 2
- %tmp2 = load i32* %addr2
+ %tmp2 = load i32, i32* %addr2
%addr3 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 3
- %tmp3 = load i32* %addr3
+ %tmp3 = load i32, i32* %addr3
%tmp4 = add i32 %tmp1, %tmp2
%tmp5 = add i32 %tmp3, %tmp4
ret i32 %tmp5
diff --git a/llvm/test/Transforms/ConstantHoisting/PowerPC/masks.ll b/llvm/test/Transforms/ConstantHoisting/PowerPC/masks.ll
index d5531820113..4cc504f11bd 100644
--- a/llvm/test/Transforms/ConstantHoisting/PowerPC/masks.ll
+++ b/llvm/test/Transforms/ConstantHoisting/PowerPC/masks.ll
@@ -19,7 +19,7 @@ if.then152:
if.end167:
; CHECK: and i32 {{.*}}, 32768
%shl161 = shl nuw nsw i32 %conv121, 15
- %0 = load i8* undef, align 1
+ %0 = load i8, i8* undef, align 1
%conv169 = zext i8 %0 to i32
%shl170 = shl nuw nsw i32 %conv169, 7
%shl161.masked = and i32 %shl161, 32768
@@ -49,7 +49,7 @@ if.then152:
if.end167:
; CHECK: add i32 {{.*}}, -32758
%shl161 = shl nuw nsw i32 %conv121, 15
- %0 = load i8* undef, align 1
+ %0 = load i8, i8* undef, align 1
%conv169 = zext i8 %0 to i32
%shl170 = shl nuw nsw i32 %conv169, 7
%shl161.masked = and i32 %shl161, 32773
diff --git a/llvm/test/Transforms/ConstantHoisting/X86/cast-inst.ll b/llvm/test/Transforms/ConstantHoisting/X86/cast-inst.ll
index f490f4a3723..bd1e196b644 100644
--- a/llvm/test/Transforms/ConstantHoisting/X86/cast-inst.ll
+++ b/llvm/test/Transforms/ConstantHoisting/X86/cast-inst.ll
@@ -9,19 +9,19 @@ define i32 @cast_inst_test() {
; CHECK-LABEL: @cast_inst_test
; CHECK: %const = bitcast i64 4646526064 to i64
; CHECK: %1 = inttoptr i64 %const to i32*
-; CHECK: %v0 = load i32* %1, align 16
+; CHECK: %v0 = load i32, i32* %1, align 16
; CHECK: %const_mat = add i64 %const, 16
; CHECK-NEXT: %2 = inttoptr i64 %const_mat to i32*
-; CHECK-NEXT: %v1 = load i32* %2, align 16
+; CHECK-NEXT: %v1 = load i32, i32* %2, align 16
; CHECK: %const_mat1 = add i64 %const, 32
; CHECK-NEXT: %3 = inttoptr i64 %const_mat1 to i32*
-; CHECK-NEXT: %v2 = load i32* %3, align 16
+; CHECK-NEXT: %v2 = load i32, i32* %3, align 16
%a0 = inttoptr i64 4646526064 to i32*
- %v0 = load i32* %a0, align 16
+ %v0 = load i32, i32* %a0, align 16
%a1 = inttoptr i64 4646526080 to i32*
- %v1 = load i32* %a1, align 16
+ %v1 = load i32, i32* %a1, align 16
%a2 = inttoptr i64 4646526096 to i32*
- %v2 = load i32* %a2, align 16
+ %v2 = load i32, i32* %a2, align 16
%r0 = add i32 %v0, %v1
%r1 = add i32 %r0, %v2
ret i32 %r1
diff --git a/llvm/test/Transforms/ConstantHoisting/X86/const-base-addr.ll b/llvm/test/Transforms/ConstantHoisting/X86/const-base-addr.ll
index eabf80d182f..db5dfdd1538 100644
--- a/llvm/test/Transforms/ConstantHoisting/X86/const-base-addr.ll
+++ b/llvm/test/Transforms/ConstantHoisting/X86/const-base-addr.ll
@@ -12,11 +12,11 @@ define i32 @test1() nounwind {
; CHECK: %1 = inttoptr i32 %const to %T*
; CHECK: %addr1 = getelementptr %T, %T* %1, i32 0, i32 1
%addr1 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 1
- %tmp1 = load i32* %addr1
+ %tmp1 = load i32, i32* %addr1
%addr2 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 2
- %tmp2 = load i32* %addr2
+ %tmp2 = load i32, i32* %addr2
%addr3 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 3
- %tmp3 = load i32* %addr3
+ %tmp3 = load i32, i32* %addr3
%tmp4 = add i32 %tmp1, %tmp2
%tmp5 = add i32 %tmp3, %tmp4
ret i32 %tmp5
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll b/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
index 9a2264793c5..9d1253a1888 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll
@@ -52,8 +52,8 @@ bb: ; preds = %entry
ret i8 0
bb2: ; preds = %entry
-; CHECK: %should_be_const = load i8* @gv
- %should_be_const = load i8* %a
+; CHECK: %should_be_const = load i8, i8* @gv
+ %should_be_const = load i8, i8* %a
ret i8 %should_be_const
}
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/non-null.ll b/llvm/test/Transforms/CorrelatedValuePropagation/non-null.ll
index b14abd83694..6bb8bb07c45 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/non-null.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/non-null.ll
@@ -2,7 +2,7 @@
define void @test1(i8* %ptr) {
; CHECK: test1
- %A = load i8* %ptr
+ %A = load i8, i8* %ptr
br label %bb
bb:
icmp ne i8* %ptr, null
diff --git a/llvm/test/Transforms/DeadArgElim/aggregates.ll b/llvm/test/Transforms/DeadArgElim/aggregates.ll
index f54c6c9ea44..68d25342558 100644
--- a/llvm/test/Transforms/DeadArgElim/aggregates.ll
+++ b/llvm/test/Transforms/DeadArgElim/aggregates.ll
@@ -157,6 +157,6 @@ define internal i8 @outer() {
%res = call {i8*, i32} @mid()
%resptr = extractvalue {i8*, i32} %res, 0
- %val = load i8* %resptr
+ %val = load i8, i8* %resptr
ret i8 %val
} \ No newline at end of file
diff --git a/llvm/test/Transforms/DeadArgElim/deadexternal.ll b/llvm/test/Transforms/DeadArgElim/deadexternal.ll
index 665d7dbf493..21cbc37a3f0 100644
--- a/llvm/test/Transforms/DeadArgElim/deadexternal.ll
+++ b/llvm/test/Transforms/DeadArgElim/deadexternal.ll
@@ -31,9 +31,9 @@ define void @h() {
entry:
%i = alloca i32, align 4
store volatile i32 10, i32* %i, align 4
-; CHECK: %tmp = load volatile i32* %i, align 4
+; CHECK: %tmp = load volatile i32, i32* %i, align 4
; CHECK-NEXT: call void @f(i32 undef)
- %tmp = load volatile i32* %i, align 4
+ %tmp = load volatile i32, i32* %i, align 4
call void @f(i32 %tmp)
ret void
}
diff --git a/llvm/test/Transforms/DeadArgElim/deadretval2.ll b/llvm/test/Transforms/DeadArgElim/deadretval2.ll
index dcdc36e319a..b0d2428fbdc 100644
--- a/llvm/test/Transforms/DeadArgElim/deadretval2.ll
+++ b/llvm/test/Transforms/DeadArgElim/deadretval2.ll
@@ -20,7 +20,7 @@ define void @test3(i32 %X) {
}
define internal i32 @foo() {
- %DEAD = load i32* @P ; <i32> [#uses=1]
+ %DEAD = load i32, i32* @P ; <i32> [#uses=1]
ret i32 %DEAD
}
diff --git a/llvm/test/Transforms/DeadArgElim/keepalive.ll b/llvm/test/Transforms/DeadArgElim/keepalive.ll
index 16569db4d38..d8a09933dcf 100644
--- a/llvm/test/Transforms/DeadArgElim/keepalive.ll
+++ b/llvm/test/Transforms/DeadArgElim/keepalive.ll
@@ -31,7 +31,7 @@ define void @caller() {
; We can't remove 'this' here, as that would put argmem in ecx instead of
; memory.
define internal x86_thiscallcc i32 @unused_this(i32* %this, i32* inalloca %argmem) {
- %v = load i32* %argmem
+ %v = load i32, i32* %argmem
ret i32 %v
}
; CHECK-LABEL: define internal x86_thiscallcc i32 @unused_this(i32* %this, i32* inalloca %argmem)
diff --git a/llvm/test/Transforms/DeadStoreElimination/2011-03-25-DSEMiscompile.ll b/llvm/test/Transforms/DeadStoreElimination/2011-03-25-DSEMiscompile.ll
index d5706c90f6a..7746ccee521 100644
--- a/llvm/test/Transforms/DeadStoreElimination/2011-03-25-DSEMiscompile.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/2011-03-25-DSEMiscompile.ll
@@ -13,7 +13,7 @@ entry:
; CHECK: store i32 add (i32 ptrtoint ([0 x i32]* @A to i32), i32 1), i32* %Arg2
%ln2gz = getelementptr i32, i32* %Arg1, i32 14
%ln2gA = bitcast i32* %ln2gz to double*
- %ln2gB = load double* %ln2gA
+ %ln2gB = load double, double* %ln2gA
%ln2gD = getelementptr i32, i32* %Arg2, i32 -3
%ln2gE = bitcast i32* %ln2gD to double*
store double %ln2gB, double* %ln2gE
diff --git a/llvm/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll b/llvm/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll
index cb8da4668de..7e46d28a9c4 100644
--- a/llvm/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll
@@ -11,7 +11,7 @@ _ZNSt8auto_ptrIiED1Ev.exit:
%temp.lvalue = alloca %"class.std::auto_ptr", align 8
call void @_Z3barv(%"class.std::auto_ptr"* sret %temp.lvalue)
%_M_ptr.i.i = getelementptr inbounds %"class.std::auto_ptr", %"class.std::auto_ptr"* %temp.lvalue, i64 0, i32 0
- %tmp.i.i = load i32** %_M_ptr.i.i, align 8
+ %tmp.i.i = load i32*, i32** %_M_ptr.i.i, align 8
; CHECK-NOT: store i32* null
store i32* null, i32** %_M_ptr.i.i, align 8
%_M_ptr.i.i4 = getelementptr inbounds %"class.std::auto_ptr", %"class.std::auto_ptr"* %agg.result, i64 0, i32 0
diff --git a/llvm/test/Transforms/DeadStoreElimination/2011-09-06-MemCpy.ll b/llvm/test/Transforms/DeadStoreElimination/2011-09-06-MemCpy.ll
index 0baaea55fa5..d30e9a2e6c1 100644
--- a/llvm/test/Transforms/DeadStoreElimination/2011-09-06-MemCpy.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/2011-09-06-MemCpy.ll
@@ -70,11 +70,11 @@ entry:
%memtmp = alloca %struct.pair.162, align 8
%0 = getelementptr inbounds %struct.pair.162, %struct.pair.162* %memtmp, i64 0, i32 0
%1 = getelementptr inbounds %struct.pair.162, %struct.pair.162* %__a, i64 0, i32 0
- %2 = load %struct.BasicBlock** %1, align 8
+ %2 = load %struct.BasicBlock*, %struct.BasicBlock** %1, align 8
store %struct.BasicBlock* %2, %struct.BasicBlock** %0, align 8
%3 = getelementptr inbounds %struct.pair.162, %struct.pair.162* %memtmp, i64 0, i32 1
%4 = getelementptr inbounds %struct.pair.162, %struct.pair.162* %__a, i64 0, i32 1
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
store i32 %5, i32* %3, align 8
%6 = bitcast %struct.pair.162* %__a to i8*
%7 = bitcast %struct.pair.162* %__b to i8*
diff --git a/llvm/test/Transforms/DeadStoreElimination/PartialStore.ll b/llvm/test/Transforms/DeadStoreElimination/PartialStore.ll
index 4582c840971..d85b4de0ab9 100644
--- a/llvm/test/Transforms/DeadStoreElimination/PartialStore.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/PartialStore.ll
@@ -31,7 +31,7 @@ define i32 @test3(double %__x) {
%tmp.1 = bitcast { [3 x i32] }* %__u to double*
store double %__x, double* %tmp.1
%tmp.4 = getelementptr { [3 x i32] }, { [3 x i32] }* %__u, i32 0, i32 0, i32 1
- %tmp.5 = load i32* %tmp.4
+ %tmp.5 = load i32, i32* %tmp.4
%tmp.6 = icmp slt i32 %tmp.5, 0
%tmp.7 = zext i1 %tmp.6 to i32
ret i32 %tmp.7
diff --git a/llvm/test/Transforms/DeadStoreElimination/atomic.ll b/llvm/test/Transforms/DeadStoreElimination/atomic.ll
index af303fa983e..4d2cb37f258 100644
--- a/llvm/test/Transforms/DeadStoreElimination/atomic.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/atomic.ll
@@ -29,7 +29,7 @@ define i32 @test2() {
; CHECK-NOT: store i32 0
; CHECK: store i32 1
store i32 0, i32* @x
- %x = load atomic i32* @y seq_cst, align 4
+ %x = load atomic i32, i32* @y seq_cst, align 4
store i32 1, i32* @x
ret i32 %x
}
@@ -69,7 +69,7 @@ define void @test6() {
; CHECK-LABEL: test6
; CHECK-NOT: store
; CHECK: ret void
- %x = load atomic i32* @x unordered, align 4
+ %x = load atomic i32, i32* @x unordered, align 4
store atomic i32 %x, i32* @x unordered, align 4
ret void
}
@@ -93,7 +93,7 @@ define i32 @test8() {
%a = alloca i32
call void @randomop(i32* %a)
store i32 0, i32* %a, align 4
- %x = load atomic i32* @x seq_cst, align 4
+ %x = load atomic i32, i32* @x seq_cst, align 4
ret i32 %x
}
@@ -103,7 +103,7 @@ define i32 @test9() {
; CHECK-NOT: store i32 0
; CHECK: store i32 1
store i32 0, i32* @x
- %x = load atomic i32* @y monotonic, align 4
+ %x = load atomic i32, i32* @y monotonic, align 4
store i32 1, i32* @x
ret i32 %x
}
@@ -125,7 +125,7 @@ define i32 @test11() {
; CHECK: store atomic i32 0
; CHECK: store atomic i32 1
store atomic i32 0, i32* @x monotonic, align 4
- %x = load atomic i32* @y monotonic, align 4
+ %x = load atomic i32, i32* @y monotonic, align 4
store atomic i32 1, i32* @x monotonic, align 4
ret i32 %x
}
@@ -147,7 +147,7 @@ define i32 @test13() {
; CHECK-NOT: store i32 0
; CHECK: store i32 1
store i32 0, i32* @x
- %x = load atomic i32* @y seq_cst, align 4
+ %x = load atomic i32, i32* @y seq_cst, align 4
store atomic i32 %x, i32* @y seq_cst, align 4
store i32 1, i32* @x
ret i32 %x
@@ -159,7 +159,7 @@ define i32 @test14() {
; CHECK-NOT: store i32 0
; CHECK: store i32 1
store i32 0, i32* @x
- %x = load atomic i32* @y acquire, align 4
+ %x = load atomic i32, i32* @y acquire, align 4
store atomic i32 %x, i32* @y release, align 4
store i32 1, i32* @x
ret i32 %x
@@ -172,7 +172,7 @@ define i32 @test15() {
; CHECK: store i32 1
store i32 0, i32* @x
store atomic i32 0, i32* @y release, align 4
- %x = load atomic i32* @y acquire, align 4
+ %x = load atomic i32, i32* @y acquire, align 4
store i32 1, i32* @x
ret i32 %x
}
diff --git a/llvm/test/Transforms/DeadStoreElimination/const-pointers.ll b/llvm/test/Transforms/DeadStoreElimination/const-pointers.ll
index f2c5c6acf2f..e4403edbed2 100644
--- a/llvm/test/Transforms/DeadStoreElimination/const-pointers.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/const-pointers.ll
@@ -9,7 +9,7 @@ define void @test1(%t* noalias %pp) {
%p = getelementptr inbounds %t, %t* %pp, i32 0, i32 0
store i32 1, i32* %p; <-- This is dead
- %x = load i32* inttoptr (i32 12345 to i32*)
+ %x = load i32, i32* inttoptr (i32 12345 to i32*)
store i32 %x, i32* %p
ret void
; CHECK-LABEL: define void @test1(
@@ -30,7 +30,7 @@ define void @test3() {
define void @test4(i32* %p) {
store i32 1, i32* %p
- %x = load i32* @g; <-- %p and @g could alias
+ %x = load i32, i32* @g; <-- %p and @g could alias
store i32 %x, i32* %p
ret void
; CHECK-LABEL: define void @test4(
diff --git a/llvm/test/Transforms/DeadStoreElimination/crash.ll b/llvm/test/Transforms/DeadStoreElimination/crash.ll
index ab04482f337..78cb842e60e 100644
--- a/llvm/test/Transforms/DeadStoreElimination/crash.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/crash.ll
@@ -66,8 +66,8 @@ define void @test4(%struct.f393a00_2__windmill* %a, %struct.f393a00_2__windmill*
entry:
%t = alloca %struct.f393a00_2__windmill ; <%struct.f393a00_2__windmill*> [#uses=1]
%0 = getelementptr %struct.f393a00_2__windmill, %struct.f393a00_2__windmill* %t, i32 0, i32 0, i32 0 ; <%struct.ada__tags__dispatch_table**> [#uses=1]
- %1 = load %struct.ada__tags__dispatch_table** null, align 4 ; <%struct.ada__tags__dispatch_table*> [#uses=1]
- %2 = load %struct.ada__tags__dispatch_table** %0, align 8 ; <%struct.ada__tags__dispatch_table*> [#uses=1]
+ %1 = load %struct.ada__tags__dispatch_table*, %struct.ada__tags__dispatch_table** null, align 4 ; <%struct.ada__tags__dispatch_table*> [#uses=1]
+ %2 = load %struct.ada__tags__dispatch_table*, %struct.ada__tags__dispatch_table** %0, align 8 ; <%struct.ada__tags__dispatch_table*> [#uses=1]
store %struct.ada__tags__dispatch_table* %2, %struct.ada__tags__dispatch_table** null, align 4
store %struct.ada__tags__dispatch_table* %1, %struct.ada__tags__dispatch_table** null, align 4
ret void
diff --git a/llvm/test/Transforms/DeadStoreElimination/free.ll b/llvm/test/Transforms/DeadStoreElimination/free.ll
index bac325e2277..6b69ec86020 100644
--- a/llvm/test/Transforms/DeadStoreElimination/free.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/free.ll
@@ -10,7 +10,7 @@ declare noalias i8* @malloc(i64)
; CHECK-NEXT: @free
; CHECK-NEXT: ret void
define void @test(i32* %Q, i32* %P) {
- %DEAD = load i32* %Q ; <i32> [#uses=1]
+ %DEAD = load i32, i32* %Q ; <i32> [#uses=1]
store i32 %DEAD, i32* %P
%1 = bitcast i32* %P to i8*
tail call void @free(i8* %1)
diff --git a/llvm/test/Transforms/DeadStoreElimination/simple.ll b/llvm/test/Transforms/DeadStoreElimination/simple.ll
index 3e5f7b8c30c..dd1443e77e4 100644
--- a/llvm/test/Transforms/DeadStoreElimination/simple.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/simple.ll
@@ -6,7 +6,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32,
declare i8* @llvm.init.trampoline(i8*, i8*, i8*)
define void @test1(i32* %Q, i32* %P) {
- %DEAD = load i32* %Q
+ %DEAD = load i32, i32* %Q
store i32 %DEAD, i32* %P
store i32 0, i32* %P
ret void
@@ -31,17 +31,17 @@ define void @test2(i32 *%p, i32 *%q) {
define i32 @test3(i32* %g_addr) nounwind {
; CHECK-LABEL: @test3(
-; CHECK: load i32* %g_addr
- %g_value = load i32* %g_addr, align 4
+; CHECK: load i32, i32* %g_addr
+ %g_value = load i32, i32* %g_addr, align 4
store i32 -1, i32* @g, align 4
store i32 %g_value, i32* %g_addr, align 4
- %tmp3 = load i32* @g, align 4
+ %tmp3 = load i32, i32* @g, align 4
ret i32 %tmp3
}
define void @test4(i32* %Q) {
- %a = load i32* %Q
+ %a = load i32, i32* %Q
store volatile i32 %a, i32* %Q
ret void
; CHECK-LABEL: @test4(
@@ -51,7 +51,7 @@ define void @test4(i32* %Q) {
}
define void @test5(i32* %Q) {
- %a = load volatile i32* %Q
+ %a = load volatile i32, i32* %Q
store i32 %a, i32* %Q
ret void
; CHECK-LABEL: @test5(
@@ -87,7 +87,7 @@ define i32 @test8() {
store i32 1234567, i32* %V
%V2 = bitcast i32* %V to i8*
store i8 0, i8* %V2
- %X = load i32* %V
+ %X = load i32, i32* %V
ret i32 %X
; CHECK-LABEL: @test8(
@@ -142,9 +142,9 @@ define void @test11() {
; PR2599 - load -> store to same address.
define void @test12({ i32, i32 }* %x) nounwind {
%tmp4 = getelementptr { i32, i32 }, { i32, i32 }* %x, i32 0, i32 0
- %tmp5 = load i32* %tmp4, align 4
+ %tmp5 = load i32, i32* %tmp4, align 4
%tmp7 = getelementptr { i32, i32 }, { i32, i32 }* %x, i32 0, i32 1
- %tmp8 = load i32* %tmp7, align 4
+ %tmp8 = load i32, i32* %tmp7, align 4
%tmp17 = sub i32 0, %tmp8
store i32 %tmp5, i32* %tmp4, align 4
store i32 %tmp17, i32* %tmp7, align 4
@@ -160,7 +160,7 @@ declare void @test13f()
define i32* @test13() {
%p = tail call i8* @malloc(i32 4)
%P = bitcast i8* %p to i32*
- %DEAD = load i32* %P
+ %DEAD = load i32, i32* %P
%DEAD2 = add i32 %DEAD, 1
store i32 %DEAD2, i32* %P
call void @test13f( )
@@ -176,7 +176,7 @@ define i32 addrspace(1)* @test13_addrspacecast() {
%p = tail call i8* @malloc(i32 4)
%p.bc = bitcast i8* %p to i32*
%P = addrspacecast i32* %p.bc to i32 addrspace(1)*
- %DEAD = load i32 addrspace(1)* %P
+ %DEAD = load i32, i32 addrspace(1)* %P
%DEAD2 = add i32 %DEAD, 1
store i32 %DEAD2, i32 addrspace(1)* %P
call void @test13f( )
@@ -195,7 +195,7 @@ declare noalias i8* @calloc(i32, i32)
define void @test14(i32* %Q) {
%P = alloca i32
- %DEAD = load i32* %Q
+ %DEAD = load i32, i32* %Q
store i32 %DEAD, i32* %P
ret void
@@ -344,7 +344,7 @@ define void @test24([2 x i32]* %a, i32 %b, i32 %c) nounwind {
; CHECK: store i8 %tmp
define i8* @test25(i8* %p) nounwind {
%p.4 = getelementptr i8, i8* %p, i64 4
- %tmp = load i8* %p.4, align 1
+ %tmp = load i8, i8* %p.4, align 1
store i8 0, i8* %p.4, align 1
%q = call i8* @strdup(i8* %p) nounwind optsize
store i8 %tmp, i8* %p.4, align 1
diff --git a/llvm/test/Transforms/EarlyCSE/basic.ll b/llvm/test/Transforms/EarlyCSE/basic.ll
index a36a10357f6..43b5e6098f6 100644
--- a/llvm/test/Transforms/EarlyCSE/basic.ll
+++ b/llvm/test/Transforms/EarlyCSE/basic.ll
@@ -37,8 +37,8 @@ define void @test1(i8 %V, i32 *%P) {
;; Simple load value numbering.
; CHECK-LABEL: @test2(
define i32 @test2(i32 *%P) {
- %V1 = load i32* %P
- %V2 = load i32* %P
+ %V1 = load i32, i32* %P
+ %V2 = load i32, i32* %P
%Diff = sub i32 %V1, %V2
ret i32 %Diff
; CHECK: ret i32 0
@@ -46,9 +46,9 @@ define i32 @test2(i32 *%P) {
; CHECK-LABEL: @test2a(
define i32 @test2a(i32 *%P, i1 %b) {
- %V1 = load i32* %P
+ %V1 = load i32, i32* %P
tail call void @llvm.assume(i1 %b)
- %V2 = load i32* %P
+ %V2 = load i32, i32* %P
%Diff = sub i32 %V1, %V2
ret i32 %Diff
; CHECK: ret i32 0
@@ -57,13 +57,13 @@ define i32 @test2a(i32 *%P, i1 %b) {
;; Cross block load value numbering.
; CHECK-LABEL: @test3(
define i32 @test3(i32 *%P, i1 %Cond) {
- %V1 = load i32* %P
+ %V1 = load i32, i32* %P
br i1 %Cond, label %T, label %F
T:
store i32 4, i32* %P
ret i32 42
F:
- %V2 = load i32* %P
+ %V2 = load i32, i32* %P
%Diff = sub i32 %V1, %V2
ret i32 %Diff
; CHECK: F:
@@ -72,14 +72,14 @@ F:
; CHECK-LABEL: @test3a(
define i32 @test3a(i32 *%P, i1 %Cond, i1 %b) {
- %V1 = load i32* %P
+ %V1 = load i32, i32* %P
br i1 %Cond, label %T, label %F
T:
store i32 4, i32* %P
ret i32 42
F:
tail call void @llvm.assume(i1 %b)
- %V2 = load i32* %P
+ %V2 = load i32, i32* %P
%Diff = sub i32 %V1, %V2
ret i32 %Diff
; CHECK: F:
@@ -89,7 +89,7 @@ F:
;; Cross block load value numbering stops when stores happen.
; CHECK-LABEL: @test4(
define i32 @test4(i32 *%P, i1 %Cond) {
- %V1 = load i32* %P
+ %V1 = load i32, i32* %P
br i1 %Cond, label %T, label %F
T:
ret i32 42
@@ -97,7 +97,7 @@ F:
; Clobbers V1
store i32 42, i32* %P
- %V2 = load i32* %P
+ %V2 = load i32, i32* %P
%Diff = sub i32 %V1, %V2
ret i32 %Diff
; CHECK: F:
@@ -120,7 +120,7 @@ define i32 @test5(i32 *%P) {
; CHECK-LABEL: @test6(
define i32 @test6(i32 *%P) {
store i32 42, i32* %P
- %V1 = load i32* %P
+ %V1 = load i32, i32* %P
ret i32 %V1
; CHECK: ret i32 42
}
@@ -129,7 +129,7 @@ define i32 @test6(i32 *%P) {
define i32 @test6a(i32 *%P, i1 %b) {
store i32 42, i32* %P
tail call void @llvm.assume(i1 %b)
- %V1 = load i32* %P
+ %V1 = load i32, i32* %P
ret i32 %V1
; CHECK: ret i32 42
}
@@ -195,11 +195,11 @@ define void @test11(i32 *%P) {
; CHECK-LABEL: @test12(
define i32 @test12(i1 %B, i32* %P1, i32* %P2) {
- %load0 = load i32* %P1
- %1 = load atomic i32* %P2 seq_cst, align 4
- %load1 = load i32* %P1
+ %load0 = load i32, i32* %P1
+ %1 = load atomic i32, i32* %P2 seq_cst, align 4
+ %load1 = load i32, i32* %P1
%sel = select i1 %B, i32 %load0, i32 %load1
ret i32 %sel
- ; CHECK: load i32* %P1
- ; CHECK: load i32* %P1
+ ; CHECK: load i32, i32* %P1
+ ; CHECK: load i32, i32* %P1
}
diff --git a/llvm/test/Transforms/FunctionAttrs/2008-09-03-ReadNone.ll b/llvm/test/Transforms/FunctionAttrs/2008-09-03-ReadNone.ll
index 36a76587348..ca05d63743b 100644
--- a/llvm/test/Transforms/FunctionAttrs/2008-09-03-ReadNone.ll
+++ b/llvm/test/Transforms/FunctionAttrs/2008-09-03-ReadNone.ll
@@ -17,7 +17,7 @@ define i32 @g() readonly {
; CHECK: define i32 @h() #0
define i32 @h() readnone {
- %tmp = load i32* @x ; <i32> [#uses=1]
+ %tmp = load i32, i32* @x ; <i32> [#uses=1]
ret i32 %tmp
}
diff --git a/llvm/test/Transforms/FunctionAttrs/2008-09-13-VolatileRead.ll b/llvm/test/Transforms/FunctionAttrs/2008-09-13-VolatileRead.ll
index b7e4d1f8726..fef872c794a 100644
--- a/llvm/test/Transforms/FunctionAttrs/2008-09-13-VolatileRead.ll
+++ b/llvm/test/Transforms/FunctionAttrs/2008-09-13-VolatileRead.ll
@@ -4,6 +4,6 @@
@g = global i32 0 ; <i32*> [#uses=1]
define i32 @f() {
- %t = load volatile i32* @g ; <i32> [#uses=1]
+ %t = load volatile i32, i32* @g ; <i32> [#uses=1]
ret i32 %t
}
diff --git a/llvm/test/Transforms/FunctionAttrs/2008-12-29-Constant.ll b/llvm/test/Transforms/FunctionAttrs/2008-12-29-Constant.ll
index 9655da45c64..e3a8f0161b8 100644
--- a/llvm/test/Transforms/FunctionAttrs/2008-12-29-Constant.ll
+++ b/llvm/test/Transforms/FunctionAttrs/2008-12-29-Constant.ll
@@ -3,6 +3,6 @@
@s = external constant i8 ; <i8*> [#uses=1]
define i8 @f() {
- %tmp = load i8* @s ; <i8> [#uses=1]
+ %tmp = load i8, i8* @s ; <i8> [#uses=1]
ret i8 %tmp
}
diff --git a/llvm/test/Transforms/FunctionAttrs/2009-01-02-LocalStores.ll b/llvm/test/Transforms/FunctionAttrs/2009-01-02-LocalStores.ll
index 0cf1cb7c638..ec1db095728 100644
--- a/llvm/test/Transforms/FunctionAttrs/2009-01-02-LocalStores.ll
+++ b/llvm/test/Transforms/FunctionAttrs/2009-01-02-LocalStores.ll
@@ -2,7 +2,7 @@
; CHECK: define i32* @a(i32** nocapture readonly %p)
define i32* @a(i32** %p) {
- %tmp = load i32** %p
+ %tmp = load i32*, i32** %p
ret i32* %tmp
}
diff --git a/llvm/test/Transforms/FunctionAttrs/2010-10-30-volatile.ll b/llvm/test/Transforms/FunctionAttrs/2010-10-30-volatile.ll
index 93991d21a1f..1a64a839380 100644
--- a/llvm/test/Transforms/FunctionAttrs/2010-10-30-volatile.ll
+++ b/llvm/test/Transforms/FunctionAttrs/2010-10-30-volatile.ll
@@ -5,6 +5,6 @@
define void @foo() {
; CHECK: void @foo() {
- %tmp = load volatile i32* @g
+ %tmp = load volatile i32, i32* @g
ret void
}
diff --git a/llvm/test/Transforms/FunctionAttrs/atomic.ll b/llvm/test/Transforms/FunctionAttrs/atomic.ll
index d5a8db7d53b..bb867011cc2 100644
--- a/llvm/test/Transforms/FunctionAttrs/atomic.ll
+++ b/llvm/test/Transforms/FunctionAttrs/atomic.ll
@@ -7,7 +7,7 @@ define i32 @test1(i32 %x) uwtable ssp {
entry:
%x.addr = alloca i32, align 4
store atomic i32 %x, i32* %x.addr seq_cst, align 4
- %r = load atomic i32* %x.addr seq_cst, align 4
+ %r = load atomic i32, i32* %x.addr seq_cst, align 4
ret i32 %r
}
@@ -15,7 +15,7 @@ entry:
define i32 @test2(i32* %x) uwtable ssp {
; CHECK: define i32 @test2(i32* nocapture readonly %x) #1 {
entry:
- %r = load atomic i32* %x seq_cst, align 4
+ %r = load atomic i32, i32* %x seq_cst, align 4
ret i32 %r
}
diff --git a/llvm/test/Transforms/FunctionAttrs/nocapture.ll b/llvm/test/Transforms/FunctionAttrs/nocapture.ll
index f09c98bab73..23cbc854f37 100644
--- a/llvm/test/Transforms/FunctionAttrs/nocapture.ll
+++ b/llvm/test/Transforms/FunctionAttrs/nocapture.ll
@@ -40,7 +40,7 @@ define i1 @c5(i32* %q, i32 %bitno) {
%bit = and i32 %tmp2, 1
; subtle escape mechanism follows
%lookup = getelementptr [2 x i1], [2 x i1]* @lookup_table, i32 0, i32 %bit
- %val = load i1* %lookup
+ %val = load i1, i1* %lookup
ret i1 %val
}
@@ -71,7 +71,7 @@ define i1* @lookup_bit(i32* %q, i32 %bitno) readnone nounwind {
; CHECK: define i1 @c7(i32* readonly %q, i32 %bitno)
define i1 @c7(i32* %q, i32 %bitno) {
%ptr = call i1* @lookup_bit(i32* %q, i32 %bitno)
- %val = load i1* %ptr
+ %val = load i1, i1* %ptr
ret i1 %val
}
@@ -85,7 +85,7 @@ l:
%y = phi i32* [ %q, %e ]
%tmp = bitcast i32* %x to i32* ; <i32*> [#uses=2]
%tmp2 = select i1 %b, i32* %tmp, i32* %y
- %val = load i32* %tmp2 ; <i32> [#uses=1]
+ %val = load i32, i32* %tmp2 ; <i32> [#uses=1]
store i32 0, i32* %tmp
store i32* %y, i32** @g
ret i32 %val
@@ -100,7 +100,7 @@ l:
%y = phi i32* [ %q, %e ]
%tmp = addrspacecast i32 addrspace(1)* %x to i32* ; <i32*> [#uses=2]
%tmp2 = select i1 %b, i32* %tmp, i32* %y
- %val = load i32* %tmp2 ; <i32> [#uses=1]
+ %val = load i32, i32* %tmp2 ; <i32> [#uses=1]
store i32 0, i32* %tmp
store i32* %y, i32** @g
ret i32 %val
diff --git a/llvm/test/Transforms/FunctionAttrs/optnone-simple.ll b/llvm/test/Transforms/FunctionAttrs/optnone-simple.ll
index 9d0f8e3710a..beaa588da50 100644
--- a/llvm/test/Transforms/FunctionAttrs/optnone-simple.ll
+++ b/llvm/test/Transforms/FunctionAttrs/optnone-simple.ll
@@ -15,8 +15,8 @@ entry:
%b.addr = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 %b, i32* %b.addr, align 4
- %0 = load i32* %a.addr, align 4
- %1 = load i32* %b.addr, align 4
+ %0 = load i32, i32* %a.addr, align 4
+ %1 = load i32, i32* %b.addr, align 4
%add = add nsw i32 %0, %1
ret i32 %add
}
@@ -33,8 +33,8 @@ entry:
%b.addr = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 %b, i32* %b.addr, align 4
- %0 = load i32* %a.addr, align 4
- %1 = load i32* %b.addr, align 4
+ %0 = load i32, i32* %a.addr, align 4
+ %1 = load i32, i32* %b.addr, align 4
%add = add nsw i32 %0, %1
ret i32 %add
}
@@ -57,8 +57,8 @@ entry:
%b.addr = alloca float, align 4
store float %a, float* %a.addr, align 4
store float %b, float* %b.addr, align 4
- %0 = load float* %a.addr, align 4
- %1 = load float* %b.addr, align 4
+ %0 = load float, float* %a.addr, align 4
+ %1 = load float, float* %b.addr, align 4
%sub = fsub float %0, %1
ret float %sub
}
@@ -75,8 +75,8 @@ entry:
%b.addr = alloca float, align 4
store float %a, float* %a.addr, align 4
store float %b, float* %b.addr, align 4
- %0 = load float* %a.addr, align 4
- %1 = load float* %b.addr, align 4
+ %0 = load float, float* %a.addr, align 4
+ %1 = load float, float* %b.addr, align 4
%sub = fsub float %0, %1
ret float %sub
}
@@ -100,8 +100,8 @@ entry:
%b.addr = alloca <4 x float>, align 16
store <4 x float> %a, <4 x float>* %a.addr, align 16
store <4 x float> %b, <4 x float>* %b.addr, align 16
- %0 = load <4 x float>* %a.addr, align 16
- %1 = load <4 x float>* %b.addr, align 16
+ %0 = load <4 x float>, <4 x float>* %a.addr, align 16
+ %1 = load <4 x float>, <4 x float>* %b.addr, align 16
%mul = fmul <4 x float> %0, %1
ret <4 x float> %mul
}
@@ -118,8 +118,8 @@ entry:
%b.addr = alloca <4 x float>, align 16
store <4 x float> %a, <4 x float>* %a.addr, align 16
store <4 x float> %b, <4 x float>* %b.addr, align 16
- %0 = load <4 x float>* %a.addr, align 16
- %1 = load <4 x float>* %b.addr, align 16
+ %0 = load <4 x float>, <4 x float>* %a.addr, align 16
+ %1 = load <4 x float>, <4 x float>* %b.addr, align 16
%mul = fmul <4 x float> %0, %1
ret <4 x float> %mul
}
diff --git a/llvm/test/Transforms/GCOVProfiling/linezero.ll b/llvm/test/Transforms/GCOVProfiling/linezero.ll
index f0b32ec9cd2..c9c07f4c3e5 100644
--- a/llvm/test/Transforms/GCOVProfiling/linezero.ll
+++ b/llvm/test/Transforms/GCOVProfiling/linezero.ll
@@ -26,30 +26,30 @@ entry:
call void @_Z13TagFieldSpecsv(), !dbg !31
store %struct.vector* %ref.tmp, %struct.vector** %__range, align 8, !dbg !31
call void @llvm.dbg.declare(metadata i8** %__begin, metadata !32, metadata !{}), !dbg !30
- %1 = load %struct.vector** %__range, align 8, !dbg !31
+ %1 = load %struct.vector*, %struct.vector** %__range, align 8, !dbg !31
%call = call i8* @_ZN6vector5beginEv(%struct.vector* %1), !dbg !31
store i8* %call, i8** %__begin, align 8, !dbg !31
call void @llvm.dbg.declare(metadata i8** %__end, metadata !33, metadata !{}), !dbg !30
- %2 = load %struct.vector** %__range, align 8, !dbg !31
+ %2 = load %struct.vector*, %struct.vector** %__range, align 8, !dbg !31
%call1 = call i8* @_ZN6vector3endEv(%struct.vector* %2), !dbg !31
store i8* %call1, i8** %__end, align 8, !dbg !31
br label %for.cond, !dbg !31
for.cond: ; preds = %for.inc, %0
- %3 = load i8** %__begin, align 8, !dbg !34
- %4 = load i8** %__end, align 8, !dbg !34
+ %3 = load i8*, i8** %__begin, align 8, !dbg !34
+ %4 = load i8*, i8** %__end, align 8, !dbg !34
%cmp = icmp ne i8* %3, %4, !dbg !34
br i1 %cmp, label %for.body, label %for.end, !dbg !34
for.body: ; preds = %for.cond
call void @llvm.dbg.declare(metadata i8* %spec, metadata !37, metadata !{}), !dbg !31
- %5 = load i8** %__begin, align 8, !dbg !38
- %6 = load i8* %5, align 1, !dbg !38
+ %5 = load i8*, i8** %__begin, align 8, !dbg !38
+ %6 = load i8, i8* %5, align 1, !dbg !38
store i8 %6, i8* %spec, align 1, !dbg !38
br label %for.inc, !dbg !38
for.inc: ; preds = %for.body
- %7 = load i8** %__begin, align 8, !dbg !40
+ %7 = load i8*, i8** %__begin, align 8, !dbg !40
%incdec.ptr = getelementptr inbounds i8, i8* %7, i32 1, !dbg !40
store i8* %incdec.ptr, i8** %__begin, align 8, !dbg !40
br label %for.cond, !dbg !40
@@ -59,7 +59,7 @@ for.end: ; preds = %for.cond
unreachable, !dbg !42
return: ; No predecessors!
- %8 = load i32* %retval, !dbg !44
+ %8 = load i32, i32* %retval, !dbg !44
ret i32 %8, !dbg !44
}
diff --git a/llvm/test/Transforms/GCOVProfiling/return-block.ll b/llvm/test/Transforms/GCOVProfiling/return-block.ll
index f0be3d21aeb..787a75ebd84 100644
--- a/llvm/test/Transforms/GCOVProfiling/return-block.ll
+++ b/llvm/test/Transforms/GCOVProfiling/return-block.ll
@@ -13,7 +13,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @test() #0 {
entry:
tail call void (...)* @f() #2, !dbg !14
- %0 = load i32* @A, align 4, !dbg !15
+ %0 = load i32, i32* @A, align 4, !dbg !15
%tobool = icmp eq i32 %0, 0, !dbg !15
br i1 %tobool, label %if.end, label %if.then, !dbg !15
diff --git a/llvm/test/Transforms/GVN/2007-07-25-DominatedLoop.ll b/llvm/test/Transforms/GVN/2007-07-25-DominatedLoop.ll
index ad580ce1677..10d1e229b12 100644
--- a/llvm/test/Transforms/GVN/2007-07-25-DominatedLoop.ll
+++ b/llvm/test/Transforms/GVN/2007-07-25-DominatedLoop.ll
@@ -71,11 +71,11 @@ cond_true23.i: ; preds = %Perl_safefree.exit68
ret void
cond_next150: ; preds = %Perl_safefree.exit68
- %tmp16092 = load i32* @PL_sv_count, align 4 ; <i32> [#uses=0]
+ %tmp16092 = load i32, i32* @PL_sv_count, align 4 ; <i32> [#uses=0]
br label %cond_next165
bb157: ; preds = %cond_next165
- %tmp158 = load i32* @PL_sv_count, align 4 ; <i32> [#uses=0]
+ %tmp158 = load i32, i32* @PL_sv_count, align 4 ; <i32> [#uses=0]
br label %cond_next165
cond_next165: ; preds = %bb157, %cond_next150
diff --git a/llvm/test/Transforms/GVN/2007-07-25-InfiniteLoop.ll b/llvm/test/Transforms/GVN/2007-07-25-InfiniteLoop.ll
index 7e9c982de51..0ffb34c39b4 100644
--- a/llvm/test/Transforms/GVN/2007-07-25-InfiniteLoop.ll
+++ b/llvm/test/Transforms/GVN/2007-07-25-InfiniteLoop.ll
@@ -9,6 +9,6 @@ entry:
br label %bb
bb: ; preds = %bb, %entry
- %tmp10 = load %struct.INT2** @blkshifts, align 4 ; <%struct.INT2*> [#uses=0]
+ %tmp10 = load %struct.INT2*, %struct.INT2** @blkshifts, align 4 ; <%struct.INT2*> [#uses=0]
br label %bb
}
diff --git a/llvm/test/Transforms/GVN/2007-07-25-Loop.ll b/llvm/test/Transforms/GVN/2007-07-25-Loop.ll
index 6a9f58e02f6..54c0d982de4 100644
--- a/llvm/test/Transforms/GVN/2007-07-25-Loop.ll
+++ b/llvm/test/Transforms/GVN/2007-07-25-Loop.ll
@@ -10,6 +10,6 @@ bb278: ; preds = %bb278, %entry
br i1 false, label %bb278, label %bb344
bb344: ; preds = %bb278, %entry
- %tmp38758 = load i16* null, align 2 ; <i16> [#uses=0]
+ %tmp38758 = load i16, i16* null, align 2 ; <i16> [#uses=0]
ret void
}
diff --git a/llvm/test/Transforms/GVN/2007-07-25-NestedLoop.ll b/llvm/test/Transforms/GVN/2007-07-25-NestedLoop.ll
index 6a7a4090315..8f2c182faa8 100644
--- a/llvm/test/Transforms/GVN/2007-07-25-NestedLoop.ll
+++ b/llvm/test/Transforms/GVN/2007-07-25-NestedLoop.ll
@@ -11,21 +11,21 @@ bb534.preheader: ; preds = %entry
bb556.preheader: ; preds = %entry
%tmp56119 = getelementptr %struct.TypHeader, %struct.TypHeader* %hdR, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp56220 = load i32* %tmp56119 ; <i32> [#uses=0]
+ %tmp56220 = load i32, i32* %tmp56119 ; <i32> [#uses=0]
br i1 false, label %bb.nph23, label %bb675.preheader
bb.nph23: ; preds = %bb556.preheader
ret %struct.TypHeader* null
bb656: ; preds = %bb675.outer, %bb656
- %tmp678 = load i32* %tmp677 ; <i32> [#uses=0]
+ %tmp678 = load i32, i32* %tmp677 ; <i32> [#uses=0]
br i1 false, label %bb684, label %bb656
bb684: ; preds = %bb675.outer, %bb656
br i1 false, label %bb924.preheader, label %bb675.outer
bb675.outer: ; preds = %bb675.preheader, %bb684
- %tmp67812 = load i32* %tmp67711 ; <i32> [#uses=0]
+ %tmp67812 = load i32, i32* %tmp67711 ; <i32> [#uses=0]
br i1 false, label %bb684, label %bb656
bb675.preheader: ; preds = %bb556.preheader
diff --git a/llvm/test/Transforms/GVN/2007-07-25-SinglePredecessor.ll b/llvm/test/Transforms/GVN/2007-07-25-SinglePredecessor.ll
index 6f214234d3c..d7e6c20324f 100644
--- a/llvm/test/Transforms/GVN/2007-07-25-SinglePredecessor.ll
+++ b/llvm/test/Transforms/GVN/2007-07-25-SinglePredecessor.ll
@@ -21,7 +21,7 @@ cond_next.i: ; preds = %entry
cond_true: ; preds = %cond_next.i
%tmp3.i8 = getelementptr %struct.mrViewingHitRecord, %struct.mrViewingHitRecord* %VHR, i32 0, i32 1, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp46 = load double* %tmp3.i8 ; <double> [#uses=0]
+ %tmp46 = load double, double* %tmp3.i8 ; <double> [#uses=0]
ret i32 1
return: ; preds = %cond_next.i, %entry
diff --git a/llvm/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll b/llvm/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll
index 5a15f0e43aa..f88ffcf59b9 100644
--- a/llvm/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll
+++ b/llvm/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll
@@ -5,7 +5,7 @@
define i32 @NextRootMove(i32 %wtm, i32 %x, i32 %y, i32 %z) {
entry:
%A = alloca i32*
- %tmp17618 = load i32** getelementptr ([65 x i32*]* @last, i32 0, i32 1), align 4
+ %tmp17618 = load i32*, i32** getelementptr ([65 x i32*]* @last, i32 0, i32 1), align 4
store i32* %tmp17618, i32** %A
; CHECK: entry:
; CHECK-NEXT: alloca i32
@@ -19,7 +19,7 @@ cond_true116:
br i1 %cmp, label %cond_true128, label %cond_true145
cond_true128:
- %tmp17625 = load i32** getelementptr ([65 x i32*]* @last, i32 0, i32 1), align 4
+ %tmp17625 = load i32*, i32** getelementptr ([65 x i32*]* @last, i32 0, i32 1), align 4
store i32* %tmp17625, i32** %A
%cmp1 = icmp eq i32 %x, %z
br i1 %cmp1 , label %bb98.backedge, label %return.loopexit
@@ -28,7 +28,7 @@ bb98.backedge:
br label %cond_true116
cond_true145:
- %tmp17631 = load i32** getelementptr ([65 x i32*]* @last, i32 0, i32 1), align 4
+ %tmp17631 = load i32*, i32** getelementptr ([65 x i32*]* @last, i32 0, i32 1), align 4
store i32* %tmp17631, i32** %A
br i1 false, label %bb98.backedge, label %return.loopexit
diff --git a/llvm/test/Transforms/GVN/2007-07-26-NonRedundant.ll b/llvm/test/Transforms/GVN/2007-07-26-NonRedundant.ll
index 7579e8aff08..211830a39af 100644
--- a/llvm/test/Transforms/GVN/2007-07-26-NonRedundant.ll
+++ b/llvm/test/Transforms/GVN/2007-07-26-NonRedundant.ll
@@ -11,6 +11,6 @@ cond_next: ; preds = %entry
br label %bb19
bb19: ; preds = %cond_next, %entry
- %tmp29 = load i32* @bsLive, align 4 ; <i32> [#uses=0]
+ %tmp29 = load i32, i32* @bsLive, align 4 ; <i32> [#uses=0]
ret i32 0
}
diff --git a/llvm/test/Transforms/GVN/2007-07-26-PhiErasure.ll b/llvm/test/Transforms/GVN/2007-07-26-PhiErasure.ll
index d898ab8e2fa..82af521ab3d 100644
--- a/llvm/test/Transforms/GVN/2007-07-26-PhiErasure.ll
+++ b/llvm/test/Transforms/GVN/2007-07-26-PhiErasure.ll
@@ -9,7 +9,7 @@
define i32 @reload(%struct.rtx_def* %first, i32 %global, %struct.FILE* %dumpfile) {
cond_next2835.1: ; preds = %cond_next2861
- %tmp2922 = load i32* @n_spills, align 4 ; <i32> [#uses=0]
+ %tmp2922 = load i32, i32* @n_spills, align 4 ; <i32> [#uses=0]
br label %bb2928
bb2928: ; preds = %cond_next2835.1, %cond_next2943
@@ -22,7 +22,7 @@ cond_next2943: ; preds = %cond_true2935, %bb2928
br i1 false, label %bb2982.preheader, label %bb2928
bb2982.preheader: ; preds = %cond_next2943
- %tmp298316 = load i32* @n_spills, align 4 ; <i32> [#uses=0]
+ %tmp298316 = load i32, i32* @n_spills, align 4 ; <i32> [#uses=0]
ret i32 %tmp298316
}
diff --git a/llvm/test/Transforms/GVN/2007-07-30-PredIDom.ll b/llvm/test/Transforms/GVN/2007-07-30-PredIDom.ll
index 5cb6bb3ecff..3a7eec7d142 100644
--- a/llvm/test/Transforms/GVN/2007-07-30-PredIDom.ll
+++ b/llvm/test/Transforms/GVN/2007-07-30-PredIDom.ll
@@ -269,6 +269,6 @@ bb970: ; preds = %entry
br label %return
return: ; preds = %0, %cond_next967, %cond_next922, %cond_next879, %cond_next807, %cond_next630, %cond_next415, %cond_next267, %cond_next191, %bb
- %retval980 = load i8** null ; <i8*> [#uses=1]
+ %retval980 = load i8*, i8** null ; <i8*> [#uses=1]
ret i8* %retval980
}
diff --git a/llvm/test/Transforms/GVN/2007-07-31-NoDomInherit.ll b/llvm/test/Transforms/GVN/2007-07-31-NoDomInherit.ll
index c9acdbb5314..c30a28331b7 100644
--- a/llvm/test/Transforms/GVN/2007-07-31-NoDomInherit.ll
+++ b/llvm/test/Transforms/GVN/2007-07-31-NoDomInherit.ll
@@ -149,27 +149,27 @@ entry:
br label %bb91
bb: ; preds = %cond_next97
- %tmp1 = load i32* @numi ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* @numi ; <i32> [#uses=1]
%tmp2 = getelementptr [44 x i8], [44 x i8]* @.str43, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp3 = call i32 (i8*, ...)* @printf( i8* %tmp2, i32 %tmp1 ) ; <i32> [#uses=0]
store i32 0, i32* %i
br label %bb13
bb4: ; preds = %bb13
- %tmp5 = load i32* %i ; <i32> [#uses=1]
- %tmp6 = load i32* %i ; <i32> [#uses=1]
+ %tmp5 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp6 = load i32, i32* %i ; <i32> [#uses=1]
%tmp7 = getelementptr [17 x i32], [17 x i32]* @trialx, i32 0, i32 %tmp6 ; <i32*> [#uses=1]
- %tmp8 = load i32* %tmp7 ; <i32> [#uses=1]
+ %tmp8 = load i32, i32* %tmp7 ; <i32> [#uses=1]
%tmp9 = call i32 @userfun( i32 %tmp8 ) ; <i32> [#uses=1]
%tmp10 = getelementptr [17 x i32], [17 x i32]* @correct_result, i32 0, i32 %tmp5 ; <i32*> [#uses=1]
store i32 %tmp9, i32* %tmp10
- %tmp11 = load i32* %i ; <i32> [#uses=1]
+ %tmp11 = load i32, i32* %i ; <i32> [#uses=1]
%tmp12 = add i32 %tmp11, 1 ; <i32> [#uses=1]
store i32 %tmp12, i32* %i
br label %bb13
bb13: ; preds = %bb4, %bb
- %tmp14 = load i32* %i ; <i32> [#uses=1]
+ %tmp14 = load i32, i32* %i ; <i32> [#uses=1]
%tmp15 = icmp sle i32 %tmp14, 16 ; <i1> [#uses=1]
%tmp1516 = zext i1 %tmp15 to i32 ; <i32> [#uses=1]
%toBool = icmp ne i32 %tmp1516, 0 ; <i1> [#uses=1]
@@ -180,47 +180,47 @@ bb17: ; preds = %bb13
br label %bb49
bb18: ; preds = %bb49
- %tmp19 = load i32* %i ; <i32> [#uses=1]
+ %tmp19 = load i32, i32* %i ; <i32> [#uses=1]
%tmp20 = getelementptr [5 x { i32, [3 x i32] }], [5 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %tmp19 ; <{ i32, [3 x i32] }*> [#uses=1]
%tmp21 = getelementptr { i32, [3 x i32] }, { i32, [3 x i32] }* %tmp20, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 0, i32* %tmp21
- %tmp22 = load i32* %i ; <i32> [#uses=1]
+ %tmp22 = load i32, i32* %i ; <i32> [#uses=1]
%tmp23 = getelementptr [13 x %struct.anon], [13 x %struct.anon]* @isa, i32 0, i32 0 ; <%struct.anon*> [#uses=1]
%tmp24 = getelementptr %struct.anon, %struct.anon* %tmp23, i32 0, i32 3 ; <[3 x i32]*> [#uses=1]
%tmp25 = getelementptr [3 x i32], [3 x i32]* %tmp24, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp26 = load i32* %tmp25 ; <i32> [#uses=1]
+ %tmp26 = load i32, i32* %tmp25 ; <i32> [#uses=1]
%tmp27 = getelementptr [5 x { i32, [3 x i32] }], [5 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %tmp22 ; <{ i32, [3 x i32] }*> [#uses=1]
%tmp28 = getelementptr { i32, [3 x i32] }, { i32, [3 x i32] }* %tmp27, i32 0, i32 1 ; <[3 x i32]*> [#uses=1]
%tmp29 = getelementptr [3 x i32], [3 x i32]* %tmp28, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 %tmp26, i32* %tmp29
- %tmp30 = load i32* %i ; <i32> [#uses=1]
+ %tmp30 = load i32, i32* %i ; <i32> [#uses=1]
%tmp31 = getelementptr [13 x %struct.anon], [13 x %struct.anon]* @isa, i32 0, i32 0 ; <%struct.anon*> [#uses=1]
%tmp32 = getelementptr %struct.anon, %struct.anon* %tmp31, i32 0, i32 3 ; <[3 x i32]*> [#uses=1]
%tmp33 = getelementptr [3 x i32], [3 x i32]* %tmp32, i32 0, i32 1 ; <i32*> [#uses=1]
- %tmp34 = load i32* %tmp33 ; <i32> [#uses=1]
+ %tmp34 = load i32, i32* %tmp33 ; <i32> [#uses=1]
%tmp35 = getelementptr [5 x { i32, [3 x i32] }], [5 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %tmp30 ; <{ i32, [3 x i32] }*> [#uses=1]
%tmp36 = getelementptr { i32, [3 x i32] }, { i32, [3 x i32] }* %tmp35, i32 0, i32 1 ; <[3 x i32]*> [#uses=1]
%tmp37 = getelementptr [3 x i32], [3 x i32]* %tmp36, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 %tmp34, i32* %tmp37
- %tmp38 = load i32* %i ; <i32> [#uses=1]
+ %tmp38 = load i32, i32* %i ; <i32> [#uses=1]
%tmp39 = getelementptr [13 x %struct.anon], [13 x %struct.anon]* @isa, i32 0, i32 0 ; <%struct.anon*> [#uses=1]
%tmp40 = getelementptr %struct.anon, %struct.anon* %tmp39, i32 0, i32 3 ; <[3 x i32]*> [#uses=1]
%tmp41 = getelementptr [3 x i32], [3 x i32]* %tmp40, i32 0, i32 2 ; <i32*> [#uses=1]
- %tmp42 = load i32* %tmp41 ; <i32> [#uses=1]
+ %tmp42 = load i32, i32* %tmp41 ; <i32> [#uses=1]
%tmp43 = getelementptr [5 x { i32, [3 x i32] }], [5 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %tmp38 ; <{ i32, [3 x i32] }*> [#uses=1]
%tmp44 = getelementptr { i32, [3 x i32] }, { i32, [3 x i32] }* %tmp43, i32 0, i32 1 ; <[3 x i32]*> [#uses=1]
%tmp45 = getelementptr [3 x i32], [3 x i32]* %tmp44, i32 0, i32 2 ; <i32*> [#uses=1]
store i32 %tmp42, i32* %tmp45
- %tmp46 = load i32* %i ; <i32> [#uses=1]
+ %tmp46 = load i32, i32* %i ; <i32> [#uses=1]
call void @fix_operands( i32 %tmp46 )
- %tmp47 = load i32* %i ; <i32> [#uses=1]
+ %tmp47 = load i32, i32* %i ; <i32> [#uses=1]
%tmp48 = add i32 %tmp47, 1 ; <i32> [#uses=1]
store i32 %tmp48, i32* %i
br label %bb49
bb49: ; preds = %bb18, %bb17
- %tmp50 = load i32* @numi ; <i32> [#uses=1]
- %tmp51 = load i32* %i ; <i32> [#uses=1]
+ %tmp50 = load i32, i32* @numi ; <i32> [#uses=1]
+ %tmp51 = load i32, i32* %i ; <i32> [#uses=1]
%tmp52 = icmp slt i32 %tmp51, %tmp50 ; <i1> [#uses=1]
%tmp5253 = zext i1 %tmp52 to i32 ; <i32> [#uses=1]
%toBool54 = icmp ne i32 %tmp5253, 0 ; <i1> [#uses=1]
@@ -230,9 +230,9 @@ bb55: ; preds = %bb49
%tmp56 = call i32 @search( ) ; <i32> [#uses=1]
store i32 %tmp56, i32* %num_sol
%tmp57 = getelementptr [21 x i8], [21 x i8]* @.str44, i32 0, i32 0 ; <i8*> [#uses=1]
- %tmp58 = load i32* %num_sol ; <i32> [#uses=1]
+ %tmp58 = load i32, i32* %num_sol ; <i32> [#uses=1]
%tmp59 = call i32 (i8*, ...)* @printf( i8* %tmp57, i32 %tmp58 ) ; <i32> [#uses=0]
- %tmp60 = load i32* @counters ; <i32> [#uses=1]
+ %tmp60 = load i32, i32* @counters ; <i32> [#uses=1]
%tmp61 = icmp ne i32 %tmp60, 0 ; <i1> [#uses=1]
%tmp6162 = zext i1 %tmp61 to i32 ; <i32> [#uses=1]
%toBool63 = icmp ne i32 %tmp6162, 0 ; <i1> [#uses=1]
@@ -246,25 +246,25 @@ cond_true: ; preds = %bb55
br label %bb79
bb66: ; preds = %bb79
- %tmp67 = load i32* %i ; <i32> [#uses=1]
+ %tmp67 = load i32, i32* %i ; <i32> [#uses=1]
%tmp68 = getelementptr [5 x i32], [5 x i32]* @counter, i32 0, i32 %tmp67 ; <i32*> [#uses=1]
- %tmp69 = load i32* %tmp68 ; <i32> [#uses=1]
+ %tmp69 = load i32, i32* %tmp68 ; <i32> [#uses=1]
%tmp70 = getelementptr [5 x i8], [5 x i8]* @.str46, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp71 = call i32 (i8*, ...)* @printf( i8* %tmp70, i32 %tmp69 ) ; <i32> [#uses=0]
- %tmp72 = load i32* %i ; <i32> [#uses=1]
+ %tmp72 = load i32, i32* %i ; <i32> [#uses=1]
%tmp73 = getelementptr [5 x i32], [5 x i32]* @counter, i32 0, i32 %tmp72 ; <i32*> [#uses=1]
- %tmp74 = load i32* %tmp73 ; <i32> [#uses=1]
- %tmp75 = load i32* %total ; <i32> [#uses=1]
+ %tmp74 = load i32, i32* %tmp73 ; <i32> [#uses=1]
+ %tmp75 = load i32, i32* %total ; <i32> [#uses=1]
%tmp76 = add i32 %tmp74, %tmp75 ; <i32> [#uses=1]
store i32 %tmp76, i32* %total
- %tmp77 = load i32* %i ; <i32> [#uses=1]
+ %tmp77 = load i32, i32* %i ; <i32> [#uses=1]
%tmp78 = add i32 %tmp77, 1 ; <i32> [#uses=1]
store i32 %tmp78, i32* %i
br label %bb79
bb79: ; preds = %bb66, %cond_true
- %tmp80 = load i32* @numi ; <i32> [#uses=1]
- %tmp81 = load i32* %i ; <i32> [#uses=1]
+ %tmp80 = load i32, i32* @numi ; <i32> [#uses=1]
+ %tmp81 = load i32, i32* %i ; <i32> [#uses=1]
%tmp82 = icmp slt i32 %tmp81, %tmp80 ; <i1> [#uses=1]
%tmp8283 = zext i1 %tmp82 to i32 ; <i32> [#uses=1]
%toBool84 = icmp ne i32 %tmp8283, 0 ; <i1> [#uses=1]
@@ -272,18 +272,18 @@ bb79: ; preds = %bb66, %cond_true
bb85: ; preds = %bb79
%tmp86 = getelementptr [12 x i8], [12 x i8]* @.str47, i32 0, i32 0 ; <i8*> [#uses=1]
- %tmp87 = load i32* %total ; <i32> [#uses=1]
+ %tmp87 = load i32, i32* %total ; <i32> [#uses=1]
%tmp88 = call i32 (i8*, ...)* @printf( i8* %tmp86, i32 %tmp87 ) ; <i32> [#uses=0]
br label %cond_next
cond_next: ; preds = %bb85, %bb55
- %tmp89 = load i32* @numi ; <i32> [#uses=1]
+ %tmp89 = load i32, i32* @numi ; <i32> [#uses=1]
%tmp90 = add i32 %tmp89, 1 ; <i32> [#uses=1]
store i32 %tmp90, i32* @numi
br label %bb91
bb91: ; preds = %cond_next, %entry
- %tmp92 = load i32* @numi ; <i32> [#uses=1]
+ %tmp92 = load i32, i32* @numi ; <i32> [#uses=1]
%tmp93 = icmp sgt i32 %tmp92, 5 ; <i1> [#uses=1]
%tmp9394 = zext i1 %tmp93 to i32 ; <i32> [#uses=1]
%toBool95 = icmp ne i32 %tmp9394, 0 ; <i1> [#uses=1]
@@ -293,7 +293,7 @@ cond_true96: ; preds = %bb91
br label %bb102
cond_next97: ; preds = %bb91
- %tmp98 = load i32* %num_sol ; <i32> [#uses=1]
+ %tmp98 = load i32, i32* %num_sol ; <i32> [#uses=1]
%tmp99 = icmp eq i32 %tmp98, 0 ; <i1> [#uses=1]
%tmp99100 = zext i1 %tmp99 to i32 ; <i32> [#uses=1]
%toBool101 = icmp ne i32 %tmp99100, 0 ; <i1> [#uses=1]
@@ -301,12 +301,12 @@ cond_next97: ; preds = %bb91
bb102: ; preds = %cond_next97, %cond_true96
store i32 0, i32* %tmp
- %tmp103 = load i32* %tmp ; <i32> [#uses=1]
+ %tmp103 = load i32, i32* %tmp ; <i32> [#uses=1]
store i32 %tmp103, i32* %retval
br label %return
return: ; preds = %bb102
- %retval104 = load i32* %retval ; <i32> [#uses=1]
+ %retval104 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval104
}
diff --git a/llvm/test/Transforms/GVN/2007-07-31-RedundantPhi.ll b/llvm/test/Transforms/GVN/2007-07-31-RedundantPhi.ll
index 13419d19e1d..b285560e6a9 100644
--- a/llvm/test/Transforms/GVN/2007-07-31-RedundantPhi.ll
+++ b/llvm/test/Transforms/GVN/2007-07-31-RedundantPhi.ll
@@ -17,6 +17,6 @@ cond_false470: ; preds = %cond_next449
br label %cond_next698
cond_next698: ; preds = %cond_true492
- %tmp701 = load i16* @img_width, align 2 ; <i16> [#uses=0]
+ %tmp701 = load i16, i16* @img_width, align 2 ; <i16> [#uses=0]
ret i32 0
}
diff --git a/llvm/test/Transforms/GVN/2008-02-12-UndefLoad.ll b/llvm/test/Transforms/GVN/2008-02-12-UndefLoad.ll
index 2a35f1074d7..8ebeb1485a2 100644
--- a/llvm/test/Transforms/GVN/2008-02-12-UndefLoad.ll
+++ b/llvm/test/Transforms/GVN/2008-02-12-UndefLoad.ll
@@ -8,7 +8,7 @@ entry:
%c = alloca %struct.anon ; <%struct.anon*> [#uses=2]
%tmp = getelementptr %struct.anon, %struct.anon* %c, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp1 = getelementptr i32, i32* %tmp, i32 1 ; <i32*> [#uses=2]
- %tmp2 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
+ %tmp2 = load i32, i32* %tmp1, align 4 ; <i32> [#uses=1]
%tmp3 = or i32 %tmp2, 11 ; <i32> [#uses=1]
%tmp4 = and i32 %tmp3, -21 ; <i32> [#uses=1]
store i32 %tmp4, i32* %tmp1, align 4
diff --git a/llvm/test/Transforms/GVN/2008-02-13-NewPHI.ll b/llvm/test/Transforms/GVN/2008-02-13-NewPHI.ll
index 80b519d14e9..638939b1534 100644
--- a/llvm/test/Transforms/GVN/2008-02-13-NewPHI.ll
+++ b/llvm/test/Transforms/GVN/2008-02-13-NewPHI.ll
@@ -8,7 +8,7 @@ entry:
br label %bb33
bb: ; preds = %bb33
- %tmp27 = load float** %sx_addr, align 4 ; <float*> [#uses=1]
+ %tmp27 = load float*, float** %sx_addr, align 4 ; <float*> [#uses=1]
store float 0.000000e+00, float* %tmp27, align 4
store float* null, float** %sx_addr, align 4
br label %bb33
@@ -17,6 +17,6 @@ bb33: ; preds = %bb, %entry
br i1 false, label %bb, label %return
return: ; preds = %bb33
- %retval59 = load i32* null, align 4 ; <i32> [#uses=1]
+ %retval59 = load i32, i32* null, align 4 ; <i32> [#uses=1]
ret i32 %retval59
}
diff --git a/llvm/test/Transforms/GVN/2008-07-02-Unreachable.ll b/llvm/test/Transforms/GVN/2008-07-02-Unreachable.ll
index ce83fa4e4be..d9932644da3 100644
--- a/llvm/test/Transforms/GVN/2008-07-02-Unreachable.ll
+++ b/llvm/test/Transforms/GVN/2008-07-02-Unreachable.ll
@@ -13,7 +13,7 @@ ifthen: ; preds = %entry
br label %ifend
ifelse: ; preds = %entry
- %tmp3 = load i8* @g_3 ; <i8> [#uses=0]
+ %tmp3 = load i8, i8* @g_3 ; <i8> [#uses=0]
store i8 %tmp3, i8* %A
br label %afterfor
@@ -27,7 +27,7 @@ forinc: ; preds = %forbody
br label %forcond
afterfor: ; preds = %forcond, %forcond.thread
- %tmp10 = load i8* @g_3 ; <i8> [#uses=0]
+ %tmp10 = load i8, i8* @g_3 ; <i8> [#uses=0]
ret i8 %tmp10
ifend: ; preds = %afterfor, %ifthen
diff --git a/llvm/test/Transforms/GVN/2008-12-09-SelfRemove.ll b/llvm/test/Transforms/GVN/2008-12-09-SelfRemove.ll
index dacea144853..d8ab1bae48e 100644
--- a/llvm/test/Transforms/GVN/2008-12-09-SelfRemove.ll
+++ b/llvm/test/Transforms/GVN/2008-12-09-SelfRemove.ll
@@ -14,7 +14,7 @@ entry:
br i1 false, label %return, label %bb
bb: ; preds = %entry
- %1 = load i8** %0, align 4 ; <i8*> [#uses=0]
+ %1 = load i8*, i8** %0, align 4 ; <i8*> [#uses=0]
%2 = getelementptr %struct.d_print_info, %struct.d_print_info* %dpi, i32 0, i32 1 ; <i8**> [#uses=0]
br label %bb21
diff --git a/llvm/test/Transforms/GVN/2008-12-12-RLE-Crash.ll b/llvm/test/Transforms/GVN/2008-12-12-RLE-Crash.ll
index bcb5427a945..dabf7fa7aeb 100644
--- a/llvm/test/Transforms/GVN/2008-12-12-RLE-Crash.ll
+++ b/llvm/test/Transforms/GVN/2008-12-12-RLE-Crash.ll
@@ -7,7 +7,7 @@ entry:
br label %bb84
bb41: ; preds = %bb82
- %tmp = load i8* %opt.0, align 1 ; <i8> [#uses=0]
+ %tmp = load i8, i8* %opt.0, align 1 ; <i8> [#uses=0]
%tmp1 = getelementptr i8, i8* %opt.0, i32 1 ; <i8*> [#uses=2]
switch i32 0, label %bb81 [
i32 102, label %bb82
@@ -26,7 +26,7 @@ bb81: ; preds = %bb41
bb82: ; preds = %bb84, %bb79, %bb41
%opt.0 = phi i8* [ %tmp3, %bb84 ], [ %tmp1, %bb79 ], [ %tmp1, %bb41 ] ; <i8*> [#uses=3]
- %tmp2 = load i8* %opt.0, align 1 ; <i8> [#uses=0]
+ %tmp2 = load i8, i8* %opt.0, align 1 ; <i8> [#uses=0]
br i1 false, label %bb84, label %bb41
bb84: ; preds = %bb82, %entry
diff --git a/llvm/test/Transforms/GVN/2008-12-14-rle-reanalyze.ll b/llvm/test/Transforms/GVN/2008-12-14-rle-reanalyze.ll
index de2950f3647..84540221464 100644
--- a/llvm/test/Transforms/GVN/2008-12-14-rle-reanalyze.ll
+++ b/llvm/test/Transforms/GVN/2008-12-14-rle-reanalyze.ll
@@ -12,7 +12,7 @@ bb22: ; preds = %bb23, %bb22, %entry
bb23: ; preds = %bb23, %bb22
%sortv.233 = phi i32* [ getelementptr ([256 x i32]* @sort_value, i32 0, i32 0), %bb22 ], [ %sortv.2, %bb23 ] ; <i32*> [#uses=1]
- %0 = load i32* %sortv.233, align 4 ; <i32> [#uses=0]
+ %0 = load i32, i32* %sortv.233, align 4 ; <i32> [#uses=0]
%sortv.2 = getelementptr [256 x i32], [256 x i32]* @sort_value, i32 0, i32 0 ; <i32*> [#uses=1]
br i1 false, label %bb23, label %bb22
}
diff --git a/llvm/test/Transforms/GVN/2008-12-15-CacheVisited.ll b/llvm/test/Transforms/GVN/2008-12-15-CacheVisited.ll
index 6c2e4da524e..73adacd3cfa 100644
--- a/llvm/test/Transforms/GVN/2008-12-15-CacheVisited.ll
+++ b/llvm/test/Transforms/GVN/2008-12-15-CacheVisited.ll
@@ -11,7 +11,7 @@ bb202: ; preds = %entry
bb203: ; preds = %entry
%tmp = getelementptr i32, i32* %decl, i32 1 ; <i32*> [#uses=1]
- %tmp1 = load i32* %tmp, align 4 ; <i32> [#uses=0]
+ %tmp1 = load i32, i32* %tmp, align 4 ; <i32> [#uses=0]
br i1 false, label %bb207, label %bb204
bb204: ; preds = %bb203
@@ -23,6 +23,6 @@ bb207: ; preds = %bb203
bb208: ; preds = %bb207, %bb204
%iftmp.1374.0.in = phi i32* [ null, %bb207 ], [ %tmp2, %bb204 ] ; <i32*> [#uses=1]
- %iftmp.1374.0 = load i32* %iftmp.1374.0.in ; <i32> [#uses=0]
+ %iftmp.1374.0 = load i32, i32* %iftmp.1374.0.in ; <i32> [#uses=0]
unreachable
}
diff --git a/llvm/test/Transforms/GVN/2009-01-21-SortInvalidation.ll b/llvm/test/Transforms/GVN/2009-01-21-SortInvalidation.ll
index 36775936c8b..6144697bb5b 100644
--- a/llvm/test/Transforms/GVN/2009-01-21-SortInvalidation.ll
+++ b/llvm/test/Transforms/GVN/2009-01-21-SortInvalidation.ll
@@ -37,7 +37,7 @@ bb550: ; preds = %bb543
br i1 false, label %bb554, label %bb552
bb552: ; preds = %bb550
- %0 = load i8* %d.0, align 8 ; <i8> [#uses=0]
+ %0 = load i8, i8* %d.0, align 8 ; <i8> [#uses=0]
br label %bb554
bb554: ; preds = %bb552, %bb550, %bb549
diff --git a/llvm/test/Transforms/GVN/2009-01-22-SortInvalidation.ll b/llvm/test/Transforms/GVN/2009-01-22-SortInvalidation.ll
index 8a4d48bd0aa..89b058a235b 100644
--- a/llvm/test/Transforms/GVN/2009-01-22-SortInvalidation.ll
+++ b/llvm/test/Transforms/GVN/2009-01-22-SortInvalidation.ll
@@ -79,11 +79,11 @@ bb54: ; preds = %entry
br label %bb69.loopexit
bb59: ; preds = %bb63.preheader
- %0 = load %struct..4sPragmaType** %3, align 4 ; <%struct..4sPragmaType*> [#uses=0]
+ %0 = load %struct..4sPragmaType*, %struct..4sPragmaType** %3, align 4 ; <%struct..4sPragmaType*> [#uses=0]
br label %bb65
bb65: ; preds = %bb63.preheader, %bb59
- %1 = load %struct..4sPragmaType** %4, align 4 ; <%struct..4sPragmaType*> [#uses=0]
+ %1 = load %struct..4sPragmaType*, %struct..4sPragmaType** %4, align 4 ; <%struct..4sPragmaType*> [#uses=0]
br i1 false, label %bb67, label %bb63.preheader
bb67: ; preds = %bb65
diff --git a/llvm/test/Transforms/GVN/2009-02-17-LoadPRECrash.ll b/llvm/test/Transforms/GVN/2009-02-17-LoadPRECrash.ll
index cc2bb1deb8c..378d7e73b00 100644
--- a/llvm/test/Transforms/GVN/2009-02-17-LoadPRECrash.ll
+++ b/llvm/test/Transforms/GVN/2009-02-17-LoadPRECrash.ll
@@ -24,11 +24,11 @@ entry:
%addr = alloca %struct.rtx_def* ; <%struct.rtx_def**> [#uses=5]
%iftmp.1532 = alloca %struct.rtx_def* ; <%struct.rtx_def**> [#uses=3]
store %struct.rtx_def* %orig, %struct.rtx_def** null
- %0 = load %struct.rtx_def** null, align 4 ; <%struct.rtx_def*> [#uses=0]
+ %0 = load %struct.rtx_def*, %struct.rtx_def** null, align 4 ; <%struct.rtx_def*> [#uses=0]
br i1 false, label %bb96, label %bb59
bb59: ; preds = %entry
- %1 = load %struct.rtx_def** %addr, align 4 ; <%struct.rtx_def*> [#uses=1]
+ %1 = load %struct.rtx_def*, %struct.rtx_def** %addr, align 4 ; <%struct.rtx_def*> [#uses=1]
%2 = call i32 @local_symbolic_operand(%struct.rtx_def* %1, i32 0) nounwind ; <i32> [#uses=0]
br i1 false, label %bb96, label %bb63
@@ -89,22 +89,22 @@ bb94: ; preds = %bb93, %bb92
unreachable
bb96: ; preds = %bb59, %entry
- %5 = load %struct.rtx_def** %addr, align 4 ; <%struct.rtx_def*> [#uses=1]
+ %5 = load %struct.rtx_def*, %struct.rtx_def** %addr, align 4 ; <%struct.rtx_def*> [#uses=1]
%6 = getelementptr %struct.rtx_def, %struct.rtx_def* %5, i32 0, i32 0 ; <i16*> [#uses=1]
- %7 = load i16* %6, align 2 ; <i16> [#uses=0]
+ %7 = load i16, i16* %6, align 2 ; <i16> [#uses=0]
br i1 false, label %bb147, label %bb97
bb97: ; preds = %bb96
- %8 = load %struct.rtx_def** %addr, align 4 ; <%struct.rtx_def*> [#uses=0]
+ %8 = load %struct.rtx_def*, %struct.rtx_def** %addr, align 4 ; <%struct.rtx_def*> [#uses=0]
br i1 false, label %bb147, label %bb99
bb99: ; preds = %bb97
unreachable
bb147: ; preds = %bb97, %bb96
- %9 = load %struct.rtx_def** %addr, align 4 ; <%struct.rtx_def*> [#uses=1]
+ %9 = load %struct.rtx_def*, %struct.rtx_def** %addr, align 4 ; <%struct.rtx_def*> [#uses=1]
%10 = getelementptr %struct.rtx_def, %struct.rtx_def* %9, i32 0, i32 0 ; <i16*> [#uses=1]
- %11 = load i16* %10, align 2 ; <i16> [#uses=0]
+ %11 = load i16, i16* %10, align 2 ; <i16> [#uses=0]
br i1 false, label %bb164, label %bb148
bb148: ; preds = %bb147
@@ -167,7 +167,7 @@ bb181: ; preds = %bb180, %bb170
unreachable
bb211: ; preds = %bb168, %bb167
- %14 = load %struct.rtx_def** %addr, align 4 ; <%struct.rtx_def*> [#uses=0]
+ %14 = load %struct.rtx_def*, %struct.rtx_def** %addr, align 4 ; <%struct.rtx_def*> [#uses=0]
%15 = getelementptr [1 x %struct.cgraph_rtl_info], [1 x %struct.cgraph_rtl_info]* null, i32 0, i32 0 ; <%struct.cgraph_rtl_info*> [#uses=0]
store %struct.rtx_def* null, %struct.rtx_def** null, align 4
br i1 false, label %bb212, label %bb213
@@ -183,7 +183,7 @@ bb213: ; preds = %bb211
bb214: ; preds = %bb213, %bb212
%16 = bitcast %struct.block_symbol* null to [1 x %struct.cgraph_rtl_info]* ; <[1 x %struct.cgraph_rtl_info]*> [#uses=1]
%17 = getelementptr [1 x %struct.cgraph_rtl_info], [1 x %struct.cgraph_rtl_info]* %16, i32 0, i32 1 ; <%struct.cgraph_rtl_info*> [#uses=0]
- %18 = load %struct.rtx_def** %iftmp.1532, align 4 ; <%struct.rtx_def*> [#uses=0]
+ %18 = load %struct.rtx_def*, %struct.rtx_def** %iftmp.1532, align 4 ; <%struct.rtx_def*> [#uses=0]
%19 = getelementptr %struct.rtx_def, %struct.rtx_def* null, i32 0, i32 3 ; <%struct.u*> [#uses=1]
%20 = getelementptr %struct.u, %struct.u* %19, i32 0, i32 0 ; <%struct.block_symbol*> [#uses=1]
%21 = bitcast %struct.block_symbol* %20 to [1 x i64]* ; <[1 x i64]*> [#uses=1]
diff --git a/llvm/test/Transforms/GVN/2009-06-17-InvalidPRE.ll b/llvm/test/Transforms/GVN/2009-06-17-InvalidPRE.ll
index 5b695c55242..bf0a234da4a 100644
--- a/llvm/test/Transforms/GVN/2009-06-17-InvalidPRE.ll
+++ b/llvm/test/Transforms/GVN/2009-06-17-InvalidPRE.ll
@@ -21,7 +21,7 @@ bb2: ; preds = %bb1
%3 = getelementptr %struct.mbuf, %struct.mbuf* %m.0.ph, i32 0, i32 2 ; <i32*> [#uses=1]
store i32 0, i32* %3, align 4
%4 = getelementptr %struct.mbuf, %struct.mbuf* %m.0.ph, i32 0, i32 0 ; <%struct.mbuf**> [#uses=1]
- %5 = load %struct.mbuf** %4, align 4 ; <%struct.mbuf*> [#uses=1]
+ %5 = load %struct.mbuf*, %struct.mbuf** %4, align 4 ; <%struct.mbuf*> [#uses=1]
br label %bb4.outer
bb4.outer: ; preds = %bb4.preheader, %bb2
@@ -41,21 +41,21 @@ bb4: ; preds = %bb4.outer, %bb3
br i1 %12, label %bb1, label %bb7
bb1: ; preds = %bb4
- %13 = load i32* %7, align 4 ; <i32> [#uses=3]
+ %13 = load i32, i32* %7, align 4 ; <i32> [#uses=3]
%14 = icmp sgt i32 %13, %len.0 ; <i1> [#uses=1]
br i1 %14, label %bb3, label %bb2
bb3: ; preds = %bb1
%15 = sub i32 %13, %len.0 ; <i32> [#uses=1]
store i32 %15, i32* %8, align 4
- %16 = load i8** %9, align 4 ; <i8*> [#uses=1]
+ %16 = load i8*, i8** %9, align 4 ; <i8*> [#uses=1]
%17 = getelementptr i8, i8* %16, i32 %len.0 ; <i8*> [#uses=1]
store i8* %17, i8** %10, align 4
br label %bb4
bb7: ; preds = %bb4
%18 = getelementptr %struct.mbuf, %struct.mbuf* %mp, i32 0, i32 5 ; <i16*> [#uses=1]
- %19 = load i16* %18, align 2 ; <i16> [#uses=1]
+ %19 = load i16, i16* %18, align 2 ; <i16> [#uses=1]
%20 = zext i16 %19 to i32 ; <i32> [#uses=1]
%21 = and i32 %20, 2 ; <i32> [#uses=1]
%22 = icmp eq i32 %21, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/GVN/2009-07-13-MemDepSortFail.ll b/llvm/test/Transforms/GVN/2009-07-13-MemDepSortFail.ll
index f079108b9bd..0ed52370f80 100644
--- a/llvm/test/Transforms/GVN/2009-07-13-MemDepSortFail.ll
+++ b/llvm/test/Transforms/GVN/2009-07-13-MemDepSortFail.ll
@@ -22,11 +22,11 @@ bb2: ; preds = %bb
br label %bb62
bb9: ; preds = %bb
- %0 = load i8* %sp.1, align 1 ; <i8> [#uses=0]
+ %0 = load i8, i8* %sp.1, align 1 ; <i8> [#uses=0]
br label %bb62
bb51: ; preds = %bb
- %1 = load i8* %sp.1, align 1 ; <i8> [#uses=0]
+ %1 = load i8, i8* %sp.1, align 1 ; <i8> [#uses=0]
ret i8* null
bb62: ; preds = %bb9, %bb2, %bb
diff --git a/llvm/test/Transforms/GVN/2009-11-12-MemDepMallocBitCast.ll b/llvm/test/Transforms/GVN/2009-11-12-MemDepMallocBitCast.ll
index b433297bba6..e0dbb4bccf3 100644
--- a/llvm/test/Transforms/GVN/2009-11-12-MemDepMallocBitCast.ll
+++ b/llvm/test/Transforms/GVN/2009-11-12-MemDepMallocBitCast.ll
@@ -6,9 +6,9 @@ define i64 @test() {
%1 = tail call i8* @malloc(i64 mul (i64 4, i64 ptrtoint (i64* getelementptr (i64* null, i64 1) to i64))) ; <i8*> [#uses=2]
store i8 42, i8* %1
%X = bitcast i8* %1 to i64* ; <i64*> [#uses=1]
- %Y = load i64* %X ; <i64> [#uses=1]
+ %Y = load i64, i64* %X ; <i64> [#uses=1]
ret i64 %Y
-; CHECK: %Y = load i64* %X
+; CHECK: %Y = load i64, i64* %X
; CHECK: ret i64 %Y
}
diff --git a/llvm/test/Transforms/GVN/2010-03-31-RedundantPHIs.ll b/llvm/test/Transforms/GVN/2010-03-31-RedundantPHIs.ll
index d6e1c6b76d0..9d9ad542407 100644
--- a/llvm/test/Transforms/GVN/2010-03-31-RedundantPHIs.ll
+++ b/llvm/test/Transforms/GVN/2010-03-31-RedundantPHIs.ll
@@ -24,8 +24,8 @@ bb8: ; preds = %bb12
br i1 undef, label %bb9, label %bb10
bb9: ; preds = %bb8
- %0 = load i8** undef, align 4 ; <i8*> [#uses=0]
- %1 = load i8** undef, align 4 ; <i8*> [#uses=0]
+ %0 = load i8*, i8** undef, align 4 ; <i8*> [#uses=0]
+ %1 = load i8*, i8** undef, align 4 ; <i8*> [#uses=0]
br label %bb11
bb10: ; preds = %bb8
diff --git a/llvm/test/Transforms/GVN/2010-05-08-OneBit.ll b/llvm/test/Transforms/GVN/2010-05-08-OneBit.ll
index 1e75410db5d..0e3fa4b8a49 100644
--- a/llvm/test/Transforms/GVN/2010-05-08-OneBit.ll
+++ b/llvm/test/Transforms/GVN/2010-05-08-OneBit.ll
@@ -31,7 +31,7 @@ l129.i.i: ; preds = %k121.i.i
k133.i.i: ; preds = %k121.i.i
%2 = getelementptr i8, i8* undef, i64 5 ; <i8*> [#uses=1]
%3 = bitcast i8* %2 to i1* ; <i1*> [#uses=1]
- %4 = load i1* %3 ; <i1> [#uses=1]
+ %4 = load i1, i1* %3 ; <i1> [#uses=1]
br i1 %4, label %k151.i.i, label %l147.i.i
l147.i.i: ; preds = %k133.i.i
diff --git a/llvm/test/Transforms/GVN/2011-04-27-phioperands.ll b/llvm/test/Transforms/GVN/2011-04-27-phioperands.ll
index 42c46500c48..05cf6c7310a 100644
--- a/llvm/test/Transforms/GVN/2011-04-27-phioperands.ll
+++ b/llvm/test/Transforms/GVN/2011-04-27-phioperands.ll
@@ -50,7 +50,7 @@ doemit.exit76.i:
"<bb 53>.i":
%wascaret_2.i = phi i32 [ 0, %"<L39>.i" ], [ 0, %"<L29>.i" ], [ 0, %"<L28>.i" ], [ 0, %"<bb 35>.i" ], [ 0, %"<L99>.i" ], [ 0, %"<L98>.i" ], [ 0, %doemit.exit76.i ], [ 1, %doemit.exit51.i ], [ 0, %"<L24>.i" ]
- %D.5496_84.i = load i8** undef, align 8
+ %D.5496_84.i = load i8*, i8** undef, align 8
br i1 undef, label %"<bb 54>.i", label %"<bb 5>"
"<bb 54>.i":
diff --git a/llvm/test/Transforms/GVN/2011-06-01-NonLocalMemdepMiscompile.ll b/llvm/test/Transforms/GVN/2011-06-01-NonLocalMemdepMiscompile.ll
index 395c01f4452..635da27887c 100644
--- a/llvm/test/Transforms/GVN/2011-06-01-NonLocalMemdepMiscompile.ll
+++ b/llvm/test/Transforms/GVN/2011-06-01-NonLocalMemdepMiscompile.ll
@@ -22,7 +22,7 @@ bb1:
; CHECK: [[TMP:%.*]] = phi i8* [ getelementptr (i8* null, i64 undef), %bb10 ], [ null, %bb ]
; CHECK: bb1.bb15_crit_edge:
-; CHECK: %tmp17.pre = load i8* [[TMP]], align 1
+; CHECK: %tmp17.pre = load i8, i8* [[TMP]], align 1
bb3:
call void @isalnum()
@@ -32,22 +32,22 @@ bb5:
br i1 undef, label %bb10, label %bb6
bb6:
- %tmp7 = load i8** %tmp, align 8
- %tmp8 = load i8* %tmp7, align 1
+ %tmp7 = load i8*, i8** %tmp, align 8
+ %tmp8 = load i8, i8* %tmp7, align 1
%tmp9 = zext i8 %tmp8 to i64
br i1 undef, label %bb15, label %bb10
bb10:
- %tmp11 = load i8** %tmp, align 8
- %tmp12 = load i8* %tmp11, align 1
+ %tmp11 = load i8*, i8** %tmp, align 8
+ %tmp12 = load i8, i8* %tmp11, align 1
%tmp13 = zext i8 %tmp12 to i64
%tmp14 = getelementptr inbounds i8, i8* null, i64 undef
store i8* %tmp14, i8** %tmp, align 8
br label %bb1
bb15:
- %tmp16 = load i8** %tmp, align 8
- %tmp17 = load i8* %tmp16, align 1
+ %tmp16 = load i8*, i8** %tmp, align 8
+ %tmp17 = load i8, i8* %tmp16, align 1
%tmp18 = icmp eq i8 %tmp17, 0
br label %bb19
diff --git a/llvm/test/Transforms/GVN/MemdepMiscompile.ll b/llvm/test/Transforms/GVN/MemdepMiscompile.ll
index d4201696157..0652304b3d7 100644
--- a/llvm/test/Transforms/GVN/MemdepMiscompile.ll
+++ b/llvm/test/Transforms/GVN/MemdepMiscompile.ll
@@ -13,14 +13,14 @@ entry:
; CHECK: call void @RunInMode
; CHECK: br i1 %tobool, label %while.cond.backedge, label %if.then
; CHECK: while.cond.backedge:
-; CHECK: load i32* %shouldExit
+; CHECK: load i32, i32* %shouldExit
; CHECK: br i1 %cmp, label %while.body
%shouldExit = alloca i32, align 4
%tasksIdle = alloca i32, align 4
store i32 0, i32* %shouldExit, align 4
store i32 0, i32* %tasksIdle, align 4
call void @CTestInitialize(i32* %tasksIdle) nounwind
- %0 = load i32* %shouldExit, align 4
+ %0 = load i32, i32* %shouldExit, align 4
%cmp1 = icmp eq i32 %0, 0
br i1 %cmp1, label %while.body.lr.ph, label %while.end
@@ -29,7 +29,7 @@ while.body.lr.ph:
while.body:
call void @RunInMode(i32 100) nounwind
- %1 = load i32* %tasksIdle, align 4
+ %1 = load i32, i32* %tasksIdle, align 4
%tobool = icmp eq i32 %1, 0
br i1 %tobool, label %while.cond.backedge, label %if.then
@@ -39,7 +39,7 @@ if.then:
br label %while.cond.backedge
while.cond.backedge:
- %2 = load i32* %shouldExit, align 4
+ %2 = load i32, i32* %shouldExit, align 4
%cmp = icmp eq i32 %2, 0
br i1 %cmp, label %while.body, label %while.cond.while.end_crit_edge
diff --git a/llvm/test/Transforms/GVN/atomic.ll b/llvm/test/Transforms/GVN/atomic.ll
index 8c13d209c53..aada1378ee2 100644
--- a/llvm/test/Transforms/GVN/atomic.ll
+++ b/llvm/test/Transforms/GVN/atomic.ll
@@ -11,9 +11,9 @@ define i32 @test1() nounwind uwtable ssp {
; CHECK-LABEL: test1
; CHECK: add i32 %x, %x
entry:
- %x = load i32* @y
+ %x = load i32, i32* @y
store atomic i32 %x, i32* @x unordered, align 4
- %y = load i32* @y
+ %y = load i32, i32* @y
%z = add i32 %x, %y
ret i32 %z
}
@@ -23,9 +23,9 @@ define i32 @test2() nounwind uwtable ssp {
; CHECK-LABEL: test2
; CHECK: add i32 %x, %x
entry:
- %x = load i32* @y
+ %x = load i32, i32* @y
store atomic i32 %x, i32* @x seq_cst, align 4
- %y = load i32* @y
+ %y = load i32, i32* @y
%z = add i32 %x, %y
ret i32 %z
}
@@ -35,9 +35,9 @@ define i32 @test3() nounwind uwtable ssp {
; CHECK-LABEL: test3
; CHECK: add i32 %x, %x
entry:
- %x = load i32* @y
- %y = load atomic i32* @x unordered, align 4
- %z = load i32* @y
+ %x = load i32, i32* @y
+ %y = load atomic i32, i32* @x unordered, align 4
+ %z = load i32, i32* @y
%a = add i32 %x, %z
%b = add i32 %y, %a
ret i32 %b
@@ -46,12 +46,12 @@ entry:
; GVN across acquire load (allowed as the original load was not atomic)
define i32 @test4() nounwind uwtable ssp {
; CHECK-LABEL: test4
-; CHECK: load atomic i32* @x
-; CHECK-NOT: load i32* @y
+; CHECK: load atomic i32, i32* @x
+; CHECK-NOT: load i32, i32* @y
entry:
- %x = load i32* @y
- %y = load atomic i32* @x seq_cst, align 4
- %x2 = load i32* @y
+ %x = load i32, i32* @y
+ %y = load atomic i32, i32* @x seq_cst, align 4
+ %x2 = load i32, i32* @y
%x3 = add i32 %x, %x2
%y2 = add i32 %y, %x3
ret i32 %y2
@@ -62,8 +62,8 @@ define i32 @test5() nounwind uwtable ssp {
; CHECK-LABEL: test5
; CHECK: add i32 %x, %x
entry:
- %x = load atomic i32* @x unordered, align 4
- %y = load i32* @x
+ %x = load atomic i32, i32* @x unordered, align 4
+ %y = load i32, i32* @x
%z = add i32 %x, %y
ret i32 %z
}
@@ -71,10 +71,10 @@ entry:
; GVN unordered load to load (unordered load must not be removed)
define i32 @test6() nounwind uwtable ssp {
; CHECK-LABEL: test6
-; CHECK: load atomic i32* @x unordered
+; CHECK: load atomic i32, i32* @x unordered
entry:
- %x = load i32* @x
- %x2 = load atomic i32* @x unordered, align 4
+ %x = load i32, i32* @x
+ %x2 = load atomic i32, i32* @x unordered, align 4
%x3 = add i32 %x, %x2
ret i32 %x3
}
@@ -84,10 +84,10 @@ define i32 @test7() nounwind uwtable ssp {
; CHECK-LABEL: test7
; CHECK: add i32 %x, %y
entry:
- %x = load i32* @y
+ %x = load i32, i32* @y
store atomic i32 %x, i32* @x release, align 4
- %w = load atomic i32* @x acquire, align 4
- %y = load i32* @y
+ %w = load atomic i32, i32* @x acquire, align 4
+ %y = load i32, i32* @y
%z = add i32 %x, %y
ret i32 %z
}
@@ -97,10 +97,10 @@ define i32 @test8() nounwind uwtable ssp {
; CHECK-LABEL: test8
; CHECK: add i32 %x, %x
entry:
- %x = load i32* @y
- %w = load atomic i32* @x acquire, align 4
+ %x = load i32, i32* @y
+ %w = load atomic i32, i32* @x acquire, align 4
store atomic i32 %x, i32* @x release, align 4
- %y = load i32* @y
+ %y = load i32, i32* @y
%z = add i32 %x, %y
ret i32 %z
}
@@ -110,9 +110,9 @@ define i32 @test9() nounwind uwtable ssp {
; CHECK-LABEL: test9
; CHECK: add i32 %x, %x
entry:
- %x = load i32* @y
+ %x = load i32, i32* @y
store atomic i32 %x, i32* @x monotonic, align 4
- %y = load i32* @y
+ %y = load i32, i32* @y
%z = add i32 %x, %y
ret i32 %z
}
@@ -122,9 +122,9 @@ define i32 @test10() nounwind uwtable ssp {
; CHECK-LABEL: test10
; CHECK: add i32 %x, %y
entry:
- %x = load atomic i32* @y unordered, align 4
- %clobber = load atomic i32* @x monotonic, align 4
- %y = load atomic i32* @y monotonic, align 4
+ %x = load atomic i32, i32* @y unordered, align 4
+ %clobber = load atomic i32, i32* @x monotonic, align 4
+ %y = load atomic i32, i32* @y monotonic, align 4
%z = add i32 %x, %y
ret i32 %z
}
diff --git a/llvm/test/Transforms/GVN/calloc-load-removal.ll b/llvm/test/Transforms/GVN/calloc-load-removal.ll
index 2dde5b7b414..a51f71fd77a 100644
--- a/llvm/test/Transforms/GVN/calloc-load-removal.ll
+++ b/llvm/test/Transforms/GVN/calloc-load-removal.ll
@@ -9,11 +9,11 @@ define i32 @test1() {
%1 = tail call noalias i8* @calloc(i64 1, i64 4)
%2 = bitcast i8* %1 to i32*
; This load is trivially constant zero
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
ret i32 %3
; CHECK-LABEL: @test1(
-; CHECK-NOT: %3 = load i32* %2, align 4
+; CHECK-NOT: %3 = load i32, i32* %2, align 4
; CHECK: ret i32 0
; CHECK_NO_LIBCALLS-LABEL: @test1(
diff --git a/llvm/test/Transforms/GVN/cond_br.ll b/llvm/test/Transforms/GVN/cond_br.ll
index 918e7d41f12..aeb1a6e6bf7 100644
--- a/llvm/test/Transforms/GVN/cond_br.ll
+++ b/llvm/test/Transforms/GVN/cond_br.ll
@@ -5,11 +5,11 @@
; Function Attrs: nounwind ssp uwtable
define void @foo(i32 %x) {
; CHECK: @foo(i32 %x)
-; CHECK: %.pre = load i32* @y
+; CHECK: %.pre = load i32, i32* @y
; CHECK: call void @bar(i32 %.pre)
%t = sub i32 %x, %x
- %.pre = load i32* @y, align 4
+ %.pre = load i32, i32* @y, align 4
%cmp = icmp sgt i32 %t, 2
br i1 %cmp, label %if.then, label %entry.if.end_crit_edge
@@ -29,11 +29,11 @@ if.end: ; preds = %entry.if.end_crit_e
define void @foo2(i32 %x) {
; CHECK: @foo2(i32 %x)
-; CHECK: %.pre = load i32* @y
+; CHECK: %.pre = load i32, i32* @y
; CHECK: tail call void @bar(i32 %.pre)
entry:
%t = sub i32 %x, %x
- %.pre = load i32* @y, align 4
+ %.pre = load i32, i32* @y, align 4
%cmp = icmp sgt i32 %t, 2
br i1 %cmp, label %if.then, label %if.else
diff --git a/llvm/test/Transforms/GVN/cond_br2.ll b/llvm/test/Transforms/GVN/cond_br2.ll
index 10e1a855fd5..02154a78391 100644
--- a/llvm/test/Transforms/GVN/cond_br2.ll
+++ b/llvm/test/Transforms/GVN/cond_br2.ll
@@ -30,7 +30,7 @@ entry:
%add.ptr.i.i.i.i.i.i = bitcast %"union.llvm::SmallVectorBase::U"* %add.ptr.i.i.i.i2.i.i to i8*
store i8* %add.ptr.i.i.i.i.i.i, i8** %CapacityX.i.i.i.i.i.i, align 16, !tbaa !4
%EndX.i = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
- %2 = load i8** %EndX.i, align 8, !tbaa !4
+ %2 = load i8*, i8** %EndX.i, align 8, !tbaa !4
%CapacityX.i = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 2
%cmp.i = icmp ult i8* %2, %add.ptr.i.i.i.i.i.i
br i1 %cmp.i, label %Retry.i, label %if.end.i
@@ -51,18 +51,18 @@ if.end.i: ; preds = %entry
to label %.noexc unwind label %lpad
.noexc: ; preds = %if.end.i
- %.pre.i = load i8** %EndX.i, align 8, !tbaa !4
+ %.pre.i = load i8*, i8** %EndX.i, align 8, !tbaa !4
br label %Retry.i
invoke.cont: ; preds = %new.notnull.i, %Retry.i
%add.ptr.i = getelementptr inbounds i8, i8* %3, i64 4
store i8* %add.ptr.i, i8** %EndX.i, align 8, !tbaa !4
- %6 = load i8** %CapacityX.i, align 16, !tbaa !4
+ %6 = load i8*, i8** %CapacityX.i, align 16, !tbaa !4
%cmp.i8 = icmp ult i8* %add.ptr.i, %6
br i1 %cmp.i8, label %new.notnull.i11, label %if.end.i14
Retry.i10: ; preds = %if.end.i14
- %.pre.i13 = load i8** %EndX.i, align 8, !tbaa !4
+ %.pre.i13 = load i8*, i8** %EndX.i, align 8, !tbaa !4
%new.isnull.i9 = icmp eq i8* %.pre.i13, null
br i1 %new.isnull.i9, label %invoke.cont2, label %new.notnull.i11
@@ -85,7 +85,7 @@ invoke.cont2: ; preds = %new.notnull.i11, %R
to label %invoke.cont3 unwind label %lpad
invoke.cont3: ; preds = %invoke.cont2
- %11 = load i8** %BeginX.i.i.i.i.i.i, align 16, !tbaa !4
+ %11 = load i8*, i8** %BeginX.i.i.i.i.i.i, align 16, !tbaa !4
%cmp.i.i.i.i19 = icmp eq i8* %11, %1
br i1 %cmp.i.i.i.i19, label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21, label %if.then.i.i.i20
@@ -100,7 +100,7 @@ _ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.t
lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2
%12 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
cleanup
- %13 = load i8** %BeginX.i.i.i.i.i.i, align 16, !tbaa !4
+ %13 = load i8*, i8** %BeginX.i.i.i.i.i.i, align 16, !tbaa !4
%cmp.i.i.i.i = icmp eq i8* %13, %1
br i1 %cmp.i.i.i.i, label %eh.resume, label %if.then.i.i.i
diff --git a/llvm/test/Transforms/GVN/condprop.ll b/llvm/test/Transforms/GVN/condprop.ll
index 845f88e1589..6aa3cb86250 100644
--- a/llvm/test/Transforms/GVN/condprop.ll
+++ b/llvm/test/Transforms/GVN/condprop.ll
@@ -5,7 +5,7 @@
; CHECK-LABEL: @test1(
define i32 @test1() nounwind {
entry:
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
%1 = icmp eq i32 %0, 4
br i1 %1, label %bb, label %bb1
@@ -13,7 +13,7 @@ bb: ; preds = %entry
br label %bb8
bb1: ; preds = %entry
- %2 = load i32* @a, align 4
+ %2 = load i32, i32* @a, align 4
%3 = icmp eq i32 %2, 5
br i1 %3, label %bb2, label %bb3
@@ -21,29 +21,29 @@ bb2: ; preds = %bb1
br label %bb8
bb3: ; preds = %bb1
- %4 = load i32* @a, align 4
+ %4 = load i32, i32* @a, align 4
%5 = icmp eq i32 %4, 4
; CHECK: br i1 false, label %bb4, label %bb5
br i1 %5, label %bb4, label %bb5
bb4: ; preds = %bb3
- %6 = load i32* @a, align 4
+ %6 = load i32, i32* @a, align 4
%7 = add i32 %6, 5
br label %bb8
bb5: ; preds = %bb3
- %8 = load i32* @a, align 4
+ %8 = load i32, i32* @a, align 4
%9 = icmp eq i32 %8, 5
; CHECK: br i1 false, label %bb6, label %bb7
br i1 %9, label %bb6, label %bb7
bb6: ; preds = %bb5
- %10 = load i32* @a, align 4
+ %10 = load i32, i32* @a, align 4
%11 = add i32 %10, 4
br label %bb8
bb7: ; preds = %bb5
- %12 = load i32* @a, align 4
+ %12 = load i32, i32* @a, align 4
br label %bb8
bb8: ; preds = %bb7, %bb6, %bb4, %bb2, %bb
diff --git a/llvm/test/Transforms/GVN/crash-no-aa.ll b/llvm/test/Transforms/GVN/crash-no-aa.ll
index cc303297181..f076a8d81ac 100644
--- a/llvm/test/Transforms/GVN/crash-no-aa.ll
+++ b/llvm/test/Transforms/GVN/crash-no-aa.ll
@@ -9,7 +9,7 @@ define i32 @test1({i16, i32} *%P) {
store i16 42, i16* %P2
%P3 = getelementptr {i16, i32}, {i16, i32} *%P, i32 0, i32 1
- %V = load i32* %P3
+ %V = load i32, i32* %P3
ret i32 %V
}
diff --git a/llvm/test/Transforms/GVN/crash.ll b/llvm/test/Transforms/GVN/crash.ll
index 5efba4061da..2abb4194f15 100644
--- a/llvm/test/Transforms/GVN/crash.ll
+++ b/llvm/test/Transforms/GVN/crash.ll
@@ -22,7 +22,7 @@ while.body: ; preds = %while.body.backedge
lor.lhs.false: ; preds = %while.body
%tmp20 = bitcast i32* %o.addr.0 to i32* ; <i32*> [#uses=1]
- %tmp22 = load i32* %tmp20 ; <i32> [#uses=0]
+ %tmp22 = load i32, i32* %tmp20 ; <i32> [#uses=0]
br i1 undef, label %land.lhs.true24, label %if.end31
land.lhs.true24: ; preds = %lor.lhs.false
@@ -34,11 +34,11 @@ if.end31: ; preds = %land.lhs.true24, %l
if.end41: ; preds = %if.end31
%tmp43 = bitcast i32* %o.addr.0 to i32* ; <i32*> [#uses=1]
- %tmp45 = load i32* %tmp43 ; <i32> [#uses=0]
+ %tmp45 = load i32, i32* %tmp43 ; <i32> [#uses=0]
br i1 undef, label %if.then50, label %if.else
if.then50: ; preds = %if.end41
- %tmp53 = load i32** undef ; <i32*> [#uses=1]
+ %tmp53 = load i32*, i32** undef ; <i32*> [#uses=1]
br label %while.body.backedge
if.else: ; preds = %if.end41
@@ -75,14 +75,14 @@ entry:
bb69.i: ; preds = %bb57.i.preheader
%tmp4 = getelementptr inbounds [4 x %struct.attribute_spec*], [4 x %struct.attribute_spec*]* @attribute_tables, i32 0, i32 undef ; <%struct.attribute_spec**> [#uses=1]
- %tmp3 = load %struct.attribute_spec** %tmp4, align 4 ; <%struct.attribute_spec*> [#uses=1]
+ %tmp3 = load %struct.attribute_spec*, %struct.attribute_spec** %tmp4, align 4 ; <%struct.attribute_spec*> [#uses=1]
br label %bb65.i
bb65.i: ; preds = %bb65.i.preheader, %bb64.i
%storemerge6.i = phi i32 [ 1, %bb64.i ], [ 0, %bb69.i ] ; <i32> [#uses=3]
%scevgep14 = getelementptr inbounds %struct.attribute_spec, %struct.attribute_spec* %tmp3, i32 %storemerge6.i, i32 0 ; <i8**> [#uses=1]
- %tmp2 = load i8** %scevgep14, align 4 ; <i8*> [#uses=0]
- %tmp = load %struct.attribute_spec** %tmp4, align 4 ; <%struct.attribute_spec*> [#uses=1]
+ %tmp2 = load i8*, i8** %scevgep14, align 4 ; <i8*> [#uses=0]
+ %tmp = load %struct.attribute_spec*, %struct.attribute_spec** %tmp4, align 4 ; <%struct.attribute_spec*> [#uses=1]
%scevgep1516 = getelementptr inbounds %struct.attribute_spec, %struct.attribute_spec* %tmp, i32 %storemerge6.i, i32 0 ; <i8**> [#uses=0]
unreachable
@@ -101,7 +101,7 @@ bb66.i: ; Unreachable
define i32* @test3() {
do.end17.i:
- %tmp18.i = load i7** undef
+ %tmp18.i = load i7*, i7** undef
%tmp1 = bitcast i7* %tmp18.i to i8*
br i1 undef, label %do.body36.i, label %if.then21.i
@@ -110,12 +110,12 @@ if.then21.i:
ret i32* undef
do.body36.i:
- %ivar38.i = load i64* @g
+ %ivar38.i = load i64, i64* @g
%tmp3 = bitcast i7* %tmp18.i to i8*
%add.ptr39.sum.i = add i64 %ivar38.i, 8
%tmp40.i = getelementptr inbounds i8, i8* %tmp3, i64 %add.ptr39.sum.i
%tmp4 = bitcast i8* %tmp40.i to i64*
- %tmp41.i = load i64* %tmp4
+ %tmp41.i = load i64, i64* %tmp4
br i1 undef, label %if.then48.i, label %do.body57.i
if.then48.i:
@@ -123,13 +123,13 @@ if.then48.i:
br label %do.body57.i
do.body57.i:
- %tmp58.i = load i7** undef
- %ivar59.i = load i64* @g
+ %tmp58.i = load i7*, i7** undef
+ %ivar59.i = load i64, i64* @g
%tmp5 = bitcast i7* %tmp58.i to i8*
%add.ptr65.sum.i = add i64 %ivar59.i, 8
%tmp66.i = getelementptr inbounds i8, i8* %tmp5, i64 %add.ptr65.sum.i
%tmp6 = bitcast i8* %tmp66.i to i64*
- %tmp67.i = load i64* %tmp6
+ %tmp67.i = load i64, i64* %tmp6
ret i32* undef
}
@@ -145,7 +145,7 @@ dead:
%P2 = getelementptr i32, i32 *%P2, i32 52
%Q2 = getelementptr i32, i32 *%Q2, i32 52
store i32 4, i32* %P2
- %A = load i32* %Q2
+ %A = load i32, i32* %Q2
br i1 true, label %dead, label %dead2
dead2:
@@ -156,10 +156,10 @@ dead2:
; PR9841
define fastcc i8 @test5(i8* %P) nounwind {
entry:
- %0 = load i8* %P, align 2
+ %0 = load i8, i8* %P, align 2
%Q = getelementptr i8, i8* %P, i32 1
- %1 = load i8* %Q, align 1
+ %1 = load i8, i8* %Q, align 1
ret i8 %1
}
@@ -187,7 +187,7 @@ u1.bb:
br label %unreachable.bb
u2.bb:
- %0 = load i32* undef, align 4
+ %0 = load i32, i32* undef, align 4
%conv.i.i.i.i.i = zext i32 %0 to i64
br label %u2.bb
diff --git a/llvm/test/Transforms/GVN/invariant-load.ll b/llvm/test/Transforms/GVN/invariant-load.ll
index 2a83c4507b5..162d49862b7 100644
--- a/llvm/test/Transforms/GVN/invariant-load.ll
+++ b/llvm/test/Transforms/GVN/invariant-load.ll
@@ -3,13 +3,13 @@
define i32 @test1(i32* nocapture %p, i8* nocapture %q) {
; CHECK-LABEL: test1
-; CHECK: %x = load i32* %p, align 4, !invariant.load !0
+; CHECK: %x = load i32, i32* %p, align 4, !invariant.load !0
; CHECK-NOT: %y = load
entry:
- %x = load i32* %p, align 4, !invariant.load !0
+ %x = load i32, i32* %p, align 4, !invariant.load !0
%conv = trunc i32 %x to i8
store i8 %conv, i8* %q, align 1
- %y = load i32* %p, align 4, !invariant.load !0
+ %y = load i32, i32* %p, align 4, !invariant.load !0
%add = add i32 %y, 1
ret i32 %add
}
@@ -19,10 +19,10 @@ define i32 @test2(i32* nocapture %p, i8* nocapture %q) {
; CHECK-NOT: !invariant.load
; CHECK-NOT: %y = load
entry:
- %x = load i32* %p, align 4
+ %x = load i32, i32* %p, align 4
%conv = trunc i32 %x to i8
store i8 %conv, i8* %q, align 1
- %y = load i32* %p, align 4, !invariant.load !0
+ %y = load i32, i32* %p, align 4, !invariant.load !0
%add = add i32 %y, 1
ret i32 %add
}
@@ -33,7 +33,7 @@ define i32 @test3(i1 %cnd, i32* %p, i32* %q) {
; CHECK-LABEL: test3
; CHECK-NOT: load
entry:
- %v1 = load i32* %p
+ %v1 = load i32, i32* %p
br i1 %cnd, label %bb1, label %bb2
bb1:
@@ -41,7 +41,7 @@ bb1:
br label %bb2
bb2:
- %v2 = load i32* %p, !invariant.load !0
+ %v2 = load i32, i32* %p, !invariant.load !0
%res = sub i32 %v1, %v2
ret i32 %res
}
@@ -52,7 +52,7 @@ define i32 @test4(i1 %cnd, i32* %p, i32* %q) {
; CHECK-LABEL: test4
; %v2 is redundant, but GVN currently doesn't catch that
entry:
- %v1 = load i32* %p, !invariant.load !0
+ %v1 = load i32, i32* %p, !invariant.load !0
br i1 %cnd, label %bb1, label %bb2
bb1:
@@ -60,7 +60,7 @@ bb1:
br label %bb2
bb2:
- %v2 = load i32* %p
+ %v2 = load i32, i32* %p
%res = sub i32 %v1, %v2
ret i32 %res
}
diff --git a/llvm/test/Transforms/GVN/lifetime-simple.ll b/llvm/test/Transforms/GVN/lifetime-simple.ll
index 02f7bcc9e85..d03b62c8158 100644
--- a/llvm/test/Transforms/GVN/lifetime-simple.ll
+++ b/llvm/test/Transforms/GVN/lifetime-simple.ll
@@ -9,10 +9,10 @@ define i8 @test(i8* %P) nounwind {
; CHECK: lifetime.end
entry:
call void @llvm.lifetime.start(i64 32, i8* %P)
- %0 = load i8* %P
+ %0 = load i8, i8* %P
store i8 1, i8* %P
call void @llvm.lifetime.end(i64 32, i8* %P)
- %1 = load i8* %P
+ %1 = load i8, i8* %P
ret i8 %1
}
diff --git a/llvm/test/Transforms/GVN/load-constant-mem.ll b/llvm/test/Transforms/GVN/load-constant-mem.ll
index 9bcf69c753c..f870485630f 100644
--- a/llvm/test/Transforms/GVN/load-constant-mem.ll
+++ b/llvm/test/Transforms/GVN/load-constant-mem.ll
@@ -5,9 +5,9 @@
define i32 @test(i8* %p, i32 %i) nounwind {
entry:
%P = getelementptr [4 x i32], [4 x i32]* @G, i32 0, i32 %i
- %A = load i32* %P
+ %A = load i32, i32* %P
store i8 4, i8* %p
- %B = load i32* %P
+ %B = load i32, i32* %P
%C = sub i32 %A, %B
ret i32 %C
}
diff --git a/llvm/test/Transforms/GVN/load-from-unreachable-predecessor.ll b/llvm/test/Transforms/GVN/load-from-unreachable-predecessor.ll
index b676d958aa4..29ea14d62b4 100644
--- a/llvm/test/Transforms/GVN/load-from-unreachable-predecessor.ll
+++ b/llvm/test/Transforms/GVN/load-from-unreachable-predecessor.ll
@@ -8,13 +8,13 @@ define i32 @f(i32** %f) {
; Load should be removed, since it's ignored.
; CHECK-NEXT: br label
bb0:
- %bar = load i32** %f
+ %bar = load i32*, i32** %f
br label %bb2
bb1:
- %zed = load i32** %f
+ %zed = load i32*, i32** %f
br i1 false, label %bb1, label %bb2
bb2:
%foo = phi i32* [ null, %bb0 ], [ %zed, %bb1 ]
- %storemerge = load i32* %foo
+ %storemerge = load i32, i32* %foo
ret i32 %storemerge
}
diff --git a/llvm/test/Transforms/GVN/load-pre-align.ll b/llvm/test/Transforms/GVN/load-pre-align.ll
index 4816af2f441..1198cafaeed 100644
--- a/llvm/test/Transforms/GVN/load-pre-align.ll
+++ b/llvm/test/Transforms/GVN/load-pre-align.ll
@@ -25,7 +25,7 @@ for.cond.for.end_crit_edge:
br label %for.end
for.body:
- %tmp3 = load i32* @p, align 8
+ %tmp3 = load i32, i32* @p, align 8
%dec = add i32 %tmp3, -1
store i32 %dec, i32* @p
%cmp6 = icmp slt i32 %dec, 0
@@ -39,6 +39,6 @@ for.inc:
br label %for.cond
for.end:
- %tmp9 = load i32* @p, align 8
+ %tmp9 = load i32, i32* @p, align 8
ret i32 %tmp9
}
diff --git a/llvm/test/Transforms/GVN/load-pre-licm.ll b/llvm/test/Transforms/GVN/load-pre-licm.ll
index 85608536c9e..d14b01caf77 100644
--- a/llvm/test/Transforms/GVN/load-pre-licm.ll
+++ b/llvm/test/Transforms/GVN/load-pre-licm.ll
@@ -19,8 +19,8 @@ while.body5:
%arrayidx9 = getelementptr [5001 x i32], [5001 x i32]* @sortlist, i32 0, i32 %tmp5
%tmp6 = add i32 %indvar, 1
%arrayidx = getelementptr [5001 x i32], [5001 x i32]* @sortlist, i32 0, i32 %tmp6
- %tmp7 = load i32* %arrayidx, align 4
- %tmp10 = load i32* %arrayidx9, align 4
+ %tmp7 = load i32, i32* %arrayidx, align 4
+ %tmp10 = load i32, i32* %arrayidx9, align 4
%cmp11 = icmp sgt i32 %tmp7, %tmp10
br i1 %cmp11, label %if.then, label %if.end
diff --git a/llvm/test/Transforms/GVN/load-pre-nonlocal.ll b/llvm/test/Transforms/GVN/load-pre-nonlocal.ll
index f8a5b66fcae..91c29bf8e5a 100644
--- a/llvm/test/Transforms/GVN/load-pre-nonlocal.ll
+++ b/llvm/test/Transforms/GVN/load-pre-nonlocal.ll
@@ -12,8 +12,8 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; CHECK-LABEL: define i32 @volatile_load
; CHECK: for.body:
-; CHECK: %2 = load i32*
-; CHECK: %3 = load volatile i32*
+; CHECK: %2 = load i32, i32*
+; CHECK: %3 = load volatile i32, i32*
; CHECK: for.cond.for.end_crit_edge:
define i32 @volatile_load(i32 %n) {
@@ -22,18 +22,18 @@ entry:
br i1 %cmp6, label %for.body.lr.ph, label %for.end
for.body.lr.ph:
- %0 = load i32** @a2, align 8, !tbaa !1
- %1 = load i32** @a, align 8, !tbaa !1
+ %0 = load i32*, i32** @a2, align 8, !tbaa !1
+ %1 = load i32*, i32** @a, align 8, !tbaa !1
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%s.09 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
%p.08 = phi i32* [ %0, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
- %2 = load i32* %p.08, align 4, !tbaa !5
+ %2 = load i32, i32* %p.08, align 4, !tbaa !5
%arrayidx = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
store i32 %2, i32* %arrayidx, align 4, !tbaa !5
- %3 = load volatile i32* %p.08, align 4, !tbaa !5
+ %3 = load volatile i32, i32* %p.08, align 4, !tbaa !5
%add = add nsw i32 %3, %s.09
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%incdec.ptr = getelementptr inbounds i32, i32* %p.08, i64 1
@@ -54,7 +54,7 @@ for.end:
; CHECK-LABEL: define i32 @overaligned_load
; CHECK: if.end:
-; CHECK-NOT: %1 = load i32*
+; CHECK-NOT: %1 = load i32, i32*
define i32 @overaligned_load(i32 %a, i32* nocapture %b) {
entry:
@@ -62,7 +62,7 @@ entry:
br i1 %cmp, label %if.then, label %if.else
if.then:
- %0 = load i32* getelementptr inbounds (%struct.S1* @s1, i64 0, i32 0), align 8, !tbaa !5
+ %0 = load i32, i32* getelementptr inbounds (%struct.S1* @s1, i64 0, i32 0), align 8, !tbaa !5
br label %if.end
if.else:
@@ -74,7 +74,7 @@ if.end:
%i.0 = phi i32 [ %0, %if.then ], [ 0, %if.else ]
%p.0 = phi i32* [ getelementptr inbounds (%struct.S1* @s1, i64 0, i32 0), %if.then ], [ %b, %if.else ]
%add.ptr = getelementptr inbounds i32, i32* %p.0, i64 1
- %1 = load i32* %add.ptr, align 4, !tbaa !5
+ %1 = load i32, i32* %add.ptr, align 4, !tbaa !5
%add1 = add nsw i32 %1, %i.0
ret i32 %add1
}
diff --git a/llvm/test/Transforms/GVN/lpre-call-wrap-2.ll b/llvm/test/Transforms/GVN/lpre-call-wrap-2.ll
index c11c31c4f64..5dc779ef40d 100644
--- a/llvm/test/Transforms/GVN/lpre-call-wrap-2.ll
+++ b/llvm/test/Transforms/GVN/lpre-call-wrap-2.ll
@@ -16,10 +16,10 @@ target triple = "i386-apple-darwin7"
define void @bi_windup(i8* %outbuf, i8 zeroext %bi_buf) nounwind {
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = load i32* @outcnt, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @outcnt, align 4 ; <i32> [#uses=1]
%1 = getelementptr i8, i8* %outbuf, i32 %0 ; <i8*> [#uses=1]
store i8 %bi_buf, i8* %1, align 1
- %2 = load i32* @outcnt, align 4 ; <i32> [#uses=1]
+ %2 = load i32, i32* @outcnt, align 4 ; <i32> [#uses=1]
%3 = icmp eq i32 %2, 16384 ; <i1> [#uses=1]
br i1 %3, label %bb, label %bb1
@@ -31,7 +31,7 @@ bb1: ; preds = %bb, %entry
; CHECK: bb1:
; CHECK-NEXT: phi
; CHECK-NEXT: getelementptr
- %4 = load i32* @outcnt, align 4 ; <i32> [#uses=1]
+ %4 = load i32, i32* @outcnt, align 4 ; <i32> [#uses=1]
%5 = getelementptr i8, i8* %outbuf, i32 %4 ; <i8*> [#uses=1]
store i8 %bi_buf, i8* %5, align 1
ret void
diff --git a/llvm/test/Transforms/GVN/lpre-call-wrap.ll b/llvm/test/Transforms/GVN/lpre-call-wrap.ll
index fb41d940313..27483054939 100644
--- a/llvm/test/Transforms/GVN/lpre-call-wrap.ll
+++ b/llvm/test/Transforms/GVN/lpre-call-wrap.ll
@@ -20,7 +20,7 @@ target triple = "i386-apple-darwin7"
define void @_Z12testfunctionR1A(%struct.A* %iter) {
entry:
%0 = getelementptr %struct.A, %struct.A* %iter, i32 0, i32 0 ; <i32*> [#uses=3]
- %1 = load i32* %0, align 4 ; <i32> [#uses=2]
+ %1 = load i32, i32* %0, align 4 ; <i32> [#uses=2]
%2 = icmp eq i32 %1, 0 ; <i1> [#uses=1]
br i1 %2, label %return, label %bb.nph
@@ -32,7 +32,7 @@ bb: ; preds = %bb3.backedge, %bb.nph
%.rle = phi i32 [ %1, %bb.nph ], [ %7, %bb3.backedge ] ; <i32> [#uses=1]
%4 = add i32 %.rle, 1 ; <i32> [#uses=2]
store i32 %4, i32* %0, align 4
- %5 = load i32* %3, align 4 ; <i32> [#uses=1]
+ %5 = load i32, i32* %3, align 4 ; <i32> [#uses=1]
%6 = icmp eq i32 %4, %5 ; <i1> [#uses=1]
br i1 %6, label %bb1, label %bb3.backedge
@@ -44,7 +44,7 @@ bb3.backedge: ; preds = %bb, %bb1
; CHECK: bb3.backedge:
; CHECK-NEXT: phi
; CHECK-NEXT: icmp
- %7 = load i32* %0, align 4 ; <i32> [#uses=2]
+ %7 = load i32, i32* %0, align 4 ; <i32> [#uses=2]
%8 = icmp eq i32 %7, 0 ; <i1> [#uses=1]
br i1 %8, label %return, label %bb
diff --git a/llvm/test/Transforms/GVN/malloc-load-removal.ll b/llvm/test/Transforms/GVN/malloc-load-removal.ll
index d2d2fd77afe..1d7a2ddc4c2 100644
--- a/llvm/test/Transforms/GVN/malloc-load-removal.ll
+++ b/llvm/test/Transforms/GVN/malloc-load-removal.ll
@@ -10,7 +10,7 @@ declare i8* @malloc(i64) nounwind
define noalias i8* @test1() nounwind uwtable ssp {
entry:
%call = tail call i8* @malloc(i64 100) nounwind
- %0 = load i8* %call, align 1
+ %0 = load i8, i8* %call, align 1
%tobool = icmp eq i8 %0, 0
br i1 %tobool, label %if.end, label %if.then
@@ -35,7 +35,7 @@ declare i8* @_Znwm(i64) nounwind
define noalias i8* @test2() nounwind uwtable ssp {
entry:
%call = tail call i8* @_Znwm(i64 100) nounwind
- %0 = load i8* %call, align 1
+ %0 = load i8, i8* %call, align 1
%tobool = icmp eq i8 %0, 0
br i1 %tobool, label %if.end, label %if.then
diff --git a/llvm/test/Transforms/GVN/noalias.ll b/llvm/test/Transforms/GVN/noalias.ll
index 6c310fad7ec..cfff096b695 100644
--- a/llvm/test/Transforms/GVN/noalias.ll
+++ b/llvm/test/Transforms/GVN/noalias.ll
@@ -2,21 +2,21 @@
define i32 @test1(i32* %p, i32* %q) {
; CHECK-LABEL: @test1(i32* %p, i32* %q)
-; CHECK: load i32* %p
+; CHECK: load i32, i32* %p
; CHECK-NOT: noalias
; CHECK: %c = add i32 %a, %a
- %a = load i32* %p, !noalias !0
- %b = load i32* %p
+ %a = load i32, i32* %p, !noalias !0
+ %b = load i32, i32* %p
%c = add i32 %a, %b
ret i32 %c
}
define i32 @test2(i32* %p, i32* %q) {
; CHECK-LABEL: @test2(i32* %p, i32* %q)
-; CHECK: load i32* %p, !alias.scope !0
+; CHECK: load i32, i32* %p, !alias.scope !0
; CHECK: %c = add i32 %a, %a
- %a = load i32* %p, !alias.scope !0
- %b = load i32* %p, !alias.scope !0
+ %a = load i32, i32* %p, !alias.scope !0
+ %b = load i32, i32* %p, !alias.scope !0
%c = add i32 %a, %b
ret i32 %c
}
@@ -27,10 +27,10 @@ define i32 @test2(i32* %p, i32* %q) {
; throw in between.
define i32 @test3(i32* %p, i32* %q) {
; CHECK-LABEL: @test3(i32* %p, i32* %q)
-; CHECK: load i32* %p, !alias.scope !1
+; CHECK: load i32, i32* %p, !alias.scope !1
; CHECK: %c = add i32 %a, %a
- %a = load i32* %p, !alias.scope !1
- %b = load i32* %p, !alias.scope !2
+ %a = load i32, i32* %p, !alias.scope !1
+ %b = load i32, i32* %p, !alias.scope !2
%c = add i32 %a, %b
ret i32 %c
}
diff --git a/llvm/test/Transforms/GVN/non-local-offset.ll b/llvm/test/Transforms/GVN/non-local-offset.ll
index 36d2f1a8152..2373ef5888b 100644
--- a/llvm/test/Transforms/GVN/non-local-offset.ll
+++ b/llvm/test/Transforms/GVN/non-local-offset.ll
@@ -18,7 +18,7 @@ entry:
br i1 %c, label %if.else, label %if.then
if.then:
- %t = load i32* %p
+ %t = load i32, i32* %p
store i32 %t, i32* %q
ret void
@@ -35,7 +35,7 @@ if.else:
; CHECK-NEXT: store i32 0, i32* %q
; CHECK-NEXT: ret void
; CHECK: if.else:
-; CHECK: load i64* %pc
+; CHECK: load i64, i64* %pc
; CHECK: store i64
define void @watch_out_for_size_change(i1 %c, i32* %p, i32* %q) nounwind {
@@ -46,14 +46,14 @@ entry:
br i1 %c, label %if.else, label %if.then
if.then:
- %t = load i32* %p
+ %t = load i32, i32* %p
store i32 %t, i32* %q
ret void
if.else:
%pc = bitcast i32* %p to i64*
%qc = bitcast i32* %q to i64*
- %t64 = load i64* %pc
+ %t64 = load i64, i64* %pc
store i64 %t64, i64* %qc
ret void
}
diff --git a/llvm/test/Transforms/GVN/nonescaping-malloc.ll b/llvm/test/Transforms/GVN/nonescaping-malloc.ll
index 1ff9b144436..f83b317fade 100644
--- a/llvm/test/Transforms/GVN/nonescaping-malloc.ll
+++ b/llvm/test/Transforms/GVN/nonescaping-malloc.ll
@@ -40,16 +40,16 @@ declare i32 @_ZN4llvm13StringMapImpl15LookupBucketForENS_9StringRefE(%"struct.ll
define linkonce_odr %"struct.llvm::StringMapEntry<void*>"* @_ZN4llvm9StringMapIPvNS_15MallocAllocatorEE16GetOrCreateValueERKNS_9StringRefE(%"struct.llvm::StringMap<void*,llvm::MallocAllocator>"* %this, %"struct.llvm::StringRef"* nocapture %Key) ssp align 2 {
entry:
%elt = bitcast %"struct.llvm::StringRef"* %Key to i64*
- %val = load i64* %elt
+ %val = load i64, i64* %elt
%tmp = getelementptr inbounds %"struct.llvm::StringRef", %"struct.llvm::StringRef"* %Key, i64 0, i32 1
- %val2 = load i64* %tmp
+ %val2 = load i64, i64* %tmp
%tmp2.i = getelementptr inbounds %"struct.llvm::StringMap<void*,llvm::MallocAllocator>", %"struct.llvm::StringMap<void*,llvm::MallocAllocator>"* %this, i64 0, i32 0
%tmp3.i = tail call i32 @_ZN4llvm13StringMapImpl15LookupBucketForENS_9StringRefE(%"struct.llvm::StringMapImpl"* %tmp2.i, i64 %val, i64 %val2)
%tmp4.i = getelementptr inbounds %"struct.llvm::StringMap<void*,llvm::MallocAllocator>", %"struct.llvm::StringMap<void*,llvm::MallocAllocator>"* %this, i64 0, i32 0, i32 0
- %tmp5.i = load %"struct.llvm::StringMapImpl::ItemBucket"** %tmp4.i, align 8
+ %tmp5.i = load %"struct.llvm::StringMapImpl::ItemBucket"*, %"struct.llvm::StringMapImpl::ItemBucket"** %tmp4.i, align 8
%tmp6.i = zext i32 %tmp3.i to i64
%tmp7.i = getelementptr inbounds %"struct.llvm::StringMapImpl::ItemBucket", %"struct.llvm::StringMapImpl::ItemBucket"* %tmp5.i, i64 %tmp6.i, i32 1
- %tmp8.i = load %"struct.llvm::StringMapEntryBase"** %tmp7.i, align 8
+ %tmp8.i = load %"struct.llvm::StringMapEntryBase"*, %"struct.llvm::StringMapEntryBase"** %tmp7.i, align 8
%tmp9.i = icmp eq %"struct.llvm::StringMapEntryBase"* %tmp8.i, null
%tmp13.i = icmp eq %"struct.llvm::StringMapEntryBase"* %tmp8.i, inttoptr (i64 -1 to %"struct.llvm::StringMapEntryBase"*)
%or.cond.i = or i1 %tmp9.i, %tmp13.i
@@ -87,7 +87,7 @@ _ZN4llvm14StringMapEntryIPvE6CreateINS_15MallocAllocatorES1_EEPS2_PKcS7_RT_T0_.e
%tmp.i.i.i = getelementptr inbounds i8, i8* %tmp.i20.i.i, i64 8
%1 = bitcast i8* %tmp.i.i.i to i8**
store i8* null, i8** %1, align 8
- %tmp22.i = load %"struct.llvm::StringMapEntryBase"** %tmp7.i, align 8
+ %tmp22.i = load %"struct.llvm::StringMapEntryBase"*, %"struct.llvm::StringMapEntryBase"** %tmp7.i, align 8
%tmp24.i = icmp eq %"struct.llvm::StringMapEntryBase"* %tmp22.i, inttoptr (i64 -1 to %"struct.llvm::StringMapEntryBase"*)
br i1 %tmp24.i, label %bb9.i, label %_ZN4llvm9StringMapIPvNS_15MallocAllocatorEE16GetOrCreateValueIS1_EERNS_14StringMapEntryIS1_EENS_9StringRefET_.exit
@@ -97,7 +97,7 @@ bb6.i: ; preds = %entry
bb9.i: ; preds = %_ZN4llvm14StringMapEntryIPvE6CreateINS_15MallocAllocatorES1_EEPS2_PKcS7_RT_T0_.exit.i
%tmp25.i = getelementptr inbounds %"struct.llvm::StringMap<void*,llvm::MallocAllocator>", %"struct.llvm::StringMap<void*,llvm::MallocAllocator>"* %this, i64 0, i32 0, i32 3
- %tmp26.i = load i32* %tmp25.i, align 8
+ %tmp26.i = load i32, i32* %tmp25.i, align 8
%tmp27.i = add i32 %tmp26.i, -1
store i32 %tmp27.i, i32* %tmp25.i, align 8
ret %"struct.llvm::StringMapEntry<void*>"* %tmp10.i.i
diff --git a/llvm/test/Transforms/GVN/null-aliases-nothing.ll b/llvm/test/Transforms/GVN/null-aliases-nothing.ll
index 0826f9e663f..0b7c5eb5bf9 100644
--- a/llvm/test/Transforms/GVN/null-aliases-nothing.ll
+++ b/llvm/test/Transforms/GVN/null-aliases-nothing.ll
@@ -5,11 +5,11 @@ declare void @test1f(i8*)
define void @test1(%t* noalias %stuff ) {
%p = getelementptr inbounds %t, %t* %stuff, i32 0, i32 0
- %before = load i32* %p
+ %before = load i32, i32* %p
call void @test1f(i8* null)
- %after = load i32* %p ; <--- This should be a dead load
+ %after = load i32, i32* %p ; <--- This should be a dead load
%sum = add i32 %before, %after
store i32 %sum, i32* %p
diff --git a/llvm/test/Transforms/GVN/phi-translate-partial-alias.ll b/llvm/test/Transforms/GVN/phi-translate-partial-alias.ll
index 84aeed1ecc6..f1cf53e45de 100644
--- a/llvm/test/Transforms/GVN/phi-translate-partial-alias.ll
+++ b/llvm/test/Transforms/GVN/phi-translate-partial-alias.ll
@@ -8,18 +8,18 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; CHECK: define void @test0(i8* %begin)
; CHECK: loop:
-; CHECK: %l0 = load i8* %phi
+; CHECK: %l0 = load i8, i8* %phi
; CHECK: call void @bar(i8 %l0)
-; CHECK: %l1 = load i8* %phi
+; CHECK: %l1 = load i8, i8* %phi
define void @test0(i8* %begin) {
entry:
br label %loop
loop:
%phi = phi i8* [ %begin, %entry ], [ %next, %loop ]
- %l0 = load i8* %phi
+ %l0 = load i8, i8* %phi
call void @bar(i8 %l0)
- %l1 = load i8* %phi
+ %l1 = load i8, i8* %phi
%next = getelementptr inbounds i8, i8* %phi, i8 %l1
br label %loop
}
diff --git a/llvm/test/Transforms/GVN/phi-translate.ll b/llvm/test/Transforms/GVN/phi-translate.ll
index 7fe95c6bb48..6068b05aadf 100644
--- a/llvm/test/Transforms/GVN/phi-translate.ll
+++ b/llvm/test/Transforms/GVN/phi-translate.ll
@@ -4,7 +4,7 @@ target datalayout = "e-p:64:64:64"
; CHECK-LABEL: @foo(
; CHECK: entry.end_crit_edge:
-; CHECK: %n.pre = load i32* %q.phi.trans.insert
+; CHECK: %n.pre = load i32, i32* %q.phi.trans.insert
; CHECK: then:
; CHECK: store i32 %z
; CHECK: end:
@@ -26,6 +26,6 @@ then:
end:
%j = sext i32 %x to i64
%q = getelementptr [100 x i32], [100 x i32]* @G, i64 0, i64 %j
- %n = load i32* %q
+ %n = load i32, i32* %q
ret i32 %n
}
diff --git a/llvm/test/Transforms/GVN/pr10820.ll b/llvm/test/Transforms/GVN/pr10820.ll
index 12c1e70fc38..c6a9a931df0 100644
--- a/llvm/test/Transforms/GVN/pr10820.ll
+++ b/llvm/test/Transforms/GVN/pr10820.ll
@@ -11,7 +11,7 @@ entry:
; CHECK: store i32
store i32 402662078, i32* bitcast (i31* @g to i32*), align 8
; CHECK-NOT: load i31
- %0 = load i31* @g, align 8
+ %0 = load i31, i31* @g, align 8
; CHECK: store i31
store i31 %0, i31* undef, align 1
unreachable
diff --git a/llvm/test/Transforms/GVN/pr14166.ll b/llvm/test/Transforms/GVN/pr14166.ll
index 4d682050687..eafe418dbdc 100644
--- a/llvm/test/Transforms/GVN/pr14166.ll
+++ b/llvm/test/Transforms/GVN/pr14166.ll
@@ -4,16 +4,16 @@ target triple = "i386-pc-linux-gnu"
define <2 x i32> @test1() {
%v1 = alloca <2 x i32>
call void @anything(<2 x i32>* %v1)
- %v2 = load <2 x i32>* %v1
+ %v2 = load <2 x i32>, <2 x i32>* %v1
%v3 = inttoptr <2 x i32> %v2 to <2 x i8*>
%v4 = bitcast <2 x i32>* %v1 to <2 x i8*>*
store <2 x i8*> %v3, <2 x i8*>* %v4
- %v5 = load <2 x i32>* %v1
+ %v5 = load <2 x i32>, <2 x i32>* %v1
ret <2 x i32> %v5
; CHECK-LABEL: @test1(
; CHECK: %v1 = alloca <2 x i32>
; CHECK: call void @anything(<2 x i32>* %v1)
-; CHECK: %v2 = load <2 x i32>* %v1
+; CHECK: %v2 = load <2 x i32>, <2 x i32>* %v1
; CHECK: %v3 = inttoptr <2 x i32> %v2 to <2 x i8*>
; CHECK: %v4 = bitcast <2 x i32>* %v1 to <2 x i8*>*
; CHECK: store <2 x i8*> %v3, <2 x i8*>* %v4
diff --git a/llvm/test/Transforms/GVN/pr17732.ll b/llvm/test/Transforms/GVN/pr17732.ll
index 606a195b853..bf838c910b7 100644
--- a/llvm/test/Transforms/GVN/pr17732.ll
+++ b/llvm/test/Transforms/GVN/pr17732.ll
@@ -15,10 +15,10 @@ target triple = "x86_64-unknown-linux-gnu"
define i32 @main() {
entry:
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds (%struct.with_array* @array_with_zeroinit, i64 0, i32 0, i64 0), i8* getelementptr inbounds ({ [2 x i8], i32, i8, [3 x i8] }* @main.obj_with_array, i64 0, i32 0, i64 0), i64 12, i32 4, i1 false)
- %0 = load i8* getelementptr inbounds (%struct.with_array* @array_with_zeroinit, i64 0, i32 2), align 4
+ %0 = load i8, i8* getelementptr inbounds (%struct.with_array* @array_with_zeroinit, i64 0, i32 2), align 4
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* getelementptr inbounds (%struct.with_vector* @vector_with_zeroinit, i64 0, i32 0, i64 0), i8* getelementptr inbounds ({ <2 x i8>, i32, i8, [3 x i8] }* @main.obj_with_vector, i64 0, i32 0, i64 0), i64 12, i32 4, i1 false)
- %1 = load i8* getelementptr inbounds (%struct.with_vector* @vector_with_zeroinit, i64 0, i32 2), align 4
+ %1 = load i8, i8* getelementptr inbounds (%struct.with_vector* @vector_with_zeroinit, i64 0, i32 2), align 4
%conv0 = sext i8 %0 to i32
%conv1 = sext i8 %1 to i32
%and = and i32 %conv0, %conv1
diff --git a/llvm/test/Transforms/GVN/pr17852.ll b/llvm/test/Transforms/GVN/pr17852.ll
index 0d5ee301970..9a8a709ba6a 100644
--- a/llvm/test/Transforms/GVN/pr17852.ll
+++ b/llvm/test/Transforms/GVN/pr17852.ll
@@ -13,7 +13,7 @@ for.end: ; preds = %for.cond
if.then: ; preds = %for.end
%f22 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 2
%f7 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 5
- %tmp7 = load i32* %f7, align 8
+ %tmp7 = load i32, i32* %f7, align 8
br label %if.end40
if.else: ; preds = %for.end
br i1 false, label %for.cond18, label %if.then6
@@ -22,7 +22,7 @@ if.then6: ; preds = %if.else
%tmp10 = bitcast %struct.S0* %p1 to i16*
%f5 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 3
%tmp11 = bitcast [2 x i8]* %f5 to i16*
- %bf.load13 = load i16* %tmp11, align 8
+ %bf.load13 = load i16, i16* %tmp11, align 8
br label %if.end36
for.cond18: ; preds = %if.else
call void @fn4()
@@ -32,33 +32,33 @@ if.end: ; preds = %for.cond18
%f925 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 7
%f526 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 3
%tmp15 = bitcast [2 x i8]* %f526 to i16*
- %bf.load27 = load i16* %tmp15, align 8
+ %bf.load27 = load i16, i16* %tmp15, align 8
%tmp16 = bitcast %struct.S0* %p1 to i16*
br label %if.end36
if.end36: ; preds = %if.end, %for.cond18, %if.then6
%f537 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 3
%tmp17 = bitcast [2 x i8]* %f537 to i16*
- %bf.load38 = load i16* %tmp17, align 8
+ %bf.load38 = load i16, i16* %tmp17, align 8
%bf.clear39 = and i16 %bf.load38, -16384
br label %if.end40
if.end40: ; preds = %if.end36, %if.then
%f6 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 4
- %tmp18 = load i32* %f6, align 4
+ %tmp18 = load i32, i32* %f6, align 4
call void @fn2(i32 %tmp18)
%f8 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 6
- %tmp19 = load i32* %f8, align 4
+ %tmp19 = load i32, i32* %f8, align 4
%tobool41 = icmp eq i32 %tmp19, 0
br i1 true, label %if.end50, label %if.then42
if.then42: ; preds = %if.end40
%tmp20 = bitcast %struct.S0* %p1 to i16*
%f547 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 3
%tmp21 = bitcast [2 x i8]* %f547 to i16*
- %bf.load48 = load i16* %tmp21, align 8
+ %bf.load48 = load i16, i16* %tmp21, align 8
br label %if.end50
if.end50: ; preds = %if.then42, %if.end40
%f551 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 3
%tmp22 = bitcast [2 x i8]* %f551 to i16*
- %bf.load52 = load i16* %tmp22, align 8
+ %bf.load52 = load i16, i16* %tmp22, align 8
%bf.clear53 = and i16 %bf.load52, -16384
ret void
}
diff --git a/llvm/test/Transforms/GVN/pre-basic-add.ll b/llvm/test/Transforms/GVN/pre-basic-add.ll
index 4bde05c3387..460d1f939fe 100644
--- a/llvm/test/Transforms/GVN/pre-basic-add.ll
+++ b/llvm/test/Transforms/GVN/pre-basic-add.ll
@@ -5,7 +5,7 @@
define i32 @test() nounwind {
entry:
- %0 = load i32* @H, align 4 ; <i32> [#uses=2]
+ %0 = load i32, i32* @H, align 4 ; <i32> [#uses=2]
%1 = call i32 (...)* @foo() nounwind ; <i32> [#uses=1]
%2 = icmp ne i32 %1, 0 ; <i1> [#uses=1]
br i1 %2, label %bb, label %bb1
diff --git a/llvm/test/Transforms/GVN/pre-gep-load.ll b/llvm/test/Transforms/GVN/pre-gep-load.ll
index 765866f9824..291af359a7a 100644
--- a/llvm/test/Transforms/GVN/pre-gep-load.ll
+++ b/llvm/test/Transforms/GVN/pre-gep-load.ll
@@ -14,9 +14,9 @@ entry:
sw.bb: ; preds = %entry, %entry
%idxprom = sext i32 %i to i64
%arrayidx = getelementptr inbounds double*, double** %p, i64 0
- %0 = load double** %arrayidx, align 8
+ %0 = load double*, double** %arrayidx, align 8
%arrayidx1 = getelementptr inbounds double, double* %0, i64 %idxprom
- %1 = load double* %arrayidx1, align 8
+ %1 = load double, double* %arrayidx1, align 8
%sub = fsub double %1, 1.000000e+00
%cmp = fcmp olt double %sub, 0.000000e+00
br i1 %cmp, label %if.then, label %if.end
@@ -30,9 +30,9 @@ if.end: ; preds = %sw.bb
sw.bb2: ; preds = %if.end, %entry
%idxprom3 = sext i32 %i to i64
%arrayidx4 = getelementptr inbounds double*, double** %p, i64 0
- %2 = load double** %arrayidx4, align 8
+ %2 = load double*, double** %arrayidx4, align 8
%arrayidx5 = getelementptr inbounds double, double* %2, i64 %idxprom3
- %3 = load double* %arrayidx5, align 8
+ %3 = load double, double* %arrayidx5, align 8
; CHECK: sw.bb2:
; CHECK-NEXT-NOT: sext
; CHECK-NEXT: phi double [
diff --git a/llvm/test/Transforms/GVN/pre-load.ll b/llvm/test/Transforms/GVN/pre-load.ll
index 1d75c4913c3..24221d540f2 100644
--- a/llvm/test/Transforms/GVN/pre-load.ll
+++ b/llvm/test/Transforms/GVN/pre-load.ll
@@ -9,14 +9,14 @@ block1:
block2:
br label %block4
; CHECK: block2:
-; CHECK-NEXT: load i32* %p
+; CHECK-NEXT: load i32, i32* %p
block3:
store i32 0, i32* %p
br label %block4
block4:
- %PRE = load i32* %p
+ %PRE = load i32, i32* %p
ret i32 %PRE
; CHECK: block4:
; CHECK-NEXT: phi i32
@@ -32,7 +32,7 @@ block1:
block2:
br label %block4
; CHECK: block2:
-; CHECK-NEXT: load i32* %q
+; CHECK-NEXT: load i32, i32* %q
block3:
store i32 0, i32* %p
@@ -40,7 +40,7 @@ block3:
block4:
%P2 = phi i32* [%p, %block3], [%q, %block2]
- %PRE = load i32* %P2
+ %PRE = load i32, i32* %P2
ret i32 %PRE
; CHECK: block4:
; CHECK-NEXT: phi i32 [
@@ -59,7 +59,7 @@ block1:
block2:
br label %block4
; CHECK: block2:
-; CHECK-NEXT: load i32* %B
+; CHECK-NEXT: load i32, i32* %B
block3:
%A = getelementptr i32, i32* %p, i32 1
@@ -69,7 +69,7 @@ block3:
block4:
%P2 = phi i32* [%p, %block3], [%q, %block2]
%P3 = getelementptr i32, i32* %P2, i32 1
- %PRE = load i32* %P3
+ %PRE = load i32, i32* %P3
ret i32 %PRE
; CHECK: block4:
; CHECK-NEXT: phi i32 [
@@ -87,7 +87,7 @@ block1:
block2:
br label %block4
; CHECK: block2:
-; CHECK: load i32*
+; CHECK: load i32, i32*
; CHECK: br label %block4
block3:
@@ -101,7 +101,7 @@ block3:
block4:
%P2 = phi i32* [%p, %block3], [%q, %block2]
%P3 = getelementptr i32, i32* %P2, i32 1
- %PRE = load i32* %P3
+ %PRE = load i32, i32* %P3
ret i32 %PRE
; CHECK: block4:
; CHECK-NEXT: phi i32 [
@@ -127,7 +127,7 @@ bb.nph:
br label %bb
; CHECK: bb.nph:
-; CHECK: load double*
+; CHECK: load double, double*
; CHECK: br label %bb
bb:
@@ -135,8 +135,8 @@ bb:
%tmp6 = add i64 %indvar, 1
%scevgep = getelementptr double, double* %G, i64 %tmp6
%scevgep7 = getelementptr double, double* %G, i64 %indvar
- %2 = load double* %scevgep7, align 8
- %3 = load double* %scevgep, align 8
+ %2 = load double, double* %scevgep7, align 8
+ %3 = load double, double* %scevgep, align 8
%4 = fadd double %2, %3
store double %4, double* %scevgep7, align 8
%exitcond = icmp eq i64 %tmp6, %tmp
@@ -144,8 +144,8 @@ bb:
; Should only be one load in the loop.
; CHECK: bb:
-; CHECK: load double*
-; CHECK-NOT: load double*
+; CHECK: load double, double*
+; CHECK-NOT: load double, double*
; CHECK: br i1 %exitcond
return:
@@ -170,7 +170,7 @@ bb.nph:
br label %bb
; CHECK: bb.nph:
-; CHECK: load double*
+; CHECK: load double, double*
; CHECK: br label %bb
bb:
@@ -178,8 +178,8 @@ bb:
%tmp6 = add i64 %indvar, 1
%scevgep = getelementptr double, double* %G, i64 %tmp6
%scevgep7 = getelementptr double, double* %G, i64 %indvar
- %2 = load double* %scevgep7, align 8
- %3 = load double* %scevgep, align 8
+ %2 = load double, double* %scevgep7, align 8
+ %3 = load double, double* %scevgep, align 8
%4 = fadd double %2, %3
store double %4, double* %scevgep, align 8
%exitcond = icmp eq i64 %tmp6, %tmp
@@ -187,8 +187,8 @@ bb:
; Should only be one load in the loop.
; CHECK: bb:
-; CHECK: load double*
-; CHECK-NOT: load double*
+; CHECK: load double, double*
+; CHECK-NOT: load double, double*
; CHECK: br i1 %exitcond
return:
@@ -222,8 +222,8 @@ bb:
%scevgep = getelementptr double, double* %G, i64 %tmp8
%tmp9 = add i64 %indvar, 1
%scevgep10 = getelementptr double, double* %G, i64 %tmp9
- %3 = load double* %scevgep10, align 8
- %4 = load double* %scevgep, align 8
+ %3 = load double, double* %scevgep10, align 8
+ %4 = load double, double* %scevgep, align 8
%5 = fadd double %3, %4
store double %5, double* %scevgep, align 8
%exitcond = icmp eq i64 %tmp9, %tmp7
@@ -231,8 +231,8 @@ bb:
; Should only be one load in the loop.
; CHECK: bb:
-; CHECK: load double*
-; CHECK-NOT: load double*
+; CHECK: load double, double*
+; CHECK-NOT: load double, double*
; CHECK: br i1 %exitcond
return:
@@ -249,7 +249,7 @@ block1:
block2:
br label %block4
; CHECK: block2:
-; CHECK: load i32*
+; CHECK: load i32, i32*
; CHECK: br label %block4
block3:
@@ -260,7 +260,7 @@ block3:
block4:
%P2 = phi i32* [%p, %block3], [%q, %block2]
%P3 = getelementptr i32, i32* %P2, i32 1
- %PRE = load i32* %P3
+ %PRE = load i32, i32* %P3
ret i32 %PRE
; CHECK: block4:
; CHECK-NEXT: phi i32 [
@@ -288,7 +288,7 @@ bb.nph:
br label %bb
; CHECK: bb.nph:
-; CHECK: load double*
+; CHECK: load double, double*
; CHECK: br label %bb
bb:
@@ -297,8 +297,8 @@ bb:
%scevgep = getelementptr double, double* %G, i64 %tmp8
%tmp9 = add i64 %indvar, 1
%scevgep10 = getelementptr double, double* %G, i64 %tmp9
- %3 = load double* %scevgep10, align 8
- %4 = load double* %scevgep, align 8
+ %3 = load double, double* %scevgep10, align 8
+ %4 = load double, double* %scevgep, align 8
%5 = fadd double %3, %4
store double %5, double* %scevgep, align 8
%exitcond = icmp eq i64 %tmp9, %tmp7
@@ -306,8 +306,8 @@ bb:
; Should only be one load in the loop.
; CHECK: bb:
-; CHECK: load double*
-; CHECK-NOT: load double*
+; CHECK: load double, double*
+; CHECK-NOT: load double, double*
; CHECK: br i1 %exitcond
return:
@@ -332,8 +332,8 @@ bb.nph:
%tmp8 = add i64 %tmp, -1
br label %bb
; CHECK: bb.nph:
-; CHECK: load double*
-; CHECK: load double*
+; CHECK: load double, double*
+; CHECK: load double, double*
; CHECK: br label %bb
@@ -344,10 +344,10 @@ bb:
%scevgep10 = getelementptr double, double* %G, i64 %tmp9
%tmp11 = add i64 %indvar, 1
%scevgep12 = getelementptr double, double* %G, i64 %tmp11
- %2 = load double* %scevgep12, align 8
- %3 = load double* %scevgep10, align 8
+ %2 = load double, double* %scevgep12, align 8
+ %3 = load double, double* %scevgep10, align 8
%4 = fadd double %2, %3
- %5 = load double* %scevgep, align 8
+ %5 = load double, double* %scevgep, align 8
%6 = fadd double %4, %5
store double %6, double* %scevgep12, align 8
%exitcond = icmp eq i64 %tmp11, %tmp8
@@ -355,8 +355,8 @@ bb:
; Should only be one load in the loop.
; CHECK: bb:
-; CHECK: load double*
-; CHECK-NOT: load double*
+; CHECK: load double, double*
+; CHECK-NOT: load double, double*
; CHECK: br i1 %exitcond
return:
@@ -372,7 +372,7 @@ block1:
block2:
%cond = icmp sgt i32 %N, 1
br i1 %cond, label %block4, label %block5
-; CHECK: load i32* %p
+; CHECK: load i32, i32* %p
; CHECK-NEXT: br label %block4
block3:
@@ -380,7 +380,7 @@ block3:
br label %block4
block4:
- %PRE = load i32* %p
+ %PRE = load i32, i32* %p
br label %block5
block5:
diff --git a/llvm/test/Transforms/GVN/pre-single-pred.ll b/llvm/test/Transforms/GVN/pre-single-pred.ll
index f1f5c71a93a..0df45cf5c1c 100644
--- a/llvm/test/Transforms/GVN/pre-single-pred.ll
+++ b/llvm/test/Transforms/GVN/pre-single-pred.ll
@@ -23,9 +23,9 @@ for.cond.for.end_crit_edge: ; preds = %for.cond
br label %for.end
; CHECK: for.body:
-; CHECK-NEXT: %tmp3 = load i32* @p
+; CHECK-NEXT: %tmp3 = load i32, i32* @p
for.body: ; preds = %for.cond
- %tmp3 = load i32* @p ; <i32> [#uses=1]
+ %tmp3 = load i32, i32* @p ; <i32> [#uses=1]
%dec = add i32 %tmp3, -1 ; <i32> [#uses=2]
store i32 %dec, i32* @p
%cmp6 = icmp slt i32 %dec, 0 ; <i1> [#uses=1]
@@ -40,6 +40,6 @@ for.inc: ; preds = %for.body
br label %for.cond
for.end: ; preds = %for.body.for.end_crit_edge, %for.cond.for.end_crit_edge
- %tmp9 = load i32* @p ; <i32> [#uses=1]
+ %tmp9 = load i32, i32* @p ; <i32> [#uses=1]
ret i32 %tmp9
}
diff --git a/llvm/test/Transforms/GVN/preserve-tbaa.ll b/llvm/test/Transforms/GVN/preserve-tbaa.ll
index 587d463eef2..19467eeff40 100644
--- a/llvm/test/Transforms/GVN/preserve-tbaa.ll
+++ b/llvm/test/Transforms/GVN/preserve-tbaa.ll
@@ -5,7 +5,7 @@ target datalayout = "e-p:64:64:64"
; GVN should preserve the TBAA tag on loads when doing PRE.
; CHECK-LABEL: @test(
-; CHECK: %tmp33.pre = load i16* %P, align 2, !tbaa !0
+; CHECK: %tmp33.pre = load i16, i16* %P, align 2, !tbaa !0
; CHECK: br label %for.body
define void @test(i16 *%P, i16* %Q) nounwind {
entry:
@@ -15,7 +15,7 @@ bb.nph: ; preds = %entry
br label %for.body
for.body: ; preds = %for.body, %bb.nph
- %tmp33 = load i16* %P, align 2, !tbaa !0
+ %tmp33 = load i16, i16* %P, align 2, !tbaa !0
store i16 %tmp33, i16* %Q
store i16 0, i16* %P, align 2, !tbaa !0
diff --git a/llvm/test/Transforms/GVN/range.ll b/llvm/test/Transforms/GVN/range.ll
index 37202321a6d..297c6aac88d 100644
--- a/llvm/test/Transforms/GVN/range.ll
+++ b/llvm/test/Transforms/GVN/range.ll
@@ -2,82 +2,82 @@
define i32 @test1(i32* %p) {
; CHECK: @test1(i32* %p)
-; CHECK: %a = load i32* %p, !range !0
+; CHECK: %a = load i32, i32* %p, !range !0
; CHECK: %c = add i32 %a, %a
- %a = load i32* %p, !range !0
- %b = load i32* %p, !range !0
+ %a = load i32, i32* %p, !range !0
+ %b = load i32, i32* %p, !range !0
%c = add i32 %a, %b
ret i32 %c
}
define i32 @test2(i32* %p) {
; CHECK: @test2(i32* %p)
-; CHECK: %a = load i32* %p
+; CHECK: %a = load i32, i32* %p
; CHECK-NOT: range
; CHECK: %c = add i32 %a, %a
- %a = load i32* %p, !range !0
- %b = load i32* %p
+ %a = load i32, i32* %p, !range !0
+ %b = load i32, i32* %p
%c = add i32 %a, %b
ret i32 %c
}
define i32 @test3(i32* %p) {
; CHECK: @test3(i32* %p)
-; CHECK: %a = load i32* %p, !range ![[DISJOINT_RANGE:[0-9]+]]
+; CHECK: %a = load i32, i32* %p, !range ![[DISJOINT_RANGE:[0-9]+]]
; CHECK: %c = add i32 %a, %a
- %a = load i32* %p, !range !0
- %b = load i32* %p, !range !1
+ %a = load i32, i32* %p, !range !0
+ %b = load i32, i32* %p, !range !1
%c = add i32 %a, %b
ret i32 %c
}
define i32 @test4(i32* %p) {
; CHECK: @test4(i32* %p)
-; CHECK: %a = load i32* %p, !range ![[MERGED_RANGE:[0-9]+]]
+; CHECK: %a = load i32, i32* %p, !range ![[MERGED_RANGE:[0-9]+]]
; CHECK: %c = add i32 %a, %a
- %a = load i32* %p, !range !0
- %b = load i32* %p, !range !2
+ %a = load i32, i32* %p, !range !0
+ %b = load i32, i32* %p, !range !2
%c = add i32 %a, %b
ret i32 %c
}
define i32 @test5(i32* %p) {
; CHECK: @test5(i32* %p)
-; CHECK: %a = load i32* %p, !range ![[MERGED_SIGNED_RANGE:[0-9]+]]
+; CHECK: %a = load i32, i32* %p, !range ![[MERGED_SIGNED_RANGE:[0-9]+]]
; CHECK: %c = add i32 %a, %a
- %a = load i32* %p, !range !3
- %b = load i32* %p, !range !4
+ %a = load i32, i32* %p, !range !3
+ %b = load i32, i32* %p, !range !4
%c = add i32 %a, %b
ret i32 %c
}
define i32 @test6(i32* %p) {
; CHECK: @test6(i32* %p)
-; CHECK: %a = load i32* %p, !range ![[MERGED_TEST6:[0-9]+]]
+; CHECK: %a = load i32, i32* %p, !range ![[MERGED_TEST6:[0-9]+]]
; CHECK: %c = add i32 %a, %a
- %a = load i32* %p, !range !5
- %b = load i32* %p, !range !6
+ %a = load i32, i32* %p, !range !5
+ %b = load i32, i32* %p, !range !6
%c = add i32 %a, %b
ret i32 %c
}
define i32 @test7(i32* %p) {
; CHECK: @test7(i32* %p)
-; CHECK: %a = load i32* %p, !range ![[MERGED_TEST7:[0-9]+]]
+; CHECK: %a = load i32, i32* %p, !range ![[MERGED_TEST7:[0-9]+]]
; CHECK: %c = add i32 %a, %a
- %a = load i32* %p, !range !7
- %b = load i32* %p, !range !8
+ %a = load i32, i32* %p, !range !7
+ %b = load i32, i32* %p, !range !8
%c = add i32 %a, %b
ret i32 %c
}
define i32 @test8(i32* %p) {
; CHECK: @test8(i32* %p)
-; CHECK: %a = load i32* %p
+; CHECK: %a = load i32, i32* %p
; CHECK-NOT: range
; CHECK: %c = add i32 %a, %a
- %a = load i32* %p, !range !9
- %b = load i32* %p, !range !10
+ %a = load i32, i32* %p, !range !9
+ %b = load i32, i32* %p, !range !10
%c = add i32 %a, %b
ret i32 %c
}
diff --git a/llvm/test/Transforms/GVN/readattrs.ll b/llvm/test/Transforms/GVN/readattrs.ll
index ba624a71f9b..fb36d07c86e 100644
--- a/llvm/test/Transforms/GVN/readattrs.ll
+++ b/llvm/test/Transforms/GVN/readattrs.ll
@@ -9,7 +9,7 @@ define i8 @test() {
%a = alloca i8
store i8 1, i8* %a
call void @use(i8* %a)
- %b = load i8* %a
+ %b = load i8, i8* %a
ret i8 %b
; CHECK-LABEL: define i8 @test(
; CHECK: call void @use(i8* %a)
diff --git a/llvm/test/Transforms/GVN/rle-must-alias.ll b/llvm/test/Transforms/GVN/rle-must-alias.ll
index fc83c533a00..0d181dd3a11 100644
--- a/llvm/test/Transforms/GVN/rle-must-alias.ll
+++ b/llvm/test/Transforms/GVN/rle-must-alias.ll
@@ -18,21 +18,21 @@ entry:
bb: ; preds = %entry
%2 = tail call i32 (...)* @bar() nounwind ; <i32> [#uses=0]
%3 = getelementptr [100 x i32], [100 x i32]* @H, i32 0, i32 %i ; <i32*> [#uses=1]
- %4 = load i32* %3, align 4 ; <i32> [#uses=1]
+ %4 = load i32, i32* %3, align 4 ; <i32> [#uses=1]
store i32 %4, i32* @G, align 4
br label %bb3
bb1: ; preds = %entry
%5 = tail call i32 (...)* @baz() nounwind ; <i32> [#uses=0]
%6 = getelementptr [100 x i32], [100 x i32]* @H, i32 0, i32 %i ; <i32*> [#uses=1]
- %7 = load i32* %6, align 4 ; <i32> [#uses=2]
+ %7 = load i32, i32* %6, align 4 ; <i32> [#uses=2]
store i32 %7, i32* @G, align 4
%8 = icmp eq i32 %7, 0 ; <i1> [#uses=1]
br i1 %8, label %bb3, label %bb4
bb3: ; preds = %bb1, %bb
%9 = getelementptr [100 x i32], [100 x i32]* @H, i32 0, i32 %i ; <i32*> [#uses=1]
- %DEAD = load i32* %9, align 4 ; <i32> [#uses=1]
+ %DEAD = load i32, i32* %9, align 4 ; <i32> [#uses=1]
ret i32 %DEAD
bb4: ; preds = %bb1
diff --git a/llvm/test/Transforms/GVN/rle-no-phi-translate.ll b/llvm/test/Transforms/GVN/rle-no-phi-translate.ll
index 96dbf481899..c1fd2011537 100644
--- a/llvm/test/Transforms/GVN/rle-no-phi-translate.ll
+++ b/llvm/test/Transforms/GVN/rle-no-phi-translate.ll
@@ -19,7 +19,7 @@ bb: ; preds = %entry
bb2: ; preds = %bb1, %bb
%c_addr.0 = phi i32* [ %b, %entry ], [ %c, %bb ] ; <i32*> [#uses=1]
- %cv = load i32* %c_addr.0, align 4 ; <i32> [#uses=1]
+ %cv = load i32, i32* %c_addr.0, align 4 ; <i32> [#uses=1]
ret i32 %cv
; CHECK: bb2:
; CHECK-NOT: load i32
diff --git a/llvm/test/Transforms/GVN/rle-nonlocal.ll b/llvm/test/Transforms/GVN/rle-nonlocal.ll
index 8229aaa1424..7975462313c 100644
--- a/llvm/test/Transforms/GVN/rle-nonlocal.ll
+++ b/llvm/test/Transforms/GVN/rle-nonlocal.ll
@@ -6,20 +6,20 @@ block1:
br i1 %cmp , label %block2, label %block3
block2:
- %a = load i32** %p
+ %a = load i32*, i32** %p
br label %block4
block3:
- %b = load i32** %p
+ %b = load i32*, i32** %p
br label %block4
block4:
; CHECK-NOT: %existingPHI = phi
; CHECK: %DEAD = phi
%existingPHI = phi i32* [ %a, %block2 ], [ %b, %block3 ]
- %DEAD = load i32** %p
- %c = load i32* %DEAD
- %d = load i32* %existingPHI
+ %DEAD = load i32*, i32** %p
+ %c = load i32, i32* %DEAD
+ %d = load i32, i32* %existingPHI
%e = add i32 %c, %d
ret i32 %e
}
diff --git a/llvm/test/Transforms/GVN/rle-phi-translate.ll b/llvm/test/Transforms/GVN/rle-phi-translate.ll
index b2afb7075bb..7402e1a1fc0 100644
--- a/llvm/test/Transforms/GVN/rle-phi-translate.ll
+++ b/llvm/test/Transforms/GVN/rle-phi-translate.ll
@@ -11,13 +11,13 @@ entry:
br i1 %t1, label %bb, label %bb1
bb:
- %t2 = load i32* %c, align 4
+ %t2 = load i32, i32* %c, align 4
%t3 = add i32 %t2, 1
store i32 %t3, i32* %g, align 4
br label %bb2
bb1: ; preds = %entry
- %t5 = load i32* %b, align 4
+ %t5 = load i32, i32* %b, align 4
%t6 = add i32 %t5, 1
store i32 %t6, i32* %g, align 4
br label %bb2
@@ -25,8 +25,8 @@ bb1: ; preds = %entry
bb2: ; preds = %bb1, %bb
%c_addr.0 = phi i32* [ %g, %bb1 ], [ %c, %bb ]
%b_addr.0 = phi i32* [ %b, %bb1 ], [ %g, %bb ]
- %cv = load i32* %c_addr.0, align 4
- %bv = load i32* %b_addr.0, align 4
+ %cv = load i32, i32* %c_addr.0, align 4
+ %bv = load i32, i32* %b_addr.0, align 4
; CHECK: %bv = phi i32
; CHECK: %cv = phi i32
; CHECK-NOT: load
@@ -53,7 +53,7 @@ bb1:
bb2:
%d = phi i32* [ %c, %bb1 ], [ %b, %bb ]
%d1 = bitcast i32* %d to i8*
- %dv = load i8* %d1
+ %dv = load i8, i8* %d1
; CHECK: %dv = phi i8 [ 92, %bb1 ], [ 4, %bb ]
; CHECK-NOT: load
; CHECK: ret i8 %dv
@@ -79,7 +79,7 @@ bb2:
%d = phi i32* [ %c, %bb1 ], [ %b, %bb ]
%i = phi i32 [ 7, %bb1 ], [ 17, %bb ]
%d1 = getelementptr i32, i32* %d, i32 %i
- %dv = load i32* %d1
+ %dv = load i32, i32* %d1
; CHECK: %dv = phi i32 [ 82, %bb1 ], [ 4, %bb ]
; CHECK-NOT: load
; CHECK: ret i32 %dv
@@ -105,7 +105,7 @@ bb2:
%d = phi i32* [ %c, %bb1 ], [ %b, %bb ]
%i = phi i32 [ 7, %bb1 ], [ 0, %bb ]
%d1 = getelementptr i32, i32* %d, i32 %i
- %dv = load i32* %d1
+ %dv = load i32, i32* %d1
; CHECK: %dv = phi i32 [ 82, %bb1 ], [ 4, %bb ]
; CHECK-NOT: load
; CHECK: ret i32 %dv
@@ -130,8 +130,8 @@ for.body:
%arrayidx6 = getelementptr double, double* %G, i64 %indvar
%tmp = add i64 %indvar, 1
%arrayidx = getelementptr double, double* %G, i64 %tmp
- %tmp3 = load double* %arrayidx
- %tmp7 = load double* %arrayidx6
+ %tmp3 = load double, double* %arrayidx
+ %tmp7 = load double, double* %arrayidx6
%add = fadd double %tmp3, %tmp7
store double %add, double* %arrayidx
%exitcond = icmp eq i64 %tmp, 999
diff --git a/llvm/test/Transforms/GVN/rle-semidominated.ll b/llvm/test/Transforms/GVN/rle-semidominated.ll
index 923cd03ecdb..f80d040c93e 100644
--- a/llvm/test/Transforms/GVN/rle-semidominated.ll
+++ b/llvm/test/Transforms/GVN/rle-semidominated.ll
@@ -2,7 +2,7 @@
define i32 @main(i32* %p, i32 %x, i32 %y) {
block1:
- %z = load i32* %p
+ %z = load i32, i32* %p
%cmp = icmp eq i32 %x, %y
br i1 %cmp, label %block2, label %block3
@@ -15,6 +15,6 @@ block3:
br label %block4
block4:
- %DEAD = load i32* %p
+ %DEAD = load i32, i32* %p
ret i32 %DEAD
}
diff --git a/llvm/test/Transforms/GVN/rle.ll b/llvm/test/Transforms/GVN/rle.ll
index a19575d06e6..ee68c0c7663 100644
--- a/llvm/test/Transforms/GVN/rle.ll
+++ b/llvm/test/Transforms/GVN/rle.ll
@@ -5,7 +5,7 @@
define i32 @test0(i32 %V, i32* %P) {
store i32 %V, i32* %P
- %A = load i32* %P
+ %A = load i32, i32* %P
ret i32 %A
; CHECK-LABEL: @test0(
; CHECK: ret i32 %V
@@ -20,7 +20,7 @@ define i32 @test0(i32 %V, i32* %P) {
define i8 @crash0({i32, i32} %A, {i32, i32}* %P) {
store {i32, i32} %A, {i32, i32}* %P
%X = bitcast {i32, i32}* %P to i8*
- %Y = load i8* %X
+ %Y = load i8, i8* %X
ret i8 %Y
}
@@ -28,7 +28,7 @@ define i8 @crash0({i32, i32} %A, {i32, i32}* %P) {
declare void @helper()
define void @crash1() {
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* undef, i8* undef, i64 undef, i32 1, i1 false) nounwind
- %tmp = load i8* bitcast (void ()* @helper to i8*)
+ %tmp = load i8, i8* bitcast (void ()* @helper to i8*)
%x = icmp eq i8 %tmp, 15
ret void
}
@@ -45,7 +45,7 @@ define float @coerce_mustalias1(i32 %V, i32* %P) {
%P2 = bitcast i32* %P to float*
- %A = load float* %P2
+ %A = load float, float* %P2
ret float %A
; CHECK-LABEL: @coerce_mustalias1(
; CHECK-NOT: load
@@ -58,7 +58,7 @@ define float @coerce_mustalias2(i32* %V, i32** %P) {
%P2 = bitcast i32** %P to float*
- %A = load float* %P2
+ %A = load float, float* %P2
ret float %A
; CHECK-LABEL: @coerce_mustalias2(
; CHECK-NOT: load
@@ -71,7 +71,7 @@ define i32* @coerce_mustalias3(float %V, float* %P) {
%P2 = bitcast float* %P to i32**
- %A = load i32** %P2
+ %A = load i32*, i32** %P2
ret i32* %A
; CHECK-LABEL: @coerce_mustalias3(
; CHECK-NOT: load
@@ -80,10 +80,10 @@ define i32* @coerce_mustalias3(float %V, float* %P) {
;; i32 -> f32 load forwarding.
define float @coerce_mustalias4(i32* %P, i1 %cond) {
- %A = load i32* %P
+ %A = load i32, i32* %P
%P2 = bitcast i32* %P to float*
- %B = load float* %P2
+ %B = load float, float* %P2
br i1 %cond, label %T, label %F
T:
ret float %B
@@ -93,7 +93,7 @@ F:
ret float %X
; CHECK-LABEL: @coerce_mustalias4(
-; CHECK: %A = load i32* %P
+; CHECK: %A = load i32, i32* %P
; CHECK-NOT: load
; CHECK: ret float
; CHECK: F:
@@ -105,7 +105,7 @@ define i8 @coerce_mustalias5(i32 %V, i32* %P) {
%P2 = bitcast i32* %P to i8*
- %A = load i8* %P2
+ %A = load i8, i8* %P2
ret i8 %A
; CHECK-LABEL: @coerce_mustalias5(
; CHECK-NOT: load
@@ -118,7 +118,7 @@ define float @coerce_mustalias6(i64 %V, i64* %P) {
%P2 = bitcast i64* %P to float*
- %A = load float* %P2
+ %A = load float, float* %P2
ret float %A
; CHECK-LABEL: @coerce_mustalias6(
; CHECK-NOT: load
@@ -131,7 +131,7 @@ define i8* @coerce_mustalias7(i64 %V, i64* %P) {
%P2 = bitcast i64* %P to i8**
- %A = load i8** %P2
+ %A = load i8*, i8** %P2
ret i8* %A
; CHECK-LABEL: @coerce_mustalias7(
; CHECK-NOT: load
@@ -144,7 +144,7 @@ entry:
%conv = bitcast i16* %A to i8*
tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 1, i64 200, i32 1, i1 false)
%arrayidx = getelementptr inbounds i16, i16* %A, i64 42
- %tmp2 = load i16* %arrayidx
+ %tmp2 = load i16, i16* %arrayidx
ret i16 %tmp2
; CHECK-LABEL: @memset_to_i16_local(
; CHECK-NOT: load
@@ -157,7 +157,7 @@ entry:
%conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 %Val, i64 400, i32 1, i1 false)
%arrayidx = getelementptr inbounds float, float* %A, i64 42 ; <float*> [#uses=1]
- %tmp2 = load float* %arrayidx ; <float> [#uses=1]
+ %tmp2 = load float, float* %arrayidx ; <float> [#uses=1]
ret float %tmp2
; CHECK-LABEL: @memset_to_float_local(
; CHECK-NOT: load
@@ -184,7 +184,7 @@ F:
Cont:
%P2 = getelementptr i16, i16* %P, i32 4
- %A = load i16* %P2
+ %A = load i16, i16* %P2
ret i16 %A
; CHECK-LABEL: @memset_to_i16_nonlocal0(
@@ -203,7 +203,7 @@ entry:
%conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %conv, i8* bitcast ({i32, float, i32 }* @GCst to i8*), i64 12, i32 1, i1 false)
%arrayidx = getelementptr inbounds float, float* %A, i64 1 ; <float*> [#uses=1]
- %tmp2 = load float* %arrayidx ; <float> [#uses=1]
+ %tmp2 = load float, float* %arrayidx ; <float> [#uses=1]
ret float %tmp2
; CHECK-LABEL: @memcpy_to_float_local(
; CHECK-NOT: load
@@ -216,7 +216,7 @@ entry:
%conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
tail call void @llvm.memcpy.p0i8.p1i8.i64(i8* %conv, i8 addrspace(1)* bitcast ({i32, float, i32 } addrspace(1)* @GCst_as1 to i8 addrspace(1)*), i64 12, i32 1, i1 false)
%arrayidx = getelementptr inbounds float, float* %A, i64 1 ; <float*> [#uses=1]
- %tmp2 = load float* %arrayidx ; <float> [#uses=1]
+ %tmp2 = load float, float* %arrayidx ; <float> [#uses=1]
ret float %tmp2
; CHECK-LABEL: @memcpy_to_float_local_as1(
; CHECK-NOT: load
@@ -237,7 +237,7 @@ F:
br label %Cont
Cont:
- %A = load i8* %P3
+ %A = load i8, i8* %P3
ret i8 %A
; CHECK-LABEL: @coerce_mustalias_nonlocal0(
@@ -263,7 +263,7 @@ F:
Cont:
%P3 = bitcast i32* %P to i8*
- %A = load i8* %P3
+ %A = load i8, i8* %P3
ret i8 %A
; CHECK-LABEL: @coerce_mustalias_nonlocal1(
@@ -286,12 +286,12 @@ F:
br label %Cont
Cont:
- %A = load i8* %P3
+ %A = load i8, i8* %P3
ret i8 %A
; CHECK-LABEL: @coerce_mustalias_pre0(
; CHECK: F:
-; CHECK: load i8* %P3
+; CHECK: load i8, i8* %P3
; CHECK: Cont:
; CHECK: %A = phi i8 [
; CHECK-NOT: load
@@ -311,7 +311,7 @@ define i8 @coerce_offset0(i32 %V, i32* %P) {
%P2 = bitcast i32* %P to i8*
%P3 = getelementptr i8, i8* %P2, i32 2
- %A = load i8* %P3
+ %A = load i8, i8* %P3
ret i8 %A
; CHECK-LABEL: @coerce_offset0(
; CHECK-NOT: load
@@ -324,7 +324,7 @@ define i8 @coerce_offset0_addrspacecast(i32 %V, i32* %P) {
%P2 = addrspacecast i32* %P to i8 addrspace(1)*
%P3 = getelementptr i8, i8 addrspace(1)* %P2, i32 2
- %A = load i8 addrspace(1)* %P3
+ %A = load i8, i8 addrspace(1)* %P3
ret i8 %A
; CHECK-LABEL: @coerce_offset0_addrspacecast(
; CHECK-NOT: load
@@ -346,7 +346,7 @@ F:
br label %Cont
Cont:
- %A = load i8* %P4
+ %A = load i8, i8* %P4
ret i8 %A
; CHECK-LABEL: @coerce_offset_nonlocal0(
@@ -370,12 +370,12 @@ F:
br label %Cont
Cont:
- %A = load i8* %P4
+ %A = load i8, i8* %P4
ret i8 %A
; CHECK-LABEL: @coerce_offset_pre0(
; CHECK: F:
-; CHECK: load i8* %P4
+; CHECK: load i8, i8* %P4
; CHECK: Cont:
; CHECK: %A = phi i8 [
; CHECK-NOT: load
@@ -386,28 +386,28 @@ define i32 @chained_load(i32** %p, i32 %x, i32 %y) {
block1:
%A = alloca i32*
- %z = load i32** %p
+ %z = load i32*, i32** %p
store i32* %z, i32** %A
%cmp = icmp eq i32 %x, %y
br i1 %cmp, label %block2, label %block3
block2:
- %a = load i32** %p
+ %a = load i32*, i32** %p
br label %block4
block3:
- %b = load i32** %p
+ %b = load i32*, i32** %p
br label %block4
block4:
- %c = load i32** %p
- %d = load i32* %c
+ %c = load i32*, i32** %p
+ %d = load i32, i32* %c
ret i32 %d
; CHECK-LABEL: @chained_load(
-; CHECK: %z = load i32** %p
+; CHECK: %z = load i32*, i32** %p
; CHECK-NOT: load
-; CHECK: %d = load i32* %z
+; CHECK: %d = load i32, i32* %z
; CHECK-NEXT: ret i32 %d
}
@@ -428,7 +428,7 @@ F1:
T1:
%P2 = getelementptr i32, i32* %P, i32 %A
- %x = load i32* %P2
+ %x = load i32, i32* %P2
%cond = call i1 @cond2()
br i1 %cond, label %TX, label %F
@@ -485,7 +485,7 @@ block6:
br i1 %cmpxy, label %block7, label %exit
block7:
- %D = load i32* %C
+ %D = load i32, i32* %C
ret i32 %D
; CHECK: block7:
@@ -502,13 +502,13 @@ entry:
store i8 192, i8* %X3
%X = getelementptr i8, i8* %p, i32 4
- %Y = load i8* %X
+ %Y = load i8, i8* %X
br label %loop
loop:
%i = phi i32 [4, %entry], [192, %loop]
%X2 = getelementptr i8, i8* %p, i32 %i
- %Y2 = load i8* %X2
+ %Y2 = load i8, i8* %X2
; CHECK: loop:
; CHECK-NEXT: %Y2 = phi i8 [ %Y, %entry ], [ 0, %loop ]
@@ -533,13 +533,13 @@ entry:
store i8 19, i8* %X4
%X = getelementptr i8, i8* %p, i32 4
- %Y = load i8* %X
+ %Y = load i8, i8* %X
br label %loop
loop:
%i = phi i32 [4, %entry], [3, %cont]
%X2 = getelementptr i8, i8* %p, i32 %i
- %Y2 = load i8* %X2 ; Ensure this load is not being incorrectly replaced.
+ %Y2 = load i8, i8* %X2 ; Ensure this load is not being incorrectly replaced.
%cond = call i1 @cond2()
br i1 %cond, label %cont, label %out
@@ -551,7 +551,7 @@ cont:
; CHECK: store i32
; CHECK-NEXT: getelementptr i8, i8* %p, i32 3
-; CHECK-NEXT: load i8*
+; CHECK-NEXT: load i8, i8*
br label %loop
out:
@@ -567,7 +567,7 @@ entry:
%tmp = bitcast [256 x i32]* %x to i8* ; <i8*> [#uses=1]
call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 1024, i32 4, i1 false)
%arraydecay = getelementptr inbounds [256 x i32], [256 x i32]* %x, i32 0, i32 0 ; <i32*>
- %tmp1 = load i32* %arraydecay ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %arraydecay ; <i32> [#uses=1]
ret i32 %tmp1
; CHECK-LABEL: @memset_to_load(
; CHECK: ret i32 0
@@ -581,15 +581,15 @@ entry:
define i32 @load_load_partial_alias(i8* %P) nounwind ssp {
entry:
%0 = bitcast i8* %P to i32*
- %tmp2 = load i32* %0
+ %tmp2 = load i32, i32* %0
%add.ptr = getelementptr inbounds i8, i8* %P, i64 1
- %tmp5 = load i8* %add.ptr
+ %tmp5 = load i8, i8* %add.ptr
%conv = zext i8 %tmp5 to i32
%add = add nsw i32 %tmp2, %conv
ret i32 %add
; TEMPORARILYDISABLED-LABEL: @load_load_partial_alias(
-; TEMPORARILYDISABLED: load i32*
+; TEMPORARILYDISABLED: load i32, i32*
; TEMPORARILYDISABLED-NOT: load
; TEMPORARILYDISABLED: lshr i32 {{.*}}, 8
; TEMPORARILYDISABLED-NOT: load
@@ -603,13 +603,13 @@ entry:
define i32 @load_load_partial_alias_cross_block(i8* %P) nounwind ssp {
entry:
%xx = bitcast i8* %P to i32*
- %x1 = load i32* %xx, align 4
+ %x1 = load i32, i32* %xx, align 4
%cmp = icmp eq i32 %x1, 127
br i1 %cmp, label %land.lhs.true, label %if.end
land.lhs.true: ; preds = %entry
%arrayidx4 = getelementptr inbounds i8, i8* %P, i64 1
- %tmp5 = load i8* %arrayidx4, align 1
+ %tmp5 = load i8, i8* %arrayidx4, align 1
%conv6 = zext i8 %tmp5 to i32
ret i32 %conv6
@@ -632,39 +632,39 @@ if.end:
define i32 @test_widening1(i8* %P) nounwind ssp noredzone {
entry:
- %tmp = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 1), align 4
+ %tmp = load i8, i8* getelementptr inbounds (%widening1* @f, i64 0, i32 1), align 4
%conv = zext i8 %tmp to i32
- %tmp1 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 2), align 1
+ %tmp1 = load i8, i8* getelementptr inbounds (%widening1* @f, i64 0, i32 2), align 1
%conv2 = zext i8 %tmp1 to i32
%add = add nsw i32 %conv, %conv2
ret i32 %add
; CHECK-LABEL: @test_widening1(
; CHECK-NOT: load
-; CHECK: load i16*
+; CHECK: load i16, i16*
; CHECK-NOT: load
; CHECK: ret i32
}
define i32 @test_widening2() nounwind ssp noredzone {
entry:
- %tmp = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 1), align 4
+ %tmp = load i8, i8* getelementptr inbounds (%widening1* @f, i64 0, i32 1), align 4
%conv = zext i8 %tmp to i32
- %tmp1 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 2), align 1
+ %tmp1 = load i8, i8* getelementptr inbounds (%widening1* @f, i64 0, i32 2), align 1
%conv2 = zext i8 %tmp1 to i32
%add = add nsw i32 %conv, %conv2
- %tmp2 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 3), align 2
+ %tmp2 = load i8, i8* getelementptr inbounds (%widening1* @f, i64 0, i32 3), align 2
%conv3 = zext i8 %tmp2 to i32
%add2 = add nsw i32 %add, %conv3
- %tmp3 = load i8* getelementptr inbounds (%widening1* @f, i64 0, i32 4), align 1
+ %tmp3 = load i8, i8* getelementptr inbounds (%widening1* @f, i64 0, i32 4), align 1
%conv4 = zext i8 %tmp3 to i32
%add3 = add nsw i32 %add2, %conv3
ret i32 %add3
; CHECK-LABEL: @test_widening2(
; CHECK-NOT: load
-; CHECK: load i32*
+; CHECK: load i32, i32*
; CHECK-NOT: load
; CHECK: ret i32
}
@@ -693,7 +693,7 @@ define void @test_escape1() nounwind {
%x = alloca i8**, align 8
store i8** getelementptr inbounds ([5 x i8*]* @_ZTV1X, i64 0, i64 2), i8*** %x, align 8
call void @use() nounwind
- %DEAD = load i8*** %x, align 8
+ %DEAD = load i8**, i8*** %x, align 8
call void @use3(i8*** %x, i8** %DEAD) nounwind
ret void
; CHECK: test_escape1
diff --git a/llvm/test/Transforms/GVN/tbaa.ll b/llvm/test/Transforms/GVN/tbaa.ll
index 71fbed419e9..b5a717b0984 100644
--- a/llvm/test/Transforms/GVN/tbaa.ll
+++ b/llvm/test/Transforms/GVN/tbaa.ll
@@ -81,9 +81,9 @@ define i32 @test8(i32* %p, i32* %q) {
; Since we know the location is invariant, we can forward the
; load across the potentially aliasing store.
- %a = load i32* %q, !tbaa !10
+ %a = load i32, i32* %q, !tbaa !10
store i32 15, i32* %p
- %b = load i32* %q, !tbaa !10
+ %b = load i32, i32* %q, !tbaa !10
%c = sub i32 %a, %b
ret i32 %c
}
@@ -94,9 +94,9 @@ define i32 @test9(i32* %p, i32* %q) {
; Since we know the location is invariant, we can forward the
; load across the potentially aliasing store (within the call).
- %a = load i32* %q, !tbaa !10
+ %a = load i32, i32* %q, !tbaa !10
call void @clobber()
- %b = load i32* %q, !tbaa !10
+ %b = load i32, i32* %q, !tbaa !10
%c = sub i32 %a, %b
ret i32 %c
}
diff --git a/llvm/test/Transforms/GVN/volatile.ll b/llvm/test/Transforms/GVN/volatile.ll
index 5ba03d9a125..b31058db4ea 100644
--- a/llvm/test/Transforms/GVN/volatile.ll
+++ b/llvm/test/Transforms/GVN/volatile.ll
@@ -6,12 +6,12 @@
; for dependencies of a non-volatile load
define i32 @test1(i32* nocapture %p, i32* nocapture %q) {
; CHECK-LABEL: test1
-; CHECK: %0 = load volatile i32* %q
+; CHECK: %0 = load volatile i32, i32* %q
; CHECK-NEXT: ret i32 0
entry:
- %x = load i32* %p
- load volatile i32* %q
- %y = load i32* %p
+ %x = load i32, i32* %p
+ load volatile i32, i32* %q
+ %y = load i32, i32* %p
%add = sub i32 %y, %x
ret i32 %add
}
@@ -20,12 +20,12 @@ entry:
; volatile, this would be (in effect) removing the volatile load
define i32 @test2(i32* nocapture %p, i32* nocapture %q) {
; CHECK-LABEL: test2
-; CHECK: %x = load i32* %p
-; CHECK-NEXT: %y = load volatile i32* %p
+; CHECK: %x = load i32, i32* %p
+; CHECK-NEXT: %y = load volatile i32, i32* %p
; CHECK-NEXT: %add = sub i32 %y, %x
entry:
- %x = load i32* %p
- %y = load volatile i32* %p
+ %x = load i32, i32* %p
+ %y = load volatile i32, i32* %p
%add = sub i32 %y, %x
ret i32 %add
}
@@ -34,13 +34,13 @@ entry:
; reorder it even if p and q are noalias
define i32 @test3(i32* noalias nocapture %p, i32* noalias nocapture %q) {
; CHECK-LABEL: test3
-; CHECK: %x = load i32* %p
-; CHECK-NEXT: %0 = load volatile i32* %q
-; CHECK-NEXT: %y = load volatile i32* %p
+; CHECK: %x = load i32, i32* %p
+; CHECK-NEXT: %0 = load volatile i32, i32* %q
+; CHECK-NEXT: %y = load volatile i32, i32* %p
entry:
- %x = load i32* %p
- load volatile i32* %q
- %y = load volatile i32* %p
+ %x = load i32, i32* %p
+ load volatile i32, i32* %q
+ %y = load volatile i32, i32* %p
%add = sub i32 %y, %x
ret i32 %add
}
@@ -50,13 +50,13 @@ entry:
; case, the ordering prevents forwarding.
define i32 @test4(i32* noalias nocapture %p, i32* noalias nocapture %q) {
; CHECK-LABEL: test4
-; CHECK: %x = load i32* %p
-; CHECK-NEXT: %0 = load atomic volatile i32* %q seq_cst
-; CHECK-NEXT: %y = load atomic i32* %p seq_cst
+; CHECK: %x = load i32, i32* %p
+; CHECK-NEXT: %0 = load atomic volatile i32, i32* %q seq_cst
+; CHECK-NEXT: %y = load atomic i32, i32* %p seq_cst
entry:
- %x = load i32* %p
- load atomic volatile i32* %q seq_cst, align 4
- %y = load atomic i32* %p seq_cst, align 4
+ %x = load i32, i32* %p
+ load atomic volatile i32, i32* %q seq_cst, align 4
+ %y = load atomic i32, i32* %p seq_cst, align 4
%add = sub i32 %y, %x
ret i32 %add
}
@@ -64,11 +64,11 @@ entry:
; Value forwarding from a volatile load is perfectly legal
define i32 @test5(i32* nocapture %p, i32* nocapture %q) {
; CHECK-LABEL: test5
-; CHECK: %x = load volatile i32* %p
+; CHECK: %x = load volatile i32, i32* %p
; CHECK-NEXT: ret i32 0
entry:
- %x = load volatile i32* %p
- %y = load i32* %p
+ %x = load volatile i32, i32* %p
+ %y = load i32, i32* %p
%add = sub i32 %y, %x
ret i32 %add
}
@@ -76,17 +76,17 @@ entry:
; Does cross block redundancy elimination work with volatiles?
define i32 @test6(i32* noalias nocapture %p, i32* noalias nocapture %q) {
; CHECK-LABEL: test6
-; CHECK: %y1 = load i32* %p
+; CHECK: %y1 = load i32, i32* %p
; CHECK-LABEL: header
-; CHECK: %x = load volatile i32* %q
+; CHECK: %x = load volatile i32, i32* %q
; CHECK-NEXT: %add = sub i32 %y1, %x
entry:
- %y1 = load i32* %p
+ %y1 = load i32, i32* %p
call void @use(i32 %y1)
br label %header
header:
- %x = load volatile i32* %q
- %y = load i32* %p
+ %x = load volatile i32, i32* %q
+ %y = load i32, i32* %p
%add = sub i32 %y, %x
%cnd = icmp eq i32 %add, 0
br i1 %cnd, label %exit, label %header
@@ -98,22 +98,22 @@ exit:
define i32 @test7(i1 %c, i32* noalias nocapture %p, i32* noalias nocapture %q) {
; CHECK-LABEL: test7
; CHECK-LABEL: entry.header_crit_edge:
-; CHECK: %y.pre = load i32* %p
+; CHECK: %y.pre = load i32, i32* %p
; CHECK-LABEL: skip:
-; CHECK: %y1 = load i32* %p
+; CHECK: %y1 = load i32, i32* %p
; CHECK-LABEL: header:
; CHECK: %y = phi i32
-; CHECK-NEXT: %x = load volatile i32* %q
+; CHECK-NEXT: %x = load volatile i32, i32* %q
; CHECK-NEXT: %add = sub i32 %y, %x
entry:
br i1 %c, label %header, label %skip
skip:
- %y1 = load i32* %p
+ %y1 = load i32, i32* %p
call void @use(i32 %y1)
br label %header
header:
- %x = load volatile i32* %q
- %y = load i32* %p
+ %x = load volatile i32, i32* %q
+ %y = load i32, i32* %p
%add = sub i32 %y, %x
%cnd = icmp eq i32 %add, 0
br i1 %cnd, label %exit, label %header
@@ -126,20 +126,20 @@ exit:
define i32 @test8(i1 %b, i1 %c, i32* noalias %p, i32* noalias %q) {
; CHECK-LABEL: test8
; CHECK-LABEL: entry
-; CHECK: %y1 = load i32* %p
+; CHECK: %y1 = load i32, i32* %p
; CHECK-LABEL: header:
; CHECK: %y = phi i32
-; CHECK-NEXT: %x = load volatile i32* %q
+; CHECK-NEXT: %x = load volatile i32, i32* %q
; CHECK-NOT: load
; CHECK-LABEL: skip.header_crit_edge:
-; CHECK: %y.pre = load i32* %p
+; CHECK: %y.pre = load i32, i32* %p
entry:
- %y1 = load i32* %p
+ %y1 = load i32, i32* %p
call void @use(i32 %y1)
br label %header
header:
- %x = load volatile i32* %q
- %y = load i32* %p
+ %x = load volatile i32, i32* %q
+ %y = load i32, i32* %p
call void @use(i32 %y)
br i1 %b, label %skip, label %header
skip:
diff --git a/llvm/test/Transforms/GlobalDCE/2002-08-17-FunctionDGE.ll b/llvm/test/Transforms/GlobalDCE/2002-08-17-FunctionDGE.ll
index 766c227460e..49a87d90230 100644
--- a/llvm/test/Transforms/GlobalDCE/2002-08-17-FunctionDGE.ll
+++ b/llvm/test/Transforms/GlobalDCE/2002-08-17-FunctionDGE.ll
@@ -11,7 +11,7 @@
;; dies when %b dies.
define internal i32 @foo() {
- %ret = load i32* @foo.upgrd.1 ; <i32> [#uses=1]
+ %ret = load i32, i32* @foo.upgrd.1 ; <i32> [#uses=1]
ret i32 %ret
}
diff --git a/llvm/test/Transforms/GlobalDCE/2002-09-12-Redeletion.ll b/llvm/test/Transforms/GlobalDCE/2002-09-12-Redeletion.ll
index 6221fa3a62f..afa2629f8f6 100644
--- a/llvm/test/Transforms/GlobalDCE/2002-09-12-Redeletion.ll
+++ b/llvm/test/Transforms/GlobalDCE/2002-09-12-Redeletion.ll
@@ -5,7 +5,7 @@
@bar = internal global [2 x { i32*, i32 }] [ { i32*, i32 } { i32* @foo.upgrd.1, i32 7 }, { i32*, i32 } { i32* @foo.upgrd.1, i32 1 } ] ; <[2 x { i32*, i32 }]*> [#uses=0]
define internal i32 @foo() {
- %ret = load i32* @foo.upgrd.1 ; <i32> [#uses=1]
+ %ret = load i32, i32* @foo.upgrd.1 ; <i32> [#uses=1]
ret i32 %ret
}
diff --git a/llvm/test/Transforms/GlobalDCE/complex-constantexpr.ll b/llvm/test/Transforms/GlobalDCE/complex-constantexpr.ll
index 4bf1aeee709..b67d0b6cab0 100644
--- a/llvm/test/Transforms/GlobalDCE/complex-constantexpr.ll
+++ b/llvm/test/Transforms/GlobalDCE/complex-constantexpr.ll
@@ -19,7 +19,7 @@ bb:
br label %bb2
bb1: ; preds = %bb11
- %tmp = load i32* @global5, align 4
+ %tmp = load i32, i32* @global5, align 4
br label %bb2
bb2: ; preds = %bb1, %bb
@@ -34,12 +34,12 @@ bb6: ; preds = %bb2
br label %bb8
bb8: ; preds = %bb6, %bb2
- %tmp9 = load i32* @global7, align 4
+ %tmp9 = load i32, i32* @global7, align 4
%tmp10 = icmp eq i32 %tmp9, 0
br i1 %tmp10, label %bb11, label %bb15
bb11: ; preds = %bb8
- %tmp12 = load i32* @global6, align 4
+ %tmp12 = load i32, i32* @global6, align 4
%tmp13 = add nsw i32 %tmp12, 1
store i32 %tmp13, i32* @global6, align 4
%tmp14 = icmp slt i32 %tmp13, 42
@@ -71,7 +71,7 @@ define void @blam() {
bb:
store i32 ptrtoint (i32* @global to i32), i32* getelementptr inbounds (%struct.ham* @global8, i64 0, i32 0), align 4
store i32 0, i32* @global9, align 4
- %tmp = load i32* getelementptr inbounds (%struct.ham* @global8, i64 0, i32 0), align 4
+ %tmp = load i32, i32* getelementptr inbounds (%struct.ham* @global8, i64 0, i32 0), align 4
br label %bb1
bb1: ; preds = %bb1, %bb
diff --git a/llvm/test/Transforms/GlobalDCE/global_ctors_integration.ll b/llvm/test/Transforms/GlobalDCE/global_ctors_integration.ll
index 5e6cc79f103..f7f702a980d 100644
--- a/llvm/test/Transforms/GlobalDCE/global_ctors_integration.ll
+++ b/llvm/test/Transforms/GlobalDCE/global_ctors_integration.ll
@@ -12,7 +12,7 @@
@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
define internal void @__cxx_global_var_init() section "__TEXT,__StaticInit,regular,pure_instructions" {
- %1 = load i32* @_ZN3Bar18LINKER_INITIALIZEDE, align 4
+ %1 = load i32, i32* @_ZN3Bar18LINKER_INITIALIZEDE, align 4
call void @_ZN3FooC1E17LinkerInitialized(%class.Foo* @foo, i32 %1)
ret void
}
@@ -23,8 +23,8 @@ define linkonce_odr void @_ZN3FooC1E17LinkerInitialized(%class.Foo* %this, i32)
%3 = alloca i32, align 4
store %class.Foo* %this, %class.Foo** %2, align 8
store i32 %0, i32* %3, align 4
- %4 = load %class.Foo** %2
- %5 = load i32* %3, align 4
+ %4 = load %class.Foo*, %class.Foo** %2
+ %5 = load i32, i32* %3, align 4
call void @_ZN3FooC2E17LinkerInitialized(%class.Foo* %4, i32 %5)
ret void
}
@@ -35,7 +35,7 @@ define linkonce_odr void @_ZN3FooC2E17LinkerInitialized(%class.Foo* %this, i32)
%3 = alloca i32, align 4
store %class.Foo* %this, %class.Foo** %2, align 8
store i32 %0, i32* %3, align 4
- %4 = load %class.Foo** %2
+ %4 = load %class.Foo*, %class.Foo** %2
ret void
}
diff --git a/llvm/test/Transforms/GlobalDCE/indirectbr.ll b/llvm/test/Transforms/GlobalDCE/indirectbr.ll
index 048dd7b35a2..5671aea943f 100644
--- a/llvm/test/Transforms/GlobalDCE/indirectbr.ll
+++ b/llvm/test/Transforms/GlobalDCE/indirectbr.ll
@@ -10,7 +10,7 @@ entry:
L1:
%arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @L, i32 0, i32 %idx
- %l = load i8** %arrayidx
+ %l = load i8*, i8** %arrayidx
indirectbr i8* %l, [label %L1, label %L2]
L2:
diff --git a/llvm/test/Transforms/GlobalOpt/2004-10-10-CastStoreOnce.ll b/llvm/test/Transforms/GlobalOpt/2004-10-10-CastStoreOnce.ll
index bdcf1fa4778..061b9b0670f 100644
--- a/llvm/test/Transforms/GlobalOpt/2004-10-10-CastStoreOnce.ll
+++ b/llvm/test/Transforms/GlobalOpt/2004-10-10-CastStoreOnce.ll
@@ -4,8 +4,8 @@
@G = internal global i32* null ; <i32**> [#uses=2]
define i32 @user() {
- %P = load i32** @G ; <i32*> [#uses=1]
- %Q = load i32* %P ; <i32> [#uses=1]
+ %P = load i32*, i32** @G ; <i32*> [#uses=1]
+ %Q = load i32, i32* %P ; <i32> [#uses=1]
ret i32 %Q
}
diff --git a/llvm/test/Transforms/GlobalOpt/2005-06-15-LocalizeConstExprCrash.ll b/llvm/test/Transforms/GlobalOpt/2005-06-15-LocalizeConstExprCrash.ll
index 7bcb1d43091..f4f361bf76d 100644
--- a/llvm/test/Transforms/GlobalOpt/2005-06-15-LocalizeConstExprCrash.ll
+++ b/llvm/test/Transforms/GlobalOpt/2005-06-15-LocalizeConstExprCrash.ll
@@ -4,7 +4,7 @@
@g_40507551 = internal global i16 31038 ; <i16*> [#uses=1]
define void @main() {
- %tmp.4.i.1 = load i8* getelementptr (i8* bitcast (i16* @g_40507551 to i8*), i32 1) ; <i8> [#uses=0]
+ %tmp.4.i.1 = load i8, i8* getelementptr (i8* bitcast (i16* @g_40507551 to i8*), i32 1) ; <i8> [#uses=0]
ret void
}
diff --git a/llvm/test/Transforms/GlobalOpt/2006-07-07-InlineAsmCrash.ll b/llvm/test/Transforms/GlobalOpt/2006-07-07-InlineAsmCrash.ll
index c9712198ce7..419ae101966 100644
--- a/llvm/test/Transforms/GlobalOpt/2006-07-07-InlineAsmCrash.ll
+++ b/llvm/test/Transforms/GlobalOpt/2006-07-07-InlineAsmCrash.ll
@@ -106,12 +106,12 @@ entry:
%__priority_addr = alloca i32 ; <i32*> [#uses=2]
store i32 %__initialize_p, i32* %__initialize_p_addr
store i32 %__priority, i32* %__priority_addr
- %tmp = load i32* %__priority_addr ; <i32> [#uses=1]
+ %tmp = load i32, i32* %__priority_addr ; <i32> [#uses=1]
%tmp.upgrd.1 = icmp eq i32 %tmp, 65535 ; <i1> [#uses=1]
br i1 %tmp.upgrd.1, label %cond_true, label %cond_next14
cond_true: ; preds = %entry
- %tmp8 = load i32* %__initialize_p_addr ; <i32> [#uses=1]
+ %tmp8 = load i32, i32* %__initialize_p_addr ; <i32> [#uses=1]
%tmp9 = icmp eq i32 %tmp8, 1 ; <i1> [#uses=1]
br i1 %tmp9, label %cond_true10, label %cond_next14
diff --git a/llvm/test/Transforms/GlobalOpt/2006-11-01-ShrinkGlobalPhiCrash.ll b/llvm/test/Transforms/GlobalOpt/2006-11-01-ShrinkGlobalPhiCrash.ll
index 352639ac067..7b62cf08a4b 100644
--- a/llvm/test/Transforms/GlobalOpt/2006-11-01-ShrinkGlobalPhiCrash.ll
+++ b/llvm/test/Transforms/GlobalOpt/2006-11-01-ShrinkGlobalPhiCrash.ll
@@ -10,7 +10,7 @@ entry:
cond_true16.i: ; preds = %cond_true16.i, %entry
%low.0.in.i.0 = phi i32* [ @nrow, %entry ], [ null, %cond_true16.i ] ; <i32*> [#uses=1]
- %low.0.i = load i32* %low.0.in.i.0 ; <i32> [#uses=0]
+ %low.0.i = load i32, i32* %low.0.in.i.0 ; <i32> [#uses=0]
br label %cond_true16.i
}
diff --git a/llvm/test/Transforms/GlobalOpt/2007-04-05-Crash.ll b/llvm/test/Transforms/GlobalOpt/2007-04-05-Crash.ll
index c7aca62895f..f312fbbc9f9 100644
--- a/llvm/test/Transforms/GlobalOpt/2007-04-05-Crash.ll
+++ b/llvm/test/Transforms/GlobalOpt/2007-04-05-Crash.ll
@@ -9,7 +9,7 @@ target triple = "thumb-apple-darwin8"
define zeroext i16 @__NSCharToUnicharCFWrapper(i8 zeroext %ch) {
entry:
%iftmp.0.0.in.in = select i1 false, i16* @replacementUnichar, i16* null ; <i16*> [#uses=1]
- %iftmp.0.0.in = load i16* %iftmp.0.0.in.in ; <i16> [#uses=1]
+ %iftmp.0.0.in = load i16, i16* %iftmp.0.0.in.in ; <i16> [#uses=1]
ret i16 %iftmp.0.0.in
}
diff --git a/llvm/test/Transforms/GlobalOpt/2007-05-13-Crash.ll b/llvm/test/Transforms/GlobalOpt/2007-05-13-Crash.ll
index 2b7e7379b26..55863556246 100644
--- a/llvm/test/Transforms/GlobalOpt/2007-05-13-Crash.ll
+++ b/llvm/test/Transforms/GlobalOpt/2007-05-13-Crash.ll
@@ -19,13 +19,13 @@ internal constant %struct.__builtin_CFString {
define %struct.__CFString* @_Z19SFLGetVisibilityKeyv() {
entry:
- %tmp1 = load %struct.__CFString** @_ZZ19SFLGetVisibilityKeyvE19_kSFLLVisibilityKey ; <%struct.__CFString*> [#uses=1]
+ %tmp1 = load %struct.__CFString*, %struct.__CFString** @_ZZ19SFLGetVisibilityKeyvE19_kSFLLVisibilityKey ; <%struct.__CFString*> [#uses=1]
ret %struct.__CFString* %tmp1
}
define %struct.__CFString* @_Z22SFLGetAlwaysVisibleKeyv() {
entry:
- %tmp1 = load %struct.__CFString** @_ZZ22SFLGetAlwaysVisibleKeyvE22_kSFLLAlwaysVisibleKey ; <%struct.__CFString*> [#uses=1]
+ %tmp1 = load %struct.__CFString*, %struct.__CFString** @_ZZ22SFLGetAlwaysVisibleKeyvE22_kSFLLAlwaysVisibleKey ; <%struct.__CFString*> [#uses=1]
%tmp2 = icmp eq %struct.__CFString* %tmp1, null ; <i1> [#uses=1]
br i1 %tmp2, label %cond_true, label %cond_next
@@ -34,25 +34,25 @@ cond_true: ; preds = %entry
br label %cond_next
cond_next: ; preds = %entry, %cond_true
- %tmp4 = load %struct.__CFString** @_ZZ22SFLGetAlwaysVisibleKeyvE22_kSFLLAlwaysVisibleKey ; <%struct.__CFString*> [#uses=1]
+ %tmp4 = load %struct.__CFString*, %struct.__CFString** @_ZZ22SFLGetAlwaysVisibleKeyvE22_kSFLLAlwaysVisibleKey ; <%struct.__CFString*> [#uses=1]
ret %struct.__CFString* %tmp4
}
define %struct.__CFString* @_Z21SFLGetNeverVisibleKeyv() {
entry:
- %tmp1 = load %struct.__CFString** @_ZZ21SFLGetNeverVisibleKeyvE21_kSFLLNeverVisibleKey ; <%struct.__CFString*> [#uses=1]
+ %tmp1 = load %struct.__CFString*, %struct.__CFString** @_ZZ21SFLGetNeverVisibleKeyvE21_kSFLLNeverVisibleKey ; <%struct.__CFString*> [#uses=1]
ret %struct.__CFString* %tmp1
}
define %struct.__CFDictionary* @_ZN18SFLMutableListItem18GetPrefsDictionaryEv(%struct.SFLMutableListItem* %this) {
entry:
%tmp4 = getelementptr %struct.SFLMutableListItem, %struct.SFLMutableListItem* %this, i32 0, i32 0 ; <i16*> [#uses=1]
- %tmp5 = load i16* %tmp4 ; <i16> [#uses=1]
+ %tmp5 = load i16, i16* %tmp4 ; <i16> [#uses=1]
%tmp6 = icmp eq i16 %tmp5, 0 ; <i1> [#uses=1]
br i1 %tmp6, label %cond_next22, label %cond_true
cond_true: ; preds = %entry
- %tmp9 = load %struct.__CFString** @_ZZ22SFLGetAlwaysVisibleKeyvE22_kSFLLAlwaysVisibleKey ; <%struct.__CFString*> [#uses=1]
+ %tmp9 = load %struct.__CFString*, %struct.__CFString** @_ZZ22SFLGetAlwaysVisibleKeyvE22_kSFLLAlwaysVisibleKey ; <%struct.__CFString*> [#uses=1]
%tmp10 = icmp eq %struct.__CFString* %tmp9, null ; <i1> [#uses=1]
br i1 %tmp10, label %cond_true13, label %cond_next22
@@ -62,8 +62,8 @@ cond_true13: ; preds = %cond_true
cond_next22: ; preds = %entry, %cond_true13, %cond_true
%iftmp.1.0.in = phi %struct.__CFString** [ @_ZZ22SFLGetAlwaysVisibleKeyvE22_kSFLLAlwaysVisibleKey, %cond_true ], [ @_ZZ22SFLGetAlwaysVisibleKeyvE22_kSFLLAlwaysVisibleKey, %cond_true13 ], [ @_ZZ21SFLGetNeverVisibleKeyvE21_kSFLLNeverVisibleKey, %entry ] ; <%struct.__CFString**> [#uses=1]
- %iftmp.1.0 = load %struct.__CFString** %iftmp.1.0.in ; <%struct.__CFString*> [#uses=1]
- %tmp24 = load %struct.__CFString** @_ZZ19SFLGetVisibilityKeyvE19_kSFLLVisibilityKey ; <%struct.__CFString*> [#uses=1]
+ %iftmp.1.0 = load %struct.__CFString*, %struct.__CFString** %iftmp.1.0.in ; <%struct.__CFString*> [#uses=1]
+ %tmp24 = load %struct.__CFString*, %struct.__CFString** @_ZZ19SFLGetVisibilityKeyvE19_kSFLLVisibilityKey ; <%struct.__CFString*> [#uses=1]
%tmp2728 = bitcast %struct.__CFString* %tmp24 to i8* ; <i8*> [#uses=1]
%tmp2930 = bitcast %struct.__CFString* %iftmp.1.0 to i8* ; <i8*> [#uses=1]
call void @_Z20CFDictionaryAddValuePKvS0_( i8* %tmp2728, i8* %tmp2930 )
diff --git a/llvm/test/Transforms/GlobalOpt/2007-11-09-GEP-GEP-Crash.ll b/llvm/test/Transforms/GlobalOpt/2007-11-09-GEP-GEP-Crash.ll
index ede505b907e..45ab055a04c 100644
--- a/llvm/test/Transforms/GlobalOpt/2007-11-09-GEP-GEP-Crash.ll
+++ b/llvm/test/Transforms/GlobalOpt/2007-11-09-GEP-GEP-Crash.ll
@@ -10,7 +10,7 @@ target triple = "powerpc-apple-darwin8"
define i8 @func() {
entry:
%tmp10 = getelementptr [2 x i32], [2 x i32]* getelementptr ([6 x [2 x i32]]* @aaui1, i32 0, i32 0), i32 5, i32 1 ; <i32*> [#uses=1]
- %tmp11 = load i32* %tmp10, align 4 ; <i32> [#uses=1]
+ %tmp11 = load i32, i32* %tmp10, align 4 ; <i32> [#uses=1]
%tmp12 = call i32 (...)* @func3( i32* null, i32 0, i32 %tmp11 ) ; <i32> [#uses=0]
ret i8 undef
}
diff --git a/llvm/test/Transforms/GlobalOpt/2008-01-03-Crash.ll b/llvm/test/Transforms/GlobalOpt/2008-01-03-Crash.ll
index 4105ab1ed5b..dc41fdb8de4 100644
--- a/llvm/test/Transforms/GlobalOpt/2008-01-03-Crash.ll
+++ b/llvm/test/Transforms/GlobalOpt/2008-01-03-Crash.ll
@@ -20,7 +20,7 @@ stack_restore: ; preds = %cond_next20.i
ret i32 0
cond_next21.i.i23.i: ; preds = %cond_next20.i
- %tmp6.i4.i = load i32* bitcast (void (i32)** @indirect1 to i32*), align 4 ; <i32> [#uses=0]
+ %tmp6.i4.i = load i32, i32* bitcast (void (i32)** @indirect1 to i32*), align 4 ; <i32> [#uses=0]
ret i32 0
}
diff --git a/llvm/test/Transforms/GlobalOpt/2008-01-13-OutOfRangeSROA.ll b/llvm/test/Transforms/GlobalOpt/2008-01-13-OutOfRangeSROA.ll
index ec246ac2698..8df7050b38a 100644
--- a/llvm/test/Transforms/GlobalOpt/2008-01-13-OutOfRangeSROA.ll
+++ b/llvm/test/Transforms/GlobalOpt/2008-01-13-OutOfRangeSROA.ll
@@ -11,6 +11,6 @@ define void @test(i32 %X) {
define double @get(i32 %X) {
%P = getelementptr [16 x [31 x double]], [16 x [31 x double]]* @mm, i32 0, i32 0, i32 %X
- %V = load double* %P
+ %V = load double, double* %P
ret double %V
}
diff --git a/llvm/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll b/llvm/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
index 08b2cb1eb6f..ac663604854 100644
--- a/llvm/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
+++ b/llvm/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
@@ -3,7 +3,7 @@
define double @foo() nounwind {
entry:
- %tmp1 = load volatile double* @t0.1441, align 8 ; <double> [#uses=2]
+ %tmp1 = load volatile double, double* @t0.1441, align 8 ; <double> [#uses=2]
%tmp4 = fmul double %tmp1, %tmp1 ; <double> [#uses=1]
ret double %tmp4
}
diff --git a/llvm/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll b/llvm/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll
index d58becd5307..2d40877f9cf 100644
--- a/llvm/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll
+++ b/llvm/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll
@@ -23,9 +23,9 @@ define void @test() {
}
define double @test2() {
- %V1 = load double* getelementptr (%T* @G, i32 0, i32 0), align 16
- %V2 = load double* getelementptr (%T* @G, i32 0, i32 1), align 8
- %V3 = load double* getelementptr (%T* @G, i32 0, i32 2), align 16
+ %V1 = load double, double* getelementptr (%T* @G, i32 0, i32 0), align 16
+ %V2 = load double, double* getelementptr (%T* @G, i32 0, i32 1), align 8
+ %V3 = load double, double* getelementptr (%T* @G, i32 0, i32 2), align 16
%R = fadd double %V1, %V2
%R2 = fadd double %R, %V3
ret double %R2
diff --git a/llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll b/llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll
index 0867ca9c543..da6e6606179 100644
--- a/llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll
+++ b/llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll
@@ -10,8 +10,8 @@
@array = internal addrspace(1) global [ 2 x i32 ] zeroinitializer
define i32 @foo() {
- %A = load i32 addrspace(1) * getelementptr ({ i32, i32 } addrspace(1) * @struct, i32 0, i32 0)
- %B = load i32 addrspace(1) * getelementptr ([ 2 x i32 ] addrspace(1) * @array, i32 0, i32 0)
+ %A = load i32, i32 addrspace(1) * getelementptr ({ i32, i32 } addrspace(1) * @struct, i32 0, i32 0)
+ %B = load i32, i32 addrspace(1) * getelementptr ([ 2 x i32 ] addrspace(1) * @array, i32 0, i32 0)
; Use the loaded values, so they won't get removed completely
%R = add i32 %A, %B
ret i32 %R
diff --git a/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash-2.ll b/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash-2.ll
index 6a8e2212825..628c108766e 100644
--- a/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash-2.ll
+++ b/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash-2.ll
@@ -18,7 +18,7 @@ declare noalias i8* @malloc(i32)
define i32 @baz() nounwind readonly noinline {
bb1.thread:
- %tmpLD1 = load %struct.foo** @X, align 4 ; <%struct.foo*> [#uses=2]
+ %tmpLD1 = load %struct.foo*, %struct.foo** @X, align 4 ; <%struct.foo*> [#uses=2]
br label %bb1
bb1: ; preds = %bb1, %bb1.thread
diff --git a/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash.ll b/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash.ll
index b6e9e979d17..8bcc2d3f948 100644
--- a/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash.ll
+++ b/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash.ll
@@ -17,7 +17,7 @@ declare noalias i8* @malloc(i32)
define i32 @baz() nounwind readonly noinline {
bb1.thread:
- %tmpLD1 = load %struct.foo** @X, align 4 ; <%struct.foo*> [#uses=3]
+ %tmpLD1 = load %struct.foo*, %struct.foo** @X, align 4 ; <%struct.foo*> [#uses=3]
store %struct.foo* %tmpLD1, %struct.foo** null
br label %bb1
diff --git a/llvm/test/Transforms/GlobalOpt/2009-01-13-phi-user.ll b/llvm/test/Transforms/GlobalOpt/2009-01-13-phi-user.ll
index c127b856e61..21ec5267b97 100644
--- a/llvm/test/Transforms/GlobalOpt/2009-01-13-phi-user.ll
+++ b/llvm/test/Transforms/GlobalOpt/2009-01-13-phi-user.ll
@@ -13,14 +13,14 @@ entry:
bb: ; preds = %bb1
%0 = getelementptr %struct.node, %struct.node* %t.0, i64 0, i32 1 ; <i32*> [#uses=1]
- %1 = load i32* %0, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
%2 = getelementptr %struct.node, %struct.node* %t.0, i64 0, i32 0 ; <%struct.node**> [#uses=1]
br label %bb1
bb1: ; preds = %bb, %entry
%value.0 = phi i32 [ undef, %entry ], [ %1, %bb ] ; <i32> [#uses=1]
%t.0.in = phi %struct.node** [ @head, %entry ], [ %2, %bb ] ; <%struct.node**> [#uses=1]
- %t.0 = load %struct.node** %t.0.in ; <%struct.node*> [#uses=3]
+ %t.0 = load %struct.node*, %struct.node** %t.0.in ; <%struct.node*> [#uses=3]
%3 = icmp eq %struct.node* %t.0, null ; <i1> [#uses=1]
br i1 %3, label %bb2, label %bb
diff --git a/llvm/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll b/llvm/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll
index d6a565ad10a..a3e90045d64 100644
--- a/llvm/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll
+++ b/llvm/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll
@@ -5,6 +5,6 @@
@a = alias bitcast (i32* @g to i8*)
define void @f() {
- %tmp = load i8* @a
+ %tmp = load i8, i8* @a
ret void
}
diff --git a/llvm/test/Transforms/GlobalOpt/2009-03-05-dbg.ll b/llvm/test/Transforms/GlobalOpt/2009-03-05-dbg.ll
index 049eef10eb7..c82b83083ae 100644
--- a/llvm/test/Transforms/GlobalOpt/2009-03-05-dbg.ll
+++ b/llvm/test/Transforms/GlobalOpt/2009-03-05-dbg.ll
@@ -33,7 +33,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone
define i32 @bar() nounwind ssp {
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = load i32* @Stop, align 4, !dbg !13 ; <i32> [#uses=1]
+ %0 = load i32, i32* @Stop, align 4, !dbg !13 ; <i32> [#uses=1]
%1 = icmp eq i32 %0, 1, !dbg !13 ; <i1> [#uses=1]
br i1 %1, label %bb, label %bb1, !dbg !13
diff --git a/llvm/test/Transforms/GlobalOpt/2009-03-07-PromotePtrToBool.ll b/llvm/test/Transforms/GlobalOpt/2009-03-07-PromotePtrToBool.ll
index 059af1cfeac..469fa765c7a 100644
--- a/llvm/test/Transforms/GlobalOpt/2009-03-07-PromotePtrToBool.ll
+++ b/llvm/test/Transforms/GlobalOpt/2009-03-07-PromotePtrToBool.ll
@@ -12,6 +12,6 @@ entry:
define i32* @get() nounwind {
entry:
- %0 = load i32** @X, align 4 ; <i32*> [#uses=1]
+ %0 = load i32*, i32** @X, align 4 ; <i32*> [#uses=1]
ret i32* %0
}
diff --git a/llvm/test/Transforms/GlobalOpt/2009-06-01-RecursivePHI.ll b/llvm/test/Transforms/GlobalOpt/2009-06-01-RecursivePHI.ll
index d3c3ff59fea..30e4d422621 100644
--- a/llvm/test/Transforms/GlobalOpt/2009-06-01-RecursivePHI.ll
+++ b/llvm/test/Transforms/GlobalOpt/2009-06-01-RecursivePHI.ll
@@ -33,7 +33,7 @@ bb.i4.i: ; preds = %my_calloc.exit.i
unreachable
my_calloc.exit5.i: ; preds = %my_calloc.exit.i
- %.pre.i58 = load %struct.s_net** @net, align 4 ; <%struct.s_net*> [#uses=1]
+ %.pre.i58 = load %struct.s_net*, %struct.s_net** @net, align 4 ; <%struct.s_net*> [#uses=1]
br label %bb17.i78
bb1.i61: ; preds = %bb4.preheader.i, %bb1.i61
@@ -58,11 +58,11 @@ bb.i1.i68: ; preds = %bb8.i67
unreachable
my_malloc.exit.i70: ; preds = %bb8.i67
- %0 = load %struct.s_net** @net, align 4 ; <%struct.s_net*> [#uses=1]
+ %0 = load %struct.s_net*, %struct.s_net** @net, align 4 ; <%struct.s_net*> [#uses=1]
br i1 undef, label %bb9.i71, label %bb16.i77
bb9.i71: ; preds = %bb9.i71, %my_malloc.exit.i70
- %1 = load %struct.s_net** @net, align 4 ; <%struct.s_net*> [#uses=1]
+ %1 = load %struct.s_net*, %struct.s_net** @net, align 4 ; <%struct.s_net*> [#uses=1]
br i1 undef, label %bb9.i71, label %bb16.i77
bb16.i77: ; preds = %bb9.i71, %my_malloc.exit.i70, %bb15.preheader.i
diff --git a/llvm/test/Transforms/GlobalOpt/2009-11-16-BrokenPerformHeapAllocSRoA.ll b/llvm/test/Transforms/GlobalOpt/2009-11-16-BrokenPerformHeapAllocSRoA.ll
index 40862bd038e..7fad289ea0e 100644
--- a/llvm/test/Transforms/GlobalOpt/2009-11-16-BrokenPerformHeapAllocSRoA.ll
+++ b/llvm/test/Transforms/GlobalOpt/2009-11-16-BrokenPerformHeapAllocSRoA.ll
@@ -13,7 +13,7 @@ target triple = "x86_64-apple-darwin10.0"
; based on linit in office-ispell
define void @test() nounwind ssp {
- %1 = load i32* getelementptr inbounds (%struct.hashheader* @hashheader, i64 0, i32 13), align 8 ; <i32> [#uses=1]
+ %1 = load i32, i32* getelementptr inbounds (%struct.hashheader* @hashheader, i64 0, i32 13), align 8 ; <i32> [#uses=1]
%2 = sext i32 %1 to i64 ; <i64> [#uses=1]
%3 = mul i64 %2, ptrtoint (%struct.strchartype* getelementptr (%struct.strchartype* null, i64 1) to i64) ; <i64> [#uses=1]
%4 = tail call i8* @malloc(i64 %3) ; <i8*> [#uses=1]
diff --git a/llvm/test/Transforms/GlobalOpt/2009-11-16-MallocSingleStoreToGlobalVar.ll b/llvm/test/Transforms/GlobalOpt/2009-11-16-MallocSingleStoreToGlobalVar.ll
index 7c5e8e40b1d..25bb9769f49 100644
--- a/llvm/test/Transforms/GlobalOpt/2009-11-16-MallocSingleStoreToGlobalVar.ll
+++ b/llvm/test/Transforms/GlobalOpt/2009-11-16-MallocSingleStoreToGlobalVar.ll
@@ -14,14 +14,14 @@ target triple = "x86_64-apple-darwin10.0"
; Derived from @DescribeChannel() in yacr2
define void @test() nounwind ssp {
store i64 2335, i64* @channelColumns, align 8
- %1 = load i64* @channelColumns, align 8 ; <i64> [#uses=1]
+ %1 = load i64, i64* @channelColumns, align 8 ; <i64> [#uses=1]
%2 = shl i64 %1, 3 ; <i64> [#uses=1]
%3 = add i64 %2, 8 ; <i64> [#uses=1]
%4 = call noalias i8* @malloc(i64 %3) nounwind ; <i8*> [#uses=1]
; CHECK: call noalias i8* @malloc
%5 = bitcast i8* %4 to i64* ; <i64*> [#uses=1]
store i64* %5, i64** @TOP, align 8
- %6 = load i64** @TOP, align 8 ; <i64*> [#uses=1]
+ %6 = load i64*, i64** @TOP, align 8 ; <i64*> [#uses=1]
%7 = getelementptr inbounds i64, i64* %6, i64 13 ; <i64*> [#uses=1]
store i64 0, i64* %7, align 8
ret void
diff --git a/llvm/test/Transforms/GlobalOpt/2010-02-25-MallocPromote.ll b/llvm/test/Transforms/GlobalOpt/2010-02-25-MallocPromote.ll
index 629d57c8842..9f53ce402fd 100644
--- a/llvm/test/Transforms/GlobalOpt/2010-02-25-MallocPromote.ll
+++ b/llvm/test/Transforms/GlobalOpt/2010-02-25-MallocPromote.ll
@@ -12,7 +12,7 @@ bb918:
%malloccall.i10 = call i8* @malloc(i32 16) nounwind ; <i8*> [#uses=1]
%0 = bitcast i8* %malloccall.i10 to i32* ; <i32*> [#uses=1]
store i32* %0, i32** @fixLRBT, align 8
- %1 = load i32** @fixLRBT, align 8 ; <i32*> [#uses=0]
- %A = load i32* %1
+ %1 = load i32*, i32** @fixLRBT, align 8 ; <i32*> [#uses=0]
+ %A = load i32, i32* %1
ret i32 %A
}
diff --git a/llvm/test/Transforms/GlobalOpt/2010-02-26-MallocSROA.ll b/llvm/test/Transforms/GlobalOpt/2010-02-26-MallocSROA.ll
index ab7721fd972..12fa3419192 100644
--- a/llvm/test/Transforms/GlobalOpt/2010-02-26-MallocSROA.ll
+++ b/llvm/test/Transforms/GlobalOpt/2010-02-26-MallocSROA.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-linux-gnu"
define fastcc void @init_net() nounwind {
entry:
- %0 = load i32* @numf2s, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @numf2s, align 4 ; <i32> [#uses=1]
%mallocsize2 = shl i32 %0, 4 ; <i32> [#uses=1]
%malloccall3 = tail call i8* @malloc(i32 %mallocsize2) nounwind ; <i8*> [#uses=1]
%1 = bitcast i8* %malloccall3 to %struct.xyz* ; <%struct.xyz*> [#uses=1]
@@ -20,7 +20,7 @@ entry:
define fastcc void @load_train(i8* %trainfile, i32 %mode, i32 %objects) nounwind {
entry:
- %0 = load %struct.xyz** @Y, align 8 ; <%struct.xyz*> [#uses=0]
+ %0 = load %struct.xyz*, %struct.xyz** @Y, align 8 ; <%struct.xyz*> [#uses=0]
ret void
}
diff --git a/llvm/test/Transforms/GlobalOpt/array-elem-refs.ll b/llvm/test/Transforms/GlobalOpt/array-elem-refs.ll
index ec472b0e99f..eb9a5f41554 100644
--- a/llvm/test/Transforms/GlobalOpt/array-elem-refs.ll
+++ b/llvm/test/Transforms/GlobalOpt/array-elem-refs.ll
@@ -11,9 +11,9 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
define signext i8 @foo() #0 {
entry:
- %0 = load i8*** @c, align 8
- %1 = load i8** %0, align 8
- %2 = load i8* %1, align 1
+ %0 = load i8**, i8*** @c, align 8
+ %1 = load i8*, i8** %0, align 8
+ %2 = load i8, i8* %1, align 1
ret i8 %2
; CHECK-LABEL: @foo
diff --git a/llvm/test/Transforms/GlobalOpt/atomic.ll b/llvm/test/Transforms/GlobalOpt/atomic.ll
index ac05bfd68d9..563c1fec7d2 100644
--- a/llvm/test/Transforms/GlobalOpt/atomic.ll
+++ b/llvm/test/Transforms/GlobalOpt/atomic.ll
@@ -8,7 +8,7 @@
define void @test1() {
entry:
- %0 = load atomic i8* bitcast (i64* @GV1 to i8*) acquire, align 8
+ %0 = load atomic i8, i8* bitcast (i64* @GV1 to i8*) acquire, align 8
ret void
}
@@ -20,6 +20,6 @@ entry:
}
define i32 @test2b() {
entry:
- %atomic-load = load atomic i32* @GV2 seq_cst, align 4
+ %atomic-load = load atomic i32, i32* @GV2 seq_cst, align 4
ret i32 %atomic-load
}
diff --git a/llvm/test/Transforms/GlobalOpt/basictest.ll b/llvm/test/Transforms/GlobalOpt/basictest.ll
index 4332d3dd38c..4c25e4f9f40 100644
--- a/llvm/test/Transforms/GlobalOpt/basictest.ll
+++ b/llvm/test/Transforms/GlobalOpt/basictest.ll
@@ -3,7 +3,7 @@
@X = internal global i32 4 ; <i32*> [#uses=1]
define i32 @foo() {
- %V = load i32* @X ; <i32> [#uses=1]
+ %V = load i32, i32* @X ; <i32> [#uses=1]
ret i32 %V
}
diff --git a/llvm/test/Transforms/GlobalOpt/constantfold-initializers.ll b/llvm/test/Transforms/GlobalOpt/constantfold-initializers.ll
index 871bfbfd7f2..2d41dfe5641 100644
--- a/llvm/test/Transforms/GlobalOpt/constantfold-initializers.ll
+++ b/llvm/test/Transforms/GlobalOpt/constantfold-initializers.ll
@@ -20,7 +20,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define internal void @test1() {
entry:
store i32 1, i32* getelementptr inbounds ([2 x i32]* @xs, i64 0, i64 0)
- %0 = load i32* getelementptr inbounds ([2 x i32]* @xs, i32 0, i64 0), align 4
+ %0 = load i32, i32* getelementptr inbounds ([2 x i32]* @xs, i32 0, i64 0), align 4
store i32 %0, i32* getelementptr inbounds ([2 x i32]* @xs, i64 0, i64 1)
ret void
}
@@ -37,7 +37,7 @@ entry:
define internal i32 @test2_helper(%closure* %this, i32 %b) {
entry:
%0 = getelementptr inbounds %closure, %closure* %this, i32 0, i32 0
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%add = add nsw i32 %1, %b
ret i32 %add
}
@@ -85,7 +85,7 @@ entry:
@test6_v2 = global i32 0, align 4
; CHECK: @test6_v2 = global i32 42, align 4
define internal void @test6() {
- %load = load { i32, i32 }* @test6_v1, align 8
+ %load = load { i32, i32 }, { i32, i32 }* @test6_v1, align 8
%xv0 = extractvalue { i32, i32 } %load, 0
%iv = insertvalue { i32, i32 } %load, i32 %xv0, 1
%xv1 = extractvalue { i32, i32 } %iv, 1
diff --git a/llvm/test/Transforms/GlobalOpt/crash-2.ll b/llvm/test/Transforms/GlobalOpt/crash-2.ll
index 684f6cee180..ed8b588602c 100644
--- a/llvm/test/Transforms/GlobalOpt/crash-2.ll
+++ b/llvm/test/Transforms/GlobalOpt/crash-2.ll
@@ -14,6 +14,6 @@
@g_271 = internal global [7 x [6 x [5 x i8*]]] [[6 x [5 x i8*]] [[5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* null], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_114, i8* @g_114, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)], [5 x i8*] [i8* null, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* null, i8* null], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)], [5 x i8*] [i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* null, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* @g_25, i8* @g_114, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)]], [6 x [5 x i8*]] [[5 x i8*] [i8* @g_25, i8* null, i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)], [5 x i8*] [i8* @g_25, i8* @g_114, i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_114], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* @g_25, i8* 
getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25], [5 x i8*] [i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_114, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* @g_25, i8* @g_25, i8* @g_25], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)]], [6 x [5 x i8*]] [[5 x i8*] [i8* null, i8* @g_25, i8* @g_25, i8* @g_25, i8* null], [5 x i8*] [i8* @g_25, i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1)], [5 x i8*] [i8* null, i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* null, i8* @g_25], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_114, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1)], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* null, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* null], [5 x i8*] [i8* getelementptr (i8* getelementptr 
inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)]], [6 x [5 x i8*]] [[5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* null, i8* @g_25], [5 x i8*] [i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)], [5 x i8*] [i8* @g_25, i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* @g_25], [5 x i8*] [i8* @g_114, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_114], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)], [5 x i8*] [i8* @g_114, i8* @g_25, i8* @g_25, i8* @g_114, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)]], [6 x [5 x i8*]] [[5 x i8*] [i8* @g_25, i8* null, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* @g_25], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* 
@g_71, i32 0, i32 0), i64 1), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1)], [5 x i8*] [i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* @g_25, i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1)], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* @g_114], [5 x i8*] [i8* @g_25, i8* null, i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* null], [5 x i8*] [i8* @g_114, i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_114, i8* @g_25]], [6 x [5 x i8*]] [[5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* null, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* null, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr (i8* 
getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1)], [5 x i8*] [i8* @g_114, i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)], [5 x i8*] [i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0)], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25]], [6 x [5 x i8*]] [[5 x i8*] [i8* @g_25, i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* null], [5 x i8*] [i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_114, i8* @g_25, i8* @g_25, i8* @g_114], [5 x i8*] [i8* null, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_25, i8* null, i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1)], [5 x i8*] [i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* @g_114, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* @g_114, i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1)], [5 x i8*] [i8* @g_25, i8* @g_25, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* @g_25], [5 x i8*] [i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 
1), i8* @g_25, i8* @g_25, i8* getelementptr (i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), i64 1), i8* @g_25]]], align 4
define i32 @func() {
- %tmp = load i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), align 1
+ %tmp = load i8, i8* getelementptr inbounds (%struct.S0.1.7.13* @g_71, i32 0, i32 0), align 1
ret i32 0
}
diff --git a/llvm/test/Transforms/GlobalOpt/crash.ll b/llvm/test/Transforms/GlobalOpt/crash.ll
index 0bef820dd2f..c1458a5323e 100644
--- a/llvm/test/Transforms/GlobalOpt/crash.ll
+++ b/llvm/test/Transforms/GlobalOpt/crash.ll
@@ -35,7 +35,7 @@ bb.nph.i:
unreachable
xx:
- %E = load %T** @switch_inf, align 8
+ %E = load %T*, %T** @switch_inf, align 8
unreachable
}
@@ -46,7 +46,7 @@ declare noalias i8* @malloc(i64) nounwind
@permute_bitrev.bitrev = internal global i32* null, align 8
define void @permute_bitrev() nounwind {
entry:
- %tmp = load i32** @permute_bitrev.bitrev, align 8
+ %tmp = load i32*, i32** @permute_bitrev.bitrev, align 8
%conv = sext i32 0 to i64
%mul = mul i64 %conv, 4
%call = call i8* @malloc(i64 %mul)
@@ -72,7 +72,7 @@ declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
define void @icmp_user_of_stored_once() nounwind ssp {
entry:
- %tmp4 = load i32*** @g_52, align 8
+ %tmp4 = load i32**, i32*** @g_52, align 8
store i32** @g_90, i32*** @g_52
%cmp17 = icmp ne i32*** undef, @g_52
ret void
diff --git a/llvm/test/Transforms/GlobalOpt/ctor-list-opt-inbounds.ll b/llvm/test/Transforms/GlobalOpt/ctor-list-opt-inbounds.ll
index 9b11985693a..b0db2ceee6c 100644
--- a/llvm/test/Transforms/GlobalOpt/ctor-list-opt-inbounds.ll
+++ b/llvm/test/Transforms/GlobalOpt/ctor-list-opt-inbounds.ll
@@ -15,9 +15,9 @@
define internal void @CTOR() {
store i32 1, i32* getelementptr ([6 x [5 x i32]]* @G, i64 0, i64 0, i64 0)
store i32 2, i32* getelementptr inbounds ([6 x [5 x i32]]* @G, i64 0, i64 0, i64 0)
- %t = load i32* getelementptr ([6 x [5 x i32]]* @G, i64 0, i64 0, i64 0)
+ %t = load i32, i32* getelementptr ([6 x [5 x i32]]* @G, i64 0, i64 0, i64 0)
store i32 %t, i32* @H
- %s = load i32* getelementptr inbounds ([6 x [5 x i32]]* @G, i64 0, i64 0, i64 0)
+ %s = load i32, i32* getelementptr inbounds ([6 x [5 x i32]]* @G, i64 0, i64 0, i64 0)
store i32 %s, i32* @I
ret void
}
diff --git a/llvm/test/Transforms/GlobalOpt/ctor-list-opt.ll b/llvm/test/Transforms/GlobalOpt/ctor-list-opt.ll
index f0414727967..efeabf36746 100644
--- a/llvm/test/Transforms/GlobalOpt/ctor-list-opt.ll
+++ b/llvm/test/Transforms/GlobalOpt/ctor-list-opt.ll
@@ -51,7 +51,7 @@ T: ; preds = %Cont
}
define internal void @CTOR4() {
- %X = load i32* @G3 ; <i32> [#uses=1]
+ %X = load i32, i32* @G3 ; <i32> [#uses=1]
%Y = add i32 %X, 123 ; <i32> [#uses=1]
store i32 %Y, i32* @G3
ret void
@@ -59,7 +59,7 @@ define internal void @CTOR4() {
define internal void @CTOR5() {
%X.2p = getelementptr inbounds { i32, [2 x i32] }, { i32, [2 x i32] }* @X, i32 0, i32 1, i32 0 ; <i32*> [#uses=2]
- %X.2 = load i32* %X.2p ; <i32> [#uses=1]
+ %X.2 = load i32, i32* %X.2p ; <i32> [#uses=1]
%X.1p = getelementptr inbounds { i32, [2 x i32] }, { i32, [2 x i32] }* @X, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 %X.2, i32* %X.1p
store i32 42, i32* %X.2p
@@ -68,9 +68,9 @@ define internal void @CTOR5() {
define internal void @CTOR6() {
%A = alloca i32 ; <i32*> [#uses=2]
- %y = load i32* @Y ; <i32> [#uses=1]
+ %y = load i32, i32* @Y ; <i32> [#uses=1]
store i32 %y, i32* %A
- %Av = load i32* %A ; <i32> [#uses=1]
+ %Av = load i32, i32* %A ; <i32> [#uses=1]
%Av1 = add i32 %Av, 1 ; <i32> [#uses=1]
store i32 %Av1, i32* @Y
ret void
@@ -95,7 +95,7 @@ define internal void @CTOR8() {
}
define i1 @accessor() {
- %V = load i1* @CTORGV ; <i1> [#uses=1]
+ %V = load i1, i1* @CTORGV ; <i1> [#uses=1]
ret i1 %V
}
diff --git a/llvm/test/Transforms/GlobalOpt/deadfunction.ll b/llvm/test/Transforms/GlobalOpt/deadfunction.ll
index f9a0e925cef..5771c4ccfb7 100644
--- a/llvm/test/Transforms/GlobalOpt/deadfunction.ll
+++ b/llvm/test/Transforms/GlobalOpt/deadfunction.ll
@@ -11,7 +11,7 @@ define internal void @test(i32 %n) nounwind noinline {
entry:
%idxprom = sext i32 %n to i64
%arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @test.x, i64 0, i64 %idxprom
- %0 = load i8** %arrayidx, align 8
+ %0 = load i8*, i8** %arrayidx, align 8
indirectbr i8* %0, [label %a, label %b, label %c]
a:
diff --git a/llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll b/llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll
index 675211b64b4..d4bf8032cda 100644
--- a/llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll
+++ b/llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll
@@ -13,7 +13,7 @@
@llvm.used = appending global [2 x i8*] [i8* getelementptr inbounds ([7 x i8]* @"\01L_OBJC_METH_VAR_NAME_40", i32 0, i32 0), i8* bitcast (i8** @"\01L_OBJC_SELECTOR_REFERENCES_41" to i8*)]
define internal void @__cxx_global_var_init() section "__TEXT,__StaticInit,regular,pure_instructions" {
- %1 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_41", !invariant.load !2009
+ %1 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_41", !invariant.load !2009
store i8* %1, i8** getelementptr inbounds ([1 x %struct.ButtonInitData]* @_ZL14buttonInitData, i32 0, i32 0, i32 0), align 4
ret void
}
@@ -26,8 +26,8 @@ define internal void @_GLOBAL__I_a() section "__TEXT,__StaticInit,regular,pure_i
declare void @test(i8*)
define void @print() {
-; CHECK: %1 = load i8** getelementptr inbounds ([1 x %struct.ButtonInitData]* @_ZL14buttonInitData, i32 0, i32 0, i32 0), align 4
- %1 = load i8** getelementptr inbounds ([1 x %struct.ButtonInitData]* @_ZL14buttonInitData, i32 0, i32 0, i32 0), align 4
+; CHECK: %1 = load i8*, i8** getelementptr inbounds ([1 x %struct.ButtonInitData]* @_ZL14buttonInitData, i32 0, i32 0, i32 0), align 4
+ %1 = load i8*, i8** getelementptr inbounds ([1 x %struct.ButtonInitData]* @_ZL14buttonInitData, i32 0, i32 0, i32 0), align 4
call void @test(i8* %1)
ret void
}
diff --git a/llvm/test/Transforms/GlobalOpt/fastcc.ll b/llvm/test/Transforms/GlobalOpt/fastcc.ll
index 76122b203d7..26398d2bb90 100644
--- a/llvm/test/Transforms/GlobalOpt/fastcc.ll
+++ b/llvm/test/Transforms/GlobalOpt/fastcc.ll
@@ -2,13 +2,13 @@
define internal i32 @f(i32* %m) {
; CHECK-LABEL: define internal fastcc i32 @f
- %v = load i32* %m
+ %v = load i32, i32* %m
ret i32 %v
}
define internal x86_thiscallcc i32 @g(i32* %m) {
; CHECK-LABEL: define internal fastcc i32 @g
- %v = load i32* %m
+ %v = load i32, i32* %m
ret i32 %v
}
@@ -16,13 +16,13 @@ define internal x86_thiscallcc i32 @g(i32* %m) {
; convention.
define internal coldcc i32 @h(i32* %m) {
; CHECK-LABEL: define internal coldcc i32 @h
- %v = load i32* %m
+ %v = load i32, i32* %m
ret i32 %v
}
define internal i32 @j(i32* %m) {
; CHECK-LABEL: define internal i32 @j
- %v = load i32* %m
+ %v = load i32, i32* %m
ret i32 %v
}
diff --git a/llvm/test/Transforms/GlobalOpt/globalsra-partial.ll b/llvm/test/Transforms/GlobalOpt/globalsra-partial.ll
index df9c72f78f4..c37558a0e12 100644
--- a/llvm/test/Transforms/GlobalOpt/globalsra-partial.ll
+++ b/llvm/test/Transforms/GlobalOpt/globalsra-partial.ll
@@ -18,7 +18,7 @@ define void @storeinit(i32 %i) {
define float @readval(i32 %i) {
%Ptr = getelementptr { i32, [4 x float] }, { i32, [4 x float] }* @G, i32 0, i32 1, i32 %i ; <float*> [#uses=1]
- %V = load float* %Ptr ; <float> [#uses=1]
+ %V = load float, float* %Ptr ; <float> [#uses=1]
ret float %V
}
diff --git a/llvm/test/Transforms/GlobalOpt/globalsra-unknown-index.ll b/llvm/test/Transforms/GlobalOpt/globalsra-unknown-index.ll
index 296b12c14cd..9b51fb741a5 100644
--- a/llvm/test/Transforms/GlobalOpt/globalsra-unknown-index.ll
+++ b/llvm/test/Transforms/GlobalOpt/globalsra-unknown-index.ll
@@ -17,25 +17,25 @@ define void @frob() {
}
define i32 @borf(i64 %i, i64 %j) {
%p = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 0
- %a = load i32* %p
+ %a = load i32, i32* %p
%q = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 0
- %b = load i32* %q
+ %b = load i32, i32* %q
%c = add i32 %a, %b
ret i32 %c
}
define i32 @borg(i64 %i, i64 %j) {
%p = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 1
- %a = load i32* %p
+ %a = load i32, i32* %p
%q = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 1
- %b = load i32* %q
+ %b = load i32, i32* %q
%c = add i32 %a, %b
ret i32 %c
}
define i32 @borh(i64 %i, i64 %j) {
%p = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 2, i32 0, i64 2
- %a = load i32* %p
+ %a = load i32, i32* %p
%q = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 2
- %b = load i32* %q
+ %b = load i32, i32* %q
%c = add i32 %a, %b
ret i32 %c
}
diff --git a/llvm/test/Transforms/GlobalOpt/globalsra.ll b/llvm/test/Transforms/GlobalOpt/globalsra.ll
index 6d8f220d12b..af6c27c82a9 100644
--- a/llvm/test/Transforms/GlobalOpt/globalsra.ll
+++ b/llvm/test/Transforms/GlobalOpt/globalsra.ll
@@ -13,12 +13,12 @@ define void @onlystore() {
define float @storeinit() {
store float 1.000000e+00, float* getelementptr ({ i32, float, { double } }* @G, i32 0, i32 1)
- %X = load float* getelementptr ({ i32, float, { double } }* @G, i32 0, i32 1) ; <float> [#uses=1]
+ %X = load float, float* getelementptr ({ i32, float, { double } }* @G, i32 0, i32 1) ; <float> [#uses=1]
ret float %X
}
define double @constantize() {
- %X = load double* getelementptr ({ i32, float, { double } }* @G, i32 0, i32 2, i32 0) ; <double> [#uses=1]
+ %X = load double, double* getelementptr ({ i32, float, { double } }* @G, i32 0, i32 2, i32 0) ; <double> [#uses=1]
ret double %X
}
diff --git a/llvm/test/Transforms/GlobalOpt/heap-sra-1.ll b/llvm/test/Transforms/GlobalOpt/heap-sra-1.ll
index 5388401ba51..6035eaaefcc 100644
--- a/llvm/test/Transforms/GlobalOpt/heap-sra-1.ll
+++ b/llvm/test/Transforms/GlobalOpt/heap-sra-1.ll
@@ -19,14 +19,14 @@ declare noalias i8* @malloc(i64)
define i32 @baz() nounwind readonly noinline {
bb1.thread:
- %0 = load %struct.foo** @X, align 4
+ %0 = load %struct.foo*, %struct.foo** @X, align 4
br label %bb1
bb1: ; preds = %bb1, %bb1.thread
%i.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ]
%sum.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %3, %bb1 ]
%1 = getelementptr %struct.foo, %struct.foo* %0, i32 %i.0.reg2mem.0, i32 0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = add i32 %2, %sum.0.reg2mem.0
%indvar.next = add i32 %i.0.reg2mem.0, 1
%exitcond = icmp eq i32 %indvar.next, 1200
diff --git a/llvm/test/Transforms/GlobalOpt/heap-sra-2.ll b/llvm/test/Transforms/GlobalOpt/heap-sra-2.ll
index feeb70956d1..d66c627b184 100644
--- a/llvm/test/Transforms/GlobalOpt/heap-sra-2.ll
+++ b/llvm/test/Transforms/GlobalOpt/heap-sra-2.ll
@@ -19,14 +19,14 @@ declare noalias i8* @malloc(i64)
define i32 @baz() nounwind readonly noinline {
bb1.thread:
- %0 = load %struct.foo** @X, align 4 ; <%struct.foo*> [#uses=1]
+ %0 = load %struct.foo*, %struct.foo** @X, align 4 ; <%struct.foo*> [#uses=1]
br label %bb1
bb1: ; preds = %bb1, %bb1.thread
%i.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ] ; <i32> [#uses=2]
%sum.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %3, %bb1 ] ; <i32> [#uses=1]
%1 = getelementptr %struct.foo, %struct.foo* %0, i32 %i.0.reg2mem.0, i32 0 ; <i32*> [#uses=1]
- %2 = load i32* %1, align 4 ; <i32> [#uses=1]
+ %2 = load i32, i32* %1, align 4 ; <i32> [#uses=1]
%3 = add i32 %2, %sum.0.reg2mem.0 ; <i32> [#uses=2]
%indvar.next = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, 1200 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/GlobalOpt/heap-sra-3.ll b/llvm/test/Transforms/GlobalOpt/heap-sra-3.ll
index 4ae9ec0cfad..6a34364b49f 100644
--- a/llvm/test/Transforms/GlobalOpt/heap-sra-3.ll
+++ b/llvm/test/Transforms/GlobalOpt/heap-sra-3.ll
@@ -20,14 +20,14 @@ declare noalias i8* @malloc(i64)
define i32 @baz() nounwind readonly noinline {
bb1.thread:
- %0 = load %struct.foo** @X, align 4
+ %0 = load %struct.foo*, %struct.foo** @X, align 4
br label %bb1
bb1: ; preds = %bb1, %bb1.thread
%i.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ]
%sum.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %3, %bb1 ]
%1 = getelementptr %struct.foo, %struct.foo* %0, i32 %i.0.reg2mem.0, i32 0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = add i32 %2, %sum.0.reg2mem.0
%indvar.next = add i32 %i.0.reg2mem.0, 1
%exitcond = icmp eq i32 %indvar.next, 1200
diff --git a/llvm/test/Transforms/GlobalOpt/heap-sra-4.ll b/llvm/test/Transforms/GlobalOpt/heap-sra-4.ll
index a6e7578d062..2176b9fcbee 100644
--- a/llvm/test/Transforms/GlobalOpt/heap-sra-4.ll
+++ b/llvm/test/Transforms/GlobalOpt/heap-sra-4.ll
@@ -20,14 +20,14 @@ declare noalias i8* @malloc(i64)
define i32 @baz() nounwind readonly noinline {
bb1.thread:
- %0 = load %struct.foo** @X, align 4
+ %0 = load %struct.foo*, %struct.foo** @X, align 4
br label %bb1
bb1: ; preds = %bb1, %bb1.thread
%i.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ]
%sum.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %3, %bb1 ]
%1 = getelementptr %struct.foo, %struct.foo* %0, i32 %i.0.reg2mem.0, i32 0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = add i32 %2, %sum.0.reg2mem.0
%indvar.next = add i32 %i.0.reg2mem.0, 1
%exitcond = icmp eq i32 %indvar.next, 1200
diff --git a/llvm/test/Transforms/GlobalOpt/heap-sra-phi.ll b/llvm/test/Transforms/GlobalOpt/heap-sra-phi.ll
index 9449827ccae..bf9ca7b5311 100644
--- a/llvm/test/Transforms/GlobalOpt/heap-sra-phi.ll
+++ b/llvm/test/Transforms/GlobalOpt/heap-sra-phi.ll
@@ -18,7 +18,7 @@ declare noalias i8* @malloc(i64)
define i32 @baz() nounwind readonly noinline {
bb1.thread:
- %tmpLD1 = load %struct.foo** @X, align 4 ; <%struct.foo*> [#uses=1]
+ %tmpLD1 = load %struct.foo*, %struct.foo** @X, align 4 ; <%struct.foo*> [#uses=1]
br label %bb1
bb1: ; preds = %bb1, %bb1.thread
@@ -26,14 +26,14 @@ bb1: ; preds = %bb1, %bb1.thread
%i.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ] ; <i32> [#uses=2]
%sum.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %tmp3, %bb1 ] ; <i32> [#uses=1]
%tmp1 = getelementptr %struct.foo, %struct.foo* %tmp, i32 %i.0.reg2mem.0, i32 0 ; <i32*> [#uses=1]
- %tmp2 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
+ %tmp2 = load i32, i32* %tmp1, align 4 ; <i32> [#uses=1]
%tmp6 = add i32 %tmp2, %sum.0.reg2mem.0 ; <i32> [#uses=2]
%tmp4 = getelementptr %struct.foo, %struct.foo* %tmp, i32 %i.0.reg2mem.0, i32 1 ; <i32*> [#uses=1]
- %tmp5 = load i32 * %tmp4
+ %tmp5 = load i32 , i32 * %tmp4
%tmp3 = add i32 %tmp5, %tmp6
%indvar.next = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
- %tmpLD2 = load %struct.foo** @X, align 4 ; <%struct.foo*> [#uses=1]
+ %tmpLD2 = load %struct.foo*, %struct.foo** @X, align 4 ; <%struct.foo*> [#uses=1]
%exitcond = icmp eq i32 %indvar.next, 1200 ; <i1> [#uses=1]
br i1 %exitcond, label %bb2, label %bb1
diff --git a/llvm/test/Transforms/GlobalOpt/integer-bool.ll b/llvm/test/Transforms/GlobalOpt/integer-bool.ll
index abf5fdd2ef3..617febdc016 100644
--- a/llvm/test/Transforms/GlobalOpt/integer-bool.ll
+++ b/llvm/test/Transforms/GlobalOpt/integer-bool.ll
@@ -20,7 +20,7 @@ define void @set2() {
define i1 @get() {
; CHECK-LABEL: @get(
- %A = load i32 addrspace(1) * @G
+ %A = load i32, i32 addrspace(1) * @G
%C = icmp slt i32 %A, 2
ret i1 %C
; CHECK: ret i1 true
diff --git a/llvm/test/Transforms/GlobalOpt/iterate.ll b/llvm/test/Transforms/GlobalOpt/iterate.ll
index 74668742759..056d74e6735 100644
--- a/llvm/test/Transforms/GlobalOpt/iterate.ll
+++ b/llvm/test/Transforms/GlobalOpt/iterate.ll
@@ -4,8 +4,8 @@
@H = internal global { i32* } { i32* @G } ; <{ i32* }*> [#uses=1]
define i32 @loadg() {
- %G = load i32** getelementptr ({ i32* }* @H, i32 0, i32 0) ; <i32*> [#uses=1]
- %GV = load i32* %G ; <i32> [#uses=1]
+ %G = load i32*, i32** getelementptr ({ i32* }* @H, i32 0, i32 0) ; <i32*> [#uses=1]
+ %GV = load i32, i32* %G ; <i32> [#uses=1]
ret i32 %GV
}
diff --git a/llvm/test/Transforms/GlobalOpt/load-store-global.ll b/llvm/test/Transforms/GlobalOpt/load-store-global.ll
index cbd3cdbdba7..e01358e8ed3 100644
--- a/llvm/test/Transforms/GlobalOpt/load-store-global.ll
+++ b/llvm/test/Transforms/GlobalOpt/load-store-global.ll
@@ -4,7 +4,7 @@
; CHECK-NOT: @G
define void @foo() {
- %V = load i32* @G ; <i32> [#uses=1]
+ %V = load i32, i32* @G ; <i32> [#uses=1]
store i32 %V, i32* @G
ret void
; CHECK-LABEL: @foo(
@@ -12,7 +12,7 @@ define void @foo() {
}
define i32 @bar() {
- %X = load i32* @G ; <i32> [#uses=1]
+ %X = load i32, i32* @G ; <i32> [#uses=1]
ret i32 %X
; CHECK-LABEL: @bar(
; CHECK-NEXT: ret i32 17
@@ -29,7 +29,7 @@ define void @qux() nounwind {
%cmp2 = icmp eq i8* null, %b
%cmp3 = icmp eq i64** null, %g
store i64* inttoptr (i64 1 to i64*), i64** @a, align 8
- %l = load i64** @a, align 8
+ %l = load i64*, i64** @a, align 8
ret void
; CHECK-LABEL: @qux(
; CHECK-NOT: store
diff --git a/llvm/test/Transforms/GlobalOpt/malloc-promote-1.ll b/llvm/test/Transforms/GlobalOpt/malloc-promote-1.ll
index 51ccbbd43ca..d5087dea344 100644
--- a/llvm/test/Transforms/GlobalOpt/malloc-promote-1.ll
+++ b/llvm/test/Transforms/GlobalOpt/malloc-promote-1.ll
@@ -8,7 +8,7 @@ define void @init() {
%malloccall = tail call i8* @malloc(i64 4) ; <i8*> [#uses=1]
%P = bitcast i8* %malloccall to i32* ; <i32*> [#uses=1]
store i32* %P, i32** @G
- %GV = load i32** @G ; <i32*> [#uses=1]
+ %GV = load i32*, i32** @G ; <i32*> [#uses=1]
store i32 0, i32* %GV
ret void
}
@@ -16,8 +16,8 @@ define void @init() {
declare noalias i8* @malloc(i64)
define i32 @get() {
- %GV = load i32** @G ; <i32*> [#uses=1]
- %V = load i32* %GV ; <i32> [#uses=1]
+ %GV = load i32*, i32** @G ; <i32*> [#uses=1]
+ %V = load i32, i32* %GV ; <i32> [#uses=1]
ret i32 %V
; CHECK: ret i32 0
}
diff --git a/llvm/test/Transforms/GlobalOpt/malloc-promote-2.ll b/llvm/test/Transforms/GlobalOpt/malloc-promote-2.ll
index 373a7929fa0..335ed82a8cf 100644
--- a/llvm/test/Transforms/GlobalOpt/malloc-promote-2.ll
+++ b/llvm/test/Transforms/GlobalOpt/malloc-promote-2.ll
@@ -10,7 +10,7 @@ define void @t() {
%malloccall = tail call i8* @malloc(i64 mul (i64 100, i64 4))
%P = bitcast i8* %malloccall to i32*
store i32* %P, i32** @G
- %GV = load i32** @G
+ %GV = load i32*, i32** @G
%GVe = getelementptr i32, i32* %GV, i32 40
store i32 20, i32* %GVe
ret void
diff --git a/llvm/test/Transforms/GlobalOpt/malloc-promote-3.ll b/llvm/test/Transforms/GlobalOpt/malloc-promote-3.ll
index b4e7986df64..1e42c3b89a6 100644
--- a/llvm/test/Transforms/GlobalOpt/malloc-promote-3.ll
+++ b/llvm/test/Transforms/GlobalOpt/malloc-promote-3.ll
@@ -9,7 +9,7 @@ define void @t() {
%malloccall = tail call i8* @malloc(i64 mul (i64 100, i64 4)) nobuiltin
%P = bitcast i8* %malloccall to i32*
store i32* %P, i32** @G
- %GV = load i32** @G
+ %GV = load i32*, i32** @G
%GVe = getelementptr i32, i32* %GV, i32 40
store i32 20, i32* %GVe
ret void
diff --git a/llvm/test/Transforms/GlobalOpt/memset-null.ll b/llvm/test/Transforms/GlobalOpt/memset-null.ll
index 53ec7551130..838ac0979e3 100644
--- a/llvm/test/Transforms/GlobalOpt/memset-null.ll
+++ b/llvm/test/Transforms/GlobalOpt/memset-null.ll
@@ -23,7 +23,7 @@ entry:
define internal void @_GLOBAL__I_b() nounwind {
entry:
- %tmp.i.i.i = load i8** @y, align 8
+ %tmp.i.i.i = load i8*, i8** @y, align 8
tail call void @llvm.memset.p0i8.i64(i8* %tmp.i.i.i, i8 0, i64 10, i32 1, i1 false) nounwind
ret void
}
diff --git a/llvm/test/Transforms/GlobalOpt/phi-select.ll b/llvm/test/Transforms/GlobalOpt/phi-select.ll
index cd8a7dc990f..e5cb9819ca0 100644
--- a/llvm/test/Transforms/GlobalOpt/phi-select.ll
+++ b/llvm/test/Transforms/GlobalOpt/phi-select.ll
@@ -8,7 +8,7 @@
define i32 @test1(i1 %C) {
%P = select i1 %C, i32* @X, i32* @Y ; <i32*> [#uses=1]
- %V = load i32* %P ; <i32> [#uses=1]
+ %V = load i32, i32* %P ; <i32> [#uses=1]
ret i32 %V
}
@@ -21,7 +21,7 @@ T: ; preds = %0
Cont: ; preds = %T, %0
%P = phi i32* [ @X, %0 ], [ @Y, %T ] ; <i32*> [#uses=1]
- %V = load i32* %P ; <i32> [#uses=1]
+ %V = load i32, i32* %P ; <i32> [#uses=1]
ret i32 %V
}
diff --git a/llvm/test/Transforms/GlobalOpt/storepointer-compare.ll b/llvm/test/Transforms/GlobalOpt/storepointer-compare.ll
index 09e20a8adb2..bcf7cafd831 100644
--- a/llvm/test/Transforms/GlobalOpt/storepointer-compare.ll
+++ b/llvm/test/Transforms/GlobalOpt/storepointer-compare.ll
@@ -16,7 +16,7 @@ define void @init() {
}
define void @doit() {
- %FP = load void ()** @G ; <void ()*> [#uses=2]
+ %FP = load void ()*, void ()** @G ; <void ()*> [#uses=2]
%CC = icmp eq void ()* %FP, null ; <i1> [#uses=1]
br i1 %CC, label %isNull, label %DoCall
diff --git a/llvm/test/Transforms/GlobalOpt/storepointer.ll b/llvm/test/Transforms/GlobalOpt/storepointer.ll
index 8019076f946..9003004d431 100644
--- a/llvm/test/Transforms/GlobalOpt/storepointer.ll
+++ b/llvm/test/Transforms/GlobalOpt/storepointer.ll
@@ -12,7 +12,7 @@ define void @init() {
}
define void @doit() {
- %FP = load void ()** @G ; <void ()*> [#uses=1]
+ %FP = load void ()*, void ()** @G ; <void ()*> [#uses=1]
call void %FP( )
ret void
}
diff --git a/llvm/test/Transforms/GlobalOpt/tls.ll b/llvm/test/Transforms/GlobalOpt/tls.ll
index 59ae23a4f6a..beea178b6a3 100644
--- a/llvm/test/Transforms/GlobalOpt/tls.ll
+++ b/llvm/test/Transforms/GlobalOpt/tls.ll
@@ -24,14 +24,14 @@ entry:
store i32 0, i32* getelementptr inbounds ([100 x i32]* @x, i64 0, i64 1), align 4
; Read the value of @ip, which now points at x[1] for thread 2.
- %0 = load i32** @ip, align 8
+ %0 = load i32*, i32** @ip, align 8
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
ret i32 %1
; CHECK-LABEL: @f(
; Make sure that the load from @ip hasn't been removed.
-; CHECK: load i32** @ip
+; CHECK: load i32*, i32** @ip
; CHECK: ret
}
diff --git a/llvm/test/Transforms/GlobalOpt/trivialstore.ll b/llvm/test/Transforms/GlobalOpt/trivialstore.ll
index 21437f33b26..f907d8382d5 100644
--- a/llvm/test/Transforms/GlobalOpt/trivialstore.ll
+++ b/llvm/test/Transforms/GlobalOpt/trivialstore.ll
@@ -8,7 +8,7 @@ define void @foo() {
}
define i32 @bar() {
- %X = load i32* @G ; <i32> [#uses=1]
+ %X = load i32, i32* @G ; <i32> [#uses=1]
ret i32 %X
}
diff --git a/llvm/test/Transforms/GlobalOpt/undef-init.ll b/llvm/test/Transforms/GlobalOpt/undef-init.ll
index c1494973950..d304821225e 100644
--- a/llvm/test/Transforms/GlobalOpt/undef-init.ll
+++ b/llvm/test/Transforms/GlobalOpt/undef-init.ll
@@ -5,7 +5,7 @@
define i32 @_Z3foov() {
entry:
- %tmp.1 = load i32* @X.0 ; <i32> [#uses=1]
+ %tmp.1 = load i32, i32* @X.0 ; <i32> [#uses=1]
ret i32 %tmp.1
}
diff --git a/llvm/test/Transforms/GlobalOpt/unnamed-addr.ll b/llvm/test/Transforms/GlobalOpt/unnamed-addr.ll
index c2ce0b9ddd9..85ed829c711 100644
--- a/llvm/test/Transforms/GlobalOpt/unnamed-addr.ll
+++ b/llvm/test/Transforms/GlobalOpt/unnamed-addr.ll
@@ -13,7 +13,7 @@
; CHECK: @e = linkonce_odr global i32 0
define i32 @get_e() {
- %t = load i32* @e
+ %t = load i32, i32* @e
ret i32 %t
}
@@ -25,9 +25,9 @@ define void @set_e(i32 %x) {
define i1 @bah(i64 %i) nounwind readonly optsize ssp {
entry:
%arrayidx4 = getelementptr inbounds [4 x i8], [4 x i8]* @d, i64 0, i64 %i
- %tmp5 = load i8* %arrayidx4, align 1
+ %tmp5 = load i8, i8* %arrayidx4, align 1
%array0 = bitcast [4 x i8]* @d to i8*
- %tmp6 = load i8* %array0, align 1
+ %tmp6 = load i8, i8* %array0, align 1
%cmp = icmp eq i8 %tmp5, %tmp6
ret i1 %cmp
}
@@ -63,6 +63,6 @@ return:
define i32 @zed() {
entry:
- %tmp1 = load i32* @c, align 4
+ %tmp1 = load i32, i32* @c, align 4
ret i32 %tmp1
}
diff --git a/llvm/test/Transforms/GlobalOpt/zeroinitializer-gep-load.ll b/llvm/test/Transforms/GlobalOpt/zeroinitializer-gep-load.ll
index ad16a6485e8..51feb480e3b 100644
--- a/llvm/test/Transforms/GlobalOpt/zeroinitializer-gep-load.ll
+++ b/llvm/test/Transforms/GlobalOpt/zeroinitializer-gep-load.ll
@@ -4,7 +4,7 @@
define i32 @test1(i64 %idx) nounwind {
%arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* @zero, i64 0, i64 %idx
- %l = load i32* %arrayidx
+ %l = load i32, i32* %arrayidx
ret i32 %l
; CHECK-LABEL: @test1(
; CHECK: ret i32 0
diff --git a/llvm/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll b/llvm/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll
index 03ebf0b89b3..735597a52cd 100644
--- a/llvm/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll
+++ b/llvm/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll
@@ -20,11 +20,11 @@ return: ; preds = %entry
define internal i32 @vfu2(%struct.MYstr* byval align 4 %u) nounwind readonly {
entry:
%0 = getelementptr %struct.MYstr, %struct.MYstr* %u, i32 0, i32 1 ; <i32*> [#uses=1]
- %1 = load i32* %0
-; CHECK: load i32* getelementptr inbounds (%struct.MYstr* @mystr, i32 0, i32 1)
+ %1 = load i32, i32* %0
+; CHECK: load i32, i32* getelementptr inbounds (%struct.MYstr* @mystr, i32 0, i32 1)
%2 = getelementptr %struct.MYstr, %struct.MYstr* %u, i32 0, i32 0 ; <i8*> [#uses=1]
- %3 = load i8* %2
-; CHECK: load i8* getelementptr inbounds (%struct.MYstr* @mystr, i32 0, i32 0)
+ %3 = load i8, i8* %2
+; CHECK: load i8, i8* getelementptr inbounds (%struct.MYstr* @mystr, i32 0, i32 0)
%4 = zext i8 %3 to i32
%5 = add i32 %4, %1
ret i32 %5
diff --git a/llvm/test/Transforms/IPConstantProp/dangling-block-address.ll b/llvm/test/Transforms/IPConstantProp/dangling-block-address.ll
index 487375c3618..abd0b0fc313 100644
--- a/llvm/test/Transforms/IPConstantProp/dangling-block-address.ll
+++ b/llvm/test/Transforms/IPConstantProp/dangling-block-address.ll
@@ -30,9 +30,9 @@ end: ; preds = %indirectgoto
indirectgoto: ; preds = %lab0, %entry
%indvar = phi i32 [ %indvar.next, %lab0 ], [ 0, %entry ] ; <i32> [#uses=2]
%pc.addr.0 = getelementptr i32, i32* %pc, i32 %indvar ; <i32*> [#uses=1]
- %tmp1.pn = load i32* %pc.addr.0 ; <i32> [#uses=1]
+ %tmp1.pn = load i32, i32* %pc.addr.0 ; <i32> [#uses=1]
%indirect.goto.dest.in = getelementptr inbounds [2 x i8*], [2 x i8*]* @bar.l, i32 0, i32 %tmp1.pn ; <i8**> [#uses=1]
- %indirect.goto.dest = load i8** %indirect.goto.dest.in ; <i8*> [#uses=1]
+ %indirect.goto.dest = load i8*, i8** %indirect.goto.dest.in ; <i8*> [#uses=1]
indirectbr i8* %indirect.goto.dest, [label %lab0, label %end]
}
diff --git a/llvm/test/Transforms/IPConstantProp/global.ll b/llvm/test/Transforms/IPConstantProp/global.ll
index 67152937bee..d3ba14658f6 100644
--- a/llvm/test/Transforms/IPConstantProp/global.ll
+++ b/llvm/test/Transforms/IPConstantProp/global.ll
@@ -4,7 +4,7 @@
define void @_Z7test1f1v() nounwind {
entry:
- %tmp = load i32* @_ZL6test1g, align 4
+ %tmp = load i32, i32* @_ZL6test1g, align 4
%cmp = icmp eq i32 %tmp, 0
br i1 %cmp, label %if.then, label %if.end
@@ -21,6 +21,6 @@ if.end: ; preds = %if.then, %entry
; CHECK-NEXT: ret i32 42
define i32 @_Z7test1f2v() nounwind {
entry:
- %tmp = load i32* @_ZL6test1g, align 4
+ %tmp = load i32, i32* @_ZL6test1g, align 4
ret i32 %tmp
}
diff --git a/llvm/test/Transforms/IPConstantProp/return-argument.ll b/llvm/test/Transforms/IPConstantProp/return-argument.ll
index 2a14f05985d..927531b996e 100644
--- a/llvm/test/Transforms/IPConstantProp/return-argument.ll
+++ b/llvm/test/Transforms/IPConstantProp/return-argument.ll
@@ -4,7 +4,7 @@
;; This function returns its second argument on all return statements
define internal i32* @incdec(i1 %C, i32* %V) {
- %X = load i32* %V
+ %X = load i32, i32* %V
br i1 %C, label %T, label %F
T: ; preds = %0
diff --git a/llvm/test/Transforms/IRCE/decrementing-loop.ll b/llvm/test/Transforms/IRCE/decrementing-loop.ll
index e70dacd83e5..6ff3e76c754 100644
--- a/llvm/test/Transforms/IRCE/decrementing-loop.ll
+++ b/llvm/test/Transforms/IRCE/decrementing-loop.ll
@@ -2,7 +2,7 @@
define void @decrementing_loop(i32 *%arr, i32 *%a_len_ptr, i32 %n) {
entry:
- %len = load i32* %a_len_ptr, !range !0
+ %len = load i32, i32* %a_len_ptr, !range !0
%first.itr.check = icmp sgt i32 %n, 0
%start = sub i32 %n, 1
br i1 %first.itr.check, label %loop, label %exit
diff --git a/llvm/test/Transforms/IRCE/low-becount.ll b/llvm/test/Transforms/IRCE/low-becount.ll
index c2eea08c597..89b91d6496a 100644
--- a/llvm/test/Transforms/IRCE/low-becount.ll
+++ b/llvm/test/Transforms/IRCE/low-becount.ll
@@ -4,7 +4,7 @@
define void @low_profiled_be_count(i32 *%arr, i32 *%a_len_ptr, i32 %n) {
entry:
- %len = load i32* %a_len_ptr, !range !0
+ %len = load i32, i32* %a_len_ptr, !range !0
%first.itr.check = icmp sgt i32 %n, 0
br i1 %first.itr.check, label %loop, label %exit
diff --git a/llvm/test/Transforms/IRCE/multiple-access-no-preloop.ll b/llvm/test/Transforms/IRCE/multiple-access-no-preloop.ll
index 721dbe7f828..9963a57185b 100644
--- a/llvm/test/Transforms/IRCE/multiple-access-no-preloop.ll
+++ b/llvm/test/Transforms/IRCE/multiple-access-no-preloop.ll
@@ -4,8 +4,8 @@ define void @multiple_access_no_preloop(
i32* %arr_a, i32* %a_len_ptr, i32* %arr_b, i32* %b_len_ptr, i32 %n) {
entry:
- %len.a = load i32* %a_len_ptr, !range !0
- %len.b = load i32* %b_len_ptr, !range !0
+ %len.a = load i32, i32* %a_len_ptr, !range !0
+ %len.b = load i32, i32* %b_len_ptr, !range !0
%first.itr.check = icmp sgt i32 %n, 0
br i1 %first.itr.check, label %loop, label %exit
diff --git a/llvm/test/Transforms/IRCE/not-likely-taken.ll b/llvm/test/Transforms/IRCE/not-likely-taken.ll
index 5f623f51e18..50a64a28632 100644
--- a/llvm/test/Transforms/IRCE/not-likely-taken.ll
+++ b/llvm/test/Transforms/IRCE/not-likely-taken.ll
@@ -6,8 +6,8 @@ define void @multiple_access_no_preloop(
i32* %arr_a, i32* %a_len_ptr, i32* %arr_b, i32* %b_len_ptr, i32 %n) {
entry:
- %len.a = load i32* %a_len_ptr, !range !0
- %len.b = load i32* %b_len_ptr, !range !0
+ %len.a = load i32, i32* %a_len_ptr, !range !0
+ %len.b = load i32, i32* %b_len_ptr, !range !0
%first.itr.check = icmp sgt i32 %n, 0
br i1 %first.itr.check, label %loop, label %exit
diff --git a/llvm/test/Transforms/IRCE/single-access-no-preloop.ll b/llvm/test/Transforms/IRCE/single-access-no-preloop.ll
index 60485c88d5a..4824d9560c6 100644
--- a/llvm/test/Transforms/IRCE/single-access-no-preloop.ll
+++ b/llvm/test/Transforms/IRCE/single-access-no-preloop.ll
@@ -2,7 +2,7 @@
define void @single_access_no_preloop_no_offset(i32 *%arr, i32 *%a_len_ptr, i32 %n) {
entry:
- %len = load i32* %a_len_ptr, !range !0
+ %len = load i32, i32* %a_len_ptr, !range !0
%first.itr.check = icmp sgt i32 %n, 0
br i1 %first.itr.check, label %loop, label %exit
@@ -57,7 +57,7 @@ define void @single_access_no_preloop_no_offset(i32 *%arr, i32 *%a_len_ptr, i32
define void @single_access_no_preloop_with_offset(i32 *%arr, i32 *%a_len_ptr, i32 %n) {
entry:
- %len = load i32* %a_len_ptr, !range !0
+ %len = load i32, i32* %a_len_ptr, !range !0
%first.itr.check = icmp sgt i32 %n, 0
br i1 %first.itr.check, label %loop, label %exit
diff --git a/llvm/test/Transforms/IRCE/single-access-with-preloop.ll b/llvm/test/Transforms/IRCE/single-access-with-preloop.ll
index c0d86f04060..e581014ad18 100644
--- a/llvm/test/Transforms/IRCE/single-access-with-preloop.ll
+++ b/llvm/test/Transforms/IRCE/single-access-with-preloop.ll
@@ -2,7 +2,7 @@
define void @single_access_with_preloop(i32 *%arr, i32 *%a_len_ptr, i32 %n, i32 %offset) {
entry:
- %len = load i32* %a_len_ptr, !range !0
+ %len = load i32, i32* %a_len_ptr, !range !0
%first.itr.check = icmp sgt i32 %n, 0
br i1 %first.itr.check, label %loop, label %exit
diff --git a/llvm/test/Transforms/IRCE/unhandled.ll b/llvm/test/Transforms/IRCE/unhandled.ll
index 098642afee2..0d5ff953aa3 100644
--- a/llvm/test/Transforms/IRCE/unhandled.ll
+++ b/llvm/test/Transforms/IRCE/unhandled.ll
@@ -7,7 +7,7 @@ define void @general_affine_expressions(i32 *%arr, i32 *%a_len_ptr, i32 %n,
i32 %scale, i32 %offset) {
; CHECK-NOT: constrained Loop at depth
entry:
- %len = load i32* %a_len_ptr, !range !0
+ %len = load i32, i32* %a_len_ptr, !range !0
%first.itr.check = icmp sgt i32 %n, 0
br i1 %first.itr.check, label %loop, label %exit
diff --git a/llvm/test/Transforms/IRCE/with-parent-loops.ll b/llvm/test/Transforms/IRCE/with-parent-loops.ll
index 449a0773806..00af20b964f 100644
--- a/llvm/test/Transforms/IRCE/with-parent-loops.ll
+++ b/llvm/test/Transforms/IRCE/with-parent-loops.ll
@@ -8,7 +8,7 @@ define void @inner_loop(i32* %arr, i32* %a_len_ptr, i32 %n) #0 {
; CHECK: irce: in function inner_loop: constrained Loop at depth 1 containing: %loop<header><exiting>,%in.bounds<latch><exiting>
entry:
- %len = load i32* %a_len_ptr, !range !0
+ %len = load i32, i32* %a_len_ptr, !range !0
%first.itr.check = icmp sgt i32 %n, 0
br i1 %first.itr.check, label %loop, label %exit
@@ -42,7 +42,7 @@ loop: ; preds = %inner_loop.exit, %e
%idx = phi i32 [ 0, %entry ], [ %idx.next, %inner_loop.exit ]
%idx.next = add i32 %idx, 1
%next = icmp ult i32 %idx.next, %parent.count
- %len.i = load i32* %a_len_ptr, !range !0
+ %len.i = load i32, i32* %a_len_ptr, !range !0
%first.itr.check.i = icmp sgt i32 %n, 0
br i1 %first.itr.check.i, label %loop.i, label %exit.i
@@ -88,7 +88,7 @@ loop.i: ; preds = %inner_loop.exit.i,
%idx.i = phi i32 [ 0, %loop ], [ %idx.next.i, %inner_loop.exit.i ]
%idx.next.i = add i32 %idx.i, 1
%next.i = icmp ult i32 %idx.next.i, %parent.count
- %len.i.i = load i32* %a_len_ptr, !range !0
+ %len.i.i = load i32, i32* %a_len_ptr, !range !0
%first.itr.check.i.i = icmp sgt i32 %n, 0
br i1 %first.itr.check.i.i, label %loop.i.i, label %exit.i.i
@@ -132,7 +132,7 @@ loop: ; preds = %inner_loop.exit12,
%idx = phi i32 [ 0, %entry ], [ %idx.next, %inner_loop.exit12 ]
%idx.next = add i32 %idx, 1
%next = icmp ult i32 %idx.next, %parent.count
- %len.i = load i32* %a_len_ptr, !range !0
+ %len.i = load i32, i32* %a_len_ptr, !range !0
%first.itr.check.i = icmp sgt i32 %n, 0
br i1 %first.itr.check.i, label %loop.i, label %exit.i
@@ -155,7 +155,7 @@ exit.i: ; preds = %in.bounds.i, %loop
br label %inner_loop.exit
inner_loop.exit: ; preds = %exit.i, %out.of.bounds.i
- %len.i1 = load i32* %a_len_ptr, !range !0
+ %len.i1 = load i32, i32* %a_len_ptr, !range !0
%first.itr.check.i2 = icmp sgt i32 %n, 0
br i1 %first.itr.check.i2, label %loop.i6, label %exit.i11
@@ -202,7 +202,7 @@ loop.i: ; preds = %inner_loop.exit.i,
%idx.i = phi i32 [ 0, %loop ], [ %idx.next.i, %inner_loop.exit.i ]
%idx.next.i = add i32 %idx.i, 1
%next.i = icmp ult i32 %idx.next.i, %parent.count
- %len.i.i = load i32* %a_len_ptr, !range !0
+ %len.i.i = load i32, i32* %a_len_ptr, !range !0
%first.itr.check.i.i = icmp sgt i32 %n, 0
br i1 %first.itr.check.i.i, label %loop.i.i, label %exit.i.i
@@ -234,7 +234,7 @@ loop.i6: ; preds = %inner_loop.exit.i16
%idx.i1 = phi i32 [ 0, %with_parent.exit ], [ %idx.next.i2, %inner_loop.exit.i16 ]
%idx.next.i2 = add i32 %idx.i1, 1
%next.i3 = icmp ult i32 %idx.next.i2, %parent.count
- %len.i.i4 = load i32* %a_len_ptr, !range !0
+ %len.i.i4 = load i32, i32* %a_len_ptr, !range !0
%first.itr.check.i.i5 = icmp sgt i32 %n, 0
br i1 %first.itr.check.i.i5, label %loop.i.i10, label %exit.i.i15
@@ -278,7 +278,7 @@ loop: ; preds = %with_parent.exit, %
%idx = phi i32 [ 0, %entry ], [ %idx.next, %with_parent.exit ]
%idx.next = add i32 %idx, 1
%next = icmp ult i32 %idx.next, %grandparent.count
- %len.i = load i32* %a_len_ptr, !range !0
+ %len.i = load i32, i32* %a_len_ptr, !range !0
%first.itr.check.i = icmp sgt i32 %n, 0
br i1 %first.itr.check.i, label %loop.i, label %exit.i
@@ -307,7 +307,7 @@ loop.i4: ; preds = %inner_loop.exit.i,
%idx.i1 = phi i32 [ 0, %inner_loop.exit ], [ %idx.next.i2, %inner_loop.exit.i ]
%idx.next.i2 = add i32 %idx.i1, 1
%next.i3 = icmp ult i32 %idx.next.i2, %parent.count
- %len.i.i = load i32* %a_len_ptr, !range !0
+ %len.i.i = load i32, i32* %a_len_ptr, !range !0
%first.itr.check.i.i = icmp sgt i32 %n, 0
br i1 %first.itr.check.i.i, label %loop.i.i, label %exit.i.i
diff --git a/llvm/test/Transforms/IndVarSimplify/2005-02-17-TruncateExprCrash.ll b/llvm/test/Transforms/IndVarSimplify/2005-02-17-TruncateExprCrash.ll
index ce043beb7fd..16ad635a920 100644
--- a/llvm/test/Transforms/IndVarSimplify/2005-02-17-TruncateExprCrash.ll
+++ b/llvm/test/Transforms/IndVarSimplify/2005-02-17-TruncateExprCrash.ll
@@ -45,7 +45,7 @@ then.4: ; preds = %_ZNK7QString2atEi.exit
ret void
endif.4: ; preds = %_ZNK7QString2atEi.exit
- %tmp.115 = load i8* null ; <i8> [#uses=1]
+ %tmp.115 = load i8, i8* null ; <i8> [#uses=1]
br i1 false, label %loopexit.1, label %no_exit.0
no_exit.0: ; preds = %no_exit.0, %endif.4
diff --git a/llvm/test/Transforms/IndVarSimplify/2006-06-16-Indvar-LCSSA-Crash.ll b/llvm/test/Transforms/IndVarSimplify/2006-06-16-Indvar-LCSSA-Crash.ll
index 36ec2b81ba7..2d40f88d0d0 100644
--- a/llvm/test/Transforms/IndVarSimplify/2006-06-16-Indvar-LCSSA-Crash.ll
+++ b/llvm/test/Transforms/IndVarSimplify/2006-06-16-Indvar-LCSSA-Crash.ll
@@ -6,9 +6,9 @@ endif.0:
no_exit.30: ; preds = %no_exit.30, %endif.0
%x.12.0 = phi i32 [ %inc.28, %no_exit.30 ], [ -2, %endif.0 ] ; <i32> [#uses=1]
- %tmp.583 = load i16* null ; <i16> [#uses=1]
+ %tmp.583 = load i16, i16* null ; <i16> [#uses=1]
%tmp.584 = zext i16 %tmp.583 to i32 ; <i32> [#uses=1]
- %tmp.588 = load i32* null ; <i32> [#uses=1]
+ %tmp.588 = load i32, i32* null ; <i32> [#uses=1]
%tmp.589 = mul i32 %tmp.584, %tmp.588 ; <i32> [#uses=1]
%tmp.591 = add i32 %tmp.589, 0 ; <i32> [#uses=1]
%inc.28 = add i32 %x.12.0, 1 ; <i32> [#uses=2]
diff --git a/llvm/test/Transforms/IndVarSimplify/2007-01-06-TripCount.ll b/llvm/test/Transforms/IndVarSimplify/2007-01-06-TripCount.ll
index 56e9ca65ec8..3dda657a6d7 100644
--- a/llvm/test/Transforms/IndVarSimplify/2007-01-06-TripCount.ll
+++ b/llvm/test/Transforms/IndVarSimplify/2007-01-06-TripCount.ll
@@ -25,7 +25,7 @@ cond_true: ; preds = %bb2
cond_next: ; preds = %bb2
%tmp2 = getelementptr [5 x i8], [5 x i8]* @foo, i32 0, i32 %i.0 ; <i8*> [#uses=1]
- %tmp3 = load i8* %tmp2 ; <i8> [#uses=1]
+ %tmp3 = load i8, i8* %tmp2 ; <i8> [#uses=1]
%tmp5 = icmp eq i8 %tmp3, 0 ; <i1> [#uses=1]
br i1 %tmp5, label %bb6, label %bb
diff --git a/llvm/test/Transforms/IndVarSimplify/2008-09-02-IVType.ll b/llvm/test/Transforms/IndVarSimplify/2008-09-02-IVType.ll
index 624567d10fa..4c0c3279232 100644
--- a/llvm/test/Transforms/IndVarSimplify/2008-09-02-IVType.ll
+++ b/llvm/test/Transforms/IndVarSimplify/2008-09-02-IVType.ll
@@ -19,7 +19,7 @@
define signext i16 @ExtractBufferedBlocksIgnored(%struct.JPEGGlobals* %globp) nounwind {
entry:
%tmp4311 = getelementptr %struct.JPEGGlobals, %struct.JPEGGlobals* %globp, i32 0, i32 70 ; <i32*> [#uses=1]
- %tmp4412 = load i32* %tmp4311, align 16 ; <i32> [#uses=2]
+ %tmp4412 = load i32, i32* %tmp4311, align 16 ; <i32> [#uses=2]
%tmp4613 = icmp sgt i32 %tmp4412, 0 ; <i1> [#uses=1]
br i1 %tmp4613, label %bb, label %bb49
@@ -27,9 +27,9 @@ bb: ; preds = %bb28, %entry
%component.09 = phi i16 [ 0, %entry ], [ %tmp37, %bb28 ] ; <i16> [#uses=2]
%tmp12 = sext i16 %component.09 to i32 ; <i32> [#uses=2]
%tmp6 = getelementptr %struct.JPEGGlobals, %struct.JPEGGlobals* %globp, i32 0, i32 77, i32 %tmp12 ; <i16**> [#uses=2]
- %tmp7 = load i16** %tmp6, align 4 ; <i16*> [#uses=2]
+ %tmp7 = load i16*, i16** %tmp6, align 4 ; <i16*> [#uses=2]
%tmp235 = getelementptr %struct.JPEGGlobals, %struct.JPEGGlobals* %globp, i32 0, i32 71, i32 %tmp12 ; <i32*> [#uses=1]
- %tmp246 = load i32* %tmp235, align 4 ; <i32> [#uses=2]
+ %tmp246 = load i32, i32* %tmp235, align 4 ; <i32> [#uses=2]
%tmp267 = icmp sgt i32 %tmp246, 0 ; <i1> [#uses=1]
br i1 %tmp267, label %bb8, label %bb28
diff --git a/llvm/test/Transforms/IndVarSimplify/2008-10-03-CouldNotCompute.ll b/llvm/test/Transforms/IndVarSimplify/2008-10-03-CouldNotCompute.ll
index 23e78849613..1248154c155 100644
--- a/llvm/test/Transforms/IndVarSimplify/2008-10-03-CouldNotCompute.ll
+++ b/llvm/test/Transforms/IndVarSimplify/2008-10-03-CouldNotCompute.ll
@@ -8,7 +8,7 @@ entry:
br i1 false, label %bb.nph, label %return
bb.nph: ; preds = %entry
- %0 = load i32* @foo, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @foo, align 4 ; <i32> [#uses=1]
%1 = sext i32 %0 to i64 ; <i64> [#uses=1]
br label %bb
diff --git a/llvm/test/Transforms/IndVarSimplify/2009-04-14-shorten_iv_vars.ll b/llvm/test/Transforms/IndVarSimplify/2009-04-14-shorten_iv_vars.ll
index befdcb56593..ab2af329f4d 100644
--- a/llvm/test/Transforms/IndVarSimplify/2009-04-14-shorten_iv_vars.ll
+++ b/llvm/test/Transforms/IndVarSimplify/2009-04-14-shorten_iv_vars.ll
@@ -15,92 +15,92 @@ bb1.thread:
bb1: ; preds = %bb1, %bb1.thread
%i.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %84, %bb1 ] ; <i32> [#uses=19]
- %0 = load i32** @a, align 8 ; <i32*> [#uses=1]
- %1 = load i32** @b, align 8 ; <i32*> [#uses=1]
+ %0 = load i32*, i32** @a, align 8 ; <i32*> [#uses=1]
+ %1 = load i32*, i32** @b, align 8 ; <i32*> [#uses=1]
%2 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
%3 = getelementptr i32, i32* %1, i64 %2 ; <i32*> [#uses=1]
- %4 = load i32* %3, align 1 ; <i32> [#uses=1]
- %5 = load i32** @c, align 8 ; <i32*> [#uses=1]
+ %4 = load i32, i32* %3, align 1 ; <i32> [#uses=1]
+ %5 = load i32*, i32** @c, align 8 ; <i32*> [#uses=1]
%6 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
%7 = getelementptr i32, i32* %5, i64 %6 ; <i32*> [#uses=1]
- %8 = load i32* %7, align 1 ; <i32> [#uses=1]
+ %8 = load i32, i32* %7, align 1 ; <i32> [#uses=1]
%9 = add i32 %8, %4 ; <i32> [#uses=1]
%10 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
%11 = getelementptr i32, i32* %0, i64 %10 ; <i32*> [#uses=1]
store i32 %9, i32* %11, align 1
- %12 = load i32** @a, align 8 ; <i32*> [#uses=1]
+ %12 = load i32*, i32** @a, align 8 ; <i32*> [#uses=1]
%13 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
- %14 = load i32** @b, align 8 ; <i32*> [#uses=1]
+ %14 = load i32*, i32** @b, align 8 ; <i32*> [#uses=1]
%15 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%16 = sext i32 %15 to i64 ; <i64> [#uses=1]
%17 = getelementptr i32, i32* %14, i64 %16 ; <i32*> [#uses=1]
- %18 = load i32* %17, align 1 ; <i32> [#uses=1]
- %19 = load i32** @c, align 8 ; <i32*> [#uses=1]
+ %18 = load i32, i32* %17, align 1 ; <i32> [#uses=1]
+ %19 = load i32*, i32** @c, align 8 ; <i32*> [#uses=1]
%20 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%21 = sext i32 %20 to i64 ; <i64> [#uses=1]
%22 = getelementptr i32, i32* %19, i64 %21 ; <i32*> [#uses=1]
- %23 = load i32* %22, align 1 ; <i32> [#uses=1]
+ %23 = load i32, i32* %22, align 1 ; <i32> [#uses=1]
%24 = add i32 %23, %18 ; <i32> [#uses=1]
%25 = sext i32 %13 to i64 ; <i64> [#uses=1]
%26 = getelementptr i32, i32* %12, i64 %25 ; <i32*> [#uses=1]
store i32 %24, i32* %26, align 1
- %27 = load i32** @a, align 8 ; <i32*> [#uses=1]
+ %27 = load i32*, i32** @a, align 8 ; <i32*> [#uses=1]
%28 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
- %29 = load i32** @b, align 8 ; <i32*> [#uses=1]
+ %29 = load i32*, i32** @b, align 8 ; <i32*> [#uses=1]
%30 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%31 = sext i32 %30 to i64 ; <i64> [#uses=1]
%32 = getelementptr i32, i32* %29, i64 %31 ; <i32*> [#uses=1]
- %33 = load i32* %32, align 1 ; <i32> [#uses=1]
- %34 = load i32** @c, align 8 ; <i32*> [#uses=1]
+ %33 = load i32, i32* %32, align 1 ; <i32> [#uses=1]
+ %34 = load i32*, i32** @c, align 8 ; <i32*> [#uses=1]
%35 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%36 = sext i32 %35 to i64 ; <i64> [#uses=1]
%37 = getelementptr i32, i32* %34, i64 %36 ; <i32*> [#uses=1]
- %38 = load i32* %37, align 1 ; <i32> [#uses=1]
+ %38 = load i32, i32* %37, align 1 ; <i32> [#uses=1]
%39 = add i32 %38, %33 ; <i32> [#uses=1]
%40 = sext i32 %28 to i64 ; <i64> [#uses=1]
%41 = getelementptr i32, i32* %27, i64 %40 ; <i32*> [#uses=1]
store i32 %39, i32* %41, align 1
- %42 = load i32** @d, align 8 ; <i32*> [#uses=1]
- %43 = load i32** @e, align 8 ; <i32*> [#uses=1]
+ %42 = load i32*, i32** @d, align 8 ; <i32*> [#uses=1]
+ %43 = load i32*, i32** @e, align 8 ; <i32*> [#uses=1]
%44 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
%45 = getelementptr i32, i32* %43, i64 %44 ; <i32*> [#uses=1]
- %46 = load i32* %45, align 1 ; <i32> [#uses=1]
- %47 = load i32** @f, align 8 ; <i32*> [#uses=1]
+ %46 = load i32, i32* %45, align 1 ; <i32> [#uses=1]
+ %47 = load i32*, i32** @f, align 8 ; <i32*> [#uses=1]
%48 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
%49 = getelementptr i32, i32* %47, i64 %48 ; <i32*> [#uses=1]
- %50 = load i32* %49, align 1 ; <i32> [#uses=1]
+ %50 = load i32, i32* %49, align 1 ; <i32> [#uses=1]
%51 = add i32 %50, %46 ; <i32> [#uses=1]
%52 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
%53 = getelementptr i32, i32* %42, i64 %52 ; <i32*> [#uses=1]
store i32 %51, i32* %53, align 1
- %54 = load i32** @d, align 8 ; <i32*> [#uses=1]
+ %54 = load i32*, i32** @d, align 8 ; <i32*> [#uses=1]
%55 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
- %56 = load i32** @e, align 8 ; <i32*> [#uses=1]
+ %56 = load i32*, i32** @e, align 8 ; <i32*> [#uses=1]
%57 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%58 = sext i32 %57 to i64 ; <i64> [#uses=1]
%59 = getelementptr i32, i32* %56, i64 %58 ; <i32*> [#uses=1]
- %60 = load i32* %59, align 1 ; <i32> [#uses=1]
- %61 = load i32** @f, align 8 ; <i32*> [#uses=1]
+ %60 = load i32, i32* %59, align 1 ; <i32> [#uses=1]
+ %61 = load i32*, i32** @f, align 8 ; <i32*> [#uses=1]
%62 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%63 = sext i32 %62 to i64 ; <i64> [#uses=1]
%64 = getelementptr i32, i32* %61, i64 %63 ; <i32*> [#uses=1]
- %65 = load i32* %64, align 1 ; <i32> [#uses=1]
+ %65 = load i32, i32* %64, align 1 ; <i32> [#uses=1]
%66 = add i32 %65, %60 ; <i32> [#uses=1]
%67 = sext i32 %55 to i64 ; <i64> [#uses=1]
%68 = getelementptr i32, i32* %54, i64 %67 ; <i32*> [#uses=1]
store i32 %66, i32* %68, align 1
- %69 = load i32** @d, align 8 ; <i32*> [#uses=1]
+ %69 = load i32*, i32** @d, align 8 ; <i32*> [#uses=1]
%70 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
- %71 = load i32** @e, align 8 ; <i32*> [#uses=1]
+ %71 = load i32*, i32** @e, align 8 ; <i32*> [#uses=1]
%72 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%73 = sext i32 %72 to i64 ; <i64> [#uses=1]
%74 = getelementptr i32, i32* %71, i64 %73 ; <i32*> [#uses=1]
- %75 = load i32* %74, align 1 ; <i32> [#uses=1]
- %76 = load i32** @f, align 8 ; <i32*> [#uses=1]
+ %75 = load i32, i32* %74, align 1 ; <i32> [#uses=1]
+ %76 = load i32*, i32** @f, align 8 ; <i32*> [#uses=1]
%77 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%78 = sext i32 %77 to i64 ; <i64> [#uses=1]
%79 = getelementptr i32, i32* %76, i64 %78 ; <i32*> [#uses=1]
- %80 = load i32* %79, align 1 ; <i32> [#uses=1]
+ %80 = load i32, i32* %79, align 1 ; <i32> [#uses=1]
%81 = add i32 %80, %75 ; <i32> [#uses=1]
%82 = sext i32 %70 to i64 ; <i64> [#uses=1]
%83 = getelementptr i32, i32* %69, i64 %82 ; <i32*> [#uses=1]
diff --git a/llvm/test/Transforms/IndVarSimplify/2009-04-15-shorten-iv-vars-2.ll b/llvm/test/Transforms/IndVarSimplify/2009-04-15-shorten-iv-vars-2.ll
index 6815f31c3bc..921a3937aa5 100644
--- a/llvm/test/Transforms/IndVarSimplify/2009-04-15-shorten-iv-vars-2.ll
+++ b/llvm/test/Transforms/IndVarSimplify/2009-04-15-shorten-iv-vars-2.ll
@@ -29,123 +29,123 @@ bb1.thread:
bb1: ; preds = %bb1, %bb1.thread
%i.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %116, %bb1 ] ; <i32> [#uses=22]
- %0 = load i32** @a, align 8 ; <i32*> [#uses=1]
+ %0 = load i32*, i32** @a, align 8 ; <i32*> [#uses=1]
%1 = and i32 %i.0.reg2mem.0, 15 ; <i32> [#uses=1]
- %2 = load i32** @b, align 8 ; <i32*> [#uses=1]
+ %2 = load i32*, i32** @b, align 8 ; <i32*> [#uses=1]
%3 = and i32 %i.0.reg2mem.0, 15 ; <i32> [#uses=1]
%4 = zext i32 %3 to i64 ; <i64> [#uses=1]
%5 = getelementptr i32, i32* %2, i64 %4 ; <i32*> [#uses=1]
- %6 = load i32* %5, align 1 ; <i32> [#uses=1]
- %7 = load i32** @c, align 8 ; <i32*> [#uses=1]
+ %6 = load i32, i32* %5, align 1 ; <i32> [#uses=1]
+ %7 = load i32*, i32** @c, align 8 ; <i32*> [#uses=1]
%8 = and i32 %i.0.reg2mem.0, 15 ; <i32> [#uses=1]
%9 = zext i32 %8 to i64 ; <i64> [#uses=1]
%10 = getelementptr i32, i32* %7, i64 %9 ; <i32*> [#uses=1]
- %11 = load i32* %10, align 1 ; <i32> [#uses=1]
+ %11 = load i32, i32* %10, align 1 ; <i32> [#uses=1]
%12 = add i32 %11, %6 ; <i32> [#uses=1]
%13 = zext i32 %1 to i64 ; <i64> [#uses=1]
%14 = getelementptr i32, i32* %0, i64 %13 ; <i32*> [#uses=1]
store i32 %12, i32* %14, align 1
- %15 = load i32** @a, align 8 ; <i32*> [#uses=1]
+ %15 = load i32*, i32** @a, align 8 ; <i32*> [#uses=1]
%16 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%17 = and i32 %16, 15 ; <i32> [#uses=1]
- %18 = load i32** @b, align 8 ; <i32*> [#uses=1]
+ %18 = load i32*, i32** @b, align 8 ; <i32*> [#uses=1]
%19 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%20 = and i32 %19, 15 ; <i32> [#uses=1]
%21 = zext i32 %20 to i64 ; <i64> [#uses=1]
%22 = getelementptr i32, i32* %18, i64 %21 ; <i32*> [#uses=1]
- %23 = load i32* %22, align 1 ; <i32> [#uses=1]
- %24 = load i32** @c, align 8 ; <i32*> [#uses=1]
+ %23 = load i32, i32* %22, align 1 ; <i32> [#uses=1]
+ %24 = load i32*, i32** @c, align 8 ; <i32*> [#uses=1]
%25 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%26 = and i32 %25, 15 ; <i32> [#uses=1]
%27 = zext i32 %26 to i64 ; <i64> [#uses=1]
%28 = getelementptr i32, i32* %24, i64 %27 ; <i32*> [#uses=1]
- %29 = load i32* %28, align 1 ; <i32> [#uses=1]
+ %29 = load i32, i32* %28, align 1 ; <i32> [#uses=1]
%30 = add i32 %29, %23 ; <i32> [#uses=1]
%31 = zext i32 %17 to i64 ; <i64> [#uses=1]
%32 = getelementptr i32, i32* %15, i64 %31 ; <i32*> [#uses=1]
store i32 %30, i32* %32, align 1
- %33 = load i32** @a, align 8 ; <i32*> [#uses=1]
+ %33 = load i32*, i32** @a, align 8 ; <i32*> [#uses=1]
%34 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%35 = and i32 %34, 15 ; <i32> [#uses=1]
- %36 = load i32** @b, align 8 ; <i32*> [#uses=1]
+ %36 = load i32*, i32** @b, align 8 ; <i32*> [#uses=1]
%37 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%38 = and i32 %37, 15 ; <i32> [#uses=1]
%39 = zext i32 %38 to i64 ; <i64> [#uses=1]
%40 = getelementptr i32, i32* %36, i64 %39 ; <i32*> [#uses=1]
- %41 = load i32* %40, align 1 ; <i32> [#uses=1]
- %42 = load i32** @c, align 8 ; <i32*> [#uses=1]
+ %41 = load i32, i32* %40, align 1 ; <i32> [#uses=1]
+ %42 = load i32*, i32** @c, align 8 ; <i32*> [#uses=1]
%43 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%44 = and i32 %43, 15 ; <i32> [#uses=1]
%45 = zext i32 %44 to i64 ; <i64> [#uses=1]
%46 = getelementptr i32, i32* %42, i64 %45 ; <i32*> [#uses=1]
- %47 = load i32* %46, align 1 ; <i32> [#uses=1]
+ %47 = load i32, i32* %46, align 1 ; <i32> [#uses=1]
%48 = add i32 %47, %41 ; <i32> [#uses=1]
%49 = zext i32 %35 to i64 ; <i64> [#uses=1]
%50 = getelementptr i32, i32* %33, i64 %49 ; <i32*> [#uses=1]
store i32 %48, i32* %50, align 1
- %51 = load i32** @d, align 8 ; <i32*> [#uses=1]
+ %51 = load i32*, i32** @d, align 8 ; <i32*> [#uses=1]
%52 = and i32 %i.0.reg2mem.0, 15 ; <i32> [#uses=1]
- %53 = load i32** @e, align 8 ; <i32*> [#uses=1]
+ %53 = load i32*, i32** @e, align 8 ; <i32*> [#uses=1]
%54 = and i32 %i.0.reg2mem.0, 15 ; <i32> [#uses=1]
%55 = zext i32 %54 to i64 ; <i64> [#uses=1]
%56 = getelementptr i32, i32* %53, i64 %55 ; <i32*> [#uses=1]
- %57 = load i32* %56, align 1 ; <i32> [#uses=1]
- %58 = load i32** @f, align 8 ; <i32*> [#uses=1]
+ %57 = load i32, i32* %56, align 1 ; <i32> [#uses=1]
+ %58 = load i32*, i32** @f, align 8 ; <i32*> [#uses=1]
%59 = and i32 %i.0.reg2mem.0, 15 ; <i32> [#uses=1]
%60 = zext i32 %59 to i64 ; <i64> [#uses=1]
%61 = getelementptr i32, i32* %58, i64 %60 ; <i32*> [#uses=1]
- %62 = load i32* %61, align 1 ; <i32> [#uses=1]
+ %62 = load i32, i32* %61, align 1 ; <i32> [#uses=1]
%63 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
%64 = getelementptr [256 x i32], [256 x i32]* @K, i64 0, i64 %63 ; <i32*> [#uses=1]
- %65 = load i32* %64, align 4 ; <i32> [#uses=1]
+ %65 = load i32, i32* %64, align 4 ; <i32> [#uses=1]
%66 = add i32 %62, %57 ; <i32> [#uses=1]
%67 = add i32 %66, %65 ; <i32> [#uses=1]
%68 = zext i32 %52 to i64 ; <i64> [#uses=1]
%69 = getelementptr i32, i32* %51, i64 %68 ; <i32*> [#uses=1]
store i32 %67, i32* %69, align 1
- %70 = load i32** @d, align 8 ; <i32*> [#uses=1]
+ %70 = load i32*, i32** @d, align 8 ; <i32*> [#uses=1]
%71 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%72 = and i32 %71, 15 ; <i32> [#uses=1]
- %73 = load i32** @e, align 8 ; <i32*> [#uses=1]
+ %73 = load i32*, i32** @e, align 8 ; <i32*> [#uses=1]
%74 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%75 = and i32 %74, 15 ; <i32> [#uses=1]
%76 = zext i32 %75 to i64 ; <i64> [#uses=1]
%77 = getelementptr i32, i32* %73, i64 %76 ; <i32*> [#uses=1]
- %78 = load i32* %77, align 1 ; <i32> [#uses=1]
- %79 = load i32** @f, align 8 ; <i32*> [#uses=1]
+ %78 = load i32, i32* %77, align 1 ; <i32> [#uses=1]
+ %79 = load i32*, i32** @f, align 8 ; <i32*> [#uses=1]
%80 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%81 = and i32 %80, 15 ; <i32> [#uses=1]
%82 = zext i32 %81 to i64 ; <i64> [#uses=1]
%83 = getelementptr i32, i32* %79, i64 %82 ; <i32*> [#uses=1]
- %84 = load i32* %83, align 1 ; <i32> [#uses=1]
+ %84 = load i32, i32* %83, align 1 ; <i32> [#uses=1]
%85 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%86 = sext i32 %85 to i64 ; <i64> [#uses=1]
%87 = getelementptr [256 x i32], [256 x i32]* @K, i64 0, i64 %86 ; <i32*> [#uses=1]
- %88 = load i32* %87, align 4 ; <i32> [#uses=1]
+ %88 = load i32, i32* %87, align 4 ; <i32> [#uses=1]
%89 = add i32 %84, %78 ; <i32> [#uses=1]
%90 = add i32 %89, %88 ; <i32> [#uses=1]
%91 = zext i32 %72 to i64 ; <i64> [#uses=1]
%92 = getelementptr i32, i32* %70, i64 %91 ; <i32*> [#uses=1]
store i32 %90, i32* %92, align 1
- %93 = load i32** @d, align 8 ; <i32*> [#uses=1]
+ %93 = load i32*, i32** @d, align 8 ; <i32*> [#uses=1]
%94 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%95 = and i32 %94, 15 ; <i32> [#uses=1]
- %96 = load i32** @e, align 8 ; <i32*> [#uses=1]
+ %96 = load i32*, i32** @e, align 8 ; <i32*> [#uses=1]
%97 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%98 = and i32 %97, 15 ; <i32> [#uses=1]
%99 = zext i32 %98 to i64 ; <i64> [#uses=1]
%100 = getelementptr i32, i32* %96, i64 %99 ; <i32*> [#uses=1]
- %101 = load i32* %100, align 1 ; <i32> [#uses=1]
- %102 = load i32** @f, align 8 ; <i32*> [#uses=1]
+ %101 = load i32, i32* %100, align 1 ; <i32> [#uses=1]
+ %102 = load i32*, i32** @f, align 8 ; <i32*> [#uses=1]
%103 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%104 = and i32 %103, 15 ; <i32> [#uses=1]
%105 = zext i32 %104 to i64 ; <i64> [#uses=1]
%106 = getelementptr i32, i32* %102, i64 %105 ; <i32*> [#uses=1]
- %107 = load i32* %106, align 1 ; <i32> [#uses=1]
+ %107 = load i32, i32* %106, align 1 ; <i32> [#uses=1]
%108 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%109 = sext i32 %108 to i64 ; <i64> [#uses=1]
%110 = getelementptr [256 x i32], [256 x i32]* @K, i64 0, i64 %109 ; <i32*> [#uses=1]
- %111 = load i32* %110, align 4 ; <i32> [#uses=1]
+ %111 = load i32, i32* %110, align 4 ; <i32> [#uses=1]
%112 = add i32 %107, %101 ; <i32> [#uses=1]
%113 = add i32 %112, %111 ; <i32> [#uses=1]
%114 = zext i32 %95 to i64 ; <i64> [#uses=1]
diff --git a/llvm/test/Transforms/IndVarSimplify/2011-09-27-hoistsext.ll b/llvm/test/Transforms/IndVarSimplify/2011-09-27-hoistsext.ll
index 9716778c981..f0765e7d4c6 100644
--- a/llvm/test/Transforms/IndVarSimplify/2011-09-27-hoistsext.ll
+++ b/llvm/test/Transforms/IndVarSimplify/2011-09-27-hoistsext.ll
@@ -18,7 +18,7 @@ for.body:
%add174 = add nsw i32 %i2.115, %x
%idxprom177 = sext i32 %add174 to i64
%arrayidx179 = getelementptr inbounds double, double* %data, i64 %idxprom177
- %tmp180 = load double* %arrayidx179, align 8
+ %tmp180 = load double, double* %arrayidx179, align 8
%add249 = add nsw i32 %i2.115, %y
%cmp168 = icmp sgt i32 %add249, %n
br i1 %cmp168, label %exit, label %for.body
diff --git a/llvm/test/Transforms/IndVarSimplify/2011-11-01-lftrptr.ll b/llvm/test/Transforms/IndVarSimplify/2011-11-01-lftrptr.ll
index 2c628f64b49..a0b1e84a861 100644
--- a/llvm/test/Transforms/IndVarSimplify/2011-11-01-lftrptr.ll
+++ b/llvm/test/Transforms/IndVarSimplify/2011-11-01-lftrptr.ll
@@ -26,7 +26,7 @@ loop:
%p.01.us.us = phi i8* [ null, %preheader ], [ %gep, %loop ]
%s = phi i8 [0, %preheader], [%snext, %loop]
%gep = getelementptr inbounds i8, i8* %p.01.us.us, i64 1
- %snext = load i8* %gep
+ %snext = load i8, i8* %gep
%cmp = icmp ult i8* %gep, %end
br i1 %cmp, label %loop, label %exit
@@ -51,7 +51,7 @@ loop:
%p.01.us.us = phi i8* [ %buf, %preheader ], [ %gep, %loop ]
%s = phi i8 [0, %preheader], [%snext, %loop]
%gep = getelementptr inbounds i8, i8* %p.01.us.us, i64 1
- %snext = load i8* %gep
+ %snext = load i8, i8* %gep
%cmp = icmp ult i8* %gep, %end
br i1 %cmp, label %loop, label %exit
@@ -80,7 +80,7 @@ loop:
%iv = phi i32 [ 0, %preheader ], [ %ivnext, %loop ]
%s = phi i8 [0, %preheader], [%snext, %loop]
%gep = getelementptr inbounds i8, i8* %p.01.us.us, i64 1
- %snext = load i8* %gep
+ %snext = load i8, i8* %gep
%ivnext = add i32 %iv, 1
%cmp = icmp ult i32 %ivnext, %cnt
br i1 %cmp, label %loop, label %exit
@@ -110,7 +110,7 @@ loop:
%iv = phi i32 [ %bi, %preheader ], [ %ivnext, %loop ]
%s = phi i8 [0, %preheader], [%snext, %loop]
%gep = getelementptr inbounds i8, i8* %p.01.us.us, i64 1
- %snext = load i8* %gep
+ %snext = load i8, i8* %gep
%ivnext = add i32 %iv, 1
%cmp = icmp ult i32 %ivnext, %cnt
br i1 %cmp, label %loop, label %exit
diff --git a/llvm/test/Transforms/IndVarSimplify/2011-11-15-multiexit.ll b/llvm/test/Transforms/IndVarSimplify/2011-11-15-multiexit.ll
index 2683e906ccd..65b2cf68458 100644
--- a/llvm/test/Transforms/IndVarSimplify/2011-11-15-multiexit.ll
+++ b/llvm/test/Transforms/IndVarSimplify/2011-11-15-multiexit.ll
@@ -35,6 +35,6 @@ early.exit:
if.end:
%tmp = phi i32* [ %first.lcssa, %early.exit ], [ %first, %if.then ], [ %first, %entry ], [ undef, %if.else ]
- %val = load i32* %tmp
+ %val = load i32, i32* %tmp
ret i32 %val
}
diff --git a/llvm/test/Transforms/IndVarSimplify/2014-06-21-congruent-constant.ll b/llvm/test/Transforms/IndVarSimplify/2014-06-21-congruent-constant.ll
index 2c738de32e0..1d80e751dca 100644
--- a/llvm/test/Transforms/IndVarSimplify/2014-06-21-congruent-constant.ll
+++ b/llvm/test/Transforms/IndVarSimplify/2014-06-21-congruent-constant.ll
@@ -13,9 +13,9 @@ entry:
br i1 undef, label %for.end12, label %for.cond.preheader
for.cond.preheader: ; preds = %entry
- %0 = load i32*** @c, align 8
- %1 = load i32** %0, align 8
- %2 = load i32* %1, align 4
+ %0 = load i32**, i32*** @c, align 8
+ %1 = load i32*, i32** %0, align 8
+ %2 = load i32, i32* %1, align 4
br label %for.body
for.body: ; preds = %for.cond.backedge, %for.body9.us, %for.cond.preheader
@@ -26,7 +26,7 @@ for.body: ; preds = %for.cond.backedge,
br i1 %tobool1, label %if.end, label %for.cond.backedge
if.end: ; preds = %for.body
- %5 = load i32* %3, align 4
+ %5 = load i32, i32* %3, align 4
%tobool4 = icmp eq i32 %5, 0
br i1 %tobool4, label %for.cond3, label %for.body9.preheader
@@ -35,8 +35,8 @@ for.body9.preheader: ; preds = %if.end
br i1 %tobool8, label %for.body9.us, label %for.body9
for.body9.us: ; preds = %for.body9.preheader
- %6 = load i32** undef, align 8
- %7 = load i32* %6, align 4
+ %6 = load i32*, i32** undef, align 8
+ %7 = load i32, i32* %6, align 4
br label %for.body
for.cond3: ; preds = %for.cond3, %if.end
diff --git a/llvm/test/Transforms/IndVarSimplify/ashr-tripcount.ll b/llvm/test/Transforms/IndVarSimplify/ashr-tripcount.ll
index 04446a1ecf4..5f337301b40 100644
--- a/llvm/test/Transforms/IndVarSimplify/ashr-tripcount.ll
+++ b/llvm/test/Transforms/IndVarSimplify/ashr-tripcount.ll
@@ -31,8 +31,8 @@ bb2: ; preds = %bb1, %entry
%.pn2 = zext i32 %.pn2.in to i64 ; <i64> [#uses=1]
%.pn.in = getelementptr [0 x float], [0 x float]* %pow_2_tab.pn, i64 0, i64 %.pn3 ; <float*> [#uses=1]
%.pn1.in = getelementptr [0 x float], [0 x float]* %pow_2_025_tab.pn, i64 0, i64 %.pn2 ; <float*> [#uses=1]
- %.pn = load float* %.pn.in ; <float> [#uses=1]
- %.pn1 = load float* %.pn1.in ; <float> [#uses=1]
+ %.pn = load float, float* %.pn.in ; <float> [#uses=1]
+ %.pn1 = load float, float* %.pn1.in ; <float> [#uses=1]
%invQuantizer.0 = fmul float %.pn, %.pn1 ; <float> [#uses=4]
%t3 = ashr i32 %noOfLines, 2 ; <i32> [#uses=1]
%t4 = icmp sgt i32 %t3, 0 ; <i1> [#uses=1]
@@ -47,31 +47,31 @@ bb3: ; preds = %bb4, %bb.nph
%k.04 = phi i32 [ %t48, %bb4 ], [ 0, %bb.nph ] ; <i32> [#uses=1]
%t6 = sext i32 %i.05 to i64 ; <i64> [#uses=1]
%t7 = getelementptr i32, i32* %quaSpectrum, i64 %t6 ; <i32*> [#uses=1]
- %t8 = load i32* %t7, align 4 ; <i32> [#uses=1]
+ %t8 = load i32, i32* %t7, align 4 ; <i32> [#uses=1]
%t9 = zext i32 %t8 to i64 ; <i64> [#uses=1]
%t10 = getelementptr float, float* %pow4_3_tab_ptr, i64 %t9 ; <float*> [#uses=1]
- %t11 = load float* %t10, align 4 ; <float> [#uses=1]
+ %t11 = load float, float* %t10, align 4 ; <float> [#uses=1]
%t12 = or i32 %i.05, 1 ; <i32> [#uses=1]
%t13 = sext i32 %t12 to i64 ; <i64> [#uses=1]
%t14 = getelementptr i32, i32* %quaSpectrum, i64 %t13 ; <i32*> [#uses=1]
- %t15 = load i32* %t14, align 4 ; <i32> [#uses=1]
+ %t15 = load i32, i32* %t14, align 4 ; <i32> [#uses=1]
%t16 = zext i32 %t15 to i64 ; <i64> [#uses=1]
%t17 = getelementptr float, float* %pow4_3_tab_ptr, i64 %t16 ; <float*> [#uses=1]
- %t18 = load float* %t17, align 4 ; <float> [#uses=1]
+ %t18 = load float, float* %t17, align 4 ; <float> [#uses=1]
%t19 = or i32 %i.05, 2 ; <i32> [#uses=1]
%t20 = sext i32 %t19 to i64 ; <i64> [#uses=1]
%t21 = getelementptr i32, i32* %quaSpectrum, i64 %t20 ; <i32*> [#uses=1]
- %t22 = load i32* %t21, align 4 ; <i32> [#uses=1]
+ %t22 = load i32, i32* %t21, align 4 ; <i32> [#uses=1]
%t23 = zext i32 %t22 to i64 ; <i64> [#uses=1]
%t24 = getelementptr float, float* %pow4_3_tab_ptr, i64 %t23 ; <float*> [#uses=1]
- %t25 = load float* %t24, align 4 ; <float> [#uses=1]
+ %t25 = load float, float* %t24, align 4 ; <float> [#uses=1]
%t26 = or i32 %i.05, 3 ; <i32> [#uses=1]
%t27 = sext i32 %t26 to i64 ; <i64> [#uses=1]
%t28 = getelementptr i32, i32* %quaSpectrum, i64 %t27 ; <i32*> [#uses=1]
- %t29 = load i32* %t28, align 4 ; <i32> [#uses=1]
+ %t29 = load i32, i32* %t28, align 4 ; <i32> [#uses=1]
%t30 = zext i32 %t29 to i64 ; <i64> [#uses=1]
%t31 = getelementptr float, float* %pow4_3_tab_ptr, i64 %t30 ; <float*> [#uses=1]
- %t32 = load float* %t31, align 4 ; <float> [#uses=1]
+ %t32 = load float, float* %t31, align 4 ; <float> [#uses=1]
%t33 = fmul float %t11, %invQuantizer.0 ; <float> [#uses=1]
%t34 = sext i32 %i.05 to i64 ; <i64> [#uses=1]
%t35 = getelementptr float, float* %iquaSpectrum, i64 %t34 ; <float*> [#uses=1]
diff --git a/llvm/test/Transforms/IndVarSimplify/avoid-i0.ll b/llvm/test/Transforms/IndVarSimplify/avoid-i0.ll
index 22f2e4b718c..cc38590c099 100644
--- a/llvm/test/Transforms/IndVarSimplify/avoid-i0.ll
+++ b/llvm/test/Transforms/IndVarSimplify/avoid-i0.ll
@@ -34,25 +34,25 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store i32 %_si1, i32* %_si1_addr
store i8 %_si2, i8* %_si2_addr
- %1 = load i8* %_si2_addr, align 1 ; <i8> [#uses=1]
+ %1 = load i8, i8* %_si2_addr, align 1 ; <i8> [#uses=1]
%2 = sext i8 %1 to i32 ; <i32> [#uses=1]
- %3 = load i32* %_si1_addr, align 4 ; <i32> [#uses=1]
+ %3 = load i32, i32* %_si1_addr, align 4 ; <i32> [#uses=1]
%4 = xor i32 %2, %3 ; <i32> [#uses=1]
- %5 = load i8* %_si2_addr, align 1 ; <i8> [#uses=1]
+ %5 = load i8, i8* %_si2_addr, align 1 ; <i8> [#uses=1]
%6 = sext i8 %5 to i32 ; <i32> [#uses=1]
%7 = sub i32 7, %6 ; <i32> [#uses=1]
- %8 = load i32* %_si1_addr, align 4 ; <i32> [#uses=1]
+ %8 = load i32, i32* %_si1_addr, align 4 ; <i32> [#uses=1]
%9 = shl i32 %8, %7 ; <i32> [#uses=1]
%10 = and i32 %4, %9 ; <i32> [#uses=1]
%11 = icmp slt i32 %10, 0 ; <i1> [#uses=1]
%12 = zext i1 %11 to i32 ; <i32> [#uses=1]
store i32 %12, i32* %0, align 4
- %13 = load i32* %0, align 4 ; <i32> [#uses=1]
+ %13 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
store i32 %13, i32* %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
%retval12 = trunc i32 %retval1 to i8 ; <i8> [#uses=1]
ret i8 %retval12
}
@@ -66,15 +66,15 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store i32 %_ui1, i32* %_ui1_addr
store i32 %_ui2, i32* %_ui2_addr
- %1 = load i32* %_ui1_addr, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* %_ui1_addr, align 4 ; <i32> [#uses=1]
%2 = sub i32 %1, 1 ; <i32> [#uses=1]
store i32 %2, i32* %0, align 4
- %3 = load i32* %0, align 4 ; <i32> [#uses=1]
+ %3 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
store i32 %3, i32* %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval1
}
@@ -90,31 +90,31 @@ entry:
br label %bb4
bb: ; preds = %bb4
- %0 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
+ %0 = load volatile i32, i32* @x, align 4 ; <i32> [#uses=1]
store i32 %0, i32* %vol.0, align 4
store i32 0, i32* %l_52, align 4
br label %bb2
bb1: ; preds = %bb2
- %1 = load i32* %l_52, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* %l_52, align 4 ; <i32> [#uses=1]
%2 = call i32 @safe_sub_func_uint64_t_u_u(i32 %1, i32 1) nounwind ; <i32> [#uses=1]
store i32 %2, i32* %l_52, align 4
br label %bb2
bb2: ; preds = %bb1, %bb
- %3 = load i32* %l_52, align 4 ; <i32> [#uses=1]
+ %3 = load i32, i32* %l_52, align 4 ; <i32> [#uses=1]
%4 = icmp eq i32 %3, 0 ; <i1> [#uses=1]
br i1 %4, label %bb1, label %bb3
bb3: ; preds = %bb2
- %5 = load i32* %l_52, align 4 ; <i32> [#uses=1]
+ %5 = load i32, i32* %l_52, align 4 ; <i32> [#uses=1]
%6 = call signext i8 @safe_sub_func_int32_t_s_s(i32 %5, i8 signext 1) nounwind ; <i8> [#uses=1]
%7 = sext i8 %6 to i32 ; <i32> [#uses=1]
store i32 %7, i32* %l_52, align 4
br label %bb4
bb4: ; preds = %bb3, %entry
- %8 = load i32* %l_52, align 4 ; <i32> [#uses=1]
+ %8 = load i32, i32* %l_52, align 4 ; <i32> [#uses=1]
%9 = icmp ne i32 %8, 0 ; <i1> [#uses=1]
br i1 %9, label %bb, label %bb5
diff --git a/llvm/test/Transforms/IndVarSimplify/eliminate-comparison.ll b/llvm/test/Transforms/IndVarSimplify/eliminate-comparison.ll
index b3655c70bc3..4d14b3681c5 100644
--- a/llvm/test/Transforms/IndVarSimplify/eliminate-comparison.ll
+++ b/llvm/test/Transforms/IndVarSimplify/eliminate-comparison.ll
@@ -16,7 +16,7 @@ entry:
br i1 %cmp9, label %pre, label %return
pre:
- %t3 = load i32* %p
+ %t3 = load i32, i32* %p
%tobool.not = icmp ne i32 %t3, 0
br label %loop
@@ -50,22 +50,22 @@ entry:
br label %bb18
bb13:
- %tmp66 = load i64** %tmp65, align 4
+ %tmp66 = load i64*, i64** %tmp65, align 4
%tmp68 = getelementptr inbounds i64, i64* %tmp66, i32 %i
- %tmp69 = load i64* %tmp68, align 4
- %tmp74 = load i64** %tmp73, align 4
+ %tmp69 = load i64, i64* %tmp68, align 4
+ %tmp74 = load i64*, i64** %tmp73, align 4
%tmp76 = getelementptr inbounds i64, i64* %tmp74, i32 %i
- %tmp77 = load i64* %tmp76, align 4
+ %tmp77 = load i64, i64* %tmp76, align 4
%tmp78 = icmp ugt i64 %tmp69, %tmp77
br i1 %tmp78, label %bb20.loopexit, label %bb15
bb15:
- %tmp83 = load i64** %tmp82, align 4
+ %tmp83 = load i64*, i64** %tmp82, align 4
%tmp85 = getelementptr inbounds i64, i64* %tmp83, i32 %i
- %tmp86 = load i64* %tmp85, align 4
- %tmp91 = load i64** %tmp90, align 4
+ %tmp86 = load i64, i64* %tmp85, align 4
+ %tmp91 = load i64*, i64** %tmp90, align 4
%tmp93 = getelementptr inbounds i64, i64* %tmp91, i32 %i
- %tmp94 = load i64* %tmp93, align 4
+ %tmp94 = load i64, i64* %tmp93, align 4
%tmp95 = icmp ult i64 %tmp86, %tmp94
br i1 %tmp95, label %bb20.loopexit, label %bb17
diff --git a/llvm/test/Transforms/IndVarSimplify/eliminate-rem.ll b/llvm/test/Transforms/IndVarSimplify/eliminate-rem.ll
index d10ff1c0b6f..6f8e6bbac77 100644
--- a/llvm/test/Transforms/IndVarSimplify/eliminate-rem.ll
+++ b/llvm/test/Transforms/IndVarSimplify/eliminate-rem.ll
@@ -82,12 +82,12 @@ bb21: ; preds = %bb21, %bb20
%t30 = add nsw i64 %t27, %t22 ; <i64> [#uses=1]
%t31 = getelementptr inbounds i64, i64* %arg, i64 %t28 ; <i64*> [#uses=1]
%t32 = zext i32 %t23 to i64 ; <i64> [#uses=1]
- %t33 = load i64* %t29 ; <i64> [#uses=1]
+ %t33 = load i64, i64* %t29 ; <i64> [#uses=1]
%t34 = getelementptr inbounds i64, i64* %arg, i64 %t30 ; <i64*> [#uses=1]
- %t35 = load i64* %t31 ; <i64> [#uses=1]
+ %t35 = load i64, i64* %t31 ; <i64> [#uses=1]
%t36 = add nsw i64 %t32, %t33 ; <i64> [#uses=1]
%t37 = add nsw i64 %t36, %t35 ; <i64> [#uses=1]
- %t38 = load i64* %t34 ; <i64> [#uses=1]
+ %t38 = load i64, i64* %t34 ; <i64> [#uses=1]
%t39 = add nsw i64 %t37, %t38 ; <i64> [#uses=1]
%t40 = trunc i64 %t39 to i32 ; <i32> [#uses=2]
%t41 = add nsw i64 %t22, 1 ; <i64> [#uses=2]
diff --git a/llvm/test/Transforms/IndVarSimplify/indirectbr.ll b/llvm/test/Transforms/IndVarSimplify/indirectbr.ll
index 3f973a89cb2..d5801690d5e 100644
--- a/llvm/test/Transforms/IndVarSimplify/indirectbr.ll
+++ b/llvm/test/Transforms/IndVarSimplify/indirectbr.ll
@@ -33,7 +33,7 @@ bb16: ; preds = %bb16, %bb14, %bb7.p
%S.31.0 = phi i64 [ %3, %bb16 ], [ 1, %bb7.preheader ], [ 1, %bb14 ] ; <i64> [#uses=2]
%0 = add nsw i64 %S.31.0, -1 ; <i64> [#uses=1]
%1 = getelementptr inbounds [3 x double], [3 x double]* undef, i64 0, i64 %0 ; <double*> [#uses=1]
- %2 = load double* %1, align 8 ; <double> [#uses=0]
+ %2 = load double, double* %1, align 8 ; <double> [#uses=0]
%3 = add nsw i64 %S.31.0, 1 ; <i64> [#uses=1]
br label %bb16
}
diff --git a/llvm/test/Transforms/IndVarSimplify/iv-fold.ll b/llvm/test/Transforms/IndVarSimplify/iv-fold.ll
index 26a51ce7823..af8a33b30ea 100644
--- a/llvm/test/Transforms/IndVarSimplify/iv-fold.ll
+++ b/llvm/test/Transforms/IndVarSimplify/iv-fold.ll
@@ -14,11 +14,11 @@ while.body:
%0 = phi i32 [ 0, %entry ], [ %inc.2, %while.body ]
%shr = lshr i32 %0, 5
%arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
- %tmp6 = load i32* %arrayidx, align 4
+ %tmp6 = load i32, i32* %arrayidx, align 4
%inc.1 = add i32 %0, 1
%shr.1 = lshr i32 %inc.1, 5
%arrayidx.1 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.1
- %tmp6.1 = load i32* %arrayidx.1, align 4
+ %tmp6.1 = load i32, i32* %arrayidx.1, align 4
%inc.2 = add i32 %inc.1, 1
%exitcond.3 = icmp eq i32 %inc.2, 128
br i1 %exitcond.3, label %while.end, label %while.body
@@ -41,11 +41,11 @@ while.body:
%0 = phi i32 [ 0, %entry ], [ %inc.3, %while.body ]
%shr = lshr i32 %0, 5
%arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
- %tmp6 = load i32* %arrayidx, align 4
+ %tmp6 = load i32, i32* %arrayidx, align 4
%inc.1 = add i32 %0, 1
%shr.1 = lshr i32 %inc.1, 5
%arrayidx.1 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.1
- %tmp6.1 = load i32* %arrayidx.1, align 4
+ %tmp6.1 = load i32, i32* %arrayidx.1, align 4
%inc.3 = add i32 %inc.1, 2
%exitcond.3 = icmp eq i32 %inc.3, 96
br i1 %exitcond.3, label %while.end, label %while.body
diff --git a/llvm/test/Transforms/IndVarSimplify/iv-sext.ll b/llvm/test/Transforms/IndVarSimplify/iv-sext.ll
index 64062c327c4..89e21e1b269 100644
--- a/llvm/test/Transforms/IndVarSimplify/iv-sext.ll
+++ b/llvm/test/Transforms/IndVarSimplify/iv-sext.ll
@@ -8,7 +8,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define void @t(float* %pTmp1, float* %peakWeight, float* %nrgReducePeakrate, i32 %bandEdgeIndex, float %tmp1) nounwind {
entry:
- %tmp = load float* %peakWeight, align 4 ; <float> [#uses=1]
+ %tmp = load float, float* %peakWeight, align 4 ; <float> [#uses=1]
%tmp2 = icmp sgt i32 %bandEdgeIndex, 0 ; <i1> [#uses=1]
br i1 %tmp2, label %bb.nph22, label %return
@@ -34,12 +34,12 @@ bb1: ; preds = %bb
%tmp5 = add i32 %part.016, -1 ; <i32> [#uses=1]
%tmp6 = sext i32 %tmp5 to i64 ; <i64> [#uses=1]
%tmp7 = getelementptr float, float* %pTmp1, i64 %tmp6 ; <float*> [#uses=1]
- %tmp8 = load float* %tmp7, align 4 ; <float> [#uses=1]
+ %tmp8 = load float, float* %tmp7, align 4 ; <float> [#uses=1]
%tmp9 = fadd float %tmp8, %distERBlo.120 ; <float> [#uses=1]
%tmp10 = add i32 %part.016, -1 ; <i32> [#uses=1]
%tmp11 = sext i32 %tmp10 to i64 ; <i64> [#uses=1]
%tmp12 = getelementptr float, float* %pTmp1, i64 %tmp11 ; <float*> [#uses=1]
- %tmp13 = load float* %tmp12, align 4 ; <float> [#uses=1]
+ %tmp13 = load float, float* %tmp12, align 4 ; <float> [#uses=1]
%tmp14 = fsub float %distERBhi.121, %tmp13 ; <float> [#uses=1]
br label %bb3.preheader
@@ -58,11 +58,11 @@ bb2: ; preds = %bb3, %bb.nph
%peakCount.01 = phi float [ %tmp23, %bb3 ], [ %peakCount.117, %bb.nph ] ; <float> [#uses=1]
%tmp16 = sext i32 %loPart.02 to i64 ; <i64> [#uses=1]
%tmp17 = getelementptr float, float* %pTmp1, i64 %tmp16 ; <float*> [#uses=1]
- %tmp18 = load float* %tmp17, align 4 ; <float> [#uses=1]
+ %tmp18 = load float, float* %tmp17, align 4 ; <float> [#uses=1]
%tmp19 = fsub float %distERBlo.03, %tmp18 ; <float> [#uses=3]
%tmp20 = sext i32 %loPart.02 to i64 ; <i64> [#uses=1]
%tmp21 = getelementptr float, float* %peakWeight, i64 %tmp20 ; <float*> [#uses=1]
- %tmp22 = load float* %tmp21, align 4 ; <float> [#uses=1]
+ %tmp22 = load float, float* %tmp21, align 4 ; <float> [#uses=1]
%tmp23 = fsub float %peakCount.01, %tmp22 ; <float> [#uses=2]
%tmp24 = add i32 %loPart.02, 1 ; <i32> [#uses=2]
br label %bb3
@@ -98,12 +98,12 @@ bb4: ; preds = %bb5, %bb.nph12
%peakCount.27 = phi float [ %tmp35, %bb5 ], [ %peakCount.0.lcssa, %bb.nph12 ] ; <float> [#uses=1]
%tmp27 = sext i32 %hiPart.08 to i64 ; <i64> [#uses=1]
%tmp28 = getelementptr float, float* %pTmp1, i64 %tmp27 ; <float*> [#uses=1]
- %tmp29 = load float* %tmp28, align 4 ; <float> [#uses=1]
+ %tmp29 = load float, float* %tmp28, align 4 ; <float> [#uses=1]
%tmp30 = fadd float %tmp29, %distERBhi.29 ; <float> [#uses=3]
%tmp31 = add i32 %hiPart.08, 1 ; <i32> [#uses=4]
%tmp32 = sext i32 %tmp31 to i64 ; <i64> [#uses=1]
%tmp33 = getelementptr float, float* %peakWeight, i64 %tmp32 ; <float*> [#uses=1]
- %tmp34 = load float* %tmp33, align 4 ; <float> [#uses=1]
+ %tmp34 = load float, float* %tmp33, align 4 ; <float> [#uses=1]
%tmp35 = fadd float %tmp34, %peakCount.27 ; <float> [#uses=2]
br label %bb5
diff --git a/llvm/test/Transforms/IndVarSimplify/iv-widen.ll b/llvm/test/Transforms/IndVarSimplify/iv-widen.ll
index 497f9f90991..2b69cb151d2 100644
--- a/llvm/test/Transforms/IndVarSimplify/iv-widen.ll
+++ b/llvm/test/Transforms/IndVarSimplify/iv-widen.ll
@@ -21,7 +21,7 @@ B18: ; preds = %B24, %Prologue
%tmp23 = zext i32 %.02 to i64
%tmp33 = add i32 %.02, 1
%o = getelementptr i32, i32* %a, i32 %.02
- %v = load i32* %o
+ %v = load i32, i32* %o
%t = icmp eq i32 %v, 0
br i1 %t, label %exit24, label %B24
diff --git a/llvm/test/Transforms/IndVarSimplify/iv-zext.ll b/llvm/test/Transforms/IndVarSimplify/iv-zext.ll
index 88d6fa2283c..629a85efe8e 100644
--- a/llvm/test/Transforms/IndVarSimplify/iv-zext.ll
+++ b/llvm/test/Transforms/IndVarSimplify/iv-zext.ll
@@ -12,16 +12,16 @@ loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
diff --git a/llvm/test/Transforms/IndVarSimplify/lftr-promote.ll b/llvm/test/Transforms/IndVarSimplify/lftr-promote.ll
index 8f82fbd9710..4fe3191391b 100644
--- a/llvm/test/Transforms/IndVarSimplify/lftr-promote.ll
+++ b/llvm/test/Transforms/IndVarSimplify/lftr-promote.ll
@@ -18,7 +18,7 @@ bb2: ; preds = %bb3, %bb.nph
%i.01 = phi i32 [ %7, %bb3 ], [ 0, %bb.nph ] ; <i32> [#uses=3]
%1 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
%2 = getelementptr double, double* %p, i64 %1 ; <double*> [#uses=1]
- %3 = load double* %2, align 8 ; <double> [#uses=1]
+ %3 = load double, double* %2, align 8 ; <double> [#uses=1]
%4 = fmul double %3, 1.100000e+00 ; <double> [#uses=1]
%5 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
%6 = getelementptr double, double* %p, i64 %5 ; <double*> [#uses=1]
diff --git a/llvm/test/Transforms/IndVarSimplify/lftr-reuse.ll b/llvm/test/Transforms/IndVarSimplify/lftr-reuse.ll
index fa46250414a..befbb9e3f15 100644
--- a/llvm/test/Transforms/IndVarSimplify/lftr-reuse.ll
+++ b/llvm/test/Transforms/IndVarSimplify/lftr-reuse.ll
@@ -109,10 +109,10 @@ loop:
%diagidx = add nsw i32 %rowidx, %i
%diagidxw = sext i32 %diagidx to i64
%matrixp = getelementptr inbounds [0 x double], [0 x double]* %matrix, i32 0, i64 %diagidxw
- %v1 = load double* %matrixp
+ %v1 = load double, double* %matrixp
%iw = sext i32 %i to i64
%vectorp = getelementptr inbounds [0 x double], [0 x double]* %vector, i32 0, i64 %iw
- %v2 = load double* %vectorp
+ %v2 = load double, double* %vectorp
%row.inc = add nsw i32 %rowidx, %ilead
%i.inc = add nsw i32 %i, 1
%cmp196 = icmp slt i32 %i.inc, %irow
@@ -143,10 +143,10 @@ loop:
%diagidx = add nsw i32 %rowidx, %i
%diagidxw = sext i32 %diagidx to i64
%matrixp = getelementptr inbounds [0 x double], [0 x double]* %matrix, i32 0, i64 %diagidxw
- %v1 = load double* %matrixp
+ %v1 = load double, double* %matrixp
%iw = sext i32 %i to i64
%vectorp = getelementptr inbounds [0 x double], [0 x double]* %vector, i32 0, i64 %iw
- %v2 = load double* %vectorp
+ %v2 = load double, double* %vectorp
%row.inc = add nsw i32 %rowidx, %ilead
%i.inc = add nsw i32 %i, 1
%cmp196 = icmp slt i32 %i.inc, %irow
diff --git a/llvm/test/Transforms/IndVarSimplify/lftr-zext.ll b/llvm/test/Transforms/IndVarSimplify/lftr-zext.ll
index a5dda92c774..f5641bcbbb6 100644
--- a/llvm/test/Transforms/IndVarSimplify/lftr-zext.ll
+++ b/llvm/test/Transforms/IndVarSimplify/lftr-zext.ll
@@ -14,7 +14,7 @@ define void @foo(i8* %a) nounwind uwtable ssp {
%p.0 = phi i8* [ getelementptr inbounds ([240 x i8]* @data, i64 0, i64 0), %0 ], [ %4, %1 ]
%.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
%2 = getelementptr inbounds i8, i8* %.0, i64 1
- %3 = load i8* %.0, align 1
+ %3 = load i8, i8* %.0, align 1
%4 = getelementptr inbounds i8, i8* %p.0, i64 1
store i8 %3, i8* %p.0, align 1
%5 = add i8 %i.0, 1
diff --git a/llvm/test/Transforms/IndVarSimplify/loop_evaluate7.ll b/llvm/test/Transforms/IndVarSimplify/loop_evaluate7.ll
index 8f86d7b5962..333ab7a4967 100644
--- a/llvm/test/Transforms/IndVarSimplify/loop_evaluate7.ll
+++ b/llvm/test/Transforms/IndVarSimplify/loop_evaluate7.ll
@@ -21,7 +21,7 @@ bb19: ; preds = %bb30
br i1 undef, label %bb20, label %bb29
bb20: ; preds = %bb19
- %0 = load i32* undef, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* undef, align 4 ; <i32> [#uses=1]
%1 = sub i32 %0, %n ; <i32> [#uses=1]
br label %bb23
diff --git a/llvm/test/Transforms/IndVarSimplify/loop_evaluate8.ll b/llvm/test/Transforms/IndVarSimplify/loop_evaluate8.ll
index 0d18ddf9e1a..28d05dff2ff 100644
--- a/llvm/test/Transforms/IndVarSimplify/loop_evaluate8.ll
+++ b/llvm/test/Transforms/IndVarSimplify/loop_evaluate8.ll
@@ -23,7 +23,7 @@ bb19: ; preds = %bb30
br i1 undef, label %bb20, label %bb29
bb20: ; preds = %bb19
- %0 = load i32* undef, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* undef, align 4 ; <i32> [#uses=1]
%1 = sub i32 %0, undef ; <i32> [#uses=1]
br label %bb23
diff --git a/llvm/test/Transforms/IndVarSimplify/no-iv-rewrite.ll b/llvm/test/Transforms/IndVarSimplify/no-iv-rewrite.ll
index 8d8a451421b..ca7b8991166 100644
--- a/llvm/test/Transforms/IndVarSimplify/no-iv-rewrite.ll
+++ b/llvm/test/Transforms/IndVarSimplify/no-iv-rewrite.ll
@@ -29,7 +29,7 @@ loop:
%s.01 = phi i32 [ 0, %ph ], [ %sinc, %loop ]
%ofs = sext i32 %i.02 to i64
%adr = getelementptr inbounds i32, i32* %arr, i64 %ofs
- %val = load i32* %adr
+ %val = load i32, i32* %adr
%sinc = add nsw i32 %s.01, %val
%iinc = add nsw i32 %i.02, 1
%cond = icmp slt i32 %iinc, %n
@@ -70,7 +70,7 @@ loop:
%s.01 = phi i64 [ 0, %ph ], [ %sinc, %loop ]
%ofs = sext i32 %i.02 to i64
%adr = getelementptr inbounds i32, i32* %arr, i64 %ofs
- %val = load i32* %adr
+ %val = load i32, i32* %adr
%vall = sext i32 %val to i64
%sinc = add nsw i64 %s.01, %vall
%iinc = add nsw i32 %i.02, 1
@@ -171,7 +171,7 @@ loop:
%max = phi i32 [ 0, %entry ], [ %max.next, %loop.inc ]
%idxprom = sext i32 %idx to i64
%adr = getelementptr inbounds i32, i32* %base, i64 %idxprom
- %val = load i32* %adr
+ %val = load i32, i32* %adr
%cmp19 = icmp sgt i32 %val, %max
br i1 %cmp19, label %if.then, label %if.else
@@ -240,7 +240,7 @@ loop:
%iv = phi i32 [ 0, %entry], [ %iv.next, %loop ]
%t1 = sext i32 %iv to i64
%adr = getelementptr i64, i64* %base, i64 %t1
- %val = load i64* %adr
+ %val = load i64, i64* %adr
%t2 = or i32 %iv, 1
%t3 = sext i32 %t2 to i64
%iv.next = add i32 %iv, 2
diff --git a/llvm/test/Transforms/IndVarSimplify/overflowcheck.ll b/llvm/test/Transforms/IndVarSimplify/overflowcheck.ll
index 0ced0403f29..c3c033dfaec 100644
--- a/llvm/test/Transforms/IndVarSimplify/overflowcheck.ll
+++ b/llvm/test/Transforms/IndVarSimplify/overflowcheck.ll
@@ -28,7 +28,7 @@ loop1:
%zxt = zext i32 %i to i64
%ofs = shl nuw nsw i64 %zxt, 3
%gep = getelementptr i64, i64* %a, i64 %zxt
- %v = load i64* %gep, align 8
+ %v = load i64, i64* %gep, align 8
%truncv = trunc i64 %v to i32
%adds = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %s, i32 %truncv)
%ovflows = extractvalue { i32, i1 } %adds, 1
diff --git a/llvm/test/Transforms/IndVarSimplify/phi-uses-value-multiple-times.ll b/llvm/test/Transforms/IndVarSimplify/phi-uses-value-multiple-times.ll
index dc36b994825..519d34d76cb 100644
--- a/llvm/test/Transforms/IndVarSimplify/phi-uses-value-multiple-times.ll
+++ b/llvm/test/Transforms/IndVarSimplify/phi-uses-value-multiple-times.ll
@@ -13,7 +13,7 @@ entry:
br label %bb38.i
bb14.i27:
- %t0 = load i64* @ue, align 8
+ %t0 = load i64, i64* @ue, align 8
%t1 = sub i64 %t0, %i.0.i35
%t2 = add i64 %t1, 1
br i1 undef, label %bb15.i28, label %bb19.i31
diff --git a/llvm/test/Transforms/IndVarSimplify/polynomial-expand.ll b/llvm/test/Transforms/IndVarSimplify/polynomial-expand.ll
index 6128c12d571..5708c64d6c8 100644
--- a/llvm/test/Transforms/IndVarSimplify/polynomial-expand.ll
+++ b/llvm/test/Transforms/IndVarSimplify/polynomial-expand.ll
@@ -21,7 +21,7 @@ bb30: ; preds = %bb30, %bb24
%tmp32 = phi i32 [ %tmp37, %bb30 ], [ %tmp27, %bb24 ] ; <i32> [#uses=2]
%tmp33 = sext i32 %tmp32 to i64 ; <i64> [#uses=1]
%tmp35 = getelementptr float, float* %tmp4, i64 %tmp33 ; <%0*> [#uses=1]
- %tmp36 = load float* %tmp35, align 4 ; <%0> [#uses=0]
+ %tmp36 = load float, float* %tmp35, align 4 ; <%0> [#uses=0]
%tmp37 = add nsw i32 %tmp32, -1 ; <i32> [#uses=1]
%tmp39 = add nsw i32 %tmp31, -1 ; <i32> [#uses=1]
%tmp38 = icmp eq i32 %tmp31, 1 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/IndVarSimplify/pr18223.ll b/llvm/test/Transforms/IndVarSimplify/pr18223.ll
index 738f75c0fe0..f922aa424a1 100644
--- a/llvm/test/Transforms/IndVarSimplify/pr18223.ll
+++ b/llvm/test/Transforms/IndVarSimplify/pr18223.ll
@@ -8,7 +8,7 @@
define i32 @main() #0 {
entry:
- %0 = load i32* @c, align 4
+ %0 = load i32, i32* @c, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.body, label %exit
diff --git a/llvm/test/Transforms/IndVarSimplify/pr20680.ll b/llvm/test/Transforms/IndVarSimplify/pr20680.ll
index 716e013603a..0713f31783a 100644
--- a/llvm/test/Transforms/IndVarSimplify/pr20680.ll
+++ b/llvm/test/Transforms/IndVarSimplify/pr20680.ll
@@ -13,9 +13,9 @@ define void @f() {
; CHECK-NEXT: %[[indvars_iv:.*]] = phi i32 [ %[[indvars_iv_next:.*]], %[[for_inc13:.*]] ], [ -14, %entry ]
; br i1 {{.*}}, label %[[for_inc13]], label %
entry:
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
%tobool2 = icmp eq i32 %0, 0
- %1 = load i32* @a, align 4
+ %1 = load i32, i32* @a, align 4
%tobool = icmp eq i32 %1, 0
br label %for.cond2.preheader
@@ -51,7 +51,7 @@ cond.false.us.us: ; preds = %for.body3.us.us
cond.end.us.us: ; preds = %cond.false.us.us, %for.body3.us.us
%cond.us.us = phi i32 [ %div, %cond.false.us.us ], [ %conv7, %for.body3.us.us ]
- %4 = load i32* @b, align 4
+ %4 = load i32, i32* @b, align 4
%cmp91.us.us = icmp slt i32 %4, 1
br i1 %cmp91.us.us, label %for.inc.lr.ph.us.us, label %for.cond2.loopexit.us.us
@@ -87,7 +87,7 @@ cond.false.us: ; preds = %for.body3.us
cond.end.us: ; preds = %cond.false.us, %for.body3.us
%cond.us = phi i32 [ %div, %cond.false.us ], [ %conv7, %for.body3.us ]
- %6 = load i32* @b, align 4
+ %6 = load i32, i32* @b, align 4
%cmp91.us = icmp slt i32 %6, 1
br i1 %cmp91.us, label %for.inc.lr.ph.us, label %for.cond2.loopexit.us
@@ -133,7 +133,7 @@ cond.false.us4: ; preds = %for.body3.us3
cond.end.us5: ; preds = %cond.false.us4, %for.body3.us3
%cond.us6 = phi i32 [ %div, %cond.false.us4 ], [ %conv7, %for.body3.us3 ]
- %8 = load i32* @b, align 4
+ %8 = load i32, i32* @b, align 4
%cmp91.us7 = icmp slt i32 %8, 1
br i1 %cmp91.us7, label %for.inc.lr.ph.us12, label %for.cond2.loopexit.us11
@@ -177,7 +177,7 @@ cond.false: ; preds = %for.body3
cond.end: ; preds = %cond.false, %for.body3
%cond = phi i32 [ %div, %cond.false ], [ %conv7, %for.body3 ]
- %10 = load i32* @b, align 4
+ %10 = load i32, i32* @b, align 4
%cmp91 = icmp slt i32 %10, 1
br i1 %cmp91, label %for.inc.lr.ph, label %for.cond2.loopexit
diff --git a/llvm/test/Transforms/IndVarSimplify/pr22222.ll b/llvm/test/Transforms/IndVarSimplify/pr22222.ll
index ccdfe538dfa..d1f04906ae4 100644
--- a/llvm/test/Transforms/IndVarSimplify/pr22222.ll
+++ b/llvm/test/Transforms/IndVarSimplify/pr22222.ll
@@ -9,7 +9,7 @@ declare void @abort() #1
; Function Attrs: nounwind ssp uwtable
define i32 @main() {
entry:
- %a.promoted13 = load i32* @a, align 4
+ %a.promoted13 = load i32, i32* @a, align 4
br label %for.cond1.preheader
for.cond1.preheader: ; preds = %entry, %for.end
diff --git a/llvm/test/Transforms/IndVarSimplify/promote-iv-to-eliminate-casts.ll b/llvm/test/Transforms/IndVarSimplify/promote-iv-to-eliminate-casts.ll
index 8396bf5740c..b6765011336 100644
--- a/llvm/test/Transforms/IndVarSimplify/promote-iv-to-eliminate-casts.ll
+++ b/llvm/test/Transforms/IndVarSimplify/promote-iv-to-eliminate-casts.ll
@@ -14,7 +14,7 @@ bb: ; preds = %bb1, %bb.nph
%n.01 = phi i32 [ %t6, %bb1 ], [ 0, %bb.nph ] ; <i32> [#uses=2]
%t1 = sext i32 %n.01 to i64 ; <i64> [#uses=1]
%t2 = getelementptr i64, i64* %first, i64 %t1 ; <i64*> [#uses=1]
- %t3 = load i64* %t2, align 8 ; <i64> [#uses=1]
+ %t3 = load i64, i64* %t2, align 8 ; <i64> [#uses=1]
%t4 = lshr i64 %t3, 4 ; <i64> [#uses=1]
%t5 = add i64 %t4, %result.02 ; <i64> [#uses=2]
%t6 = add i32 %n.01, 1 ; <i32> [#uses=2]
diff --git a/llvm/test/Transforms/IndVarSimplify/sharpen-range.ll b/llvm/test/Transforms/IndVarSimplify/sharpen-range.ll
index cb090f0d490..c103da9cec7 100644
--- a/llvm/test/Transforms/IndVarSimplify/sharpen-range.ll
+++ b/llvm/test/Transforms/IndVarSimplify/sharpen-range.ll
@@ -8,7 +8,7 @@ declare void @abort()
define i1 @bounded_below_slt(i32* nocapture readonly %buffer) {
; CHECK-LABEL: bounded_below_slt
entry:
- %length = load i32* %buffer, !range !0
+ %length = load i32, i32* %buffer, !range !0
%entry.pred = icmp eq i32 %length, 0
br i1 %entry.pred, label %abort, label %loop.preheader
@@ -42,7 +42,7 @@ oob:
define i1 @bounded_below_sle(i32* nocapture readonly %buffer) {
; CHECK-LABEL: bounded_below_sle
entry:
- %length = load i32* %buffer, !range !0
+ %length = load i32, i32* %buffer, !range !0
%entry.pred = icmp eq i32 %length, 0
br i1 %entry.pred, label %abort, label %loop.preheader
diff --git a/llvm/test/Transforms/IndVarSimplify/single-element-range.ll b/llvm/test/Transforms/IndVarSimplify/single-element-range.ll
index 4b035eea149..e047a0b254e 100644
--- a/llvm/test/Transforms/IndVarSimplify/single-element-range.ll
+++ b/llvm/test/Transforms/IndVarSimplify/single-element-range.ll
@@ -8,7 +8,7 @@ entry:
br i1 undef, label %return, label %bb
bb: ; preds = %entry
- %0 = load i8** undef, align 4 ; <i8*> [#uses=2]
+ %0 = load i8*, i8** undef, align 4 ; <i8*> [#uses=2]
%1 = ptrtoint i8* %0 to i32 ; <i32> [#uses=1]
%2 = icmp sgt i8* %0, inttoptr (i32 1 to i8*) ; <i1> [#uses=1]
br i1 %2, label %bb1, label %bb5
diff --git a/llvm/test/Transforms/IndVarSimplify/sink-alloca.ll b/llvm/test/Transforms/IndVarSimplify/sink-alloca.ll
index c7bb003dd27..81794705132 100644
--- a/llvm/test/Transforms/IndVarSimplify/sink-alloca.ll
+++ b/llvm/test/Transforms/IndVarSimplify/sink-alloca.ll
@@ -19,7 +19,7 @@ while.cond: ; preds = %while.cond, %entry
while.end: ; preds = %while.cond
store volatile i32 0, i32* %result.i
- %tmp.i = load volatile i32* %result.i ; <i32> [#uses=0]
+ %tmp.i = load volatile i32, i32* %result.i ; <i32> [#uses=0]
ret i32 0
}
declare i32 @bar()
diff --git a/llvm/test/Transforms/IndVarSimplify/udiv.ll b/llvm/test/Transforms/IndVarSimplify/udiv.ll
index 1925e860c8d..04458ffde60 100644
--- a/llvm/test/Transforms/IndVarSimplify/udiv.ll
+++ b/llvm/test/Transforms/IndVarSimplify/udiv.ll
@@ -18,7 +18,7 @@ entry:
cond.true: ; preds = %entry
%arrayidx = getelementptr inbounds i8*, i8** %argv, i64 1 ; <i8**> [#uses=1]
- %tmp2 = load i8** %arrayidx ; <i8*> [#uses=1]
+ %tmp2 = load i8*, i8** %arrayidx ; <i8*> [#uses=1]
%call = tail call i32 @atoi(i8* %tmp2) nounwind readonly ; <i32> [#uses=1]
br label %while.cond.preheader
@@ -82,7 +82,7 @@ for.body15: ; preds = %bb.nph16, %for.cond
%count.212 = phi i32 [ 0, %bb.nph16 ], [ %count.1, %for.cond12 ] ; <i32> [#uses=2]
%i.17 = phi i64 [ 2, %bb.nph16 ], [ %inc37, %for.cond12 ] ; <i64> [#uses=4]
%arrayidx17 = getelementptr inbounds [8193 x i8], [8193 x i8]* @main.flags, i64 0, i64 %i.17 ; <i8*> [#uses=1]
- %tmp18 = load i8* %arrayidx17 ; <i8> [#uses=1]
+ %tmp18 = load i8, i8* %arrayidx17 ; <i8> [#uses=1]
%tobool19 = icmp eq i8 %tmp18, 0 ; <i1> [#uses=1]
br i1 %tobool19, label %for.inc35, label %if.then
diff --git a/llvm/test/Transforms/IndVarSimplify/uglygep.ll b/llvm/test/Transforms/IndVarSimplify/uglygep.ll
index 2993e8d8051..e4343891be6 100644
--- a/llvm/test/Transforms/IndVarSimplify/uglygep.ll
+++ b/llvm/test/Transforms/IndVarSimplify/uglygep.ll
@@ -27,10 +27,10 @@ bb2: ; preds = %bb1, %bb
br i1 %tmp8, label %bb1, label %bb3
bb1: ; preds = %bb2
- %tmp = load double*** @tds, align 8 ; <double**> [#uses=1]
+ %tmp = load double**, double*** @tds, align 8 ; <double**> [#uses=1]
%tmp1 = sext i32 %i.0 to i64 ; <i64> [#uses=1]
%tmp2 = getelementptr inbounds double*, double** %tmp, i64 %tmp1 ; <double**> [#uses=1]
- %tmp3 = load double** %tmp2, align 1 ; <double*> [#uses=1]
+ %tmp3 = load double*, double** %tmp2, align 1 ; <double*> [#uses=1]
%tmp6 = add nsw i32 %j.0, 1 ; <i32> [#uses=1]
br label %bb2
diff --git a/llvm/test/Transforms/IndVarSimplify/ult-sub-to-eq.ll b/llvm/test/Transforms/IndVarSimplify/ult-sub-to-eq.ll
index 1901f289cf7..642d1ba205f 100644
--- a/llvm/test/Transforms/IndVarSimplify/ult-sub-to-eq.ll
+++ b/llvm/test/Transforms/IndVarSimplify/ult-sub-to-eq.ll
@@ -12,10 +12,10 @@ for.body: ; preds = %entry, %for.body
%add = add i32 %0, %sample
%idxprom = zext i32 %add to i64
%arrayidx = getelementptr inbounds float, float* %data, i64 %idxprom
- %1 = load float* %arrayidx, align 4
+ %1 = load float, float* %arrayidx, align 4
%mul = fmul float %1, %d
%arrayidx2 = getelementptr inbounds float, float* %autoc, i64 %indvars.iv
- %2 = load float* %arrayidx2, align 4
+ %2 = load float, float* %arrayidx2, align 4
%add3 = fadd float %2, %mul
store float %add3, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/IndVarSimplify/use-range-metadata.ll b/llvm/test/Transforms/IndVarSimplify/use-range-metadata.ll
index ea3b12dac39..1f0142608b5 100644
--- a/llvm/test/Transforms/IndVarSimplify/use-range-metadata.ll
+++ b/llvm/test/Transforms/IndVarSimplify/use-range-metadata.ll
@@ -6,7 +6,7 @@ declare void @abort()
define i1 @iterate(i32* nocapture readonly %buffer) {
entry:
- %length = load i32* %buffer, !range !0
+ %length = load i32, i32* %buffer, !range !0
br label %loop.preheader
loop.preheader:
diff --git a/llvm/test/Transforms/IndVarSimplify/variable-stride-ivs-0.ll b/llvm/test/Transforms/IndVarSimplify/variable-stride-ivs-0.ll
index a622c01d3e1..5fa4a17b915 100644
--- a/llvm/test/Transforms/IndVarSimplify/variable-stride-ivs-0.ll
+++ b/llvm/test/Transforms/IndVarSimplify/variable-stride-ivs-0.ll
@@ -7,11 +7,11 @@
define void @vnum_test8(i32* %data) {
entry:
%tmp.1 = getelementptr i32, i32* %data, i32 3 ; <i32*> [#uses=1]
- %tmp.2 = load i32* %tmp.1 ; <i32> [#uses=2]
+ %tmp.2 = load i32, i32* %tmp.1 ; <i32> [#uses=2]
%tmp.4 = getelementptr i32, i32* %data, i32 4 ; <i32*> [#uses=1]
- %tmp.5 = load i32* %tmp.4 ; <i32> [#uses=2]
+ %tmp.5 = load i32, i32* %tmp.4 ; <i32> [#uses=2]
%tmp.8 = getelementptr i32, i32* %data, i32 2 ; <i32*> [#uses=1]
- %tmp.9 = load i32* %tmp.8 ; <i32> [#uses=3]
+ %tmp.9 = load i32, i32* %tmp.8 ; <i32> [#uses=3]
%tmp.125 = icmp sgt i32 %tmp.2, 0 ; <i1> [#uses=1]
br i1 %tmp.125, label %no_exit.preheader, label %return
diff --git a/llvm/test/Transforms/IndVarSimplify/verify-scev.ll b/llvm/test/Transforms/IndVarSimplify/verify-scev.ll
index b9ce3d63ec9..ddf2e7f1e14 100644
--- a/llvm/test/Transforms/IndVarSimplify/verify-scev.ll
+++ b/llvm/test/Transforms/IndVarSimplify/verify-scev.ll
@@ -175,7 +175,7 @@ if.end250: ; preds = %for.cond246
br i1 undef, label %if.end256, label %for.end562
if.end256: ; preds = %if.end250
- %0 = load i32* undef, align 4
+ %0 = load i32, i32* undef, align 4
br i1 undef, label %if.then274, label %for.cond404.preheader
for.cond404.preheader: ; preds = %if.end256
@@ -379,7 +379,7 @@ for.body48: ; preds = %for.inc221, %for.bo
br i1 undef, label %for.inc221, label %for.body65.lr.ph
for.body65.lr.ph: ; preds = %for.body48
- %0 = load i32* undef, align 4
+ %0 = load i32, i32* undef, align 4
%1 = sext i32 %0 to i64
br label %for.body65.us
diff --git a/llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll b/llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll
index 414cae43c48..6be2238ce9a 100644
--- a/llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll
+++ b/llvm/test/Transforms/IndVarSimplify/widen-loop-comp.ll
@@ -19,13 +19,13 @@ target triple = "aarch64--linux-gnu"
define i32 @test1() {
entry:
store i32 -1, i32* @idx, align 4
- %0 = load i32* @e, align 4
+ %0 = load i32, i32* @e, align 4
%cmp4 = icmp slt i32 %0, 0
br i1 %cmp4, label %for.end.loopexit, label %for.body.lr.ph
for.body.lr.ph:
- %1 = load i32** @ptr, align 8
- %2 = load i32* @e, align 4
+ %1 = load i32*, i32** @ptr, align 8
+ %2 = load i32, i32* @e, align 4
br label %for.body
for.cond:
@@ -37,7 +37,7 @@ for.body:
%i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.cond ]
%idxprom = sext i32 %i.05 to i64
%arrayidx = getelementptr inbounds i32, i32* %1, i64 %idxprom
- %3 = load i32* %arrayidx, align 4
+ %3 = load i32, i32* %arrayidx, align 4
%tobool = icmp eq i32 %3, 0
br i1 %tobool, label %if.then, label %for.cond
@@ -53,7 +53,7 @@ for.end.loopexit:
br label %for.end
for.end:
- %4 = load i32* @idx, align 4
+ %4 = load i32, i32* @idx, align 4
ret i32 %4
}
@@ -82,10 +82,10 @@ for.body4.us:
%storemerge14.us = phi i32 [ 0, %for.body4.lr.ph.us ], [ %inc.us, %for.body4.us ]
%idxprom.us = sext i32 %storemerge14.us to i64
%arrayidx6.us = getelementptr inbounds [8 x i8], [8 x i8]* %a, i64 %idxprom5.us, i64 %idxprom.us
- %0 = load i8* %arrayidx6.us, align 1
+ %0 = load i8, i8* %arrayidx6.us, align 1
%idxprom7.us = zext i8 %0 to i64
%arrayidx8.us = getelementptr inbounds i8, i8* %b, i64 %idxprom7.us
- %1 = load i8* %arrayidx8.us, align 1
+ %1 = load i8, i8* %arrayidx8.us, align 1
store i8 %1, i8* %arrayidx6.us, align 1
%inc.us = add nsw i32 %storemerge14.us, 1
%cmp2.us = icmp slt i32 %inc.us, %conv
@@ -127,7 +127,7 @@ for.cond:
for.body:
%idxprom = sext i32 %i.0 to i64
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
br label %for.cond
@@ -181,7 +181,7 @@ for.cond:
for.body:
%idxprom = zext i32 %i.0 to i64
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
br label %for.cond
diff --git a/llvm/test/Transforms/IndVarSimplify/widen-nsw.ll b/llvm/test/Transforms/IndVarSimplify/widen-nsw.ll
index 01aa6f95eb7..8dbbb51ee51 100644
--- a/llvm/test/Transforms/IndVarSimplify/widen-nsw.ll
+++ b/llvm/test/Transforms/IndVarSimplify/widen-nsw.ll
@@ -17,7 +17,7 @@ for.cond: ; preds = %for.body, %entry
for.body: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
br label %for.cond
diff --git a/llvm/test/Transforms/Inline/2006-07-12-InlinePruneCGUpdate.ll b/llvm/test/Transforms/Inline/2006-07-12-InlinePruneCGUpdate.ll
index 69345627221..a0ddacdbd5b 100644
--- a/llvm/test/Transforms/Inline/2006-07-12-InlinePruneCGUpdate.ll
+++ b/llvm/test/Transforms/Inline/2006-07-12-InlinePruneCGUpdate.ll
@@ -214,7 +214,7 @@ bb: ; preds = %cond_true
br label %bb3
bb3: ; preds = %bb, %entry
- %tmp5 = load i8** null ; <i8*> [#uses=1]
+ %tmp5 = load i8*, i8** null ; <i8*> [#uses=1]
%tmp = icmp ne i8* null, %tmp5 ; <i1> [#uses=1]
br i1 %tmp, label %cond_true, label %cond_false
diff --git a/llvm/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll b/llvm/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll
index db2a799225b..1bfb55144f5 100644
--- a/llvm/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll
+++ b/llvm/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll
@@ -10,15 +10,15 @@ entry:
%p = alloca i8* ; <i8**> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store i32 %i, i32* %i_addr
- %0 = load i32* %i_addr, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %i_addr, align 4 ; <i32> [#uses=1]
%1 = alloca i8, i32 %0 ; <i8*> [#uses=1]
store i8* %1, i8** %p, align 4
- %2 = load i8** %p, align 4 ; <i8*> [#uses=1]
+ %2 = load i8*, i8** %p, align 4 ; <i8*> [#uses=1]
store i8* %2, i8** @q, align 4
br label %return
return: ; preds = %entry
- %retval1 = load i8** %retval ; <i8*> [#uses=1]
+ %retval1 = load i8*, i8** %retval ; <i8*> [#uses=1]
ret i8* %retval1
}
@@ -27,7 +27,7 @@ entry:
%i_addr = alloca i32 ; <i32*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store i32 %i, i32* %i_addr
- %0 = load i32* %i_addr, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %i_addr, align 4 ; <i32> [#uses=1]
%1 = call i8* @a(i32 %0) nounwind ; <i8*> [#uses=0]
br label %return
diff --git a/llvm/test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll b/llvm/test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll
index c0fc6912c9f..8d8f20feb73 100644
--- a/llvm/test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll
+++ b/llvm/test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll
@@ -76,22 +76,22 @@ entry:
bb: ; preds = %entry
%0 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 3 ; <%struct.quad_struct**> [#uses=1]
- %1 = load %struct.quad_struct** %0, align 4 ; <%struct.quad_struct*> [#uses=1]
+ %1 = load %struct.quad_struct*, %struct.quad_struct** %0, align 4 ; <%struct.quad_struct*> [#uses=1]
ret %struct.quad_struct* %1
bb1: ; preds = %entry
%2 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 2 ; <%struct.quad_struct**> [#uses=1]
- %3 = load %struct.quad_struct** %2, align 4 ; <%struct.quad_struct*> [#uses=1]
+ %3 = load %struct.quad_struct*, %struct.quad_struct** %2, align 4 ; <%struct.quad_struct*> [#uses=1]
ret %struct.quad_struct* %3
bb2: ; preds = %entry
%4 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 5 ; <%struct.quad_struct**> [#uses=1]
- %5 = load %struct.quad_struct** %4, align 4 ; <%struct.quad_struct*> [#uses=1]
+ %5 = load %struct.quad_struct*, %struct.quad_struct** %4, align 4 ; <%struct.quad_struct*> [#uses=1]
ret %struct.quad_struct* %5
bb3: ; preds = %entry
%6 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 4 ; <%struct.quad_struct**> [#uses=1]
- %7 = load %struct.quad_struct** %6, align 4 ; <%struct.quad_struct*> [#uses=1]
+ %7 = load %struct.quad_struct*, %struct.quad_struct** %6, align 4 ; <%struct.quad_struct*> [#uses=1]
ret %struct.quad_struct* %7
bb5: ; preds = %entry
@@ -101,9 +101,9 @@ bb5: ; preds = %entry
define internal fastcc %struct.quad_struct* @gtequal_adj_neighbor(%struct.quad_struct* nocapture %tree, i32 %d) nounwind readonly {
entry:
%0 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 6 ; <%struct.quad_struct**> [#uses=1]
- %1 = load %struct.quad_struct** %0, align 4 ; <%struct.quad_struct*> [#uses=4]
+ %1 = load %struct.quad_struct*, %struct.quad_struct** %0, align 4 ; <%struct.quad_struct*> [#uses=4]
%2 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 1 ; <i32*> [#uses=1]
- %3 = load i32* %2, align 4 ; <i32> [#uses=2]
+ %3 = load i32, i32* %2, align 4 ; <i32> [#uses=2]
%4 = icmp eq %struct.quad_struct* %1, null ; <i1> [#uses=1]
br i1 %4, label %bb3, label %bb
@@ -123,7 +123,7 @@ bb3: ; preds = %bb1, %bb, %entry
bb4: ; preds = %bb3
%9 = getelementptr %struct.quad_struct, %struct.quad_struct* %q.0, i32 0, i32 0 ; <i32*> [#uses=1]
- %10 = load i32* %9, align 4 ; <i32> [#uses=1]
+ %10 = load i32, i32* %9, align 4 ; <i32> [#uses=1]
%11 = icmp eq i32 %10, 2 ; <i1> [#uses=1]
br i1 %11, label %bb5, label %bb7
@@ -141,27 +141,27 @@ declare fastcc i32 @sum_adjacent(%struct.quad_struct* nocapture, i32, i32, i32)
define i32 @perimeter(%struct.quad_struct* nocapture %tree, i32 %size) nounwind readonly {
entry:
%0 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 0 ; <i32*> [#uses=1]
- %1 = load i32* %0, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
%2 = icmp eq i32 %1, 2 ; <i1> [#uses=1]
br i1 %2, label %bb, label %bb2
bb: ; preds = %entry
%3 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 4 ; <%struct.quad_struct**> [#uses=1]
- %4 = load %struct.quad_struct** %3, align 4 ; <%struct.quad_struct*> [#uses=1]
+ %4 = load %struct.quad_struct*, %struct.quad_struct** %3, align 4 ; <%struct.quad_struct*> [#uses=1]
%5 = sdiv i32 %size, 2 ; <i32> [#uses=1]
%6 = call i32 @perimeter(%struct.quad_struct* %4, i32 %5) nounwind ; <i32> [#uses=1]
%7 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 5 ; <%struct.quad_struct**> [#uses=1]
- %8 = load %struct.quad_struct** %7, align 4 ; <%struct.quad_struct*> [#uses=1]
+ %8 = load %struct.quad_struct*, %struct.quad_struct** %7, align 4 ; <%struct.quad_struct*> [#uses=1]
%9 = sdiv i32 %size, 2 ; <i32> [#uses=1]
%10 = call i32 @perimeter(%struct.quad_struct* %8, i32 %9) nounwind ; <i32> [#uses=1]
%11 = add i32 %10, %6 ; <i32> [#uses=1]
%12 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 3 ; <%struct.quad_struct**> [#uses=1]
- %13 = load %struct.quad_struct** %12, align 4 ; <%struct.quad_struct*> [#uses=1]
+ %13 = load %struct.quad_struct*, %struct.quad_struct** %12, align 4 ; <%struct.quad_struct*> [#uses=1]
%14 = sdiv i32 %size, 2 ; <i32> [#uses=1]
%15 = call i32 @perimeter(%struct.quad_struct* %13, i32 %14) nounwind ; <i32> [#uses=1]
%16 = add i32 %15, %11 ; <i32> [#uses=1]
%17 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 2 ; <%struct.quad_struct**> [#uses=1]
- %18 = load %struct.quad_struct** %17, align 4 ; <%struct.quad_struct*> [#uses=1]
+ %18 = load %struct.quad_struct*, %struct.quad_struct** %17, align 4 ; <%struct.quad_struct*> [#uses=1]
%19 = sdiv i32 %size, 2 ; <i32> [#uses=1]
%20 = call i32 @perimeter(%struct.quad_struct* %18, i32 %19) nounwind ; <i32> [#uses=1]
%21 = add i32 %20, %16 ; <i32> [#uses=1]
@@ -169,7 +169,7 @@ bb: ; preds = %entry
bb2: ; preds = %entry
%22 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 0 ; <i32*> [#uses=1]
- %23 = load i32* %22, align 4 ; <i32> [#uses=1]
+ %23 = load i32, i32* %22, align 4 ; <i32> [#uses=1]
%24 = icmp eq i32 %23, 0 ; <i1> [#uses=1]
br i1 %24, label %bb3, label %bb23
@@ -180,13 +180,13 @@ bb3: ; preds = %bb2
bb4: ; preds = %bb3
%27 = getelementptr %struct.quad_struct, %struct.quad_struct* %25, i32 0, i32 0 ; <i32*> [#uses=1]
- %28 = load i32* %27, align 4 ; <i32> [#uses=1]
+ %28 = load i32, i32* %27, align 4 ; <i32> [#uses=1]
%29 = icmp eq i32 %28, 1 ; <i1> [#uses=1]
br i1 %29, label %bb8, label %bb6
bb6: ; preds = %bb4
%30 = getelementptr %struct.quad_struct, %struct.quad_struct* %25, i32 0, i32 0 ; <i32*> [#uses=1]
- %31 = load i32* %30, align 4 ; <i32> [#uses=1]
+ %31 = load i32, i32* %30, align 4 ; <i32> [#uses=1]
%32 = icmp eq i32 %31, 2 ; <i1> [#uses=1]
br i1 %32, label %bb7, label %bb8
@@ -202,7 +202,7 @@ bb8: ; preds = %bb7, %bb6, %bb4, %bb3
bb9: ; preds = %bb8
%36 = getelementptr %struct.quad_struct, %struct.quad_struct* %34, i32 0, i32 0 ; <i32*> [#uses=1]
- %37 = load i32* %36, align 4 ; <i32> [#uses=1]
+ %37 = load i32, i32* %36, align 4 ; <i32> [#uses=1]
%38 = icmp eq i32 %37, 1 ; <i1> [#uses=1]
br i1 %38, label %bb10, label %bb11
@@ -212,7 +212,7 @@ bb10: ; preds = %bb9, %bb8
bb11: ; preds = %bb9
%40 = getelementptr %struct.quad_struct, %struct.quad_struct* %34, i32 0, i32 0 ; <i32*> [#uses=1]
- %41 = load i32* %40, align 4 ; <i32> [#uses=1]
+ %41 = load i32, i32* %40, align 4 ; <i32> [#uses=1]
%42 = icmp eq i32 %41, 2 ; <i1> [#uses=1]
br i1 %42, label %bb12, label %bb13
@@ -229,7 +229,7 @@ bb13: ; preds = %bb12, %bb11, %bb10
bb14: ; preds = %bb13
%47 = getelementptr %struct.quad_struct, %struct.quad_struct* %45, i32 0, i32 0 ; <i32*> [#uses=1]
- %48 = load i32* %47, align 4 ; <i32> [#uses=1]
+ %48 = load i32, i32* %47, align 4 ; <i32> [#uses=1]
%49 = icmp eq i32 %48, 1 ; <i1> [#uses=1]
br i1 %49, label %bb15, label %bb16
@@ -239,7 +239,7 @@ bb15: ; preds = %bb14, %bb13
bb16: ; preds = %bb14
%51 = getelementptr %struct.quad_struct, %struct.quad_struct* %45, i32 0, i32 0 ; <i32*> [#uses=1]
- %52 = load i32* %51, align 4 ; <i32> [#uses=1]
+ %52 = load i32, i32* %51, align 4 ; <i32> [#uses=1]
%53 = icmp eq i32 %52, 2 ; <i1> [#uses=1]
br i1 %53, label %bb17, label %bb18
@@ -256,7 +256,7 @@ bb18: ; preds = %bb17, %bb16, %bb15
bb19: ; preds = %bb18
%58 = getelementptr %struct.quad_struct, %struct.quad_struct* %56, i32 0, i32 0 ; <i32*> [#uses=1]
- %59 = load i32* %58, align 4 ; <i32> [#uses=1]
+ %59 = load i32, i32* %58, align 4 ; <i32> [#uses=1]
%60 = icmp eq i32 %59, 1 ; <i1> [#uses=1]
br i1 %60, label %bb20, label %bb21
@@ -266,7 +266,7 @@ bb20: ; preds = %bb19, %bb18
bb21: ; preds = %bb19
%62 = getelementptr %struct.quad_struct, %struct.quad_struct* %56, i32 0, i32 0 ; <i32*> [#uses=1]
- %63 = load i32* %62, align 4 ; <i32> [#uses=1]
+ %63 = load i32, i32* %62, align 4 ; <i32> [#uses=1]
%64 = icmp eq i32 %63, 2 ; <i1> [#uses=1]
br i1 %64, label %bb22, label %bb23
diff --git a/llvm/test/Transforms/Inline/align.ll b/llvm/test/Transforms/Inline/align.ll
index 48e6b1b93d3..c91fe804617 100644
--- a/llvm/test/Transforms/Inline/align.ll
+++ b/llvm/test/Transforms/Inline/align.ll
@@ -4,7 +4,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @hello(float* align 128 nocapture %a, float* nocapture readonly %c) #0 {
entry:
- %0 = load float* %c, align 4
+ %0 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx, align 4
ret void
@@ -13,7 +13,7 @@ entry:
define void @foo(float* nocapture %a, float* nocapture readonly %c) #0 {
entry:
tail call void @hello(float* %a, float* %c)
- %0 = load float* %c, align 4
+ %0 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
@@ -25,10 +25,10 @@ entry:
; CHECK: %maskedptr = and i64 %ptrint, 127
; CHECK: %maskcond = icmp eq i64 %maskedptr, 0
; CHECK: call void @llvm.assume(i1 %maskcond)
-; CHECK: %0 = load float* %c, align 4
+; CHECK: %0 = load float, float* %c, align 4
; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i, align 4
-; CHECK: %1 = load float* %c, align 4
+; CHECK: %1 = load float, float* %c, align 4
; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx, align 4
; CHECK: ret void
@@ -37,7 +37,7 @@ entry:
define void @fooa(float* nocapture align 128 %a, float* nocapture readonly %c) #0 {
entry:
tail call void @hello(float* %a, float* %c)
- %0 = load float* %c, align 4
+ %0 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
@@ -45,10 +45,10 @@ entry:
; CHECK: define void @fooa(float* nocapture align 128 %a, float* nocapture readonly %c) #0 {
; CHECK: entry:
-; CHECK: %0 = load float* %c, align 4
+; CHECK: %0 = load float, float* %c, align 4
; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i, align 4
-; CHECK: %1 = load float* %c, align 4
+; CHECK: %1 = load float, float* %c, align 4
; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx, align 4
; CHECK: ret void
@@ -56,7 +56,7 @@ entry:
define void @hello2(float* align 128 nocapture %a, float* align 128 nocapture %b, float* nocapture readonly %c) #0 {
entry:
- %0 = load float* %c, align 4
+ %0 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds float, float* %b, i64 8
@@ -67,7 +67,7 @@ entry:
define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
entry:
tail call void @hello2(float* %a, float* %b, float* %c)
- %0 = load float* %c, align 4
+ %0 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
@@ -83,12 +83,12 @@ entry:
; CHECK: %maskedptr2 = and i64 %ptrint1, 127
; CHECK: %maskcond3 = icmp eq i64 %maskedptr2, 0
; CHECK: call void @llvm.assume(i1 %maskcond3)
-; CHECK: %0 = load float* %c, align 4
+; CHECK: %0 = load float, float* %c, align 4
; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i, align 4
; CHECK: %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
; CHECK: store float %0, float* %arrayidx1.i, align 4
-; CHECK: %1 = load float* %c, align 4
+; CHECK: %1 = load float, float* %c, align 4
; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx, align 4
; CHECK: ret void
diff --git a/llvm/test/Transforms/Inline/alloca-bonus.ll b/llvm/test/Transforms/Inline/alloca-bonus.ll
index bb654da8bc3..649fac909fb 100644
--- a/llvm/test/Transforms/Inline/alloca-bonus.ll
+++ b/llvm/test/Transforms/Inline/alloca-bonus.ll
@@ -15,7 +15,7 @@ define void @outer1() {
}
define void @inner1(i32 *%ptr) {
- %A = load i32* %ptr
+ %A = load i32, i32* %ptr
store i32 0, i32* %ptr
%C = getelementptr inbounds i32, i32* %ptr, i32 0
%D = getelementptr inbounds i32, i32* %ptr, i32 1
@@ -35,7 +35,7 @@ define void @outer2() {
; %D poisons this call, scalar-repl can't handle that instruction.
define void @inner2(i32 *%ptr) {
- %A = load i32* %ptr
+ %A = load i32, i32* %ptr
store i32 0, i32* %ptr
%C = getelementptr inbounds i32, i32* %ptr, i32 0
%D = getelementptr inbounds i32, i32* %ptr, i32 %A
@@ -59,7 +59,7 @@ define void @inner3(i32 *%ptr, i1 %x) {
br i1 %A, label %bb.true, label %bb.false
bb.true:
; This block musn't be counted in the inline cost.
- %t1 = load i32* %ptr
+ %t1 = load i32, i32* %ptr
%t2 = add i32 %t1, 1
%t3 = add i32 %t2, 1
%t4 = add i32 %t3, 1
@@ -100,7 +100,7 @@ define void @inner4(i32 *%ptr, i32 %A) {
br i1 %C, label %bb.true, label %bb.false
bb.true:
; This block musn't be counted in the inline cost.
- %t1 = load i32* %ptr
+ %t1 = load i32, i32* %ptr
%t2 = add i32 %t1, 1
%t3 = add i32 %t2, 1
%t4 = add i32 %t3, 1
@@ -137,7 +137,7 @@ define void @outer5() {
; the flag is set appropriately, the poisoning instruction is inside of dead
; code, and so shouldn't be counted.
define void @inner5(i1 %flag, i32 *%ptr) {
- %A = load i32* %ptr
+ %A = load i32, i32* %ptr
store i32 0, i32* %ptr
%C = getelementptr inbounds i32, i32* %ptr, i32 0
br i1 %flag, label %if.then, label %exit
diff --git a/llvm/test/Transforms/Inline/alloca-dbgdeclare.ll b/llvm/test/Transforms/Inline/alloca-dbgdeclare.ll
index ea698688438..03661a7e551 100644
--- a/llvm/test/Transforms/Inline/alloca-dbgdeclare.ll
+++ b/llvm/test/Transforms/Inline/alloca-dbgdeclare.ll
@@ -43,7 +43,7 @@ entry:
; CHECK-NEXT: call void @llvm.dbg.declare(metadata [20 x i8]* %agg.tmp.sroa.3.i,
%agg.tmp.sroa.3 = alloca [20 x i8], align 4
tail call void @llvm.dbg.declare(metadata [20 x i8]* %agg.tmp.sroa.3, metadata !46, metadata !48), !dbg !49
- %agg.tmp.sroa.0.0.copyload = load i32* getelementptr inbounds (%struct.A* @b, i64 0, i32 0), align 8, !dbg !50
+ %agg.tmp.sroa.0.0.copyload = load i32, i32* getelementptr inbounds (%struct.A* @b, i64 0, i32 0), align 8, !dbg !50
tail call void @llvm.dbg.value(metadata i32 %agg.tmp.sroa.0.0.copyload, i64 0, metadata !46, metadata !51), !dbg !49
%agg.tmp.sroa.3.0..sroa_idx = getelementptr inbounds [20 x i8], [20 x i8]* %agg.tmp.sroa.3, i64 0, i64 0, !dbg !50
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %agg.tmp.sroa.3.0..sroa_idx, i8* getelementptr (i8* bitcast (%struct.A* @b to i8*), i64 4), i64 20, i32 4, i1 false), !dbg !50
diff --git a/llvm/test/Transforms/Inline/alloca-merge-align-nodl.ll b/llvm/test/Transforms/Inline/alloca-merge-align-nodl.ll
index 85d26ac7d23..d51c5626747 100644
--- a/llvm/test/Transforms/Inline/alloca-merge-align-nodl.ll
+++ b/llvm/test/Transforms/Inline/alloca-merge-align-nodl.ll
@@ -8,11 +8,11 @@ define void @foo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 4
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32* %a1, align 4
+ %0 = load i32, i32* %a1, align 4
%arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 4
%b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32* %b, align 4
+ %1 = load i32, i32* %b, align 4
%arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
@@ -23,11 +23,11 @@ define void @foo0(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32]
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32* %a1, align 4
+ %0 = load i32, i32* %a1, align 4
%arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 4
%b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32* %b, align 4
+ %1 = load i32, i32* %b, align 4
%arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
@@ -40,11 +40,11 @@ define void @goo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32* %a1, align 4
+ %0 = load i32, i32* %a1, align 4
%arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 32
%b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32* %b, align 4
+ %1 = load i32, i32* %b, align 4
%arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
diff --git a/llvm/test/Transforms/Inline/alloca-merge-align.ll b/llvm/test/Transforms/Inline/alloca-merge-align.ll
index 84fcffd25e6..ef053a7ed76 100644
--- a/llvm/test/Transforms/Inline/alloca-merge-align.ll
+++ b/llvm/test/Transforms/Inline/alloca-merge-align.ll
@@ -9,11 +9,11 @@ define void @foo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 4
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32* %a1, align 4
+ %0 = load i32, i32* %a1, align 4
%arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 4
%b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32* %b, align 4
+ %1 = load i32, i32* %b, align 4
%arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
@@ -24,11 +24,11 @@ define void @foo0(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32]
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32* %a1, align 4
+ %0 = load i32, i32* %a1, align 4
%arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 4
%b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32* %b, align 4
+ %1 = load i32, i32* %b, align 4
%arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
@@ -39,11 +39,11 @@ define void @foo1(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 1
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32* %a1, align 4
+ %0 = load i32, i32* %a1, align 4
%arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 4
%b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32* %b, align 4
+ %1 = load i32, i32* %b, align 4
%arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
@@ -56,11 +56,11 @@ define void @goo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
%a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
- %0 = load i32* %a1, align 4
+ %0 = load i32, i32* %a1, align 4
%arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 32
%b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
- %1 = load i32* %b, align 4
+ %1 = load i32, i32* %b, align 4
%arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
diff --git a/llvm/test/Transforms/Inline/basictest.ll b/llvm/test/Transforms/Inline/basictest.ll
index a8dbf396139..8f0b3eafaec 100644
--- a/llvm/test/Transforms/Inline/basictest.ll
+++ b/llvm/test/Transforms/Inline/basictest.ll
@@ -38,7 +38,7 @@ define i32 @test2(i1 %cond) {
%B = call %T* @test2f(i1 %cond, %T* %A)
%C = getelementptr %T, %T* %B, i32 0, i32 0
- %D = load i32* %C
+ %D = load i32, i32* %C
ret i32 %D
; CHECK-LABEL: @test2(
diff --git a/llvm/test/Transforms/Inline/byval-tail-call.ll b/llvm/test/Transforms/Inline/byval-tail-call.ll
index 154f3974b58..1e50463ed80 100644
--- a/llvm/test/Transforms/Inline/byval-tail-call.ll
+++ b/llvm/test/Transforms/Inline/byval-tail-call.ll
@@ -31,7 +31,7 @@ define internal void @qux(i32* byval %x) {
define void @frob(i32* %x) {
; CHECK-LABEL: define void @frob(
; CHECK: %[[POS:.*]] = alloca i32
-; CHECK: %[[VAL:.*]] = load i32* %x
+; CHECK: %[[VAL:.*]] = load i32, i32* %x
; CHECK: store i32 %[[VAL]], i32* %[[POS]]
; CHECK: {{^ *}}call void @ext(i32* %[[POS]]
; CHECK: tail call void @ext(i32* null)
diff --git a/llvm/test/Transforms/Inline/byval.ll b/llvm/test/Transforms/Inline/byval.ll
index 39bf1ff52e7..b1455664ade 100644
--- a/llvm/test/Transforms/Inline/byval.ll
+++ b/llvm/test/Transforms/Inline/byval.ll
@@ -8,7 +8,7 @@
define internal void @f(%struct.ss* byval %b) nounwind {
entry:
%tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
- %tmp1 = load i32* %tmp, align 4 ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
%tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
store i32 %tmp2, i32* %tmp, align 4
ret void
@@ -38,7 +38,7 @@ entry:
define internal i32 @f2(%struct.ss* byval %b) nounwind readonly {
entry:
%tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
- %tmp1 = load i32* %tmp, align 4 ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
%tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
ret i32 %tmp2
}
@@ -113,7 +113,7 @@ define internal void @f5(%struct.S0* byval nocapture readonly align 4 %p) {
entry:
store i32 0, i32* getelementptr inbounds (%struct.S0* @b, i64 0, i32 0), align 4
%f2 = getelementptr inbounds %struct.S0, %struct.S0* %p, i64 0, i32 0
- %0 = load i32* %f2, align 4
+ %0 = load i32, i32* %f2, align 4
store i32 %0, i32* @a, align 4
ret void
}
@@ -121,9 +121,9 @@ entry:
define i32 @test5() {
entry:
tail call void @f5(%struct.S0* byval align 4 @b)
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
ret i32 %0
; CHECK: @test5()
; CHECK: store i32 0, i32* getelementptr inbounds (%struct.S0* @b, i64 0, i32 0), align 4
-; CHECK-NOT: load i32* getelementptr inbounds (%struct.S0* @b, i64 0, i32 0), align 4
+; CHECK-NOT: load i32, i32* getelementptr inbounds (%struct.S0* @b, i64 0, i32 0), align 4
}
diff --git a/llvm/test/Transforms/Inline/byval_lifetime.ll b/llvm/test/Transforms/Inline/byval_lifetime.ll
index 60066852ed3..b500eaea331 100644
--- a/llvm/test/Transforms/Inline/byval_lifetime.ll
+++ b/llvm/test/Transforms/Inline/byval_lifetime.ll
@@ -12,7 +12,7 @@ define i32 @foo(%struct.foo* byval align 8 %f, i32 %a) {
entry:
%a1 = getelementptr inbounds %struct.foo, %struct.foo* %f, i32 0, i32 1
%arrayidx = getelementptr inbounds [16 x i32], [16 x i32]* %a1, i32 0, i32 %a
- %tmp2 = load i32* %arrayidx, align 1
+ %tmp2 = load i32, i32* %arrayidx, align 1
ret i32 %tmp2
}
diff --git a/llvm/test/Transforms/Inline/crash2.ll b/llvm/test/Transforms/Inline/crash2.ll
index be634f62563..4c0dfaea036 100644
--- a/llvm/test/Transforms/Inline/crash2.ll
+++ b/llvm/test/Transforms/Inline/crash2.ll
@@ -14,7 +14,7 @@ entry:
%__f_addr = alloca void (i8*)*, align 8
store void (i8*)* %__f, void (i8*)** %__f_addr
- %0 = load void (i8*)** %__f_addr, align 8
+ %0 = load void (i8*)*, void (i8*)** %__f_addr, align 8
call void %0(i8* undef)
call i8* @f1(i8* undef) ssp
unreachable
diff --git a/llvm/test/Transforms/Inline/devirtualize-3.ll b/llvm/test/Transforms/Inline/devirtualize-3.ll
index b49680843fe..76c8150de01 100644
--- a/llvm/test/Transforms/Inline/devirtualize-3.ll
+++ b/llvm/test/Transforms/Inline/devirtualize-3.ll
@@ -17,7 +17,7 @@ entry:
%tmp = getelementptr inbounds %struct.cont_t, %struct.cont_t* %cont, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
%tmp1 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %cont, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=2]
store void (i8*, i32)* bitcast (void (%struct.cont_t*, i32)* @quit to void (i8*, i32)*), void (i8*, i32)** %tmp1
- %tmp2 = load void (i8*, i32)** %tmp1 ; <void (i8*, i32)*> [#uses=1]
+ %tmp2 = load void (i8*, i32)*, void (i8*, i32)** %tmp1 ; <void (i8*, i32)*> [#uses=1]
store void (i8*, i32)* %tmp2, void (i8*, i32)** %tmp
%tmp3 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %cont, i32 0, i32 1 ; <i8**> [#uses=1]
store i8* null, i8** %tmp3
@@ -51,14 +51,14 @@ entry:
define internal void @foo2(%struct.foo_sf_t* %sf, i32 %y) nounwind ssp {
entry:
%tmp1 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
- %tmp2 = load %struct.cont_t** %tmp1 ; <%struct.cont_t*> [#uses=1]
+ %tmp2 = load %struct.cont_t*, %struct.cont_t** %tmp1 ; <%struct.cont_t*> [#uses=1]
%tmp3 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %tmp2, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
- %tmp4 = load void (i8*, i32)** %tmp3 ; <void (i8*, i32)*> [#uses=1]
+ %tmp4 = load void (i8*, i32)*, void (i8*, i32)** %tmp3 ; <void (i8*, i32)*> [#uses=1]
%tmp6 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
- %tmp7 = load %struct.cont_t** %tmp6 ; <%struct.cont_t*> [#uses=1]
+ %tmp7 = load %struct.cont_t*, %struct.cont_t** %tmp6 ; <%struct.cont_t*> [#uses=1]
%conv = bitcast %struct.cont_t* %tmp7 to i8* ; <i8*> [#uses=1]
%tmp9 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 1 ; <i32*> [#uses=1]
- %tmp10 = load i32* %tmp9 ; <i32> [#uses=1]
+ %tmp10 = load i32, i32* %tmp9 ; <i32> [#uses=1]
%mul = mul i32 %tmp10, %y ; <i32> [#uses=1]
call void %tmp4(i8* %conv, i32 %mul)
ret void
@@ -67,9 +67,9 @@ entry:
define internal void @bar(%struct.cont_t* %c, i32 %y) nounwind ssp {
entry:
%tmp1 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %c, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
- %tmp2 = load void (i8*, i32)** %tmp1 ; <void (i8*, i32)*> [#uses=1]
+ %tmp2 = load void (i8*, i32)*, void (i8*, i32)** %tmp1 ; <void (i8*, i32)*> [#uses=1]
%tmp4 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %c, i32 0, i32 1 ; <i8**> [#uses=1]
- %tmp5 = load i8** %tmp4 ; <i8*> [#uses=1]
+ %tmp5 = load i8*, i8** %tmp4 ; <i8*> [#uses=1]
%add = add nsw i32 %y, 5 ; <i32> [#uses=1]
call void %tmp2(i8* %tmp5, i32 %add)
ret void
diff --git a/llvm/test/Transforms/Inline/devirtualize.ll b/llvm/test/Transforms/Inline/devirtualize.ll
index c009e024511..320071ff4e3 100644
--- a/llvm/test/Transforms/Inline/devirtualize.ll
+++ b/llvm/test/Transforms/Inline/devirtualize.ll
@@ -9,7 +9,7 @@ define i32 @foo(i32 ()** noalias %p, i64* noalias %q) nounwind ssp {
entry:
store i32 ()* @bar, i32 ()** %p
store i64 0, i64* %q
- %tmp3 = load i32 ()** %p ; <i32 ()*> [#uses=1]
+ %tmp3 = load i32 ()*, i32 ()** %p ; <i32 ()*> [#uses=1]
%call = call i32 %tmp3() ; <i32> [#uses=1]
%X = add i32 %call, 4
ret i32 %X
@@ -85,9 +85,9 @@ cast.notnull: ; preds = %entry
cast.end: ; preds = %entry, %cast.notnull
%3 = phi %struct.A* [ %2, %cast.notnull ], [ null, %entry ] ; <%struct.A*> [#uses=2]
%4 = bitcast %struct.A* %3 to i32 (%struct.A*)*** ; <i32 (%struct.A*)***> [#uses=1]
- %5 = load i32 (%struct.A*)*** %4 ; <i32 (%struct.A*)**> [#uses=1]
+ %5 = load i32 (%struct.A*)**, i32 (%struct.A*)*** %4 ; <i32 (%struct.A*)**> [#uses=1]
%vfn = getelementptr inbounds i32 (%struct.A*)*, i32 (%struct.A*)** %5, i64 0 ; <i32 (%struct.A*)**> [#uses=1]
- %6 = load i32 (%struct.A*)** %vfn ; <i32 (%struct.A*)*> [#uses=1]
+ %6 = load i32 (%struct.A*)*, i32 (%struct.A*)** %vfn ; <i32 (%struct.A*)*> [#uses=1]
%call = call i32 %6(%struct.A* %3) ; <i32> [#uses=1]
ret i32 %call
}
diff --git a/llvm/test/Transforms/Inline/ephemeral.ll b/llvm/test/Transforms/Inline/ephemeral.ll
index d1135c6f0c3..7e5ae034af3 100644
--- a/llvm/test/Transforms/Inline/ephemeral.ll
+++ b/llvm/test/Transforms/Inline/ephemeral.ll
@@ -3,7 +3,7 @@
@a = global i32 4
define i1 @inner() {
- %a1 = load volatile i32* @a
+ %a1 = load volatile i32, i32* @a
%x1 = add i32 %a1, %a1
%c = icmp eq i32 %x1, 0
diff --git a/llvm/test/Transforms/Inline/gvn-inline-iteration.ll b/llvm/test/Transforms/Inline/gvn-inline-iteration.ll
index 7916a13df99..b87c0609ea2 100644
--- a/llvm/test/Transforms/Inline/gvn-inline-iteration.ll
+++ b/llvm/test/Transforms/Inline/gvn-inline-iteration.ll
@@ -8,7 +8,7 @@ define i32 @foo(i32 ()** noalias nocapture %p, i64* noalias nocapture %q) nounwi
entry:
store i32 ()* @bar, i32 ()** %p
store i64 0, i64* %q
- %tmp3 = load i32 ()** %p ; <i32 ()*> [#uses=1]
+ %tmp3 = load i32 ()*, i32 ()** %p ; <i32 ()*> [#uses=1]
%call = tail call i32 %tmp3() nounwind ; <i32> [#uses=1]
ret i32 %call
}
diff --git a/llvm/test/Transforms/Inline/inline-byval-bonus.ll b/llvm/test/Transforms/Inline/inline-byval-bonus.ll
index 966bce89023..7f0c0e1fcba 100644
--- a/llvm/test/Transforms/Inline/inline-byval-bonus.ll
+++ b/llvm/test/Transforms/Inline/inline-byval-bonus.ll
@@ -30,36 +30,36 @@ declare void @fix(%struct.ray*)
define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture byval align 8 %ray, %struct.spoint* %sp) nounwind uwtable ssp {
%1 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 1, i32 0
- %2 = load double* %1, align 8
+ %2 = load double, double* %1, align 8
%3 = fmul double %2, %2
%4 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 1, i32 1
- %5 = load double* %4, align 8
+ %5 = load double, double* %4, align 8
%6 = fmul double %5, %5
%7 = fadd double %3, %6
%8 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 1, i32 2
- %9 = load double* %8, align 8
+ %9 = load double, double* %8, align 8
%10 = fmul double %9, %9
%11 = fadd double %7, %10
%12 = fmul double %2, 2.000000e+00
%13 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 0, i32 0
- %14 = load double* %13, align 8
+ %14 = load double, double* %13, align 8
%15 = getelementptr inbounds %struct.sphere, %struct.sphere* %sph, i64 0, i32 0, i32 0
- %16 = load double* %15, align 8
+ %16 = load double, double* %15, align 8
%17 = fsub double %14, %16
%18 = fmul double %12, %17
%19 = fmul double %5, 2.000000e+00
%20 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 0, i32 1
- %21 = load double* %20, align 8
+ %21 = load double, double* %20, align 8
%22 = getelementptr inbounds %struct.sphere, %struct.sphere* %sph, i64 0, i32 0, i32 1
- %23 = load double* %22, align 8
+ %23 = load double, double* %22, align 8
%24 = fsub double %21, %23
%25 = fmul double %19, %24
%26 = fadd double %18, %25
%27 = fmul double %9, 2.000000e+00
%28 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 0, i32 2
- %29 = load double* %28, align 8
+ %29 = load double, double* %28, align 8
%30 = getelementptr inbounds %struct.sphere, %struct.sphere* %sph, i64 0, i32 0, i32 2
- %31 = load double* %30, align 8
+ %31 = load double, double* %30, align 8
%32 = fsub double %29, %31
%33 = fmul double %27, %32
%34 = fadd double %26, %33
@@ -83,7 +83,7 @@ define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture by
%52 = fmul double %51, 2.000000e+00
%53 = fadd double %52, %45
%54 = getelementptr inbounds %struct.sphere, %struct.sphere* %sph, i64 0, i32 1
- %55 = load double* %54, align 8
+ %55 = load double, double* %54, align 8
%56 = fmul double %55, %55
%57 = fsub double %53, %56
%58 = fmul double %34, %34
@@ -135,21 +135,21 @@ define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture by
%89 = fadd double %29, %88
%90 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 0, i32 2
store double %89, double* %90, align 8
- %91 = load double* %15, align 8
+ %91 = load double, double* %15, align 8
%92 = fsub double %83, %91
- %93 = load double* %54, align 8
+ %93 = load double, double* %54, align 8
%94 = fdiv double %92, %93
%95 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 1, i32 0
store double %94, double* %95, align 8
- %96 = load double* %22, align 8
+ %96 = load double, double* %22, align 8
%97 = fsub double %86, %96
- %98 = load double* %54, align 8
+ %98 = load double, double* %54, align 8
%99 = fdiv double %97, %98
%100 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 1, i32 1
store double %99, double* %100, align 8
- %101 = load double* %30, align 8
+ %101 = load double, double* %30, align 8
%102 = fsub double %89, %101
- %103 = load double* %54, align 8
+ %103 = load double, double* %54, align 8
%104 = fdiv double %102, %103
%105 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 1, i32 2
store double %104, double* %105, align 8
diff --git a/llvm/test/Transforms/Inline/inline-cold.ll b/llvm/test/Transforms/Inline/inline-cold.ll
index 57433771961..b3c134538a4 100644
--- a/llvm/test/Transforms/Inline/inline-cold.ll
+++ b/llvm/test/Transforms/Inline/inline-cold.ll
@@ -17,29 +17,29 @@
; Function Attrs: nounwind readnone uwtable
define i32 @simpleFunction(i32 %a) #0 {
entry:
- %a1 = load volatile i32* @a
+ %a1 = load volatile i32, i32* @a
%x1 = add i32 %a1, %a1
- %a2 = load volatile i32* @a
+ %a2 = load volatile i32, i32* @a
%x2 = add i32 %x1, %a2
- %a3 = load volatile i32* @a
+ %a3 = load volatile i32, i32* @a
%x3 = add i32 %x2, %a3
- %a4 = load volatile i32* @a
+ %a4 = load volatile i32, i32* @a
%x4 = add i32 %x3, %a4
- %a5 = load volatile i32* @a
+ %a5 = load volatile i32, i32* @a
%x5 = add i32 %x4, %a5
- %a6 = load volatile i32* @a
+ %a6 = load volatile i32, i32* @a
%x6 = add i32 %x5, %a6
- %a7 = load volatile i32* @a
+ %a7 = load volatile i32, i32* @a
%x7 = add i32 %x6, %a6
- %a8 = load volatile i32* @a
+ %a8 = load volatile i32, i32* @a
%x8 = add i32 %x7, %a8
- %a9 = load volatile i32* @a
+ %a9 = load volatile i32, i32* @a
%x9 = add i32 %x8, %a9
- %a10 = load volatile i32* @a
+ %a10 = load volatile i32, i32* @a
%x10 = add i32 %x9, %a10
- %a11 = load volatile i32* @a
+ %a11 = load volatile i32, i32* @a
%x11 = add i32 %x10, %a11
- %a12 = load volatile i32* @a
+ %a12 = load volatile i32, i32* @a
%x12 = add i32 %x11, %a12
%add = add i32 %x12, %a
ret i32 %add
@@ -54,29 +54,29 @@ define i32 @ColdFunction(i32 %a) #1 {
; DEFAULT-LABEL: @ColdFunction
; DEFAULT: ret
entry:
- %a1 = load volatile i32* @a
+ %a1 = load volatile i32, i32* @a
%x1 = add i32 %a1, %a1
- %a2 = load volatile i32* @a
+ %a2 = load volatile i32, i32* @a
%x2 = add i32 %x1, %a2
- %a3 = load volatile i32* @a
+ %a3 = load volatile i32, i32* @a
%x3 = add i32 %x2, %a3
- %a4 = load volatile i32* @a
+ %a4 = load volatile i32, i32* @a
%x4 = add i32 %x3, %a4
- %a5 = load volatile i32* @a
+ %a5 = load volatile i32, i32* @a
%x5 = add i32 %x4, %a5
- %a6 = load volatile i32* @a
+ %a6 = load volatile i32, i32* @a
%x6 = add i32 %x5, %a6
- %a7 = load volatile i32* @a
+ %a7 = load volatile i32, i32* @a
%x7 = add i32 %x6, %a6
- %a8 = load volatile i32* @a
+ %a8 = load volatile i32, i32* @a
%x8 = add i32 %x7, %a8
- %a9 = load volatile i32* @a
+ %a9 = load volatile i32, i32* @a
%x9 = add i32 %x8, %a9
- %a10 = load volatile i32* @a
+ %a10 = load volatile i32, i32* @a
%x10 = add i32 %x9, %a10
- %a11 = load volatile i32* @a
+ %a11 = load volatile i32, i32* @a
%x11 = add i32 %x10, %a11
- %a12 = load volatile i32* @a
+ %a12 = load volatile i32, i32* @a
%x12 = add i32 %x11, %a12
%add = add i32 %x12, %a
ret i32 %add
@@ -91,79 +91,79 @@ define i32 @ColdFunction2(i32 %a) #1 {
; DEFAULT-LABEL: @ColdFunction2
; DEFAULT: ret
entry:
- %a1 = load volatile i32* @a
+ %a1 = load volatile i32, i32* @a
%x1 = add i32 %a1, %a1
- %a2 = load volatile i32* @a
+ %a2 = load volatile i32, i32* @a
%x2 = add i32 %x1, %a2
- %a3 = load volatile i32* @a
+ %a3 = load volatile i32, i32* @a
%x3 = add i32 %x2, %a3
- %a4 = load volatile i32* @a
+ %a4 = load volatile i32, i32* @a
%x4 = add i32 %x3, %a4
- %a5 = load volatile i32* @a
+ %a5 = load volatile i32, i32* @a
%x5 = add i32 %x4, %a5
- %a6 = load volatile i32* @a
+ %a6 = load volatile i32, i32* @a
%x6 = add i32 %x5, %a6
- %a7 = load volatile i32* @a
+ %a7 = load volatile i32, i32* @a
%x7 = add i32 %x6, %a7
- %a8 = load volatile i32* @a
+ %a8 = load volatile i32, i32* @a
%x8 = add i32 %x7, %a8
- %a9 = load volatile i32* @a
+ %a9 = load volatile i32, i32* @a
%x9 = add i32 %x8, %a9
- %a10 = load volatile i32* @a
+ %a10 = load volatile i32, i32* @a
%x10 = add i32 %x9, %a10
- %a11 = load volatile i32* @a
+ %a11 = load volatile i32, i32* @a
%x11 = add i32 %x10, %a11
- %a12 = load volatile i32* @a
+ %a12 = load volatile i32, i32* @a
%x12 = add i32 %x11, %a12
- %a21 = load volatile i32* @a
+ %a21 = load volatile i32, i32* @a
%x21 = add i32 %x12, %a21
- %a22 = load volatile i32* @a
+ %a22 = load volatile i32, i32* @a
%x22 = add i32 %x21, %a22
- %a23 = load volatile i32* @a
+ %a23 = load volatile i32, i32* @a
%x23 = add i32 %x22, %a23
- %a24 = load volatile i32* @a
+ %a24 = load volatile i32, i32* @a
%x24 = add i32 %x23, %a24
- %a25 = load volatile i32* @a
+ %a25 = load volatile i32, i32* @a
%x25 = add i32 %x24, %a25
- %a26 = load volatile i32* @a
+ %a26 = load volatile i32, i32* @a
%x26 = add i32 %x25, %a26
- %a27 = load volatile i32* @a
+ %a27 = load volatile i32, i32* @a
%x27 = add i32 %x26, %a27
- %a28 = load volatile i32* @a
+ %a28 = load volatile i32, i32* @a
%x28 = add i32 %x27, %a28
- %a29 = load volatile i32* @a
+ %a29 = load volatile i32, i32* @a
%x29 = add i32 %x28, %a29
- %a30 = load volatile i32* @a
+ %a30 = load volatile i32, i32* @a
%x30 = add i32 %x29, %a30
- %a31 = load volatile i32* @a
+ %a31 = load volatile i32, i32* @a
%x31 = add i32 %x30, %a31
- %a32 = load volatile i32* @a
+ %a32 = load volatile i32, i32* @a
%x32 = add i32 %x31, %a32
- %a41 = load volatile i32* @a
+ %a41 = load volatile i32, i32* @a
%x41 = add i32 %x32, %a41
- %a42 = load volatile i32* @a
+ %a42 = load volatile i32, i32* @a
%x42 = add i32 %x41, %a42
- %a43 = load volatile i32* @a
+ %a43 = load volatile i32, i32* @a
%x43 = add i32 %x42, %a43
- %a44 = load volatile i32* @a
+ %a44 = load volatile i32, i32* @a
%x44 = add i32 %x43, %a44
- %a45 = load volatile i32* @a
+ %a45 = load volatile i32, i32* @a
%x45 = add i32 %x44, %a45
- %a46 = load volatile i32* @a
+ %a46 = load volatile i32, i32* @a
%x46 = add i32 %x45, %a46
- %a47 = load volatile i32* @a
+ %a47 = load volatile i32, i32* @a
%x47 = add i32 %x46, %a47
- %a48 = load volatile i32* @a
+ %a48 = load volatile i32, i32* @a
%x48 = add i32 %x47, %a48
- %a49 = load volatile i32* @a
+ %a49 = load volatile i32, i32* @a
%x49 = add i32 %x48, %a49
- %a50 = load volatile i32* @a
+ %a50 = load volatile i32, i32* @a
%x50 = add i32 %x49, %a50
- %a51 = load volatile i32* @a
+ %a51 = load volatile i32, i32* @a
%x51 = add i32 %x50, %a51
- %a52 = load volatile i32* @a
+ %a52 = load volatile i32, i32* @a
%x52 = add i32 %x51, %a52
%add = add i32 %x52, %a
diff --git a/llvm/test/Transforms/Inline/inline-fast-math-flags.ll b/llvm/test/Transforms/Inline/inline-fast-math-flags.ll
index 01074f567cf..e5ff8f7bfe2 100644
--- a/llvm/test/Transforms/Inline/inline-fast-math-flags.ll
+++ b/llvm/test/Transforms/Inline/inline-fast-math-flags.ll
@@ -8,15 +8,15 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define float @foo(float* %a, float %b) {
entry:
- %a0 = load float* %a, align 4
+ %a0 = load float, float* %a, align 4
%mul = fmul fast float %a0, %b
%tobool = fcmp une float %mul, 0.000000e+00
br i1 %tobool, label %if.then, label %if.end
if.then: ; preds = %entry
- %a1 = load float* %a, align 8
+ %a1 = load float, float* %a, align 8
%arrayidx1 = getelementptr inbounds float, float* %a, i64 1
- %a2 = load float* %arrayidx1, align 4
+ %a2 = load float, float* %arrayidx1, align 4
%add = fadd fast float %a1, %a2
br label %if.end
diff --git a/llvm/test/Transforms/Inline/inline-fp.ll b/llvm/test/Transforms/Inline/inline-fp.ll
index 4d18ce87a5a..cf42d82379a 100644
--- a/llvm/test/Transforms/Inline/inline-fp.ll
+++ b/llvm/test/Transforms/Inline/inline-fp.ll
@@ -18,19 +18,19 @@ entry:
call void @getY(i32* %responseY, i8* %valueY)
call void @getZ(i32* %responseZ, i8* %valueZ)
- %0 = load i32* %responseX
- %1 = load i8* %valueX
+ %0 = load i32, i32* %responseX
+ %1 = load i8, i8* %valueX
%call = call float @f_nofp(i32 %0, i8 zeroext %1)
- %2 = load i32* %responseZ
- %3 = load i8* %valueZ
+ %2 = load i32, i32* %responseZ
+ %3 = load i8, i8* %valueZ
%call2 = call float @f_nofp(i32 %2, i8 zeroext %3)
%call3 = call float @fabsf(float %call)
%cmp = fcmp ogt float %call3, 0x3FC1EB8520000000
br i1 %cmp, label %if.end12, label %if.else
if.else: ; preds = %entry
- %4 = load i32* %responseY
- %5 = load i8* %valueY
+ %4 = load i32, i32* %responseY
+ %5 = load i8, i8* %valueY
%call1 = call float @f_nofp(i32 %4, i8 zeroext %5)
%call4 = call float @fabsf(float %call1)
%cmp5 = fcmp ogt float %call4, 0x3FC1EB8520000000
@@ -65,19 +65,19 @@ entry:
call void @getY(i32* %responseY, i8* %valueY)
call void @getZ(i32* %responseZ, i8* %valueZ)
- %0 = load i32* %responseX
- %1 = load i8* %valueX
+ %0 = load i32, i32* %responseX
+ %1 = load i8, i8* %valueX
%call = call float @f_hasfp(i32 %0, i8 zeroext %1)
- %2 = load i32* %responseZ
- %3 = load i8* %valueZ
+ %2 = load i32, i32* %responseZ
+ %3 = load i8, i8* %valueZ
%call2 = call float @f_hasfp(i32 %2, i8 zeroext %3)
%call3 = call float @fabsf(float %call)
%cmp = fcmp ogt float %call3, 0x3FC1EB8520000000
br i1 %cmp, label %if.end12, label %if.else
if.else: ; preds = %entry
- %4 = load i32* %responseY
- %5 = load i8* %valueY
+ %4 = load i32, i32* %responseY
+ %5 = load i8, i8* %valueY
%call1 = call float @f_hasfp(i32 %4, i8 zeroext %5)
%call4 = call float @fabsf(float %call1)
%cmp5 = fcmp ogt float %call4, 0x3FC1EB8520000000
diff --git a/llvm/test/Transforms/Inline/inline-invoke-tail.ll b/llvm/test/Transforms/Inline/inline-invoke-tail.ll
index e07752396a3..5fef4baee9e 100644
--- a/llvm/test/Transforms/Inline/inline-invoke-tail.ll
+++ b/llvm/test/Transforms/Inline/inline-invoke-tail.ll
@@ -19,7 +19,7 @@ define i32 @main() {
to label %invcont unwind label %lpad
invcont:
- %retval = load i32* %a, align 4
+ %retval = load i32, i32* %a, align 4
ret i32 %retval
lpad:
diff --git a/llvm/test/Transforms/Inline/inline-optsize.ll b/llvm/test/Transforms/Inline/inline-optsize.ll
index 3ad573a04e4..820e56f7f8e 100644
--- a/llvm/test/Transforms/Inline/inline-optsize.ll
+++ b/llvm/test/Transforms/Inline/inline-optsize.ll
@@ -11,15 +11,15 @@
; This function should be larger than the inline threshold for -Oz (25), but
; smaller than the inline threshold for optsize (75).
define i32 @inner() {
- %a1 = load volatile i32* @a
+ %a1 = load volatile i32, i32* @a
%x1 = add i32 %a1, %a1
- %a2 = load volatile i32* @a
+ %a2 = load volatile i32, i32* @a
%x2 = add i32 %x1, %a2
- %a3 = load volatile i32* @a
+ %a3 = load volatile i32, i32* @a
%x3 = add i32 %x2, %a3
- %a4 = load volatile i32* @a
+ %a4 = load volatile i32, i32* @a
%x4 = add i32 %x3, %a4
- %a5 = load volatile i32* @a
+ %a5 = load volatile i32, i32* @a
%x5 = add i32 %x3, %a5
ret i32 %x5
}
diff --git a/llvm/test/Transforms/Inline/inline_constprop.ll b/llvm/test/Transforms/Inline/inline_constprop.ll
index b59a270468e..de23b6157a7 100644
--- a/llvm/test/Transforms/Inline/inline_constprop.ll
+++ b/llvm/test/Transforms/Inline/inline_constprop.ll
@@ -267,8 +267,8 @@ entry:
br i1 %cmp, label %if.then, label %if.end3
if.then:
- %0 = load i32* %a
- %1 = load i32* %b
+ %0 = load i32, i32* %a
+ %1 = load i32, i32* %b
%cmp1 = icmp eq i32 %0, %1
br i1 %cmp1, label %return, label %if.end3
diff --git a/llvm/test/Transforms/Inline/inline_dbg_declare.ll b/llvm/test/Transforms/Inline/inline_dbg_declare.ll
index c5362fc7601..2f646e02e0c 100644
--- a/llvm/test/Transforms/Inline/inline_dbg_declare.ll
+++ b/llvm/test/Transforms/Inline/inline_dbg_declare.ll
@@ -28,7 +28,7 @@ entry:
%x.addr = alloca float, align 4
store float %x, float* %x.addr, align 4
call void @llvm.dbg.declare(metadata float* %x.addr, metadata !16, metadata !17), !dbg !18
- %0 = load float* %x.addr, align 4, !dbg !19
+ %0 = load float, float* %x.addr, align 4, !dbg !19
ret float %0, !dbg !19
}
@@ -46,15 +46,15 @@ entry:
%dst.addr = alloca float*, align 4
store float* %dst, float** %dst.addr, align 4
call void @llvm.dbg.declare(metadata float** %dst.addr, metadata !20, metadata !17), !dbg !21
- %0 = load float** %dst.addr, align 4, !dbg !22
+ %0 = load float*, float** %dst.addr, align 4, !dbg !22
%arrayidx = getelementptr inbounds float, float* %0, i32 0, !dbg !22
- %1 = load float* %arrayidx, align 4, !dbg !22
+ %1 = load float, float* %arrayidx, align 4, !dbg !22
%call = call float @foo(float %1), !dbg !22
; CHECK-NOT: call float @foo
; CHECK: void @llvm.dbg.declare(metadata float* [[x_addr_i]], metadata [[m23:![0-9]+]], metadata !17), !dbg [[m24:![0-9]+]]
- %2 = load float** %dst.addr, align 4, !dbg !22
+ %2 = load float*, float** %dst.addr, align 4, !dbg !22
%arrayidx1 = getelementptr inbounds float, float* %2, i32 0, !dbg !22
store float %call, float* %arrayidx1, align 4, !dbg !22
ret void, !dbg !23
diff --git a/llvm/test/Transforms/Inline/inline_minisize.ll b/llvm/test/Transforms/Inline/inline_minisize.ll
index d266931eb8e..0bf75d72bd1 100644
--- a/llvm/test/Transforms/Inline/inline_minisize.ll
+++ b/llvm/test/Transforms/Inline/inline_minisize.ll
@@ -8,17 +8,17 @@ entry:
%res = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
- %tmp = load i32* %a.addr, align 4
+ %tmp = load i32, i32* %a.addr, align 4
%idxprom = sext i32 %tmp to i64
- %tmp1 = load i32** @data, align 8
+ %tmp1 = load i32*, i32** @data, align 8
%arrayidx = getelementptr inbounds i32, i32* %tmp1, i64 %idxprom
- %tmp2 = load i32* %arrayidx, align 4
- %tmp3 = load i32* %a.addr, align 4
+ %tmp2 = load i32, i32* %arrayidx, align 4
+ %tmp3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %tmp3, 1
%idxprom1 = sext i32 %add to i64
- %tmp4 = load i32** @data, align 8
+ %tmp4 = load i32*, i32** @data, align 8
%arrayidx2 = getelementptr inbounds i32, i32* %tmp4, i64 %idxprom1
- %tmp5 = load i32* %arrayidx2, align 4
+ %tmp5 = load i32, i32* %arrayidx2, align 4
%mul = mul nsw i32 %tmp2, %tmp5
store i32 %mul, i32* %res, align 4
store i32 0, i32* %i, align 4
@@ -26,21 +26,21 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %tmp6 = load i32* %i, align 4
- %tmp7 = load i32* %res, align 4
+ %tmp6 = load i32, i32* %i, align 4
+ %tmp7 = load i32, i32* %res, align 4
%cmp = icmp slt i32 %tmp6, %tmp7
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %tmp8 = load i32* %i, align 4
+ %tmp8 = load i32, i32* %i, align 4
%idxprom3 = sext i32 %tmp8 to i64
- %tmp9 = load i32** @data, align 8
+ %tmp9 = load i32*, i32** @data, align 8
%arrayidx4 = getelementptr inbounds i32, i32* %tmp9, i64 %idxprom3
call void @fct0(i32* %arrayidx4)
br label %for.inc
for.inc: ; preds = %for.body
- %tmp10 = load i32* %i, align 4
+ %tmp10 = load i32, i32* %i, align 4
%inc = add nsw i32 %tmp10, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
@@ -50,21 +50,21 @@ for.end: ; preds = %for.cond
br label %for.cond5
for.cond5: ; preds = %for.inc10, %for.end
- %tmp11 = load i32* %i, align 4
- %tmp12 = load i32* %res, align 4
+ %tmp11 = load i32, i32* %i, align 4
+ %tmp12 = load i32, i32* %res, align 4
%cmp6 = icmp slt i32 %tmp11, %tmp12
br i1 %cmp6, label %for.body7, label %for.end12
for.body7: ; preds = %for.cond5
- %tmp13 = load i32* %i, align 4
+ %tmp13 = load i32, i32* %i, align 4
%idxprom8 = sext i32 %tmp13 to i64
- %tmp14 = load i32** @data, align 8
+ %tmp14 = load i32*, i32** @data, align 8
%arrayidx9 = getelementptr inbounds i32, i32* %tmp14, i64 %idxprom8
call void @fct0(i32* %arrayidx9)
br label %for.inc10
for.inc10: ; preds = %for.body7
- %tmp15 = load i32* %i, align 4
+ %tmp15 = load i32, i32* %i, align 4
%inc11 = add nsw i32 %tmp15, 1
store i32 %inc11, i32* %i, align 4
br label %for.cond5
@@ -74,27 +74,27 @@ for.end12: ; preds = %for.cond5
br label %for.cond13
for.cond13: ; preds = %for.inc18, %for.end12
- %tmp16 = load i32* %i, align 4
- %tmp17 = load i32* %res, align 4
+ %tmp16 = load i32, i32* %i, align 4
+ %tmp17 = load i32, i32* %res, align 4
%cmp14 = icmp slt i32 %tmp16, %tmp17
br i1 %cmp14, label %for.body15, label %for.end20
for.body15: ; preds = %for.cond13
- %tmp18 = load i32* %i, align 4
+ %tmp18 = load i32, i32* %i, align 4
%idxprom16 = sext i32 %tmp18 to i64
- %tmp19 = load i32** @data, align 8
+ %tmp19 = load i32*, i32** @data, align 8
%arrayidx17 = getelementptr inbounds i32, i32* %tmp19, i64 %idxprom16
call void @fct0(i32* %arrayidx17)
br label %for.inc18
for.inc18: ; preds = %for.body15
- %tmp20 = load i32* %i, align 4
+ %tmp20 = load i32, i32* %i, align 4
%inc19 = add nsw i32 %tmp20, 1
store i32 %inc19, i32* %i, align 4
br label %for.cond13
for.end20: ; preds = %for.cond13
- %tmp21 = load i32* %res, align 4
+ %tmp21 = load i32, i32* %res, align 4
ret i32 %tmp21
}
@@ -106,19 +106,19 @@ entry:
%res = alloca i32, align 4
%i = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
- %tmp = load i32* %a.addr, align 4
+ %tmp = load i32, i32* %a.addr, align 4
%shl = shl i32 %tmp, 1
%idxprom = sext i32 %shl to i64
- %tmp1 = load i32** @data, align 8
+ %tmp1 = load i32*, i32** @data, align 8
%arrayidx = getelementptr inbounds i32, i32* %tmp1, i64 %idxprom
- %tmp2 = load i32* %arrayidx, align 4
- %tmp3 = load i32* %a.addr, align 4
+ %tmp2 = load i32, i32* %arrayidx, align 4
+ %tmp3 = load i32, i32* %a.addr, align 4
%shl1 = shl i32 %tmp3, 1
%add = add nsw i32 %shl1, 13
%idxprom2 = sext i32 %add to i64
- %tmp4 = load i32** @data, align 8
+ %tmp4 = load i32*, i32** @data, align 8
%arrayidx3 = getelementptr inbounds i32, i32* %tmp4, i64 %idxprom2
- %tmp5 = load i32* %arrayidx3, align 4
+ %tmp5 = load i32, i32* %arrayidx3, align 4
%mul = mul nsw i32 %tmp2, %tmp5
store i32 %mul, i32* %res, align 4
store i32 0, i32* %i, align 4
@@ -126,21 +126,21 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %tmp6 = load i32* %i, align 4
- %tmp7 = load i32* %res, align 4
+ %tmp6 = load i32, i32* %i, align 4
+ %tmp7 = load i32, i32* %res, align 4
%cmp = icmp slt i32 %tmp6, %tmp7
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %tmp8 = load i32* %i, align 4
+ %tmp8 = load i32, i32* %i, align 4
%idxprom4 = sext i32 %tmp8 to i64
- %tmp9 = load i32** @data, align 8
+ %tmp9 = load i32*, i32** @data, align 8
%arrayidx5 = getelementptr inbounds i32, i32* %tmp9, i64 %idxprom4
call void @fct0(i32* %arrayidx5)
br label %for.inc
for.inc: ; preds = %for.body
- %tmp10 = load i32* %i, align 4
+ %tmp10 = load i32, i32* %i, align 4
%inc = add nsw i32 %tmp10, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
@@ -150,21 +150,21 @@ for.end: ; preds = %for.cond
br label %for.cond6
for.cond6: ; preds = %for.inc11, %for.end
- %tmp11 = load i32* %i, align 4
- %tmp12 = load i32* %res, align 4
+ %tmp11 = load i32, i32* %i, align 4
+ %tmp12 = load i32, i32* %res, align 4
%cmp7 = icmp slt i32 %tmp11, %tmp12
br i1 %cmp7, label %for.body8, label %for.end13
for.body8: ; preds = %for.cond6
- %tmp13 = load i32* %i, align 4
+ %tmp13 = load i32, i32* %i, align 4
%idxprom9 = sext i32 %tmp13 to i64
- %tmp14 = load i32** @data, align 8
+ %tmp14 = load i32*, i32** @data, align 8
%arrayidx10 = getelementptr inbounds i32, i32* %tmp14, i64 %idxprom9
call void @fct0(i32* %arrayidx10)
br label %for.inc11
for.inc11: ; preds = %for.body8
- %tmp15 = load i32* %i, align 4
+ %tmp15 = load i32, i32* %i, align 4
%inc12 = add nsw i32 %tmp15, 1
store i32 %inc12, i32* %i, align 4
br label %for.cond6
@@ -174,27 +174,27 @@ for.end13: ; preds = %for.cond6
br label %for.cond14
for.cond14: ; preds = %for.inc19, %for.end13
- %tmp16 = load i32* %i, align 4
- %tmp17 = load i32* %res, align 4
+ %tmp16 = load i32, i32* %i, align 4
+ %tmp17 = load i32, i32* %res, align 4
%cmp15 = icmp slt i32 %tmp16, %tmp17
br i1 %cmp15, label %for.body16, label %for.end21
for.body16: ; preds = %for.cond14
- %tmp18 = load i32* %i, align 4
+ %tmp18 = load i32, i32* %i, align 4
%idxprom17 = sext i32 %tmp18 to i64
- %tmp19 = load i32** @data, align 8
+ %tmp19 = load i32*, i32** @data, align 8
%arrayidx18 = getelementptr inbounds i32, i32* %tmp19, i64 %idxprom17
call void @fct0(i32* %arrayidx18)
br label %for.inc19
for.inc19: ; preds = %for.body16
- %tmp20 = load i32* %i, align 4
+ %tmp20 = load i32, i32* %i, align 4
%inc20 = add nsw i32 %tmp20, 1
store i32 %inc20, i32* %i, align 4
br label %for.cond14
for.end21: ; preds = %for.cond14
- %tmp21 = load i32* %res, align 4
+ %tmp21 = load i32, i32* %res, align 4
ret i32 %tmp21
}
@@ -206,9 +206,9 @@ entry:
;CHECK-NOT: call i32 @fct2
%c.addr = alloca i32, align 4
store i32 %c, i32* %c.addr, align 4
- %tmp = load i32* %c.addr, align 4
+ %tmp = load i32, i32* %c.addr, align 4
%call = call i32 @fct1(i32 %tmp)
- %tmp1 = load i32* %c.addr, align 4
+ %tmp1 = load i32, i32* %c.addr, align 4
%call1 = call i32 @fct2(i32 %tmp1)
%add = add nsw i32 %call, %call1
ret i32 %add
@@ -223,9 +223,9 @@ entry:
;CHECK: call i32 @fct2
%c.addr = alloca i32, align 4
store i32 %c, i32* %c.addr, align 4
- %tmp = load i32* %c.addr, align 4
+ %tmp = load i32, i32* %c.addr, align 4
%call = call i32 @fct1(i32 %tmp)
- %tmp1 = load i32* %c.addr, align 4
+ %tmp1 = load i32, i32* %c.addr, align 4
%call1 = call i32 @fct2(i32 %tmp1)
%add = add nsw i32 %call, %call1
ret i32 %add
diff --git a/llvm/test/Transforms/Inline/invoke-combine-clauses.ll b/llvm/test/Transforms/Inline/invoke-combine-clauses.ll
index 5f06039b9ed..89a4cc951c7 100644
--- a/llvm/test/Transforms/Inline/invoke-combine-clauses.ll
+++ b/llvm/test/Transforms/Inline/invoke-combine-clauses.ll
@@ -20,7 +20,7 @@ cont:
lpad:
%lp = landingpad i32 personality i8* null
catch i8* @exception_inner
- %cond = load i1* @condition
+ %cond = load i1, i1* @condition
br i1 %cond, label %resume1, label %resume2
resume1:
resume i32 1
diff --git a/llvm/test/Transforms/Inline/noalias-cs.ll b/llvm/test/Transforms/Inline/noalias-cs.ll
index bf1babdd89b..8528a391cf9 100644
--- a/llvm/test/Transforms/Inline/noalias-cs.ll
+++ b/llvm/test/Transforms/Inline/noalias-cs.ll
@@ -5,12 +5,12 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
entry:
- %0 = load float* %c, align 4, !noalias !3
+ %0 = load float, float* %c, align 4, !noalias !3
%arrayidx.i = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx.i, align 4, !alias.scope !7, !noalias !8
%arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
store float %0, float* %arrayidx1.i, align 4, !alias.scope !8, !noalias !7
- %1 = load float* %c, align 4
+ %1 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %1, float* %arrayidx, align 4
ret void
@@ -25,20 +25,20 @@ entry:
; CHECK: define void @foo(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
; CHECK: entry:
-; CHECK: %0 = load float* %c, align 4, !noalias !6
+; CHECK: %0 = load float, float* %c, align 4, !noalias !6
; CHECK: %arrayidx.i.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i.i, align 4, !alias.scope !12, !noalias !13
; CHECK: %arrayidx1.i.i = getelementptr inbounds float, float* %b, i64 8
; CHECK: store float %0, float* %arrayidx1.i.i, align 4, !alias.scope !14, !noalias !15
-; CHECK: %1 = load float* %c, align 4, !noalias !16
+; CHECK: %1 = load float, float* %c, align 4, !noalias !16
; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx.i, align 4, !noalias !16
-; CHECK: %2 = load float* %a, align 4, !alias.scope !16, !noalias !17
+; CHECK: %2 = load float, float* %a, align 4, !alias.scope !16, !noalias !17
; CHECK: %arrayidx.i.i1 = getelementptr inbounds float, float* %b, i64 5
; CHECK: store float %2, float* %arrayidx.i.i1, align 4, !alias.scope !21, !noalias !22
; CHECK: %arrayidx1.i.i2 = getelementptr inbounds float, float* %b, i64 8
; CHECK: store float %2, float* %arrayidx1.i.i2, align 4, !alias.scope !23, !noalias !24
-; CHECK: %3 = load float* %a, align 4, !alias.scope !16
+; CHECK: %3 = load float, float* %a, align 4, !alias.scope !16
; CHECK: %arrayidx.i3 = getelementptr inbounds float, float* %b, i64 7
; CHECK: store float %3, float* %arrayidx.i3, align 4, !alias.scope !16
; CHECK: ret void
diff --git a/llvm/test/Transforms/Inline/noalias.ll b/llvm/test/Transforms/Inline/noalias.ll
index f48aa3d3c31..27e53afc2a7 100644
--- a/llvm/test/Transforms/Inline/noalias.ll
+++ b/llvm/test/Transforms/Inline/noalias.ll
@@ -4,7 +4,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @hello(float* noalias nocapture %a, float* nocapture readonly %c) #0 {
entry:
- %0 = load float* %c, align 4
+ %0 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx, align 4
ret void
@@ -13,7 +13,7 @@ entry:
define void @foo(float* nocapture %a, float* nocapture readonly %c) #0 {
entry:
tail call void @hello(float* %a, float* %c)
- %0 = load float* %c, align 4
+ %0 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
@@ -21,10 +21,10 @@ entry:
; CHECK: define void @foo(float* nocapture %a, float* nocapture readonly %c) #0 {
; CHECK: entry:
-; CHECK: %0 = load float* %c, align 4, !noalias !0
+; CHECK: %0 = load float, float* %c, align 4, !noalias !0
; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i, align 4, !alias.scope !0
-; CHECK: %1 = load float* %c, align 4
+; CHECK: %1 = load float, float* %c, align 4
; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx, align 4
; CHECK: ret void
@@ -32,7 +32,7 @@ entry:
define void @hello2(float* noalias nocapture %a, float* noalias nocapture %b, float* nocapture readonly %c) #0 {
entry:
- %0 = load float* %c, align 4
+ %0 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds float, float* %b, i64 8
@@ -43,7 +43,7 @@ entry:
define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
entry:
tail call void @hello2(float* %a, float* %b, float* %c)
- %0 = load float* %c, align 4
+ %0 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
@@ -51,12 +51,12 @@ entry:
; CHECK: define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
; CHECK: entry:
-; CHECK: %0 = load float* %c, align 4, !noalias !3
+; CHECK: %0 = load float, float* %c, align 4, !noalias !3
; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i, align 4, !alias.scope !7, !noalias !8
; CHECK: %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
; CHECK: store float %0, float* %arrayidx1.i, align 4, !alias.scope !8, !noalias !7
-; CHECK: %1 = load float* %c, align 4
+; CHECK: %1 = load float, float* %c, align 4
; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx, align 4
; CHECK: ret void
diff --git a/llvm/test/Transforms/Inline/noalias2.ll b/llvm/test/Transforms/Inline/noalias2.ll
index a785329a935..432fccf431c 100644
--- a/llvm/test/Transforms/Inline/noalias2.ll
+++ b/llvm/test/Transforms/Inline/noalias2.ll
@@ -4,7 +4,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @hello(float* noalias nocapture %a, float* noalias nocapture readonly %c) #0 {
entry:
- %0 = load float* %c, align 4
+ %0 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx, align 4
ret void
@@ -13,7 +13,7 @@ entry:
define void @foo(float* noalias nocapture %a, float* noalias nocapture readonly %c) #0 {
entry:
tail call void @hello(float* %a, float* %c)
- %0 = load float* %c, align 4
+ %0 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
@@ -21,10 +21,10 @@ entry:
; CHECK: define void @foo(float* noalias nocapture %a, float* noalias nocapture readonly %c) #0 {
; CHECK: entry:
-; CHECK: %0 = load float* %c, align 4, !alias.scope !0, !noalias !3
+; CHECK: %0 = load float, float* %c, align 4, !alias.scope !0, !noalias !3
; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i, align 4, !alias.scope !3, !noalias !0
-; CHECK: %1 = load float* %c, align 4
+; CHECK: %1 = load float, float* %c, align 4
; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx, align 4
; CHECK: ret void
@@ -32,7 +32,7 @@ entry:
define void @hello2(float* noalias nocapture %a, float* noalias nocapture %b, float* nocapture readonly %c) #0 {
entry:
- %0 = load float* %c, align 4
+ %0 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 6
store float %0, float* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds float, float* %b, i64 8
@@ -46,7 +46,7 @@ define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture rea
entry:
tail call void @foo(float* %a, float* %c)
tail call void @hello2(float* %a, float* %b, float* %c)
- %0 = load float* %c, align 4
+ %0 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
@@ -54,18 +54,18 @@ entry:
; CHECK: define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
; CHECK: entry:
-; CHECK: %0 = load float* %c, align 4, !alias.scope !5, !noalias !10
+; CHECK: %0 = load float, float* %c, align 4, !alias.scope !5, !noalias !10
; CHECK: %arrayidx.i.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i.i, align 4, !alias.scope !10, !noalias !5
-; CHECK: %1 = load float* %c, align 4, !alias.scope !13, !noalias !14
+; CHECK: %1 = load float, float* %c, align 4, !alias.scope !13, !noalias !14
; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx.i, align 4, !alias.scope !14, !noalias !13
-; CHECK: %2 = load float* %c, align 4, !noalias !15
+; CHECK: %2 = load float, float* %c, align 4, !noalias !15
; CHECK: %arrayidx.i1 = getelementptr inbounds float, float* %a, i64 6
; CHECK: store float %2, float* %arrayidx.i1, align 4, !alias.scope !19, !noalias !20
; CHECK: %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
; CHECK: store float %2, float* %arrayidx1.i, align 4, !alias.scope !20, !noalias !19
-; CHECK: %3 = load float* %c, align 4
+; CHECK: %3 = load float, float* %c, align 4
; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %3, float* %arrayidx, align 4
; CHECK: ret void
diff --git a/llvm/test/Transforms/Inline/optimization-remarks.ll b/llvm/test/Transforms/Inline/optimization-remarks.ll
index fb1b0478888..8a3e4d1b1fe 100644
--- a/llvm/test/Transforms/Inline/optimization-remarks.ll
+++ b/llvm/test/Transforms/Inline/optimization-remarks.ll
@@ -12,8 +12,8 @@ entry:
%y.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
store i32 %y, i32* %y.addr, align 4
- %0 = load i32* %x.addr, align 4
- %1 = load i32* %y.addr, align 4
+ %0 = load i32, i32* %x.addr, align 4
+ %1 = load i32, i32* %y.addr, align 4
%add = add nsw i32 %0, %1
ret i32 %add
}
@@ -25,8 +25,8 @@ entry:
%y.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
store i32 %y, i32* %y.addr, align 4
- %0 = load i32* %x.addr, align 4
- %1 = load i32* %y.addr, align 4
+ %0 = load i32, i32* %x.addr, align 4
+ %1 = load i32, i32* %y.addr, align 4
%mul = mul nsw i32 %0, %1
%conv = sitofp i32 %mul to float
ret float %conv
@@ -37,14 +37,14 @@ define i32 @bar(i32 %j) #2 {
entry:
%j.addr = alloca i32, align 4
store i32 %j, i32* %j.addr, align 4
- %0 = load i32* %j.addr, align 4
- %1 = load i32* %j.addr, align 4
+ %0 = load i32, i32* %j.addr, align 4
+ %1 = load i32, i32* %j.addr, align 4
%sub = sub nsw i32 %1, 2
%call = call i32 @foo(i32 %0, i32 %sub)
%conv = sitofp i32 %call to float
- %2 = load i32* %j.addr, align 4
+ %2 = load i32, i32* %j.addr, align 4
%sub1 = sub nsw i32 %2, 2
- %3 = load i32* %j.addr, align 4
+ %3 = load i32, i32* %j.addr, align 4
%call2 = call float @foz(i32 %sub1, i32 %3)
%mul = fmul float %conv, %call2
%conv3 = fptosi float %mul to i32
diff --git a/llvm/test/Transforms/Inline/ptr-diff.ll b/llvm/test/Transforms/Inline/ptr-diff.ll
index 8388e4f6e38..728f3793ee2 100644
--- a/llvm/test/Transforms/Inline/ptr-diff.ll
+++ b/llvm/test/Transforms/Inline/ptr-diff.ll
@@ -25,7 +25,7 @@ then:
ret i32 3
else:
- %t = load i32* %begin
+ %t = load i32, i32* %begin
ret i32 %t
}
@@ -53,7 +53,7 @@ then:
ret i32 3
else:
- %t = load i32* %begin
+ %t = load i32, i32* %begin
ret i32 %t
}
@@ -63,9 +63,9 @@ define i32 @inttoptr_free_cost(i32 %a, i32 %b, i32 %c) {
%p1 = inttoptr i32 %a to i32 addrspace(1)*
%p2 = inttoptr i32 %b to i32 addrspace(1)*
%p3 = inttoptr i32 %c to i32 addrspace(1)*
- %t1 = load i32 addrspace(1)* %p1
- %t2 = load i32 addrspace(1)* %p2
- %t3 = load i32 addrspace(1)* %p3
+ %t1 = load i32, i32 addrspace(1)* %p1
+ %t2 = load i32, i32 addrspace(1)* %p2
+ %t3 = load i32, i32 addrspace(1)* %p3
%s = add i32 %t1, %t2
%s1 = add i32 %s, %t3
ret i32 %s1
@@ -84,9 +84,9 @@ define i32 @inttoptr_cost_smaller_ptr(i32 %a, i32 %b, i32 %c) {
%p1 = inttoptr i32 %a to i32 addrspace(2)*
%p2 = inttoptr i32 %b to i32 addrspace(2)*
%p3 = inttoptr i32 %c to i32 addrspace(2)*
- %t1 = load i32 addrspace(2)* %p1
- %t2 = load i32 addrspace(2)* %p2
- %t3 = load i32 addrspace(2)* %p3
+ %t1 = load i32, i32 addrspace(2)* %p1
+ %t2 = load i32, i32 addrspace(2)* %p2
+ %t3 = load i32, i32 addrspace(2)* %p3
%s = add i32 %t1, %t2
%s1 = add i32 %s, %t3
ret i32 %s1
diff --git a/llvm/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll b/llvm/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll
index f550c8349f8..b3815458089 100644
--- a/llvm/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll
+++ b/llvm/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll
@@ -31,14 +31,14 @@ entry:
%a.0 = alloca i32 ; <i32*> [#uses=2]
%result = alloca i32 ; <i32*> [#uses=2]
store i32 %a.1, i32* %a.0
- %tmp.0 = load i32* %a.0 ; <i32> [#uses=1]
- %tmp.1 = load i32* @silly ; <i32> [#uses=1]
+ %tmp.0 = load i32, i32* %a.0 ; <i32> [#uses=1]
+ %tmp.1 = load i32, i32* @silly ; <i32> [#uses=1]
%tmp.2 = add i32 %tmp.0, %tmp.1 ; <i32> [#uses=1]
store i32 %tmp.2, i32* %result
br label %return
return: ; preds = %entry
- %tmp.3 = load i32* %result ; <i32> [#uses=1]
+ %tmp.3 = load i32, i32* %result ; <i32> [#uses=1]
ret i32 %tmp.3
}
diff --git a/llvm/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll b/llvm/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll
index 7f7390809c7..3e0cf1247d9 100644
--- a/llvm/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll
+++ b/llvm/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll
@@ -2,6 +2,6 @@
define void @test(i32* %P) {
; Dead but not deletable!
- %X = load volatile i32* %P ; <i32> [#uses=0]
+ %X = load volatile i32, i32* %P ; <i32> [#uses=0]
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll b/llvm/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll
index 6df30c7e7fd..a086c015650 100644
--- a/llvm/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll
+++ b/llvm/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll
@@ -19,7 +19,7 @@ call: ; preds = %entry
cont: ; preds = %call, %entry
%P2 = phi i32* [ %P, %call ], [ null, %entry ] ; <i32*> [#uses=1]
- %V = load i32* %P2 ; <i32> [#uses=1]
+ %V = load i32, i32* %P2 ; <i32> [#uses=1]
ret i32 %V
N: ; preds = %call
diff --git a/llvm/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll b/llvm/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll
index 8b549374a70..ff855dc13e5 100644
--- a/llvm/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll
+++ b/llvm/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll
@@ -4,7 +4,7 @@
define i32 @test(%Ty* %X) {
%Y = bitcast %Ty* %X to i32* ; <i32*> [#uses=1]
- %Z = load i32* %Y ; <i32> [#uses=1]
+ %Z = load i32, i32* %Y ; <i32> [#uses=1]
ret i32 %Z
}
diff --git a/llvm/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.ll b/llvm/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.ll
index d17db8d7eac..10122e48ab6 100644
--- a/llvm/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.ll
+++ b/llvm/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.ll
@@ -12,7 +12,7 @@ define i32 @test(i1 %C) {
store i32 2, i32* %X2
%Y = select i1 %C, i32* %X, i32* %X2 ; <i32*> [#uses=1]
store i32 3, i32* %X
- %Z = load i32* %Y ; <i32> [#uses=1]
+ %Z = load i32, i32* %Y ; <i32> [#uses=1]
ret i32 %Z
}
diff --git a/llvm/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.ll b/llvm/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.ll
index 0d5fc810a8a..981a4f301a9 100644
--- a/llvm/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.ll
+++ b/llvm/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.ll
@@ -18,7 +18,7 @@ cond_true.i: ; preds = %entry
cond_continue.i: ; preds = %cond_true.i, %entry
%mem_tmp.i.0 = phi i32* [ %X, %cond_true.i ], [ %X2, %entry ] ; <i32*> [#uses=1]
store i32 3, i32* %X
- %tmp.3 = load i32* %mem_tmp.i.0 ; <i32> [#uses=1]
+ %tmp.3 = load i32, i32* %mem_tmp.i.0 ; <i32> [#uses=1]
ret i32 %tmp.3
}
diff --git a/llvm/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll b/llvm/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll
index 0a513c647c1..9c989b9ecf8 100644
--- a/llvm/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll
+++ b/llvm/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll
@@ -5,7 +5,7 @@
@g_07918478 = external global i32 ; <i32*> [#uses=1]
define i1 @test() {
- %tmp.0 = load i32* @g_07918478 ; <i32> [#uses=2]
+ %tmp.0 = load i32, i32* @g_07918478 ; <i32> [#uses=2]
%tmp.1 = icmp ne i32 %tmp.0, 0 ; <i1> [#uses=1]
%tmp.4 = icmp ult i32 %tmp.0, 4111 ; <i1> [#uses=1]
%bothcond = or i1 %tmp.1, %tmp.4 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/2006-09-15-CastToBool.ll b/llvm/test/Transforms/InstCombine/2006-09-15-CastToBool.ll
index ee261ced586..2eb28a3f6bb 100644
--- a/llvm/test/Transforms/InstCombine/2006-09-15-CastToBool.ll
+++ b/llvm/test/Transforms/InstCombine/2006-09-15-CastToBool.ll
@@ -2,7 +2,7 @@
; PR913
define i32 @test(i32* %tmp1) {
- %tmp.i = load i32* %tmp1 ; <i32> [#uses=1]
+ %tmp.i = load i32, i32* %tmp1 ; <i32> [#uses=1]
%tmp = bitcast i32 %tmp.i to i32 ; <i32> [#uses=1]
%tmp2.ui = lshr i32 %tmp, 5 ; <i32> [#uses=1]
%tmp2 = bitcast i32 %tmp2.ui to i32 ; <i32> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll b/llvm/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll
index f906e5e9a44..60113fb5620 100644
--- a/llvm/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll
+++ b/llvm/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll
@@ -23,13 +23,13 @@ entry:
%tmp.upgrd.2 = icmp eq i32 %direction, 0 ; <i1> [#uses=1]
%tmp5 = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
%tmp6 = getelementptr { i64 }, { i64 }* %tmp5, i64 0, i32 0 ; <i64*> [#uses=1]
- %tmp.upgrd.3 = load i64* %tmp6 ; <i64> [#uses=1]
+ %tmp.upgrd.3 = load i64, i64* %tmp6 ; <i64> [#uses=1]
%tmp7 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
%tmp8 = getelementptr { i64 }, { i64 }* %tmp7, i64 0, i32 0 ; <i64*> [#uses=1]
- %tmp9 = load i64* %tmp8 ; <i64> [#uses=1]
+ %tmp9 = load i64, i64* %tmp8 ; <i64> [#uses=1]
%tmp10 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
%tmp11 = getelementptr { i64 }, { i64 }* %tmp10, i64 0, i32 0 ; <i64*> [#uses=1]
- %tmp12 = load i64* %tmp11 ; <i64> [#uses=1]
+ %tmp12 = load i64, i64* %tmp11 ; <i64> [#uses=1]
%tmp13 = call i32 @determinant( i64 %tmp.upgrd.3, i64 %tmp9, i64 %tmp12 ) ; <i32> [#uses=2]
br i1 %tmp.upgrd.2, label %cond_true, label %cond_false
diff --git a/llvm/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll b/llvm/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll
index 1e5acfda500..f54416d1425 100644
--- a/llvm/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll
+++ b/llvm/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll
@@ -23,13 +23,13 @@ entry:
%tmp.upgrd.2 = icmp eq i32 %direction, 0 ; <i1> [#uses=1]
%tmp5 = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
%tmp6 = getelementptr { i64 }, { i64 }* %tmp5, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp.upgrd.3 = load i64* %tmp6 ; <i64> [#uses=1]
+ %tmp.upgrd.3 = load i64, i64* %tmp6 ; <i64> [#uses=1]
%tmp7 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
%tmp8 = getelementptr { i64 }, { i64 }* %tmp7, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp9 = load i64* %tmp8 ; <i64> [#uses=1]
+ %tmp9 = load i64, i64* %tmp8 ; <i64> [#uses=1]
%tmp10 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
%tmp11 = getelementptr { i64 }, { i64 }* %tmp10, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp12 = load i64* %tmp11 ; <i64> [#uses=1]
+ %tmp12 = load i64, i64* %tmp11 ; <i64> [#uses=1]
%tmp13 = call i32 @determinant( i64 %tmp.upgrd.3, i64 %tmp9, i64 %tmp12 ) ; <i32> [#uses=2]
%tmp14 = icmp slt i32 %tmp13, 0 ; <i1> [#uses=1]
%tmp26 = icmp sgt i32 %tmp13, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/2006-12-15-Range-Test.ll b/llvm/test/Transforms/InstCombine/2006-12-15-Range-Test.ll
index cee66101038..784b3e4fe68 100644
--- a/llvm/test/Transforms/InstCombine/2006-12-15-Range-Test.ll
+++ b/llvm/test/Transforms/InstCombine/2006-12-15-Range-Test.ll
@@ -22,7 +22,7 @@ cond_next23.exitStub: ; preds = %cond_true
cond_true: ; preds = %newFuncRoot
%tmp15 = getelementptr [17 x i32], [17 x i32]* @r, i32 0, i32 %tmp12.reload ; <i32*> [#uses=1]
- %tmp16 = load i32* %tmp15 ; <i32> [#uses=4]
+ %tmp16 = load i32, i32* %tmp15 ; <i32> [#uses=4]
%tmp18 = icmp slt i32 %tmp16, -31 ; <i1> [#uses=1]
%tmp21 = icmp sgt i32 %tmp16, 31 ; <i1> [#uses=1]
%bothcond = or i1 %tmp18, %tmp21 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll b/llvm/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll
index eba1ac1298c..9251e9b455d 100644
--- a/llvm/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll
+++ b/llvm/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll
@@ -14,9 +14,9 @@ target triple = "i686-pc-linux-gnu"
define void @mng_write_basi() {
entry:
- %tmp = load i8* null ; <i8> [#uses=1]
+ %tmp = load i8, i8* null ; <i8> [#uses=1]
%tmp.upgrd.1 = icmp ugt i8 %tmp, 8 ; <i1> [#uses=1]
- %tmp.upgrd.2 = load i16* null ; <i16> [#uses=2]
+ %tmp.upgrd.2 = load i16, i16* null ; <i16> [#uses=2]
%tmp3 = icmp eq i16 %tmp.upgrd.2, 255 ; <i1> [#uses=1]
%tmp7 = icmp eq i16 %tmp.upgrd.2, -1 ; <i1> [#uses=1]
%bOpaque.0.in = select i1 %tmp.upgrd.1, i1 %tmp7, i1 %tmp3 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll b/llvm/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll
index e2bebecded5..20bbd286364 100644
--- a/llvm/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll
+++ b/llvm/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll
@@ -11,14 +11,14 @@ entry:
%A = alloca i32
%B = alloca i32
%tmp = call i32 (...)* @bar( i32* %A ) ; <i32> [#uses=0]
- %T = load i32* %A ; <i32> [#uses=1]
+ %T = load i32, i32* %A ; <i32> [#uses=1]
%tmp2 = icmp eq i32 %C, 0 ; <i1> [#uses=1]
br i1 %tmp2, label %cond_next, label %cond_true
cond_true: ; preds = %entry
store i32 123, i32* %B
call i32 @test2( i32 123 ) ; <i32>:0 [#uses=0]
- %T1 = load i32* %B ; <i32> [#uses=1]
+ %T1 = load i32, i32* %B ; <i32> [#uses=1]
br label %cond_next
cond_next: ; preds = %cond_true, %entry
diff --git a/llvm/test/Transforms/InstCombine/2007-02-07-PointerCast.ll b/llvm/test/Transforms/InstCombine/2007-02-07-PointerCast.ll
index eedaaff57c6..760b6dc66b5 100644
--- a/llvm/test/Transforms/InstCombine/2007-02-07-PointerCast.ll
+++ b/llvm/test/Transforms/InstCombine/2007-02-07-PointerCast.ll
@@ -13,7 +13,7 @@ declare i32 @printf(i8*, ...)
define i32 @main(i32 %x, i8** %a) {
entry:
%tmp = getelementptr [6 x i8], [6 x i8]* @str, i32 0, i64 0 ; <i8*> [#uses=1]
- %tmp1 = load i8** %a ; <i8*> [#uses=1]
+ %tmp1 = load i8*, i8** %a ; <i8*> [#uses=1]
%tmp2 = ptrtoint i8* %tmp1 to i32 ; <i32> [#uses=1]
%tmp3 = zext i32 %tmp2 to i64 ; <i64> [#uses=1]
%tmp.upgrd.1 = call i32 (i8*, ...)* @printf( i8* %tmp, i64 %tmp3 ) ; <i32> [#uses=0]
diff --git a/llvm/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll b/llvm/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll
index 93ba6c6f2b2..adaaf784ab6 100644
--- a/llvm/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll
+++ b/llvm/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll
@@ -12,7 +12,7 @@ entry:
%tmp3 = getelementptr %struct..1anon, %struct..1anon* %u, i32 0, i32 0 ; <double*> [#uses=1]
%tmp34 = bitcast double* %tmp3 to %struct..0anon* ; <%struct..0anon*> [#uses=1]
%tmp5 = getelementptr %struct..0anon, %struct..0anon* %tmp34, i32 0, i32 1 ; <i32*> [#uses=1]
- %tmp6 = load i32* %tmp5 ; <i32> [#uses=1]
+ %tmp6 = load i32, i32* %tmp5 ; <i32> [#uses=1]
%tmp7 = shl i32 %tmp6, 1 ; <i32> [#uses=1]
%tmp8 = lshr i32 %tmp7, 21 ; <i32> [#uses=1]
%tmp89 = trunc i32 %tmp8 to i16 ; <i16> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll b/llvm/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll
index 082b2155a01..2b89a9d721f 100644
--- a/llvm/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll
+++ b/llvm/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll
@@ -9,13 +9,13 @@ return.exitStub: ; preds = %cond_true
ret void
cond_true: ; preds = %newFuncRoot
- %tmp30 = load i64* %tmp10 ; <i64> [#uses=1]
+ %tmp30 = load i64, i64* %tmp10 ; <i64> [#uses=1]
%.cast = zext i32 63 to i64 ; <i64> [#uses=1]
%tmp31 = ashr i64 %tmp30, %.cast ; <i64> [#uses=1]
%tmp3132 = trunc i64 %tmp31 to i32 ; <i32> [#uses=1]
%tmp33 = or i32 %tmp3132, 1 ; <i32> [#uses=1]
store i32 %tmp33, i32* %tmp9
- %tmp34 = load i32* %tmp9 ; <i32> [#uses=1]
+ %tmp34 = load i32, i32* %tmp9 ; <i32> [#uses=1]
store i32 %tmp34, i32* %retval
br label %return.exitStub
}
diff --git a/llvm/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll b/llvm/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll
index 625989384d7..7f06f009515 100644
--- a/llvm/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll
+++ b/llvm/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll
@@ -9,7 +9,7 @@ define weak i32 @pthread_cancel(i32) {
define i1 @__gthread_active_p() {
entry:
- %tmp1 = load i8** @__gthread_active_ptr.5335, align 4 ; <i8*> [#uses=1]
+ %tmp1 = load i8*, i8** @__gthread_active_ptr.5335, align 4 ; <i8*> [#uses=1]
%tmp2 = icmp ne i8* %tmp1, null ; <i1> [#uses=1]
ret i1 %tmp2
}
diff --git a/llvm/test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll b/llvm/test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll
index f9e82202174..67a94e5c8cc 100644
--- a/llvm/test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll
+++ b/llvm/test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll
@@ -8,7 +8,7 @@ define void @foo(i8* %P) {
entry:
%P_addr = alloca i8*
store i8* %P, i8** %P_addr
- %tmp = load i8** %P_addr, align 4
+ %tmp = load i8*, i8** %P_addr, align 4
%tmp1 = getelementptr [4 x i8], [4 x i8]* @.str, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* %tmp1, i32 4, i32 1, i1 false)
br label %return
diff --git a/llvm/test/Transforms/InstCombine/2007-10-31-RangeCrash.ll b/llvm/test/Transforms/InstCombine/2007-10-31-RangeCrash.ll
index 8105b4ba557..8c2cb07af22 100644
--- a/llvm/test/Transforms/InstCombine/2007-10-31-RangeCrash.ll
+++ b/llvm/test/Transforms/InstCombine/2007-10-31-RangeCrash.ll
@@ -11,7 +11,7 @@ bb.i: ; preds = %entry
br label %bb51.i.i
bb27.i.i: ; preds = %bb51.i.i
- %tmp31.i.i = load i16* null, align 2 ; <i16> [#uses=2]
+ %tmp31.i.i = load i16, i16* null, align 2 ; <i16> [#uses=2]
%tmp35.i.i = icmp ult i16 %tmp31.i.i, 1 ; <i1> [#uses=1]
%tmp41.i.i = icmp ugt i16 %tmp31.i.i, -1 ; <i1> [#uses=1]
%bothcond.i.i = or i1 %tmp35.i.i, %tmp41.i.i ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/2007-10-31-StringCrash.ll b/llvm/test/Transforms/InstCombine/2007-10-31-StringCrash.ll
index 220f3e22b99..876cdd53f52 100644
--- a/llvm/test/Transforms/InstCombine/2007-10-31-StringCrash.ll
+++ b/llvm/test/Transforms/InstCombine/2007-10-31-StringCrash.ll
@@ -7,7 +7,7 @@ declare void @__darwin_gcc3_preregister_frame_info()
define void @_start(i32 %argc, i8** %argv, i8** %envp) {
entry:
%tmp1 = bitcast void ()* @__darwin_gcc3_preregister_frame_info to i32* ; <i32*> [#uses=1]
- %tmp2 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
+ %tmp2 = load i32, i32* %tmp1, align 4 ; <i32> [#uses=1]
%tmp3 = icmp ne i32 %tmp2, 0 ; <i1> [#uses=1]
%tmp34 = zext i1 %tmp3 to i8 ; <i8> [#uses=1]
%toBool = icmp ne i8 %tmp34, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/2007-11-07-OpaqueAlignCrash.ll b/llvm/test/Transforms/InstCombine/2007-11-07-OpaqueAlignCrash.ll
index e6c9bcd01e5..ff3107297a1 100644
--- a/llvm/test/Transforms/InstCombine/2007-11-07-OpaqueAlignCrash.ll
+++ b/llvm/test/Transforms/InstCombine/2007-11-07-OpaqueAlignCrash.ll
@@ -13,8 +13,8 @@ target triple = "i686-pc-linux-gnu"
define i32 @foo() {
entry:
- %x = load i8* bitcast (%opaque_t* @g to i8*)
- %y = load i32* bitcast (%op_ts* @h to i32*)
+ %x = load i8, i8* bitcast (%opaque_t* @g to i8*)
+ %y = load i32, i32* bitcast (%op_ts* @h to i32*)
%z = zext i8 %x to i32
%r = add i32 %y, %z
ret i32 %r
diff --git a/llvm/test/Transforms/InstCombine/2007-12-28-IcmpSub2.ll b/llvm/test/Transforms/InstCombine/2007-12-28-IcmpSub2.ll
index 8721c83521b..7260c001b0d 100644
--- a/llvm/test/Transforms/InstCombine/2007-12-28-IcmpSub2.ll
+++ b/llvm/test/Transforms/InstCombine/2007-12-28-IcmpSub2.ll
@@ -4,7 +4,7 @@ define i32 @test1() {
entry:
%z = alloca i32
store i32 0, i32* %z
- %tmp = load i32* %z
+ %tmp = load i32, i32* %z
%sub = sub i32 %tmp, 1
%cmp = icmp ule i32 %sub, 0
%retval = select i1 %cmp, i32 0, i32 1
@@ -15,7 +15,7 @@ define i32 @test2() {
entry:
%z = alloca i32
store i32 0, i32* %z
- %tmp = load i32* %z
+ %tmp = load i32, i32* %z
%sub = sub i32 %tmp, 1
%cmp = icmp ugt i32 %sub, 0
%retval = select i1 %cmp, i32 1, i32 0
@@ -26,7 +26,7 @@ define i32 @test3() {
entry:
%z = alloca i32
store i32 0, i32* %z
- %tmp = load i32* %z
+ %tmp = load i32, i32* %z
%sub = sub i32 %tmp, 1
%cmp = icmp slt i32 %sub, 0
%retval = select i1 %cmp, i32 1, i32 0
@@ -37,7 +37,7 @@ define i32 @test4() {
entry:
%z = alloca i32
store i32 0, i32* %z
- %tmp = load i32* %z
+ %tmp = load i32, i32* %z
%sub = sub i32 %tmp, 1
%cmp = icmp sle i32 %sub, 0
%retval = select i1 %cmp, i32 1, i32 0
@@ -48,7 +48,7 @@ define i32 @test5() {
entry:
%z = alloca i32
store i32 0, i32* %z
- %tmp = load i32* %z
+ %tmp = load i32, i32* %z
%sub = sub i32 %tmp, 1
%cmp = icmp sge i32 %sub, 0
%retval = select i1 %cmp, i32 0, i32 1
@@ -59,7 +59,7 @@ define i32 @test6() {
entry:
%z = alloca i32
store i32 0, i32* %z
- %tmp = load i32* %z
+ %tmp = load i32, i32* %z
%sub = sub i32 %tmp, 1
%cmp = icmp sgt i32 %sub, 0
%retval = select i1 %cmp, i32 0, i32 1
@@ -70,7 +70,7 @@ define i32 @test7() {
entry:
%z = alloca i32
store i32 0, i32* %z
- %tmp = load i32* %z
+ %tmp = load i32, i32* %z
%sub = sub i32 %tmp, 1
%cmp = icmp eq i32 %sub, 0
%retval = select i1 %cmp, i32 0, i32 1
@@ -81,7 +81,7 @@ define i32 @test8() {
entry:
%z = alloca i32
store i32 0, i32* %z
- %tmp = load i32* %z
+ %tmp = load i32, i32* %z
%sub = sub i32 %tmp, 1
%cmp = icmp ne i32 %sub, 0
%retval = select i1 %cmp, i32 1, i32 0
diff --git a/llvm/test/Transforms/InstCombine/2008-03-13-IntToPtr.ll b/llvm/test/Transforms/InstCombine/2008-03-13-IntToPtr.ll
index 0fa4d715f28..d086f4b63b7 100644
--- a/llvm/test/Transforms/InstCombine/2008-03-13-IntToPtr.ll
+++ b/llvm/test/Transforms/InstCombine/2008-03-13-IntToPtr.ll
@@ -1,7 +1,7 @@
; RUN: opt < %s -instcombine -S | grep "16" | count 1
define i8* @bork(i8** %qux) {
- %tmp275 = load i8** %qux, align 1
+ %tmp275 = load i8*, i8** %qux, align 1
%tmp275276 = ptrtoint i8* %tmp275 to i32
%tmp277 = add i32 %tmp275276, 16
%tmp277278 = inttoptr i32 %tmp277 to i8*
diff --git a/llvm/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll b/llvm/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
index fd0217e9f4d..af662bda1e6 100644
--- a/llvm/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
+++ b/llvm/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
@@ -6,7 +6,7 @@ target triple = "i386-apple-darwin8"
define i32 @main() nounwind {
entry:
%tmp93 = icmp slt i32 0, 10 ; <i1> [#uses=0]
- %tmp34 = load volatile i32* @g_1, align 4 ; <i32> [#uses=1]
+ %tmp34 = load volatile i32, i32* @g_1, align 4 ; <i32> [#uses=1]
br label %bb
bb: ; preds = %bb, %entry
@@ -16,7 +16,7 @@ bb: ; preds = %bb, %entry
store volatile i32 %tmp4, i32* @g_1, align 4
%tmp6 = add i32 %b.0.reg2mem.0, 1 ; <i32> [#uses=2]
%tmp9 = icmp slt i32 %tmp6, 10 ; <i1> [#uses=1]
- %tmp3 = load volatile i32* @g_1, align 4 ; <i32> [#uses=1]
+ %tmp3 = load volatile i32, i32* @g_1, align 4 ; <i32> [#uses=1]
br i1 %tmp9, label %bb, label %bb11
bb11: ; preds = %bb
diff --git a/llvm/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll b/llvm/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
index 8022414d6fc..3c67e513bcc 100644
--- a/llvm/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
+++ b/llvm/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
@@ -7,11 +7,11 @@ target triple = "i386-apple-darwin8"
define i32 @main(i32 %i) nounwind {
entry:
%tmp93 = icmp slt i32 %i, 10 ; <i1> [#uses=0]
- %tmp34 = load volatile i32* @g_1, align 4 ; <i32> [#uses=1]
+ %tmp34 = load volatile i32, i32* @g_1, align 4 ; <i32> [#uses=1]
br i1 %tmp93, label %bb11, label %bb
bb: ; preds = %bb, %entry
- %tmp3 = load volatile i32* @g_1, align 4 ; <i32> [#uses=1]
+ %tmp3 = load volatile i32, i32* @g_1, align 4 ; <i32> [#uses=1]
br label %bb11
bb11: ; preds = %bb
diff --git a/llvm/test/Transforms/InstCombine/2008-05-09-SinkOfInvoke.ll b/llvm/test/Transforms/InstCombine/2008-05-09-SinkOfInvoke.ll
index 823ccb67f28..722f8f0fabb 100644
--- a/llvm/test/Transforms/InstCombine/2008-05-09-SinkOfInvoke.ll
+++ b/llvm/test/Transforms/InstCombine/2008-05-09-SinkOfInvoke.ll
@@ -23,7 +23,7 @@ invcont31: ; preds = %invcont
to label %invcont37 unwind label %lpad ; <i32**> [#uses=1]
invcont37: ; preds = %invcont31
- %tmp39 = load i32** %tmp38, align 8 ; <i32*> [#uses=1]
+ %tmp39 = load i32*, i32** %tmp38, align 8 ; <i32*> [#uses=1]
%tmp41 = getelementptr %"struct.std::ctype<char>", %"struct.std::ctype<char>"* %this, i32 0, i32 4 ; <i32**> [#uses=1]
store i32* %tmp39, i32** %tmp41, align 8
ret void
diff --git a/llvm/test/Transforms/InstCombine/2008-05-17-InfLoop.ll b/llvm/test/Transforms/InstCombine/2008-05-17-InfLoop.ll
index 2939a482165..f0ccc05b956 100644
--- a/llvm/test/Transforms/InstCombine/2008-05-17-InfLoop.ll
+++ b/llvm/test/Transforms/InstCombine/2008-05-17-InfLoop.ll
@@ -10,9 +10,9 @@ entry:
%blockSize100k = alloca i32 ; <i32*> [#uses=2]
store i32 %0, i32* %blockSize100k
%n = alloca i32 ; <i32*> [#uses=2]
- load i32* %blockSize100k ; <i32>:1 [#uses=1]
+ load i32, i32* %blockSize100k ; <i32>:1 [#uses=1]
store i32 %1, i32* %n
- load i32* %n ; <i32>:2 [#uses=1]
+ load i32, i32* %n ; <i32>:2 [#uses=1]
add i32 %2, 2 ; <i32>:3 [#uses=1]
mul i32 %3, ptrtoint (i32* getelementptr (i32* null, i32 1) to i32) ; <i32>:4 [#uses=1]
call void @BZALLOC( i32 %4 )
diff --git a/llvm/test/Transforms/InstCombine/2008-05-23-CompareFold.ll b/llvm/test/Transforms/InstCombine/2008-05-23-CompareFold.ll
index b7296776755..b10aac96599 100644
--- a/llvm/test/Transforms/InstCombine/2008-05-23-CompareFold.ll
+++ b/llvm/test/Transforms/InstCombine/2008-05-23-CompareFold.ll
@@ -5,7 +5,7 @@
; CHECK: ret i1 false
define i1 @f(i8* %x) {
entry:
- %tmp462 = load i8* %x, align 1 ; <i8> [#uses=1]
+ %tmp462 = load i8, i8* %x, align 1 ; <i8> [#uses=1]
%tmp462463 = sitofp i8 %tmp462 to float ; <float> [#uses=1]
%tmp464 = fcmp ugt float %tmp462463, 0x47EFFFFFE0000000 ; <i1>
ret i1 %tmp464
diff --git a/llvm/test/Transforms/InstCombine/2008-06-19-UncondLoad.ll b/llvm/test/Transforms/InstCombine/2008-06-19-UncondLoad.ll
index 05f1c520008..c3aab464b87 100644
--- a/llvm/test/Transforms/InstCombine/2008-06-19-UncondLoad.ll
+++ b/llvm/test/Transforms/InstCombine/2008-06-19-UncondLoad.ll
@@ -4,13 +4,13 @@
declare i32 @x(i32*)
define i32 @b(i32* %a, i32* %b) {
entry:
- %tmp1 = load i32* %a
- %tmp3 = load i32* %b
+ %tmp1 = load i32, i32* %a
+ %tmp3 = load i32, i32* %b
%add = add i32 %tmp1, %tmp3
%call = call i32 @x( i32* %a )
%tobool = icmp ne i32 %add, 0
; not safe to turn into an uncond load
%cond = select i1 %tobool, i32* %b, i32* %a
- %tmp8 = load i32* %cond
+ %tmp8 = load i32, i32* %cond
ret i32 %tmp8
}
diff --git a/llvm/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll b/llvm/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
index dcf4befa860..17ec9cd1d82 100644
--- a/llvm/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
+++ b/llvm/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
@@ -7,7 +7,7 @@ target triple = "i386-apple-darwin8"
define i32 @main() nounwind {
entry:
%tmp93 = icmp slt i32 0, 10 ; <i1> [#uses=0]
- %tmp34 = load volatile i32* @g_1, align 4 ; <i32> [#uses=1]
+ %tmp34 = load volatile i32, i32* @g_1, align 4 ; <i32> [#uses=1]
br label %bb
bb: ; preds = %bb, %entry
@@ -17,7 +17,7 @@ bb: ; preds = %bb, %entry
store volatile i32 %tmp4, i32* @g_1, align 4
%tmp6 = add i32 %b.0.reg2mem.0, 1 ; <i32> [#uses=2]
%tmp9 = icmp slt i32 %tmp6, 10 ; <i1> [#uses=1]
- %tmp3 = load volatile i32* @g_1, align 4 ; <i32> [#uses=1]
+ %tmp3 = load volatile i32, i32* @g_1, align 4 ; <i32> [#uses=1]
br i1 %tmp9, label %bb, label %bb11
bb11: ; preds = %bb
diff --git a/llvm/test/Transforms/InstCombine/2008-08-05-And.ll b/llvm/test/Transforms/InstCombine/2008-08-05-And.ll
index 124c21418c8..91f1c0b0a98 100644
--- a/llvm/test/Transforms/InstCombine/2008-08-05-And.ll
+++ b/llvm/test/Transforms/InstCombine/2008-08-05-And.ll
@@ -7,7 +7,7 @@ entry:
bb:
%g1 = getelementptr i8, i8* %x, i32 0
- %l1 = load i8* %g1, align 1
+ %l1 = load i8, i8* %g1, align 1
%s1 = sub i8 %l1, 6
%c1 = icmp ugt i8 %s1, 2
%s2 = sub i8 %l1, 10
diff --git a/llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll b/llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
index 67d56d5d30a..23ed5aa2e45 100644
--- a/llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
+++ b/llvm/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
@@ -21,7 +21,7 @@ entry:
%4 = bitcast { i32, i32 }* %3 to i64* ; <i64*> [#uses=1]
store i64 %key_token2, i64* %4, align 4
%5 = call i32 (...)* @foo(%struct.Key* byval align 4 %iospec, i32* %ret) nounwind ; <i32> [#uses=0]
- %6 = load i32* %ret, align 4 ; <i32> [#uses=1]
+ %6 = load i32, i32* %ret, align 4 ; <i32> [#uses=1]
ret i32 %6
}
diff --git a/llvm/test/Transforms/InstCombine/2009-01-19-fmod-constant-float-specials.ll b/llvm/test/Transforms/InstCombine/2009-01-19-fmod-constant-float-specials.ll
index 68c51b43fc3..5bff5a8d550 100644
--- a/llvm/test/Transforms/InstCombine/2009-01-19-fmod-constant-float-specials.ll
+++ b/llvm/test/Transforms/InstCombine/2009-01-19-fmod-constant-float-specials.ll
@@ -15,9 +15,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 0x7FF0000000000000, float* %x, align 4
store float 0x7FF8000000000000, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -36,9 +36,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 0x7FF0000000000000, float* %x, align 4
store float 0.000000e+00, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -55,9 +55,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 0x7FF0000000000000, float* %x, align 4
store float 3.500000e+00, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -74,9 +74,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 0x7FF0000000000000, float* %x, align 4
store float 0x7FF0000000000000, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -93,9 +93,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 0x7FF8000000000000, float* %x, align 4
store float 0x7FF0000000000000, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -112,9 +112,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 0x7FF8000000000000, float* %x, align 4
store float 0.000000e+00, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -131,9 +131,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 0x7FF8000000000000, float* %x, align 4
store float 3.500000e+00, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -150,9 +150,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 0x7FF8000000000000, float* %x, align 4
store float 0x7FF8000000000000, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -169,9 +169,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 0.000000e+00, float* %x, align 4
store float 0x7FF8000000000000, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -188,9 +188,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 0.000000e+00, float* %x, align 4
store float 0x7FF0000000000000, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -207,9 +207,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 0.000000e+00, float* %x, align 4
store float 0.000000e+00, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -226,9 +226,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 0.000000e+00, float* %x, align 4
store float 3.500000e+00, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -245,9 +245,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 3.500000e+00, float* %x, align 4
store float 0x7FF8000000000000, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -264,9 +264,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 3.500000e+00, float* %x, align 4
store float 0x7FF0000000000000, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -283,9 +283,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 3.500000e+00, float* %x, align 4
store float 0.000000e+00, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
@@ -302,9 +302,9 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store float 3.500000e+00, float* %x, align 4
store float 3.500000e+00, float* %y, align 4
- %0 = load float* %y, align 4 ; <float> [#uses=1]
+ %0 = load float, float* %y, align 4 ; <float> [#uses=1]
%1 = fpext float %0 to double ; <double> [#uses=1]
- %2 = load float* %x, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %x, align 4 ; <float> [#uses=1]
%3 = fpext float %2 to double ; <double> [#uses=1]
%4 = frem double %3, %1 ; <double> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), double %4) nounwind ; <i32> [#uses=0]
diff --git a/llvm/test/Transforms/InstCombine/2009-01-19-fmod-constant-float.ll b/llvm/test/Transforms/InstCombine/2009-01-19-fmod-constant-float.ll
index 6bc7ce34d1c..5adcb6bfa07 100644
--- a/llvm/test/Transforms/InstCombine/2009-01-19-fmod-constant-float.ll
+++ b/llvm/test/Transforms/InstCombine/2009-01-19-fmod-constant-float.ll
@@ -14,12 +14,12 @@ entry:
%1 = frem double 1.000000e-01, 1.000000e+00 ; <double> [#uses=1]
%2 = fptrunc double %1 to float ; <float> [#uses=1]
store float %2, float* %0, align 4
- %3 = load float* %0, align 4 ; <float> [#uses=1]
+ %3 = load float, float* %0, align 4 ; <float> [#uses=1]
store float %3, float* %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load float* %retval ; <float> [#uses=1]
+ %retval1 = load float, float* %retval ; <float> [#uses=1]
ret float %retval1
}
@@ -31,12 +31,12 @@ entry:
%1 = frem double -1.000000e-01, 1.000000e+00 ; <double> [#uses=1]
%2 = fptrunc double %1 to float ; <float> [#uses=1]
store float %2, float* %0, align 4
- %3 = load float* %0, align 4 ; <float> [#uses=1]
+ %3 = load float, float* %0, align 4 ; <float> [#uses=1]
store float %3, float* %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load float* %retval ; <float> [#uses=1]
+ %retval1 = load float, float* %retval ; <float> [#uses=1]
ret float %retval1
}
@@ -48,12 +48,12 @@ entry:
%1 = frem double 1.000000e-01, -1.000000e+00 ; <double> [#uses=1]
%2 = fptrunc double %1 to float ; <float> [#uses=1]
store float %2, float* %0, align 4
- %3 = load float* %0, align 4 ; <float> [#uses=1]
+ %3 = load float, float* %0, align 4 ; <float> [#uses=1]
store float %3, float* %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load float* %retval ; <float> [#uses=1]
+ %retval1 = load float, float* %retval ; <float> [#uses=1]
ret float %retval1
}
@@ -65,11 +65,11 @@ entry:
%1 = frem double -1.000000e-01, -1.000000e+00 ; <double> [#uses=1]
%2 = fptrunc double %1 to float ; <float> [#uses=1]
store float %2, float* %0, align 4
- %3 = load float* %0, align 4 ; <float> [#uses=1]
+ %3 = load float, float* %0, align 4 ; <float> [#uses=1]
store float %3, float* %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load float* %retval ; <float> [#uses=1]
+ %retval1 = load float, float* %retval ; <float> [#uses=1]
ret float %retval1
}
diff --git a/llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll b/llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
index 9f0851c66b6..0f8b38c8e9c 100644
--- a/llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
+++ b/llvm/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
@@ -30,11 +30,11 @@ entry:
%5 = getelementptr %"struct.std::vector<int,std::allocator<int> >", %"struct.std::vector<int,std::allocator<int> >"* %X, i32 0, i32 0
%6 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >", %"struct.std::_Vector_base<int,std::allocator<int> >"* %5, i32 0, i32 0
%7 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl", %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"* %6, i32 0, i32 1
- %8 = load i32** %7, align 4
+ %8 = load i32*, i32** %7, align 4
%9 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %3, i32 0, i32 0
store i32* %8, i32** %9, align 4
%10 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %3, i32 0, i32 0
- %11 = load i32** %10, align 4
+ %11 = load i32*, i32** %10, align 4
%tmp2.i = ptrtoint i32* %11 to i32
%tmp1.i = inttoptr i32 %tmp2.i to i32*
%tmp3 = ptrtoint i32* %tmp1.i to i32
@@ -42,11 +42,11 @@ entry:
%12 = getelementptr %"struct.std::vector<int,std::allocator<int> >", %"struct.std::vector<int,std::allocator<int> >"* %X, i32 0, i32 0
%13 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >", %"struct.std::_Vector_base<int,std::allocator<int> >"* %12, i32 0, i32 0
%14 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl", %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"* %13, i32 0, i32 0
- %15 = load i32** %14, align 4
+ %15 = load i32*, i32** %14, align 4
%16 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %0, i32 0, i32 0
store i32* %15, i32** %16, align 4
%17 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %0, i32 0, i32 0
- %18 = load i32** %17, align 4
+ %18 = load i32*, i32** %17, align 4
%tmp2.i17 = ptrtoint i32* %18 to i32
%tmp1.i18 = inttoptr i32 %tmp2.i17 to i32*
%tmp8 = ptrtoint i32* %tmp1.i18 to i32
@@ -54,14 +54,14 @@ entry:
%19 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i, i32 0, i32 0
store i32* %tmp6, i32** %19
%20 = getelementptr %"struct.std::bidirectional_iterator_tag", %"struct.std::bidirectional_iterator_tag"* %1, i32 0, i32 0
- %21 = load i8* %20, align 1
+ %21 = load i8, i8* %20, align 1
%22 = or i8 %21, 0
%23 = or i8 %22, 0
%24 = or i8 %23, 0
%25 = getelementptr %"struct.std::bidirectional_iterator_tag", %"struct.std::bidirectional_iterator_tag"* %2, i32 0, i32 0
store i8 0, i8* %25, align 1
%elt.i = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i, i32 0, i32 0
- %val.i = load i32** %elt.i
+ %val.i = load i32*, i32** %elt.i
%tmp.i = bitcast %"struct.std::bidirectional_iterator_tag"* %unnamed_arg.i to i8*
%tmp9.i = bitcast %"struct.std::bidirectional_iterator_tag"* %2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp.i, i8* %tmp9.i, i64 1, i32 1, i1 false)
@@ -70,10 +70,10 @@ entry:
%27 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
store i32* %tmp2, i32** %27
%28 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
- %29 = load i32** %28, align 4
+ %29 = load i32*, i32** %28, align 4
%30 = ptrtoint i32* %29 to i32
%31 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %32 = load i32** %31, align 4
+ %32 = load i32*, i32** %31, align 4
%33 = ptrtoint i32* %32 to i32
%34 = sub i32 %30, %33
%35 = ashr i32 %34, 2
@@ -82,9 +82,9 @@ entry:
bb.i.i: ; preds = %bb12.i.i
%37 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %38 = load i32** %37, align 4
- %39 = load i32* %38, align 4
- %40 = load i32* %4, align 4
+ %38 = load i32*, i32** %37, align 4
+ %39 = load i32, i32* %38, align 4
+ %40 = load i32, i32* %4, align 4
%41 = icmp eq i32 %39, %40
%42 = zext i1 %41 to i8
%toBool.i.i = icmp ne i8 %42, 0
@@ -92,19 +92,19 @@ bb.i.i: ; preds = %bb12.i.i
bb1.i.i: ; preds = %bb.i.i
%43 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %44 = load i32** %43, align 4
+ %44 = load i32*, i32** %43, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb2.i.i: ; preds = %bb.i.i
%45 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %46 = load i32** %45, align 4
+ %46 = load i32*, i32** %45, align 4
%47 = getelementptr i32, i32* %46, i64 1
%48 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %47, i32** %48, align 4
%49 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %50 = load i32** %49, align 4
- %51 = load i32* %50, align 4
- %52 = load i32* %4, align 4
+ %50 = load i32*, i32** %49, align 4
+ %51 = load i32, i32* %50, align 4
+ %52 = load i32, i32* %4, align 4
%53 = icmp eq i32 %51, %52
%54 = zext i1 %53 to i8
%toBool3.i.i = icmp ne i8 %54, 0
@@ -112,19 +112,19 @@ bb2.i.i: ; preds = %bb.i.i
bb4.i.i: ; preds = %bb2.i.i
%55 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %56 = load i32** %55, align 4
+ %56 = load i32*, i32** %55, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb5.i.i: ; preds = %bb2.i.i
%57 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %58 = load i32** %57, align 4
+ %58 = load i32*, i32** %57, align 4
%59 = getelementptr i32, i32* %58, i64 1
%60 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %59, i32** %60, align 4
%61 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %62 = load i32** %61, align 4
- %63 = load i32* %62, align 4
- %64 = load i32* %4, align 4
+ %62 = load i32*, i32** %61, align 4
+ %63 = load i32, i32* %62, align 4
+ %64 = load i32, i32* %4, align 4
%65 = icmp eq i32 %63, %64
%66 = zext i1 %65 to i8
%toBool6.i.i = icmp ne i8 %66, 0
@@ -132,19 +132,19 @@ bb5.i.i: ; preds = %bb2.i.i
bb7.i.i: ; preds = %bb5.i.i
%67 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %68 = load i32** %67, align 4
+ %68 = load i32*, i32** %67, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb8.i.i: ; preds = %bb5.i.i
%69 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %70 = load i32** %69, align 4
+ %70 = load i32*, i32** %69, align 4
%71 = getelementptr i32, i32* %70, i64 1
%72 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %71, i32** %72, align 4
%73 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %74 = load i32** %73, align 4
- %75 = load i32* %74, align 4
- %76 = load i32* %4, align 4
+ %74 = load i32*, i32** %73, align 4
+ %75 = load i32, i32* %74, align 4
+ %76 = load i32, i32* %4, align 4
%77 = icmp eq i32 %75, %76
%78 = zext i1 %77 to i8
%toBool9.i.i = icmp ne i8 %78, 0
@@ -152,12 +152,12 @@ bb8.i.i: ; preds = %bb5.i.i
bb10.i.i: ; preds = %bb8.i.i
%79 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %80 = load i32** %79, align 4
+ %80 = load i32*, i32** %79, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb11.i.i: ; preds = %bb8.i.i
%81 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %82 = load i32** %81, align 4
+ %82 = load i32*, i32** %81, align 4
%83 = getelementptr i32, i32* %82, i64 1
%84 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %83, i32** %84, align 4
@@ -171,10 +171,10 @@ bb12.i.i: ; preds = %bb11.i.i, %entry
bb13.i.i: ; preds = %bb12.i.i
%87 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
- %88 = load i32** %87, align 4
+ %88 = load i32*, i32** %87, align 4
%89 = ptrtoint i32* %88 to i32
%90 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %91 = load i32** %90, align 4
+ %91 = load i32*, i32** %90, align 4
%92 = ptrtoint i32* %91 to i32
%93 = sub i32 %89, %92
%94 = ashr i32 %93, 2
@@ -186,9 +186,9 @@ bb13.i.i: ; preds = %bb12.i.i
bb14.i.i: ; preds = %bb13.i.i
%95 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %96 = load i32** %95, align 4
- %97 = load i32* %96, align 4
- %98 = load i32* %4, align 4
+ %96 = load i32*, i32** %95, align 4
+ %97 = load i32, i32* %96, align 4
+ %98 = load i32, i32* %4, align 4
%99 = icmp eq i32 %97, %98
%100 = zext i1 %99 to i8
%toBool15.i.i = icmp ne i8 %100, 0
@@ -196,12 +196,12 @@ bb14.i.i: ; preds = %bb13.i.i
bb16.i.i: ; preds = %bb14.i.i
%101 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %102 = load i32** %101, align 4
+ %102 = load i32*, i32** %101, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb17.i.i: ; preds = %bb14.i.i
%103 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %104 = load i32** %103, align 4
+ %104 = load i32*, i32** %103, align 4
%105 = getelementptr i32, i32* %104, i64 1
%106 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %105, i32** %106, align 4
@@ -209,9 +209,9 @@ bb17.i.i: ; preds = %bb14.i.i
bb18.i.i: ; preds = %bb17.i.i, %bb13.i.i
%107 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %108 = load i32** %107, align 4
- %109 = load i32* %108, align 4
- %110 = load i32* %4, align 4
+ %108 = load i32*, i32** %107, align 4
+ %109 = load i32, i32* %108, align 4
+ %110 = load i32, i32* %4, align 4
%111 = icmp eq i32 %109, %110
%112 = zext i1 %111 to i8
%toBool19.i.i = icmp ne i8 %112, 0
@@ -219,12 +219,12 @@ bb18.i.i: ; preds = %bb17.i.i, %bb13.i.i
bb20.i.i: ; preds = %bb18.i.i
%113 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %114 = load i32** %113, align 4
+ %114 = load i32*, i32** %113, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb21.i.i: ; preds = %bb18.i.i
%115 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %116 = load i32** %115, align 4
+ %116 = load i32*, i32** %115, align 4
%117 = getelementptr i32, i32* %116, i64 1
%118 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %117, i32** %118, align 4
@@ -232,9 +232,9 @@ bb21.i.i: ; preds = %bb18.i.i
bb22.i.i: ; preds = %bb21.i.i, %bb13.i.i
%119 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %120 = load i32** %119, align 4
- %121 = load i32* %120, align 4
- %122 = load i32* %4, align 4
+ %120 = load i32*, i32** %119, align 4
+ %121 = load i32, i32* %120, align 4
+ %122 = load i32, i32* %4, align 4
%123 = icmp eq i32 %121, %122
%124 = zext i1 %123 to i8
%toBool23.i.i = icmp ne i8 %124, 0
@@ -242,12 +242,12 @@ bb22.i.i: ; preds = %bb21.i.i, %bb13.i.i
bb24.i.i: ; preds = %bb22.i.i
%125 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %126 = load i32** %125, align 4
+ %126 = load i32*, i32** %125, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb25.i.i: ; preds = %bb22.i.i
%127 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
- %128 = load i32** %127, align 4
+ %128 = load i32*, i32** %127, align 4
%129 = getelementptr i32, i32* %128, i64 1
%130 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %129, i32** %130, align 4
@@ -255,7 +255,7 @@ bb25.i.i: ; preds = %bb22.i.i
bb26.i.i: ; preds = %bb25.i.i, %bb13.i.i
%131 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
- %132 = load i32** %131, align 4
+ %132 = load i32*, i32** %131, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit: ; preds = %bb26.i.i, %bb24.i.i, %bb20.i.i, %bb16.i.i, %bb10.i.i, %bb7.i.i, %bb4.i.i, %bb1.i.i
diff --git a/llvm/test/Transforms/InstCombine/2009-02-21-LoadCST.ll b/llvm/test/Transforms/InstCombine/2009-02-21-LoadCST.ll
index cb8a77c23ba..90ec6d540e9 100644
--- a/llvm/test/Transforms/InstCombine/2009-02-21-LoadCST.ll
+++ b/llvm/test/Transforms/InstCombine/2009-02-21-LoadCST.ll
@@ -7,6 +7,6 @@ target triple = "i386-pc-linux-gnu"
@.str1 = internal constant [4 x i8] c"\B5%8\00"
define i32 @test() {
- %rhsv = load i32* bitcast ([4 x i8]* @.str1 to i32*), align 1
+ %rhsv = load i32, i32* bitcast ([4 x i8]* @.str1 to i32*), align 1
ret i32 %rhsv
}
diff --git a/llvm/test/Transforms/InstCombine/2009-02-25-CrashZeroSizeArray.ll b/llvm/test/Transforms/InstCombine/2009-02-25-CrashZeroSizeArray.ll
index 4880dcb1602..ef1734ba7d1 100644
--- a/llvm/test/Transforms/InstCombine/2009-02-25-CrashZeroSizeArray.ll
+++ b/llvm/test/Transforms/InstCombine/2009-02-25-CrashZeroSizeArray.ll
@@ -9,7 +9,7 @@ entry:
%tmp6 = mul i32 %max289, 4 ; <i32> [#uses=1]
%tmp7 = alloca i8, i32 0 ; <i8*> [#uses=1]
%tmp8 = bitcast i8* %tmp7 to [0 x [0 x i32]]* ; <[0 x [0 x i32]]*> [#uses=1]
- %tmp11 = load i32* null, align 1 ; <i32> [#uses=1]
+ %tmp11 = load i32, i32* null, align 1 ; <i32> [#uses=1]
%tmp12 = icmp eq i32 %tmp11, 3 ; <i1> [#uses=1]
%tmp13 = zext i1 %tmp12 to i8 ; <i8> [#uses=1]
%tmp14 = ashr i32 %tmp6, 2 ; <i32> [#uses=1]
@@ -20,7 +20,7 @@ entry:
%tmp19 = bitcast i8* %tmp18 to [0 x i32]* ; <[0 x i32]*> [#uses=1]
%tmp20 = bitcast [0 x i32]* %tmp19 to i32* ; <i32*> [#uses=1]
%tmp21 = getelementptr i32, i32* %tmp20, i32 0 ; <i32*> [#uses=1]
- %tmp22 = load i32* %tmp21, align 1 ; <i32> [#uses=1]
+ %tmp22 = load i32, i32* %tmp21, align 1 ; <i32> [#uses=1]
%tmp23 = icmp eq i32 %tmp22, 4 ; <i1> [#uses=1]
%tmp24 = zext i1 %tmp23 to i8 ; <i8> [#uses=1]
%toBool709 = icmp ne i8 %tmp13, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/2009-03-18-vector-ashr-crash.ll b/llvm/test/Transforms/InstCombine/2009-03-18-vector-ashr-crash.ll
index c617ca4fcad..3847abd30c8 100644
--- a/llvm/test/Transforms/InstCombine/2009-03-18-vector-ashr-crash.ll
+++ b/llvm/test/Transforms/InstCombine/2009-03-18-vector-ashr-crash.ll
@@ -3,9 +3,9 @@
define void @0(<4 x i16>*, <4 x i16>*) {
%3 = alloca <4 x i16>* ; <<4 x i16>**> [#uses=1]
- %4 = load <4 x i16>* null, align 1 ; <<4 x i16>> [#uses=1]
+ %4 = load <4 x i16>, <4 x i16>* null, align 1 ; <<4 x i16>> [#uses=1]
%5 = ashr <4 x i16> %4, <i16 5, i16 5, i16 5, i16 5> ; <<4 x i16>> [#uses=1]
- %6 = load <4 x i16>** %3 ; <<4 x i16>*> [#uses=1]
+ %6 = load <4 x i16>*, <4 x i16>** %3 ; <<4 x i16>*> [#uses=1]
store <4 x i16> %5, <4 x i16>* %6, align 1
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/2009-05-23-FCmpToICmp.ll b/llvm/test/Transforms/InstCombine/2009-05-23-FCmpToICmp.ll
index dd14c6beec4..ced317c4d43 100644
--- a/llvm/test/Transforms/InstCombine/2009-05-23-FCmpToICmp.ll
+++ b/llvm/test/Transforms/InstCombine/2009-05-23-FCmpToICmp.ll
@@ -2,7 +2,7 @@
; rdar://6903175
define i1 @f0(i32 *%a) nounwind {
- %b = load i32* %a, align 4
+ %b = load i32, i32* %a, align 4
%c = uitofp i32 %b to double
%d = fcmp ogt double %c, 0x41EFFFFFFFE00000
ret i1 %d
diff --git a/llvm/test/Transforms/InstCombine/2010-03-03-ExtElim.ll b/llvm/test/Transforms/InstCombine/2010-03-03-ExtElim.ll
index e0def997d97..faeffb68138 100644
--- a/llvm/test/Transforms/InstCombine/2010-03-03-ExtElim.ll
+++ b/llvm/test/Transforms/InstCombine/2010-03-03-ExtElim.ll
@@ -8,7 +8,7 @@ target triple = "i386-unknown-linux-gnu"
define i1 @PR6486() nounwind {
; CHECK-LABEL: @PR6486(
- %tmp = load i32*** @g_177 ; <i32**> [#uses=1]
+ %tmp = load i32**, i32*** @g_177 ; <i32**> [#uses=1]
%cmp = icmp ne i32** null, %tmp ; <i1> [#uses=1]
%conv = zext i1 %cmp to i32 ; <i32> [#uses=1]
%cmp1 = icmp sle i32 0, %conv ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/2011-05-02-VectorBoolean.ll b/llvm/test/Transforms/InstCombine/2011-05-02-VectorBoolean.ll
index 02b64e3bb8d..116c9713d89 100644
--- a/llvm/test/Transforms/InstCombine/2011-05-02-VectorBoolean.ll
+++ b/llvm/test/Transforms/InstCombine/2011-05-02-VectorBoolean.ll
@@ -6,9 +6,9 @@ entry:
%a.addr = alloca <2 x i16>, align 4
%.compoundliteral = alloca <2 x i16>, align 4
store <2 x i16> %a, <2 x i16>* %a.addr, align 4
- %tmp = load <2 x i16>* %a.addr, align 4
+ %tmp = load <2 x i16>, <2 x i16>* %a.addr, align 4
store <2 x i16> zeroinitializer, <2 x i16>* %.compoundliteral
- %tmp1 = load <2 x i16>* %.compoundliteral
+ %tmp1 = load <2 x i16>, <2 x i16>* %.compoundliteral
%cmp = icmp uge <2 x i16> %tmp, %tmp1
%sext = sext <2 x i1> %cmp to <2 x i16>
ret <2 x i16> %sext
diff --git a/llvm/test/Transforms/InstCombine/2011-05-28-swapmulsub.ll b/llvm/test/Transforms/InstCombine/2011-05-28-swapmulsub.ll
index b096d1f952f..639b64aacba 100644
--- a/llvm/test/Transforms/InstCombine/2011-05-28-swapmulsub.ll
+++ b/llvm/test/Transforms/InstCombine/2011-05-28-swapmulsub.ll
@@ -7,14 +7,14 @@ entry:
%on_off.addr = alloca i32, align 4
%a = alloca i32, align 4
store i32 %on_off, i32* %on_off.addr, align 4
- %tmp = load i32* %on_off.addr, align 4
+ %tmp = load i32, i32* %on_off.addr, align 4
%sub = sub i32 1, %tmp
; CHECK-NOT: mul i32
%mul = mul i32 %sub, -2
; CHECK: shl
; CHECK-NEXT: add
store i32 %mul, i32* %a, align 4
- %tmp1 = load i32* %a, align 4
+ %tmp1 = load i32, i32* %a, align 4
%conv = trunc i32 %tmp1 to i16
ret i16 %conv
}
@@ -26,15 +26,15 @@ entry:
%a = alloca i32, align 4
store i32 %on_off, i32* %on_off.addr, align 4
store i32 %q, i32* %q.addr, align 4
- %tmp = load i32* %q.addr, align 4
- %tmp1 = load i32* %on_off.addr, align 4
+ %tmp = load i32, i32* %q.addr, align 4
+ %tmp1 = load i32, i32* %on_off.addr, align 4
%sub = sub i32 %tmp, %tmp1
; CHECK-NOT: mul i32
%mul = mul i32 %sub, -4
; CHECK: sub i32
; CHECK-NEXT: shl
store i32 %mul, i32* %a, align 4
- %tmp2 = load i32* %a, align 4
+ %tmp2 = load i32, i32* %a, align 4
%conv = trunc i32 %tmp2 to i16
ret i16 %conv
}
@@ -44,14 +44,14 @@ entry:
%on_off.addr = alloca i32, align 4
%a = alloca i32, align 4
store i32 %on_off, i32* %on_off.addr, align 4
- %tmp = load i32* %on_off.addr, align 4
+ %tmp = load i32, i32* %on_off.addr, align 4
%sub = sub i32 7, %tmp
; CHECK-NOT: mul i32
%mul = mul i32 %sub, -4
; CHECK: shl
; CHECK-NEXT: add
store i32 %mul, i32* %a, align 4
- %tmp1 = load i32* %a, align 4
+ %tmp1 = load i32, i32* %a, align 4
%conv = trunc i32 %tmp1 to i16
ret i16 %conv
}
diff --git a/llvm/test/Transforms/InstCombine/2011-06-13-nsw-alloca.ll b/llvm/test/Transforms/InstCombine/2011-06-13-nsw-alloca.ll
index c8f0351858c..15c11db37f5 100644
--- a/llvm/test/Transforms/InstCombine/2011-06-13-nsw-alloca.ll
+++ b/llvm/test/Transforms/InstCombine/2011-06-13-nsw-alloca.ll
@@ -9,12 +9,12 @@ define void @fu1(i32 %parm) nounwind ssp {
%ptr = alloca double*, align 4
store i32 %parm, i32* %1, align 4
store double* null, double** %ptr, align 4
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = icmp ne i32 %2, 0
br i1 %3, label %4, label %10
; <label>:4 ; preds = %0
- %5 = load i32* %1, align 4
+ %5 = load i32, i32* %1, align 4
%6 = shl nsw i32 %5, 3
; With "nsw", the alloca and its bitcast can be fused:
%7 = add nsw i32 %6, 2048
@@ -25,7 +25,7 @@ define void @fu1(i32 %parm) nounwind ssp {
store double* %9, double** %ptr, align 4
br label %10
; <label>:10 ; preds = %4, %0
- %11 = load double** %ptr, align 4
+ %11 = load double*, double** %ptr, align 4
call void @bar(double* %11)
; CHECK: ret
ret void
@@ -39,12 +39,12 @@ define void @fu2(i32 %parm) nounwind ssp {
%ptr = alloca double*, align 4
store i32 %parm, i32* %1, align 4
store double* null, double** %ptr, align 4
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = icmp ne i32 %2, 0
br i1 %3, label %4, label %10
; <label>:4 ; preds = %0
- %5 = load i32* %1, align 4
+ %5 = load i32, i32* %1, align 4
%6 = mul nsw i32 %5, 8
; Without "nsw", the alloca and its bitcast cannot be fused:
%7 = add i32 %6, 2048
@@ -57,7 +57,7 @@ define void @fu2(i32 %parm) nounwind ssp {
br label %10
; <label>:10 ; preds = %4, %0
- %11 = load double** %ptr, align 4
+ %11 = load double*, double** %ptr, align 4
call void @bar(double* %11)
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/2011-10-07-AlignPromotion.ll b/llvm/test/Transforms/InstCombine/2011-10-07-AlignPromotion.ll
index 22061b272a8..122669ec04a 100644
--- a/llvm/test/Transforms/InstCombine/2011-10-07-AlignPromotion.ll
+++ b/llvm/test/Transforms/InstCombine/2011-10-07-AlignPromotion.ll
@@ -10,7 +10,7 @@ define void @t(%struct.CGPoint* %a) nounwind {
%Point = alloca %struct.CGPoint, align 4
%1 = bitcast %struct.CGPoint* %a to i64*
%2 = bitcast %struct.CGPoint* %Point to i64*
- %3 = load i64* %1, align 4
+ %3 = load i64, i64* %1, align 4
store i64 %3, i64* %2, align 4
call void @foo(i64* %2) nounwind
ret void
diff --git a/llvm/test/Transforms/InstCombine/2012-05-27-Negative-Shift-Crash.ll b/llvm/test/Transforms/InstCombine/2012-05-27-Negative-Shift-Crash.ll
index ba83fe9ec0a..ba57baf2371 100644
--- a/llvm/test/Transforms/InstCombine/2012-05-27-Negative-Shift-Crash.ll
+++ b/llvm/test/Transforms/InstCombine/2012-05-27-Negative-Shift-Crash.ll
@@ -20,7 +20,7 @@ entry:
define void @fn4() nounwind uwtable ssp {
entry:
- %0 = load i32* @d, align 4
+ %0 = load i32, i32* @d, align 4
%cmp = icmp eq i32 %0, 0
%conv = zext i1 %cmp to i32
store i32 %conv, i32* @c, align 4
@@ -34,12 +34,12 @@ entry:
store i32 %and, i32* @e, align 4
%sub = add nsw i32 %and, -1
store i32 %sub, i32* @f, align 4
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %1 = load i32* @b, align 4
+ %1 = load i32, i32* @b, align 4
%.lobit = lshr i32 %1, 31
%2 = trunc i32 %.lobit to i8
%.not = xor i8 %2, 1
diff --git a/llvm/test/Transforms/InstCombine/2012-05-28-select-hang.ll b/llvm/test/Transforms/InstCombine/2012-05-28-select-hang.ll
index db1dbd5b6e2..c514dd1f5ec 100644
--- a/llvm/test/Transforms/InstCombine/2012-05-28-select-hang.ll
+++ b/llvm/test/Transforms/InstCombine/2012-05-28-select-hang.ll
@@ -6,7 +6,7 @@
define void @func() nounwind uwtable ssp {
entry:
- %0 = load i8* @c, align 1
+ %0 = load i8, i8* @c, align 1
%conv = zext i8 %0 to i32
%or = or i32 %conv, 1
%conv1 = trunc i32 %or to i8
@@ -16,7 +16,7 @@ entry:
%and = and i32 1, %neg
%conv3 = trunc i32 %and to i8
store i8 %conv3, i8* @b, align 1
- %1 = load i8* @a, align 1
+ %1 = load i8, i8* @a, align 1
%conv4 = zext i8 %1 to i32
%conv5 = zext i8 %conv3 to i32
%tobool = icmp ne i32 %conv4, 0
diff --git a/llvm/test/Transforms/InstCombine/2012-06-06-LoadOfPHIs.ll b/llvm/test/Transforms/InstCombine/2012-06-06-LoadOfPHIs.ll
index 22466a96b66..4af1ca842c5 100644
--- a/llvm/test/Transforms/InstCombine/2012-06-06-LoadOfPHIs.ll
+++ b/llvm/test/Transforms/InstCombine/2012-06-06-LoadOfPHIs.ll
@@ -34,16 +34,16 @@ bb11: ; preds = %bb8
bb12: ; preds = %bb11, %bb10, %bb7, %bb6
%max.0 = phi double* [ %tmp, %bb6 ], [ %tmp2, %bb7 ], [ %tmp1, %bb10 ], [ %tmp2, %bb11 ]
-; CHECK: %tmp13 = load double* %tmp, align 8
-; CHECK: %tmp14 = load double* %tmp1, align 8
+; CHECK: %tmp13 = load double, double* %tmp, align 8
+; CHECK: %tmp14 = load double, double* %tmp1, align 8
; CHECK: %tmp15 = fcmp olt double %tmp13, %tmp14
- %tmp13 = load double* %tmp, align 8
- %tmp14 = load double* %tmp1, align 8
+ %tmp13 = load double, double* %tmp, align 8
+ %tmp14 = load double, double* %tmp1, align 8
%tmp15 = fcmp olt double %tmp13, %tmp14
br i1 %tmp15, label %bb16, label %bb21
bb16: ; preds = %bb12
- %tmp17 = load double* %tmp2, align 8
+ %tmp17 = load double, double* %tmp2, align 8
%tmp18 = fcmp olt double %tmp13, %tmp17
br i1 %tmp18, label %bb19, label %bb20
@@ -54,7 +54,7 @@ bb20: ; preds = %bb16
br label %bb26
bb21: ; preds = %bb12
- %tmp22 = load double* %tmp2, align 8
+ %tmp22 = load double, double* %tmp2, align 8
%tmp23 = fcmp olt double %tmp14, %tmp22
br i1 %tmp23, label %bb24, label %bb25
@@ -66,16 +66,16 @@ bb25: ; preds = %bb21
bb26: ; preds = %bb25, %bb24, %bb20, %bb19
%min.0 = phi double* [ %tmp, %bb19 ], [ %tmp2, %bb20 ], [ %tmp1, %bb24 ], [ %tmp2, %bb25 ]
-; CHECK: %tmp27 = load double* %min.0, align 8
-; CHECK: %tmp28 = load double* %max.0
+; CHECK: %tmp27 = load double, double* %min.0, align 8
+; CHECK: %tmp28 = load double, double* %max.0
; CHECK: %tmp29 = fadd double %tmp27, %tmp28
- %tmp27 = load double* %min.0, align 8
- %tmp28 = load double* %max.0
+ %tmp27 = load double, double* %min.0, align 8
+ %tmp28 = load double, double* %max.0
%tmp29 = fadd double %tmp27, %tmp28
%tmp30 = fdiv double %tmp29, 2.000000e+00
store double %tmp30, double* %outL
- %tmp31 = load double* %min.0
- %tmp32 = load double* %max.0
+ %tmp31 = load double, double* %min.0
+ %tmp32 = load double, double* %max.0
%tmp33 = fcmp oeq double %tmp31, %tmp32
br i1 %tmp33, label %bb34, label %bb35
@@ -107,11 +107,11 @@ bb45: ; preds = %bb41, %bb38
br i1 %tmp46, label %bb47, label %bb55
bb47: ; preds = %bb45
- %tmp48 = load double* %tmp1, align 8
- %tmp49 = load double* %tmp2, align 8
+ %tmp48 = load double, double* %tmp1, align 8
+ %tmp49 = load double, double* %tmp2, align 8
%tmp50 = fsub double %tmp48, %tmp49
- %tmp51 = load double* %max.0
- %tmp52 = load double* %min.0
+ %tmp51 = load double, double* %max.0
+ %tmp52 = load double, double* %min.0
%tmp53 = fsub double %tmp51, %tmp52
%tmp54 = fdiv double %tmp50, %tmp53
store double %tmp54, double* %outH
@@ -122,11 +122,11 @@ bb55: ; preds = %bb45
br i1 %tmp56, label %bb57, label %bb66
bb57: ; preds = %bb55
- %tmp58 = load double* %tmp2, align 8
- %tmp59 = load double* %tmp, align 8
+ %tmp58 = load double, double* %tmp2, align 8
+ %tmp59 = load double, double* %tmp, align 8
%tmp60 = fsub double %tmp58, %tmp59
- %tmp61 = load double* %max.0
- %tmp62 = load double* %min.0
+ %tmp61 = load double, double* %max.0
+ %tmp62 = load double, double* %min.0
%tmp63 = fsub double %tmp61, %tmp62
%tmp64 = fdiv double %tmp60, %tmp63
%tmp65 = fadd double 2.000000e+00, %tmp64
@@ -134,11 +134,11 @@ bb57: ; preds = %bb55
br label %bb75
bb66: ; preds = %bb55
- %tmp67 = load double* %tmp, align 8
- %tmp68 = load double* %tmp1, align 8
+ %tmp67 = load double, double* %tmp, align 8
+ %tmp68 = load double, double* %tmp1, align 8
%tmp69 = fsub double %tmp67, %tmp68
- %tmp70 = load double* %max.0
- %tmp71 = load double* %min.0
+ %tmp70 = load double, double* %max.0
+ %tmp71 = load double, double* %min.0
%tmp72 = fsub double %tmp70, %tmp71
%tmp73 = fdiv double %tmp69, %tmp72
%tmp74 = fadd double 4.000000e+00, %tmp73
@@ -146,7 +146,7 @@ bb66: ; preds = %bb55
br label %bb75
bb75: ; preds = %bb66, %bb57, %bb47
- %tmp76 = load double* %outH
+ %tmp76 = load double, double* %outH
%tmp77 = fdiv double %tmp76, 6.000000e+00
store double %tmp77, double* %outH
%tmp78 = fcmp olt double %tmp77, 0.000000e+00
diff --git a/llvm/test/Transforms/InstCombine/2012-07-25-LoadPart.ll b/llvm/test/Transforms/InstCombine/2012-07-25-LoadPart.ll
index 18aab7f27ef..d4f313091e1 100644
--- a/llvm/test/Transforms/InstCombine/2012-07-25-LoadPart.ll
+++ b/llvm/test/Transforms/InstCombine/2012-07-25-LoadPart.ll
@@ -5,7 +5,7 @@
@test = constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
define i64 @foo() {
- %ret = load i64* bitcast (i8* getelementptr (i8* bitcast ([4 x i32]* @test to i8*), i64 2) to i64*), align 1
+ %ret = load i64, i64* bitcast (i8* getelementptr (i8* bitcast ([4 x i32]* @test to i8*), i64 2) to i64*), align 1
ret i64 %ret
; 0x00030000_00020000 in [01 00/00 00 02 00 00 00 03 00/00 00 04 00 00 00]
; LE: ret i64 844424930263040
diff --git a/llvm/test/Transforms/InstCombine/2012-10-25-vector-of-pointers.ll b/llvm/test/Transforms/InstCombine/2012-10-25-vector-of-pointers.ll
index 653d619f0dd..23210650e2f 100644
--- a/llvm/test/Transforms/InstCombine/2012-10-25-vector-of-pointers.ll
+++ b/llvm/test/Transforms/InstCombine/2012-10-25-vector-of-pointers.ll
@@ -20,7 +20,7 @@ bb2: ; preds = %bb1
bb3: ; preds = %bb1
%tmp4 = bitcast double** %tmp to <2 x double*>*
- %tmp5 = load <2 x double*>* %tmp4, align 8
+ %tmp5 = load <2 x double*>, <2 x double*>* %tmp4, align 8
%tmp6 = ptrtoint <2 x double*> %tmp5 to <2 x i64>
%tmp7 = sub <2 x i64> zeroinitializer, %tmp6
%tmp8 = ashr exact <2 x i64> %tmp7, <i64 3, i64 3>
diff --git a/llvm/test/Transforms/InstCombine/2012-12-14-simp-vgep.ll b/llvm/test/Transforms/InstCombine/2012-12-14-simp-vgep.ll
index fc29b095e5c..46702f80c0c 100644
--- a/llvm/test/Transforms/InstCombine/2012-12-14-simp-vgep.ll
+++ b/llvm/test/Transforms/InstCombine/2012-12-14-simp-vgep.ll
@@ -3,7 +3,7 @@
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
define <4 x i32> @foo(<4 x i32*>* %in) {
- %t17 = load <4 x i32*>* %in, align 8
+ %t17 = load <4 x i32*>, <4 x i32*>* %in, align 8
%t18 = icmp eq <4 x i32*> %t17, zeroinitializer
%t19 = zext <4 x i1> %t18 to <4 x i32>
ret <4 x i32> %t19
diff --git a/llvm/test/Transforms/InstCombine/2013-03-05-Combine-BitcastTy-Into-Alloca.ll b/llvm/test/Transforms/InstCombine/2013-03-05-Combine-BitcastTy-Into-Alloca.ll
index 9c50e66ac83..86b5e0ac7a0 100644
--- a/llvm/test/Transforms/InstCombine/2013-03-05-Combine-BitcastTy-Into-Alloca.ll
+++ b/llvm/test/Transforms/InstCombine/2013-03-05-Combine-BitcastTy-Into-Alloca.ll
@@ -16,8 +16,8 @@ define { i64, i64 } @function(i32 %x, i32 %y, i32 %z) nounwind {
; CHECK-NOT: bitcast i96* %retval to %struct._my_struct*
entry:
%retval = alloca %struct._my_struct, align 8
- %k.sroa.0.0.copyload = load i96* bitcast (%struct._my_struct* @initval to i96*), align 1
- %k.sroa.1.12.copyload = load i32* bitcast ([4 x i8]* getelementptr inbounds (%struct._my_struct* @initval, i64 0, i32 1) to i32*), align 1
+ %k.sroa.0.0.copyload = load i96, i96* bitcast (%struct._my_struct* @initval to i96*), align 1
+ %k.sroa.1.12.copyload = load i32, i32* bitcast ([4 x i8]* getelementptr inbounds (%struct._my_struct* @initval, i64 0, i32 1) to i32*), align 1
%0 = zext i32 %x to i96
%bf.value = shl nuw nsw i96 %0, 6
%bf.clear = and i96 %k.sroa.0.0.copyload, -288230376151711744
@@ -39,7 +39,7 @@ entry:
%.fca.0.insert = insertvalue { i64, i64 } undef, i64 %trunc, 0
%retval.8.idx12 = getelementptr inbounds %struct._my_struct, %struct._my_struct* %retval, i64 0, i32 0, i64 8
%retval.8.cast13 = bitcast i8* %retval.8.idx12 to i64*
- %retval.8.load14 = load i64* %retval.8.cast13, align 8
+ %retval.8.load14 = load i64, i64* %retval.8.cast13, align 8
%.fca.1.insert = insertvalue { i64, i64 } %.fca.0.insert, i64 %retval.8.load14, 1
ret { i64, i64 } %.fca.1.insert
}
diff --git a/llvm/test/Transforms/InstCombine/CPP_min_max.ll b/llvm/test/Transforms/InstCombine/CPP_min_max.ll
index b3d081b613a..04bf0ceefea 100644
--- a/llvm/test/Transforms/InstCombine/CPP_min_max.ll
+++ b/llvm/test/Transforms/InstCombine/CPP_min_max.ll
@@ -10,11 +10,11 @@
define void @_Z5test1RiS_(i32* %x, i32* %y) {
entry:
- %tmp.1.i = load i32* %y ; <i32> [#uses=1]
- %tmp.3.i = load i32* %x ; <i32> [#uses=1]
+ %tmp.1.i = load i32, i32* %y ; <i32> [#uses=1]
+ %tmp.3.i = load i32, i32* %x ; <i32> [#uses=1]
%tmp.4.i = icmp slt i32 %tmp.1.i, %tmp.3.i ; <i1> [#uses=1]
%retval.i = select i1 %tmp.4.i, i32* %y, i32* %x ; <i32*> [#uses=1]
- %tmp.4 = load i32* %retval.i ; <i32> [#uses=1]
+ %tmp.4 = load i32, i32* %retval.i ; <i32> [#uses=1]
store i32 %tmp.4, i32* %x
ret void
}
@@ -22,12 +22,12 @@ entry:
define void @_Z5test2RiS_(i32* %x, i32* %y) {
entry:
%tmp.0 = alloca i32 ; <i32*> [#uses=2]
- %tmp.2 = load i32* %x ; <i32> [#uses=2]
+ %tmp.2 = load i32, i32* %x ; <i32> [#uses=2]
store i32 %tmp.2, i32* %tmp.0
- %tmp.3.i = load i32* %y ; <i32> [#uses=1]
+ %tmp.3.i = load i32, i32* %y ; <i32> [#uses=1]
%tmp.4.i = icmp slt i32 %tmp.2, %tmp.3.i ; <i1> [#uses=1]
%retval.i = select i1 %tmp.4.i, i32* %y, i32* %tmp.0 ; <i32*> [#uses=1]
- %tmp.6 = load i32* %retval.i ; <i32> [#uses=1]
+ %tmp.6 = load i32, i32* %retval.i ; <i32> [#uses=1]
store i32 %tmp.6, i32* %y
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/add3.ll b/llvm/test/Transforms/InstCombine/add3.ll
index d8e708a6346..9d3842f1a31 100644
--- a/llvm/test/Transforms/InstCombine/add3.ll
+++ b/llvm/test/Transforms/InstCombine/add3.ll
@@ -13,7 +13,7 @@ EntryBlock:
add i32 %.val24, -16
inttoptr i32 %2 to i32*
getelementptr i32, i32* %3, i32 1
- load i32* %4
+ load i32, i32* %4
tail call i32 @callee( i32 %5 )
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/addrspacecast.ll b/llvm/test/Transforms/InstCombine/addrspacecast.ll
index b52b6806a60..5a4b6b22f08 100644
--- a/llvm/test/Transforms/InstCombine/addrspacecast.ll
+++ b/llvm/test/Transforms/InstCombine/addrspacecast.ll
@@ -106,10 +106,10 @@ define i32 @canonicalize_addrspacecast([16 x i32] addrspace(1)* %arr) {
; CHECK-LABEL: @canonicalize_addrspacecast(
; CHECK-NEXT: getelementptr inbounds [16 x i32], [16 x i32] addrspace(1)* %arr, i32 0, i32 0
; CHECK-NEXT: addrspacecast i32 addrspace(1)* %{{[a-zA-Z0-9]+}} to i32*
-; CHECK-NEXT: load i32*
+; CHECK-NEXT: load i32, i32*
; CHECK-NEXT: ret i32
%p = addrspacecast [16 x i32] addrspace(1)* %arr to i32*
- %v = load i32* %p
+ %v = load i32, i32* %p
ret i32 %v
}
@@ -134,7 +134,7 @@ loop.body:
%i = phi i32 [ 0, %entry ], [ %i.inc, %loop.body ]
%sum = phi i32 [ 0, %entry ], [ %sum.inc, %loop.body]
%ptr = getelementptr i8, i8* %alloca, i32 %i
- %load = load i8* %ptr
+ %load = load i8, i8* %ptr
%ext = zext i8 %load to i32
%sum.inc = add i32 %sum, %ext
%i.inc = add i32 %i, 1
diff --git a/llvm/test/Transforms/InstCombine/alias-recursion.ll b/llvm/test/Transforms/InstCombine/alias-recursion.ll
index fa63726d2d3..8e53995d577 100644
--- a/llvm/test/Transforms/InstCombine/alias-recursion.ll
+++ b/llvm/test/Transforms/InstCombine/alias-recursion.ll
@@ -19,6 +19,6 @@ for.body: ; preds = %for.body, %entry
for.end: ; preds = %for.body, %entry
%A = phi i32 (%class.A*)** [ bitcast (i8** @vtbl to i32 (%class.A*)**), %for.body ], [ null, %entry ]
- %B = load i32 (%class.A*)** %A
+ %B = load i32 (%class.A*)*, i32 (%class.A*)** %A
ret i32 (%class.A*)* %B
}
diff --git a/llvm/test/Transforms/InstCombine/align-addr.ll b/llvm/test/Transforms/InstCombine/align-addr.ll
index cd4fc73b543..6286517ce79 100644
--- a/llvm/test/Transforms/InstCombine/align-addr.ll
+++ b/llvm/test/Transforms/InstCombine/align-addr.ll
@@ -43,7 +43,7 @@ return:
define <16 x i8> @test1(<2 x i64> %x) {
entry:
- %tmp = load <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*), align 1
+ %tmp = load <16 x i8>, <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*), align 1
ret <16 x i8> %tmp
}
@@ -53,7 +53,7 @@ define <16 x i8> @test1_as1(<2 x i64> %x) {
; CHECK-LABEL: @test1_as1(
; CHECK: tmp = load
; CHECK: GLOBAL_as1{{.*}}align 16
- %tmp = load <16 x i8> addrspace(1)* bitcast ([4 x i32] addrspace(1)* @GLOBAL_as1 to <16 x i8> addrspace(1)*), align 1
+ %tmp = load <16 x i8>, <16 x i8> addrspace(1)* bitcast ([4 x i32] addrspace(1)* @GLOBAL_as1 to <16 x i8> addrspace(1)*), align 1
ret <16 x i8> %tmp
}
@@ -63,7 +63,7 @@ define <16 x i8> @test1_as1_gep(<2 x i64> %x) {
; CHECK-LABEL: @test1_as1_gep(
; CHECK: tmp = load
; CHECK: GLOBAL_as1_gep{{.*}}align 16
- %tmp = load <16 x i8> addrspace(1)* bitcast (i32 addrspace(1)* getelementptr ([8 x i32] addrspace(1)* @GLOBAL_as1_gep, i16 0, i16 4) to <16 x i8> addrspace(1)*), align 1
+ %tmp = load <16 x i8>, <16 x i8> addrspace(1)* bitcast (i32 addrspace(1)* getelementptr ([8 x i32] addrspace(1)* @GLOBAL_as1_gep, i16 0, i16 4) to <16 x i8> addrspace(1)*), align 1
ret <16 x i8> %tmp
}
@@ -71,11 +71,11 @@ define <16 x i8> @test1_as1_gep(<2 x i64> %x) {
; When a load or store lacks an explicit alignment, add one.
; CHECK-LABEL: @test2(
-; CHECK: load double* %p, align 8
+; CHECK: load double, double* %p, align 8
; CHECK: store double %n, double* %p, align 8
define double @test2(double* %p, double %n) nounwind {
- %t = load double* %p
+ %t = load double, double* %p
store double %n, double* %p
ret double %t
}
diff --git a/llvm/test/Transforms/InstCombine/align-attr.ll b/llvm/test/Transforms/InstCombine/align-attr.ll
index 9f366bf8fab..99a17db13c4 100644
--- a/llvm/test/Transforms/InstCombine/align-attr.ll
+++ b/llvm/test/Transforms/InstCombine/align-attr.ll
@@ -5,11 +5,11 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
define i32 @foo1(i32* align 32 %a) #0 {
entry:
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
ret i32 %0
; CHECK-LABEL: @foo1
-; CHECK-DAG: load i32* %a, align 32
+; CHECK-DAG: load i32, i32* %a, align 32
; CHECK: ret i32
}
diff --git a/llvm/test/Transforms/InstCombine/align-external.ll b/llvm/test/Transforms/InstCombine/align-external.ll
index 66ff9c16e42..ee98a012017 100644
--- a/llvm/test/Transforms/InstCombine/align-external.ll
+++ b/llvm/test/Transforms/InstCombine/align-external.ll
@@ -23,7 +23,7 @@ define i64 @foo(i64 %a) {
define i32 @bar() {
; CHECK-LABEL: @bar(
- %r = load i32* @B, align 1
+ %r = load i32, i32* @B, align 1
; CHECK: align 1
ret i32 %r
}
diff --git a/llvm/test/Transforms/InstCombine/aligned-altivec.ll b/llvm/test/Transforms/InstCombine/aligned-altivec.ll
index 7ca6406d242..10b4e4d6263 100644
--- a/llvm/test/Transforms/InstCombine/aligned-altivec.ll
+++ b/llvm/test/Transforms/InstCombine/aligned-altivec.ll
@@ -14,7 +14,7 @@ entry:
; CHECK: @llvm.ppc.altivec.lvx
; CHECK: ret <4 x i32>
- %v0 = load <4 x i32>* %h, align 8
+ %v0 = load <4 x i32>, <4 x i32>* %h, align 8
%a = add <4 x i32> %v0, %vl
ret <4 x i32> %a
}
@@ -29,7 +29,7 @@ entry:
; CHECK-NOT: @llvm.ppc.altivec.lvx
; CHECK: ret <4 x i32>
- %v0 = load <4 x i32>* %h, align 8
+ %v0 = load <4 x i32>, <4 x i32>* %h, align 8
%a = add <4 x i32> %v0, %vl
ret <4 x i32> %a
}
@@ -42,7 +42,7 @@ entry:
%hv = bitcast <4 x i32>* %h1 to i8*
call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
- %v0 = load <4 x i32>* %h, align 8
+ %v0 = load <4 x i32>, <4 x i32>* %h, align 8
ret <4 x i32> %v0
; CHECK-LABEL: @test2
@@ -56,7 +56,7 @@ entry:
%hv = bitcast <4 x i32>* %h1 to i8*
call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
- %v0 = load <4 x i32>* %h, align 8
+ %v0 = load <4 x i32>, <4 x i32>* %h, align 8
ret <4 x i32> %v0
; CHECK-LABEL: @test2
@@ -76,7 +76,7 @@ entry:
; CHECK: @llvm.ppc.altivec.lvxl
; CHECK: ret <4 x i32>
- %v0 = load <4 x i32>* %h, align 8
+ %v0 = load <4 x i32>, <4 x i32>* %h, align 8
%a = add <4 x i32> %v0, %vl
ret <4 x i32> %a
}
@@ -91,7 +91,7 @@ entry:
; CHECK-NOT: @llvm.ppc.altivec.lvxl
; CHECK: ret <4 x i32>
- %v0 = load <4 x i32>* %h, align 8
+ %v0 = load <4 x i32>, <4 x i32>* %h, align 8
%a = add <4 x i32> %v0, %vl
ret <4 x i32> %a
}
@@ -104,7 +104,7 @@ entry:
%hv = bitcast <4 x i32>* %h1 to i8*
call void @llvm.ppc.altivec.stvxl(<4 x i32> %d, i8* %hv)
- %v0 = load <4 x i32>* %h, align 8
+ %v0 = load <4 x i32>, <4 x i32>* %h, align 8
ret <4 x i32> %v0
; CHECK-LABEL: @test2l
@@ -118,7 +118,7 @@ entry:
%hv = bitcast <4 x i32>* %h1 to i8*
call void @llvm.ppc.altivec.stvxl(<4 x i32> %d, i8* %hv)
- %v0 = load <4 x i32>* %h, align 8
+ %v0 = load <4 x i32>, <4 x i32>* %h, align 8
ret <4 x i32> %v0
; CHECK-LABEL: @test2l
diff --git a/llvm/test/Transforms/InstCombine/aligned-qpx.ll b/llvm/test/Transforms/InstCombine/aligned-qpx.ll
index a210418b56c..e303ddd17d5 100644
--- a/llvm/test/Transforms/InstCombine/aligned-qpx.ll
+++ b/llvm/test/Transforms/InstCombine/aligned-qpx.ll
@@ -14,7 +14,7 @@ entry:
; CHECK: @llvm.ppc.qpx.qvlfs
; CHECK: ret <4 x double>
- %v0 = load <4 x float>* %h, align 8
+ %v0 = load <4 x float>, <4 x float>* %h, align 8
%v0e = fpext <4 x float> %v0 to <4 x double>
%a = fadd <4 x double> %v0e, %vl
ret <4 x double> %a
@@ -30,7 +30,7 @@ entry:
; CHECK-NOT: @llvm.ppc.qpx.qvlfs
; CHECK: ret <4 x double>
- %v0 = load <4 x float>* %h, align 8
+ %v0 = load <4 x float>, <4 x float>* %h, align 8
%v0e = fpext <4 x float> %v0 to <4 x double>
%a = fadd <4 x double> %v0e, %vl
ret <4 x double> %a
@@ -44,7 +44,7 @@ entry:
%hv = bitcast <4 x float>* %h1 to i8*
call void @llvm.ppc.qpx.qvstfs(<4 x double> %d, i8* %hv)
- %v0 = load <4 x float>* %h, align 8
+ %v0 = load <4 x float>, <4 x float>* %h, align 8
ret <4 x float> %v0
; CHECK-LABEL: @test2
@@ -58,7 +58,7 @@ entry:
%hv = bitcast <4 x float>* %h1 to i8*
call void @llvm.ppc.qpx.qvstfs(<4 x double> %d, i8* %hv)
- %v0 = load <4 x float>* %h, align 8
+ %v0 = load <4 x float>, <4 x float>* %h, align 8
ret <4 x float> %v0
; CHECK-LABEL: @test2
@@ -78,7 +78,7 @@ entry:
; CHECK: @llvm.ppc.qpx.qvlfd
; CHECK: ret <4 x double>
- %v0 = load <4 x double>* %h, align 8
+ %v0 = load <4 x double>, <4 x double>* %h, align 8
%a = fadd <4 x double> %v0, %vl
ret <4 x double> %a
}
@@ -93,7 +93,7 @@ entry:
; CHECK: @llvm.ppc.qpx.qvlfd
; CHECK: ret <4 x double>
- %v0 = load <4 x double>* %h, align 8
+ %v0 = load <4 x double>, <4 x double>* %h, align 8
%a = fadd <4 x double> %v0, %vl
ret <4 x double> %a
}
@@ -108,7 +108,7 @@ entry:
; CHECK-NOT: @llvm.ppc.qpx.qvlfd
; CHECK: ret <4 x double>
- %v0 = load <4 x double>* %h, align 8
+ %v0 = load <4 x double>, <4 x double>* %h, align 8
%a = fadd <4 x double> %v0, %vl
ret <4 x double> %a
}
@@ -121,7 +121,7 @@ entry:
%hv = bitcast <4 x double>* %h1 to i8*
call void @llvm.ppc.qpx.qvstfd(<4 x double> %d, i8* %hv)
- %v0 = load <4 x double>* %h, align 8
+ %v0 = load <4 x double>, <4 x double>* %h, align 8
ret <4 x double> %v0
; CHECK-LABEL: @test2l
@@ -135,7 +135,7 @@ entry:
%hv = bitcast <4 x double>* %h1 to i8*
call void @llvm.ppc.qpx.qvstfd(<4 x double> %d, i8* %hv)
- %v0 = load <4 x double>* %h, align 8
+ %v0 = load <4 x double>, <4 x double>* %h, align 8
ret <4 x double> %v0
; CHECK-LABEL: @test2ln
@@ -149,7 +149,7 @@ entry:
%hv = bitcast <4 x double>* %h1 to i8*
call void @llvm.ppc.qpx.qvstfd(<4 x double> %d, i8* %hv)
- %v0 = load <4 x double>* %h, align 8
+ %v0 = load <4 x double>, <4 x double>* %h, align 8
ret <4 x double> %v0
; CHECK-LABEL: @test2l
diff --git a/llvm/test/Transforms/InstCombine/alloca.ll b/llvm/test/Transforms/InstCombine/alloca.ll
index e2755562649..105029fb699 100644
--- a/llvm/test/Transforms/InstCombine/alloca.ll
+++ b/llvm/test/Transforms/InstCombine/alloca.ll
@@ -18,7 +18,7 @@ define void @test() {
call void (...)* @use( i32* %Y )
%Z = alloca { } ; <{ }*> [#uses=1]
call void (...)* @use( { }* %Z )
- %size = load i32* @int
+ %size = load i32, i32* @int
%A = alloca {{}}, i32 %size
call void (...)* @use( {{}}* %A )
ret void
diff --git a/llvm/test/Transforms/InstCombine/assume-loop-align.ll b/llvm/test/Transforms/InstCombine/assume-loop-align.ll
index 9fbc68eff3c..e803ba61774 100644
--- a/llvm/test/Transforms/InstCombine/assume-loop-align.ll
+++ b/llvm/test/Transforms/InstCombine/assume-loop-align.ll
@@ -19,14 +19,14 @@ entry:
br label %for.body
; CHECK-LABEL: @foo
-; CHECK: load i32* {{.*}} align 64
+; CHECK: load i32, i32* {{.*}} align 64
; CHECK: store i32 {{.*}} align 64
; CHECK: ret
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, 1
%arrayidx5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %add, i32* %arrayidx5, align 4
diff --git a/llvm/test/Transforms/InstCombine/assume-redundant.ll b/llvm/test/Transforms/InstCombine/assume-redundant.ll
index cb98755d1af..4b869ef2c50 100644
--- a/llvm/test/Transforms/InstCombine/assume-redundant.ll
+++ b/llvm/test/Transforms/InstCombine/assume-redundant.ll
@@ -16,7 +16,7 @@ define void @_Z3fooR1s(%struct.s* nocapture readonly dereferenceable(8) %x) #0 {
entry:
%a = getelementptr inbounds %struct.s, %struct.s* %x, i64 0, i32 0
- %0 = load double** %a, align 8
+ %0 = load double*, double** %a, align 8
%ptrint = ptrtoint double* %0 to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
@@ -26,7 +26,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next.1, %for.body ]
tail call void @llvm.assume(i1 %maskcond)
%arrayidx = getelementptr inbounds double, double* %0, i64 %indvars.iv
- %1 = load double* %arrayidx, align 16
+ %1 = load double, double* %arrayidx, align 16
%add = fadd double %1, 1.000000e+00
tail call void @llvm.assume(i1 %maskcond)
%mul = fmul double %add, 2.000000e+00
@@ -34,7 +34,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
tail call void @llvm.assume(i1 %maskcond)
%arrayidx.1 = getelementptr inbounds double, double* %0, i64 %indvars.iv.next
- %2 = load double* %arrayidx.1, align 8
+ %2 = load double, double* %arrayidx.1, align 8
%add.1 = fadd double %2, 1.000000e+00
tail call void @llvm.assume(i1 %maskcond)
%mul.1 = fmul double %add.1, 2.000000e+00
diff --git a/llvm/test/Transforms/InstCombine/assume.ll b/llvm/test/Transforms/InstCombine/assume.ll
index 7e45c04622a..2f9213820f2 100644
--- a/llvm/test/Transforms/InstCombine/assume.ll
+++ b/llvm/test/Transforms/InstCombine/assume.ll
@@ -5,12 +5,12 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
define i32 @foo1(i32* %a) #0 {
entry:
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
; Check that the alignment has been upgraded and that the assume has not
; been removed:
; CHECK-LABEL: @foo1
-; CHECK-DAG: load i32* %a, align 32
+; CHECK-DAG: load i32, i32* %a, align 32
; CHECK-DAG: call void @llvm.assume
; CHECK: ret i32
@@ -27,7 +27,7 @@ define i32 @foo2(i32* %a) #0 {
entry:
; Same check as in @foo1, but make sure it works if the assume is first too.
; CHECK-LABEL: @foo2
-; CHECK-DAG: load i32* %a, align 32
+; CHECK-DAG: load i32, i32* %a, align 32
; CHECK-DAG: call void @llvm.assume
; CHECK: ret i32
@@ -36,7 +36,7 @@ entry:
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
ret i32 %0
}
@@ -192,7 +192,7 @@ declare void @escape(i32* %a)
; metadata form?
define i1 @nonnull1(i32** %a) {
entry:
- %load = load i32** %a
+ %load = load i32*, i32** %a
%cmp = icmp ne i32* %load, null
tail call void @llvm.assume(i1 %cmp)
tail call void @escape(i32* %load)
@@ -209,7 +209,7 @@ entry:
; to pointer types. Doing otherwise would be illegal.
define i1 @nonnull2(i32* %a) {
entry:
- %load = load i32* %a
+ %load = load i32, i32* %a
%cmp = icmp ne i32 %load, 0
tail call void @llvm.assume(i1 %cmp)
%rval = icmp eq i32 %load, 0
@@ -224,7 +224,7 @@ entry:
; if the assume is control dependent on something else
define i1 @nonnull3(i32** %a, i1 %control) {
entry:
- %load = load i32** %a
+ %load = load i32*, i32** %a
%cmp = icmp ne i32* %load, null
br i1 %control, label %taken, label %not_taken
taken:
@@ -244,7 +244,7 @@ not_taken:
; interrupted by an exception being thrown
define i1 @nonnull4(i32** %a) {
entry:
- %load = load i32** %a
+ %load = load i32*, i32** %a
;; This call may throw!
tail call void @escape(i32* %load)
%cmp = icmp ne i32* %load, null
diff --git a/llvm/test/Transforms/InstCombine/atomic.ll b/llvm/test/Transforms/InstCombine/atomic.ll
index 98cecefcc29..5754a5a4ba5 100644
--- a/llvm/test/Transforms/InstCombine/atomic.ll
+++ b/llvm/test/Transforms/InstCombine/atomic.ll
@@ -7,10 +7,10 @@ target triple = "x86_64-apple-macosx10.7.0"
define i32 @test2(i32* %p) {
; CHECK-LABEL: define i32 @test2(
-; CHECK: %x = load atomic i32* %p seq_cst, align 4
+; CHECK: %x = load atomic i32, i32* %p seq_cst, align 4
; CHECK: shl i32 %x, 1
- %x = load atomic i32* %p seq_cst, align 4
- %y = load i32* %p, align 4
+ %x = load atomic i32, i32* %p seq_cst, align 4
+ %y = load i32, i32* %p, align 4
%z = add i32 %x, %y
ret i32 %z
}
diff --git a/llvm/test/Transforms/InstCombine/bitcast-alias-function.ll b/llvm/test/Transforms/InstCombine/bitcast-alias-function.ll
index cfec09200db..1a598a5d415 100644
--- a/llvm/test/Transforms/InstCombine/bitcast-alias-function.ll
+++ b/llvm/test/Transforms/InstCombine/bitcast-alias-function.ll
@@ -91,12 +91,12 @@ define void @bitcast_alias_scalar(float* noalias %source, float* noalias %dest)
entry:
; CHECK-LABEL: @bitcast_alias_scalar
; CHECK: bitcast float* %source to i32*
-; CHECK: load i32*
+; CHECK: load i32, i32*
; CHECK-NOT: fptoui
; CHECK-NOT: uitofp
; CHECK: bitcast float* %dest to i32*
; CHECK: store i32
- %tmp = load float* %source, align 8
+ %tmp = load float, float* %source, align 8
%call = call float @alias_i32_to_f32(float %tmp) nounwind
store float %call, float* %dest, align 8
ret void
@@ -107,12 +107,12 @@ define void @bitcast_alias_vector(<2 x float>* noalias %source, <2 x float>* noa
entry:
; CHECK-LABEL: @bitcast_alias_vector
; CHECK: bitcast <2 x float>* %source to <2 x i32>*
-; CHECK: load <2 x i32>*
+; CHECK: load <2 x i32>, <2 x i32>*
; CHECK-NOT: fptoui
; CHECK-NOT: uitofp
; CHECK: bitcast <2 x float>* %dest to <2 x i32>*
; CHECK: store <2 x i32>
- %tmp = load <2 x float>* %source, align 8
+ %tmp = load <2 x float>, <2 x float>* %source, align 8
%call = call <2 x float> @alias_v2i32_to_v2f32(<2 x float> %tmp) nounwind
store <2 x float> %call, <2 x float>* %dest, align 8
ret void
@@ -123,11 +123,11 @@ define void @bitcast_alias_vector_scalar_same_size(<2 x float>* noalias %source,
entry:
; CHECK-LABEL: @bitcast_alias_vector_scalar_same_size
; CHECK: bitcast <2 x float>* %source to i64*
-; CHECK: load i64*
+; CHECK: load i64, i64*
; CHECK: %call = call i64 @func_i64
; CHECK: bitcast <2 x float>* %dest to i64*
; CHECK: store i64
- %tmp = load <2 x float>* %source, align 8
+ %tmp = load <2 x float>, <2 x float>* %source, align 8
%call = call <2 x float> @alias_v2f32_to_i64(<2 x float> %tmp) nounwind
store <2 x float> %call, <2 x float>* %dest, align 8
ret void
@@ -137,11 +137,11 @@ define void @bitcast_alias_scalar_vector_same_size(i64* noalias %source, i64* no
entry:
; CHECK-LABEL: @bitcast_alias_scalar_vector_same_size
; CHECK: bitcast i64* %source to <2 x float>*
-; CHECK: load <2 x float>*
+; CHECK: load <2 x float>, <2 x float>*
; CHECK: call <2 x float> @func_v2f32
; CHECK: bitcast i64* %dest to <2 x float>*
; CHECK: store <2 x float>
- %tmp = load i64* %source, align 8
+ %tmp = load i64, i64* %source, align 8
%call = call i64 @alias_i64_to_v2f32(i64 %tmp) nounwind
store i64 %call, i64* %dest, align 8
ret void
@@ -151,11 +151,11 @@ define void @bitcast_alias_vector_ptrs_same_size(<2 x i64*>* noalias %source, <2
entry:
; CHECK-LABEL: @bitcast_alias_vector_ptrs_same_size
; CHECK: bitcast <2 x i64*>* %source to <2 x i32*>*
-; CHECK: load <2 x i32*>*
+; CHECK: load <2 x i32*>, <2 x i32*>*
; CHECK: call <2 x i32*> @func_v2i32p
; CHECK: bitcast <2 x i64*>* %dest to <2 x i32*>*
; CHECK: store <2 x i32*>
- %tmp = load <2 x i64*>* %source, align 8
+ %tmp = load <2 x i64*>, <2 x i64*>* %source, align 8
%call = call <2 x i64*> @alias_v2i32p_to_v2i64p(<2 x i64*> %tmp) nounwind
store <2 x i64*> %call, <2 x i64*>* %dest, align 8
ret void
@@ -169,7 +169,7 @@ entry:
; CHECK-NOT: fptoui
; CHECK: @alias_i64_to_f32
; CHECK-NOT: uitofp
- %tmp = load float* %source, align 8
+ %tmp = load float, float* %source, align 8
%call = call float @alias_i64_to_f32(float %tmp) nounwind
store float %call, float* %dest, align 8
ret void
@@ -181,7 +181,7 @@ entry:
; CHECK-NOT: fptoui <2 x float> %tmp to <2 x i64>
; CHECK: @alias_v2i64_to_v2f32
; CHECK-NOT: uitofp <2 x i64> %call to <2 x float>
- %tmp = load <2 x float>* %source, align 8
+ %tmp = load <2 x float>, <2 x float>* %source, align 8
%call = call <2 x float> @alias_v2i64_to_v2f32(<2 x float> %tmp) nounwind
store <2 x float> %call, <2 x float>* %dest, align 8
ret void
@@ -191,7 +191,7 @@ define void @bitcast_alias_vector_mismatched_number_elements(<4 x float>* noalia
entry:
; CHECK-LABEL: @bitcast_alias_vector_mismatched_number_elements
; CHECK: %call = call <4 x float> @alias_v2i32_to_v4f32
- %tmp = load <4 x float>* %source, align 8
+ %tmp = load <4 x float>, <4 x float>* %source, align 8
%call = call <4 x float> @alias_v2i32_to_v4f32(<4 x float> %tmp) nounwind
store <4 x float> %call, <4 x float>* %dest, align 8
ret void
@@ -201,7 +201,7 @@ define void @bitcast_alias_vector_scalar_mismatched_bit_size(<4 x float>* noalia
entry:
; CHECK-LABEL: @bitcast_alias_vector_scalar_mismatched_bit_size
; CHECK: %call = call <4 x float> @alias_v4f32_to_i64
- %tmp = load <4 x float>* %source, align 8
+ %tmp = load <4 x float>, <4 x float>* %source, align 8
%call = call <4 x float> @alias_v4f32_to_i64(<4 x float> %tmp) nounwind
store <4 x float> %call, <4 x float>* %dest, align 8
ret void
@@ -211,7 +211,7 @@ define void @bitcast_alias_vector_ptrs_scalar_mismatched_bit_size(<4 x i32*>* no
entry:
; CHECK-LABEL: @bitcast_alias_vector_ptrs_scalar_mismatched_bit_size
; CHECK: @alias_v4i32p_to_i64
- %tmp = load <4 x i32*>* %source, align 8
+ %tmp = load <4 x i32*>, <4 x i32*>* %source, align 8
%call = call <4 x i32*> @alias_v4i32p_to_i64(<4 x i32*> %tmp) nounwind
store <4 x i32*> %call, <4 x i32*>* %dest, align 8
ret void
@@ -221,7 +221,7 @@ define void @bitcast_alias_scalar_vector_ptrs_same_size(i64* noalias %source, i6
entry:
; CHECK-LABEL: @bitcast_alias_scalar_vector_ptrs_same_size
; CHECK: @alias_i64_to_v2i32p
- %tmp = load i64* %source, align 8
+ %tmp = load i64, i64* %source, align 8
%call = call i64 @alias_i64_to_v2i32p(i64 %tmp) nounwind
store i64 %call, i64* %dest, align 8
ret void
@@ -231,7 +231,7 @@ define void @bitcast_alias_scalar_vector_mismatched_bit_size(i64* noalias %sourc
entry:
; CHECK-LABEL: @bitcast_alias_scalar_vector_mismatched_bit_size
; CHECK: call i64 @alias_i64_to_v4f32
- %tmp = load i64* %source, align 8
+ %tmp = load i64, i64* %source, align 8
%call = call i64 @alias_i64_to_v4f32(i64 %tmp) nounwind
store i64 %call, i64* %dest, align 8
ret void
diff --git a/llvm/test/Transforms/InstCombine/bitcast.ll b/llvm/test/Transforms/InstCombine/bitcast.ll
index c7a520bcf36..579839e4245 100644
--- a/llvm/test/Transforms/InstCombine/bitcast.ll
+++ b/llvm/test/Transforms/InstCombine/bitcast.ll
@@ -148,7 +148,7 @@ define <2 x i16> @BitcastInsert(i32 %a) {
; PR17293
define <2 x i64> @test7(<2 x i8*>* %arg) nounwind {
%cast = bitcast <2 x i8*>* %arg to <2 x i64>*
- %load = load <2 x i64>* %cast, align 16
+ %load = load <2 x i64>, <2 x i64>* %cast, align 16
ret <2 x i64> %load
; CHECK: @test7
; CHECK: bitcast
diff --git a/llvm/test/Transforms/InstCombine/bittest.ll b/llvm/test/Transforms/InstCombine/bittest.ll
index 84ee7dd602d..edf65d5a87b 100644
--- a/llvm/test/Transforms/InstCombine/bittest.ll
+++ b/llvm/test/Transforms/InstCombine/bittest.ll
@@ -5,7 +5,7 @@
define void @_Z12h000007_testv(i32* %P) {
entry:
- %tmp.2 = load i32* @b_rec.0 ; <i32> [#uses=1]
+ %tmp.2 = load i32, i32* @b_rec.0 ; <i32> [#uses=1]
%tmp.9 = or i32 %tmp.2, -989855744 ; <i32> [#uses=2]
%tmp.16 = and i32 %tmp.9, -805306369 ; <i32> [#uses=2]
%tmp.17 = and i32 %tmp.9, -973078529 ; <i32> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/call2.ll b/llvm/test/Transforms/InstCombine/call2.ll
index 1f45c7ab75d..467eb077eaf 100644
--- a/llvm/test/Transforms/InstCombine/call2.ll
+++ b/llvm/test/Transforms/InstCombine/call2.ll
@@ -8,7 +8,7 @@ entry:
br label %return
return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval1
}
@@ -20,6 +20,6 @@ entry:
br label %return
return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval1
}
diff --git a/llvm/test/Transforms/InstCombine/cast.ll b/llvm/test/Transforms/InstCombine/cast.ll
index d34469924f1..c5775010cdf 100644
--- a/llvm/test/Transforms/InstCombine/cast.ll
+++ b/llvm/test/Transforms/InstCombine/cast.ll
@@ -731,8 +731,8 @@ define %s @test68(%s *%p, i64 %i) {
%pp = getelementptr inbounds i8, i8* %q, i64 %o
; CHECK-NEXT: getelementptr %s, %s*
%r = bitcast i8* %pp to %s*
- %l = load %s* %r
-; CHECK-NEXT: load %s*
+ %l = load %s, %s* %r
+; CHECK-NEXT: load %s, %s*
ret %s %l
; CHECK-NEXT: ret %s
}
@@ -741,13 +741,13 @@ define %s @test68(%s *%p, i64 %i) {
define %s @test68_addrspacecast(%s* %p, i64 %i) {
; CHECK-LABEL: @test68_addrspacecast(
; CHECK-NEXT: getelementptr %s, %s*
-; CHECK-NEXT: load %s*
+; CHECK-NEXT: load %s, %s*
; CHECK-NEXT: ret %s
%o = mul i64 %i, 12
%q = addrspacecast %s* %p to i8 addrspace(2)*
%pp = getelementptr inbounds i8, i8 addrspace(2)* %q, i64 %o
%r = addrspacecast i8 addrspace(2)* %pp to %s*
- %l = load %s* %r
+ %l = load %s, %s* %r
ret %s %l
}
@@ -755,13 +755,13 @@ define %s @test68_addrspacecast_2(%s* %p, i64 %i) {
; CHECK-LABEL: @test68_addrspacecast_2(
; CHECK-NEXT: getelementptr %s, %s* %p
; CHECK-NEXT: addrspacecast
-; CHECK-NEXT: load %s addrspace(1)*
+; CHECK-NEXT: load %s, %s addrspace(1)*
; CHECK-NEXT: ret %s
%o = mul i64 %i, 12
%q = addrspacecast %s* %p to i8 addrspace(2)*
%pp = getelementptr inbounds i8, i8 addrspace(2)* %q, i64 %o
%r = addrspacecast i8 addrspace(2)* %pp to %s addrspace(1)*
- %l = load %s addrspace(1)* %r
+ %l = load %s, %s addrspace(1)* %r
ret %s %l
}
@@ -772,8 +772,8 @@ define %s @test68_as1(%s addrspace(1)* %p, i32 %i) {
%pp = getelementptr inbounds i8, i8 addrspace(1)* %q, i32 %o
; CHECK-NEXT: getelementptr %s, %s addrspace(1)*
%r = bitcast i8 addrspace(1)* %pp to %s addrspace(1)*
- %l = load %s addrspace(1)* %r
-; CHECK-NEXT: load %s addrspace(1)*
+ %l = load %s, %s addrspace(1)* %r
+; CHECK-NEXT: load %s, %s addrspace(1)*
ret %s %l
; CHECK-NEXT: ret %s
}
@@ -785,8 +785,8 @@ define double @test69(double *%p, i64 %i) {
%pp = getelementptr inbounds i8, i8* %q, i64 %o
; CHECK-NEXT: getelementptr inbounds double, double*
%r = bitcast i8* %pp to double*
- %l = load double* %r
-; CHECK-NEXT: load double*
+ %l = load double, double* %r
+; CHECK-NEXT: load double, double*
ret double %l
; CHECK-NEXT: ret double
}
@@ -799,8 +799,8 @@ define %s @test70(%s *%p, i64 %i) {
%pp = getelementptr inbounds i8, i8* %q, i64 %o
; CHECK-NEXT: getelementptr inbounds %s, %s*
%r = bitcast i8* %pp to %s*
- %l = load %s* %r
-; CHECK-NEXT: load %s*
+ %l = load %s, %s* %r
+; CHECK-NEXT: load %s, %s*
ret %s %l
; CHECK-NEXT: ret %s
}
@@ -813,8 +813,8 @@ define double @test71(double *%p, i64 %i) {
%pp = getelementptr i8, i8* %q, i64 %o
; CHECK-NEXT: getelementptr double, double*
%r = bitcast i8* %pp to double*
- %l = load double* %r
-; CHECK-NEXT: load double*
+ %l = load double, double* %r
+; CHECK-NEXT: load double, double*
ret double %l
; CHECK-NEXT: ret double
}
@@ -828,8 +828,8 @@ define double @test72(double *%p, i32 %i) {
%pp = getelementptr inbounds i8, i8* %q, i64 %o
; CHECK-NEXT: getelementptr inbounds double, double*
%r = bitcast i8* %pp to double*
- %l = load double* %r
-; CHECK-NEXT: load double*
+ %l = load double, double* %r
+; CHECK-NEXT: load double, double*
ret double %l
; CHECK-NEXT: ret double
}
@@ -843,8 +843,8 @@ define double @test73(double *%p, i128 %i) {
%pp = getelementptr inbounds i8, i8* %q, i64 %o
; CHECK-NEXT: getelementptr double, double*
%r = bitcast i8* %pp to double*
- %l = load double* %r
-; CHECK-NEXT: load double*
+ %l = load double, double* %r
+; CHECK-NEXT: load double, double*
ret double %l
; CHECK-NEXT: ret double
}
@@ -855,8 +855,8 @@ define double @test74(double *%p, i64 %i) {
%pp = getelementptr inbounds i64, i64* %q, i64 %i
; CHECK-NEXT: getelementptr inbounds double, double*
%r = bitcast i64* %pp to double*
- %l = load double* %r
-; CHECK-NEXT: load double*
+ %l = load double, double* %r
+; CHECK-NEXT: load double, double*
ret double %l
; CHECK-NEXT: ret double
}
@@ -882,8 +882,8 @@ define %s @test76(%s *%p, i64 %i, i64 %j) {
%pp = getelementptr inbounds i8, i8* %q, i64 %o2
; CHECK-NEXT: getelementptr %s, %s* %p, i64 %o2
%r = bitcast i8* %pp to %s*
- %l = load %s* %r
-; CHECK-NEXT: load %s*
+ %l = load %s, %s* %r
+; CHECK-NEXT: load %s, %s*
ret %s %l
; CHECK-NEXT: ret %s
}
@@ -898,8 +898,8 @@ define %s @test77(%s *%p, i64 %i, i64 %j) {
%pp = getelementptr inbounds i8, i8* %q, i64 %o2
; CHECK-NEXT: getelementptr inbounds %s, %s* %p, i64 %o2
%r = bitcast i8* %pp to %s*
- %l = load %s* %r
-; CHECK-NEXT: load %s*
+ %l = load %s, %s* %r
+; CHECK-NEXT: load %s, %s*
ret %s %l
; CHECK-NEXT: ret %s
}
@@ -926,8 +926,8 @@ define %s @test78(%s *%p, i64 %i, i64 %j, i32 %k, i32 %l, i128 %m, i128 %n) {
%pp = getelementptr inbounds i8, i8* %q, i64 %h
; CHECK-NEXT: getelementptr %s, %s* %p, i64 %h
%r = bitcast i8* %pp to %s*
- %load = load %s* %r
-; CHECK-NEXT: load %s*
+ %load = load %s, %s* %r
+; CHECK-NEXT: load %s, %s*
ret %s %load
; CHECK-NEXT: ret %s
}
@@ -942,7 +942,7 @@ define %s @test79(%s *%p, i64 %i, i32 %j) {
; CHECK: bitcast
%pp = getelementptr inbounds i8, i8* %q, i32 %c
%r = bitcast i8* %pp to %s*
- %l = load %s* %r
+ %l = load %s, %s* %r
ret %s %l
}
@@ -954,8 +954,8 @@ define double @test80([100 x double]* %p, i32 %i) {
%pp = getelementptr i8, i8* %q, i32 %tmp
; CHECK-NEXT: getelementptr [100 x double], [100 x double]*
%r = bitcast i8* %pp to double*
- %l = load double* %r
-; CHECK-NEXT: load double*
+ %l = load double, double* %r
+; CHECK-NEXT: load double, double*
ret double %l
; CHECK-NEXT: ret double
}
@@ -963,13 +963,13 @@ define double @test80([100 x double]* %p, i32 %i) {
define double @test80_addrspacecast([100 x double] addrspace(1)* %p, i32 %i) {
; CHECK-LABEL: @test80_addrspacecast(
; CHECK-NEXT: getelementptr [100 x double], [100 x double] addrspace(1)* %p
-; CHECK-NEXT: load double addrspace(1)*
+; CHECK-NEXT: load double, double addrspace(1)*
; CHECK-NEXT: ret double
%tmp = shl nsw i32 %i, 3
%q = addrspacecast [100 x double] addrspace(1)* %p to i8 addrspace(2)*
%pp = getelementptr i8, i8 addrspace(2)* %q, i32 %tmp
%r = addrspacecast i8 addrspace(2)* %pp to double addrspace(1)*
- %l = load double addrspace(1)* %r
+ %l = load double, double addrspace(1)* %r
ret double %l
}
@@ -977,13 +977,13 @@ define double @test80_addrspacecast_2([100 x double] addrspace(1)* %p, i32 %i) {
; CHECK-LABEL: @test80_addrspacecast_2(
; CHECK-NEXT: getelementptr [100 x double], [100 x double] addrspace(1)*
; CHECK-NEXT: addrspacecast double addrspace(1)*
-; CHECK-NEXT: load double addrspace(3)*
+; CHECK-NEXT: load double, double addrspace(3)*
; CHECK-NEXT: ret double
%tmp = shl nsw i32 %i, 3
%q = addrspacecast [100 x double] addrspace(1)* %p to i8 addrspace(2)*
%pp = getelementptr i8, i8 addrspace(2)* %q, i32 %tmp
%r = addrspacecast i8 addrspace(2)* %pp to double addrspace(3)*
- %l = load double addrspace(3)* %r
+ %l = load double, double addrspace(3)* %r
ret double %l
}
@@ -995,8 +995,8 @@ define double @test80_as1([100 x double] addrspace(1)* %p, i16 %i) {
%pp = getelementptr i8, i8 addrspace(1)* %q, i16 %tmp
; CHECK-NEXT: getelementptr [100 x double], [100 x double] addrspace(1)*
%r = bitcast i8 addrspace(1)* %pp to double addrspace(1)*
- %l = load double addrspace(1)* %r
-; CHECK-NEXT: load double addrspace(1)*
+ %l = load double, double addrspace(1)* %r
+; CHECK-NEXT: load double, double addrspace(1)*
ret double %l
; CHECK-NEXT: ret double
}
@@ -1006,7 +1006,7 @@ define double @test81(double *%p, float %f) {
%q = bitcast double* %p to i8*
%pp = getelementptr i8, i8* %q, i64 %i
%r = bitcast i8* %pp to double*
- %l = load double* %r
+ %l = load double, double* %r
ret double %l
}
diff --git a/llvm/test/Transforms/InstCombine/cast_ptr.ll b/llvm/test/Transforms/InstCombine/cast_ptr.ll
index cc7a2bf1859..eaf946ef925 100644
--- a/llvm/test/Transforms/InstCombine/cast_ptr.ll
+++ b/llvm/test/Transforms/InstCombine/cast_ptr.ll
@@ -107,7 +107,7 @@ define i1 @test4_as2(i16 %A) {
declare %op* @foo(%op* %X)
define %unop* @test5(%op* %O) {
- %tmp = load %unop* (%op*)** bitcast ([1 x %op* (%op*)*]* @Array to %unop* (%op*)**); <%unop* (%op*)*> [#uses=1]
+ %tmp = load %unop* (%op*)*, %unop* (%op*)** bitcast ([1 x %op* (%op*)*]* @Array to %unop* (%op*)**); <%unop* (%op*)*> [#uses=1]
%tmp.2 = call %unop* %tmp( %op* %O ) ; <%unop*> [#uses=1]
ret %unop* %tmp.2
; CHECK-LABEL: @test5(
@@ -122,8 +122,8 @@ define %unop* @test5(%op* %O) {
define i8 @test6(i8 addrspace(1)* %source) {
entry:
%arrayidx223 = addrspacecast i8 addrspace(1)* %source to i8*
- %tmp4 = load i8* %arrayidx223
+ %tmp4 = load i8, i8* %arrayidx223
ret i8 %tmp4
; CHECK-LABEL: @test6(
-; CHECK: load i8* %arrayidx223
+; CHECK: load i8, i8* %arrayidx223
}
diff --git a/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll b/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
index bb7ab583f96..eb1690458e6 100644
--- a/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
+++ b/llvm/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
@@ -149,22 +149,22 @@ define i1 @constant_fold_inttoptr() {
define float @constant_fold_bitcast_ftoi_load() {
; CHECK-LABEL: @constant_fold_bitcast_ftoi_load(
-; CHECK: load float addrspace(3)* bitcast (i32 addrspace(3)* @g to float addrspace(3)*), align 4
- %a = load float addrspace(3)* bitcast (i32 addrspace(3)* @g to float addrspace(3)*), align 4
+; CHECK: load float, float addrspace(3)* bitcast (i32 addrspace(3)* @g to float addrspace(3)*), align 4
+ %a = load float, float addrspace(3)* bitcast (i32 addrspace(3)* @g to float addrspace(3)*), align 4
ret float %a
}
define i32 @constant_fold_bitcast_itof_load() {
; CHECK-LABEL: @constant_fold_bitcast_itof_load(
-; CHECK: load i32 addrspace(3)* bitcast (float addrspace(3)* @g_float_as3 to i32 addrspace(3)*), align 4
- %a = load i32 addrspace(3)* bitcast (float addrspace(3)* @g_float_as3 to i32 addrspace(3)*), align 4
+; CHECK: load i32, i32 addrspace(3)* bitcast (float addrspace(3)* @g_float_as3 to i32 addrspace(3)*), align 4
+ %a = load i32, i32 addrspace(3)* bitcast (float addrspace(3)* @g_float_as3 to i32 addrspace(3)*), align 4
ret i32 %a
}
define <4 x float> @constant_fold_bitcast_vector_as() {
; CHECK-LABEL: @constant_fold_bitcast_vector_as(
-; CHECK: load <4 x float> addrspace(3)* @g_v4f_as3, align 16
- %a = load <4 x float> addrspace(3)* bitcast (<4 x i32> addrspace(3)* bitcast (<4 x float> addrspace(3)* @g_v4f_as3 to <4 x i32> addrspace(3)*) to <4 x float> addrspace(3)*), align 4
+; CHECK: load <4 x float>, <4 x float> addrspace(3)* @g_v4f_as3, align 16
+ %a = load <4 x float>, <4 x float> addrspace(3)* bitcast (<4 x i32> addrspace(3)* bitcast (<4 x float> addrspace(3)* @g_v4f_as3 to <4 x i32> addrspace(3)*) to <4 x float> addrspace(3)*), align 4
ret <4 x float> %a
}
@@ -172,9 +172,9 @@ define <4 x float> @constant_fold_bitcast_vector_as() {
define i32 @test_cast_gep_small_indices_as() {
; CHECK-LABEL: @test_cast_gep_small_indices_as(
-; CHECK: load i32 addrspace(3)* getelementptr inbounds ([10 x i32] addrspace(3)* @i32_array_as3, i16 0, i16 0), align 16
+; CHECK: load i32, i32 addrspace(3)* getelementptr inbounds ([10 x i32] addrspace(3)* @i32_array_as3, i16 0, i16 0), align 16
%p = getelementptr [10 x i32], [10 x i32] addrspace(3)* @i32_array_as3, i7 0, i7 0
- %x = load i32 addrspace(3)* %p, align 4
+ %x = load i32, i32 addrspace(3)* %p, align 4
ret i32 %x
}
@@ -189,17 +189,17 @@ define i32 @test_cast_gep_small_indices_as() {
define i32 @test_cast_gep_large_indices_as() {
; CHECK-LABEL: @test_cast_gep_large_indices_as(
-; CHECK: load i32 addrspace(3)* getelementptr inbounds ([10 x i32] addrspace(3)* @i32_array_as3, i16 0, i16 0), align 16
+; CHECK: load i32, i32 addrspace(3)* getelementptr inbounds ([10 x i32] addrspace(3)* @i32_array_as3, i16 0, i16 0), align 16
%p = getelementptr [10 x i32], [10 x i32] addrspace(3)* @i32_array_as3, i64 0, i64 0
- %x = load i32 addrspace(3)* %p, align 4
+ %x = load i32, i32 addrspace(3)* %p, align 4
ret i32 %x
}
define i32 @test_constant_cast_gep_struct_indices_as() {
; CHECK-LABEL: @test_constant_cast_gep_struct_indices_as(
-; CHECK: load i32 addrspace(3)* getelementptr inbounds (%struct.foo addrspace(3)* @constant_fold_global_ptr, i16 0, i32 2, i16 2), align 8
+; CHECK: load i32, i32 addrspace(3)* getelementptr inbounds (%struct.foo addrspace(3)* @constant_fold_global_ptr, i16 0, i32 2, i16 2), align 8
%x = getelementptr %struct.foo, %struct.foo addrspace(3)* @constant_fold_global_ptr, i18 0, i32 2, i12 2
- %y = load i32 addrspace(3)* %x, align 4
+ %y = load i32, i32 addrspace(3)* %x, align 4
ret i32 %y
}
@@ -209,7 +209,7 @@ define i32 @test_read_data_from_global_as3() {
; CHECK-LABEL: @test_read_data_from_global_as3(
; CHECK-NEXT: ret i32 2
%x = getelementptr [5 x i32], [5 x i32] addrspace(3)* @constant_data_as3, i32 0, i32 1
- %y = load i32 addrspace(3)* %x, align 4
+ %y = load i32, i32 addrspace(3)* %x, align 4
ret i32 %y
}
@@ -224,9 +224,9 @@ define i32 @test_read_data_from_global_as3() {
define i32 @constant_through_array_as_ptrs() {
; CHECK-LABEL: @constant_through_array_as_ptrs(
; CHECK-NEXT: ret i32 34
- %p = load i32 addrspace(1)* addrspace(2)* addrspace(0)* @indirect, align 4
- %a = load i32 addrspace(1)* addrspace(2)* %p, align 4
- %b = load i32 addrspace(1)* %a, align 4
+ %p = load i32 addrspace(1)* addrspace(2)*, i32 addrspace(1)* addrspace(2)* addrspace(0)* @indirect, align 4
+ %a = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(2)* %p, align 4
+ %b = load i32, i32 addrspace(1)* %a, align 4
ret i32 %b
}
@@ -236,6 +236,6 @@ define float @canonicalize_addrspacecast(i32 %i) {
; CHECK-LABEL: @canonicalize_addrspacecast
; CHECK-NEXT: getelementptr inbounds float, float* addrspacecast (float addrspace(3)* bitcast ([0 x i8] addrspace(3)* @shared_mem to float addrspace(3)*) to float*), i32 %i
%p = getelementptr inbounds float, float* addrspacecast ([0 x i8] addrspace(3)* @shared_mem to float*), i32 %i
- %v = load float* %p
+ %v = load float, float* %p
ret float %v
}
diff --git a/llvm/test/Transforms/InstCombine/crash.ll b/llvm/test/Transforms/InstCombine/crash.ll
index 9fd2aab5c09..76ddd09370c 100644
--- a/llvm/test/Transforms/InstCombine/crash.ll
+++ b/llvm/test/Transforms/InstCombine/crash.ll
@@ -36,7 +36,7 @@ entry:
define void @test2(<1 x i16>* nocapture %b, i32* nocapture %c) nounwind ssp {
entry:
%arrayidx = getelementptr inbounds <1 x i16>, <1 x i16>* %b, i64 undef ; <<1 x i16>*>
- %tmp2 = load <1 x i16>* %arrayidx ; <<1 x i16>> [#uses=1]
+ %tmp2 = load <1 x i16>, <1 x i16>* %arrayidx ; <<1 x i16>> [#uses=1]
%tmp6 = bitcast <1 x i16> %tmp2 to i16 ; <i16> [#uses=1]
%tmp7 = zext i16 %tmp6 to i32 ; <i32> [#uses=1]
%ins = or i32 0, %tmp7 ; <i32> [#uses=1]
@@ -60,7 +60,7 @@ define void @foo(i1) nounwind align 2 {
; <label>:3 ; preds = %2, %1
%4 = phi i8 [ 1, %2 ], [ 0, %1 ] ; <i8> [#uses=1]
%5 = icmp eq i8 %4, 0 ; <i1> [#uses=1]
- %6 = load i64* @tmp2, align 8 ; <i64> [#uses=1]
+ %6 = load i64, i64* @tmp2, align 8 ; <i64> [#uses=1]
%7 = select i1 %5, i64 0, i64 %6 ; <i64> [#uses=1]
br label %8
@@ -81,7 +81,7 @@ define void @bar3(i1, i1) nounwind align 2 {
; <label>:3 ; preds = %2
%4 = getelementptr inbounds %t0, %t0* null, i64 0, i32 1 ; <i32*> [#uses=0]
%5 = getelementptr inbounds %t1, %t1* null, i64 0, i32 4 ; <i32**> [#uses=1]
- %6 = load i32** %5, align 8 ; <i32*> [#uses=1]
+ %6 = load i32*, i32** %5, align 8 ; <i32*> [#uses=1]
%7 = icmp ne i32* %6, null ; <i1> [#uses=1]
%8 = zext i1 %7 to i32 ; <i32> [#uses=1]
%9 = add i32 %8, 0 ; <i32> [#uses=1]
@@ -115,7 +115,7 @@ BB1:
BB2:
%v5_ = phi i1 [ true, %BB0], [false, %BB1]
- %v6 = load i64* %P
+ %v6 = load i64, i64* %P
br label %l8
l8:
@@ -183,7 +183,7 @@ cont: ; preds = %ehcleanup
cond.false: ; preds = %entry
%tmp4 = getelementptr inbounds %class.RuleBasedBreakIterator, %class.RuleBasedBreakIterator* %this, i32 0, i32 0 ; <i64 ()**> [#uses=1]
- %tmp5 = load i64 ()** %tmp4 ; <i64 ()*> [#uses=1]
+ %tmp5 = load i64 ()*, i64 ()** %tmp4 ; <i64 ()*> [#uses=1]
%call = invoke i64 %tmp5()
to label %cond.end unwind label %ehcleanup ; <i64> [#uses=1]
@@ -242,10 +242,10 @@ entry:
; PR6503
define void @test12(i32* %A) nounwind {
entry:
- %tmp1 = load i32* %A
+ %tmp1 = load i32, i32* %A
%cmp = icmp ugt i32 1, %tmp1 ; <i1> [#uses=1]
%conv = zext i1 %cmp to i32 ; <i32> [#uses=1]
- %tmp2 = load i32* %A
+ %tmp2 = load i32, i32* %A
%cmp3 = icmp ne i32 %tmp2, 0 ; <i1> [#uses=1]
%conv4 = zext i1 %cmp3 to i32 ; <i32> [#uses=1]
%or = or i32 %conv, %conv4 ; <i32> [#uses=1]
@@ -261,7 +261,7 @@ entry:
%0 = getelementptr inbounds %s1, %s1* null, i64 0, i32 2, i64 0, i32 0
%1 = bitcast i64* %0 to i32*
%2 = getelementptr inbounds %s1, %s1* null, i64 0, i32 2, i64 1, i32 0
- %.pre = load i32* %1, align 8
+ %.pre = load i32, i32* %1, align 8
%3 = lshr i32 %.pre, 19
%brmerge = or i1 undef, undef
%4 = and i32 %3, 3
@@ -269,7 +269,7 @@ entry:
%6 = shl i32 %5, 19
%7 = add i32 %6, 1572864
%8 = and i32 %7, 1572864
- %9 = load i64* %2, align 8
+ %9 = load i64, i64* %2, align 8
%trunc156 = trunc i64 %9 to i32
%10 = and i32 %trunc156, -1537
%11 = and i32 %10, -6145
@@ -304,7 +304,7 @@ entry:
define void @test15(i32* %p_92) nounwind {
entry:
-%0 = load i32* %p_92, align 4
+%0 = load i32, i32* %p_92, align 4
%1 = icmp ne i32 %0, 0
%2 = zext i1 %1 to i32
%3 = call i32 @func_14() nounwind
diff --git a/llvm/test/Transforms/InstCombine/debuginfo.ll b/llvm/test/Transforms/InstCombine/debuginfo.ll
index ae72f700039..bf4a846270c 100644
--- a/llvm/test/Transforms/InstCombine/debuginfo.ll
+++ b/llvm/test/Transforms/InstCombine/debuginfo.ll
@@ -19,10 +19,10 @@ entry:
call void @llvm.dbg.declare(metadata i32* %__val.addr, metadata !7, metadata !{}), !dbg !18
store i64 %__len, i64* %__len.addr, align 8
call void @llvm.dbg.declare(metadata i64* %__len.addr, metadata !9, metadata !{}), !dbg !20
- %tmp = load i8** %__dest.addr, align 8, !dbg !21
- %tmp1 = load i32* %__val.addr, align 4, !dbg !21
- %tmp2 = load i64* %__len.addr, align 8, !dbg !21
- %tmp3 = load i8** %__dest.addr, align 8, !dbg !21
+ %tmp = load i8*, i8** %__dest.addr, align 8, !dbg !21
+ %tmp1 = load i32, i32* %__val.addr, align 4, !dbg !21
+ %tmp2 = load i64, i64* %__len.addr, align 8, !dbg !21
+ %tmp3 = load i8*, i8** %__dest.addr, align 8, !dbg !21
%0 = call i64 @llvm.objectsize.i64.p0i8(i8* %tmp3, i1 false), !dbg !21
%call = call i8* @foo(i8* %tmp, i32 %tmp1, i64 %tmp2, i64 %0), !dbg !21
ret i8* %call, !dbg !21
diff --git a/llvm/test/Transforms/InstCombine/descale-zero.ll b/llvm/test/Transforms/InstCombine/descale-zero.ll
index 8aa8818be32..4347be48e61 100644
--- a/llvm/test/Transforms/InstCombine/descale-zero.ll
+++ b/llvm/test/Transforms/InstCombine/descale-zero.ll
@@ -5,10 +5,10 @@ target triple = "x86_64-apple-macosx10.10.0"
define internal i8* @descale_zero() {
entry:
-; CHECK: load i8** inttoptr (i64 48 to i8**), align 16
+; CHECK: load i8*, i8** inttoptr (i64 48 to i8**), align 16
; CHECK-NEXT: ret i8*
- %i16_ptr = load i16** inttoptr (i64 48 to i16**), align 16
- %num = load i64* inttoptr (i64 64 to i64*), align 64
+ %i16_ptr = load i16*, i16** inttoptr (i64 48 to i16**), align 16
+ %num = load i64, i64* inttoptr (i64 64 to i64*), align 64
%num_times_2 = shl i64 %num, 1
%num_times_2_plus_4 = add i64 %num_times_2, 4
%i8_ptr = bitcast i16* %i16_ptr to i8*
diff --git a/llvm/test/Transforms/InstCombine/div-shift-crash.ll b/llvm/test/Transforms/InstCombine/div-shift-crash.ll
index 96c2c22c4c3..936173cd6d4 100644
--- a/llvm/test/Transforms/InstCombine/div-shift-crash.ll
+++ b/llvm/test/Transforms/InstCombine/div-shift-crash.ll
@@ -39,7 +39,7 @@ for.body4.i.i.i.i: ; preds = %for.cond1.i.i.i.i
br label %for.cond1.i.i.i.i
func_39.exit.i.i: ; preds = %for.cond1.i.i.i.i
- %l_8191.sroa.0.0.copyload.i.i = load i64* %0, align 1
+ %l_8191.sroa.0.0.copyload.i.i = load i64, i64* %0, align 1
br label %for.cond1.i.i.i
for.cond1.i.i.i: ; preds = %safe_div_func_uint32_t_u_u.exit.i.i.i, %func_39.exit.i.i
diff --git a/llvm/test/Transforms/InstCombine/err-rep-cold.ll b/llvm/test/Transforms/InstCombine/err-rep-cold.ll
index 0cbafc43e0f..16a68e55b09 100644
--- a/llvm/test/Transforms/InstCombine/err-rep-cold.ll
+++ b/llvm/test/Transforms/InstCombine/err-rep-cold.ll
@@ -18,7 +18,7 @@ entry:
br i1 %cmp, label %if.then, label %return
if.then: ; preds = %entry
- %0 = load %struct._IO_FILE** @stderr, align 8
+ %0 = load %struct._IO_FILE*, %struct._IO_FILE** @stderr, align 8
%call = tail call i32 (%struct._IO_FILE*, i8*, ...)* @fprintf(%struct._IO_FILE* %0, i8* getelementptr inbounds ([13 x i8]* @.str, i64 0, i64 0), i32 %a) #1
br label %return
@@ -38,7 +38,7 @@ entry:
br i1 %cmp, label %if.then, label %return
if.then: ; preds = %entry
- %0 = load %struct._IO_FILE** @stderr, align 8
+ %0 = load %struct._IO_FILE*, %struct._IO_FILE** @stderr, align 8
%1 = tail call i64 @fwrite(i8* getelementptr inbounds ([9 x i8]* @.str1, i64 0, i64 0), i64 8, i64 1, %struct._IO_FILE* %0)
br label %return
@@ -58,7 +58,7 @@ entry:
br i1 %cmp, label %if.then, label %return
if.then: ; preds = %entry
- %0 = load %struct._IO_FILE** @stdout, align 8
+ %0 = load %struct._IO_FILE*, %struct._IO_FILE** @stdout, align 8
%1 = tail call i64 @fwrite(i8* getelementptr inbounds ([9 x i8]* @.str1, i64 0, i64 0), i64 8, i64 1, %struct._IO_FILE* %0)
br label %return
diff --git a/llvm/test/Transforms/InstCombine/extractvalue.ll b/llvm/test/Transforms/InstCombine/extractvalue.ll
index 27c05dbbf5d..4dc1545cb41 100644
--- a/llvm/test/Transforms/InstCombine/extractvalue.ll
+++ b/llvm/test/Transforms/InstCombine/extractvalue.ll
@@ -41,7 +41,7 @@ define i32 @foo(i32 %a, i32 %b) {
; CHECK-LABEL: define i32 @extract2gep(
; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %pair, i32 0, i32 1
-; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32* [[GEP]]
+; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32, i32* [[GEP]]
; CHECK-NEXT: store
; CHECK-NEXT: br label %loop
; CHECK-NOT: extractvalue
@@ -52,7 +52,7 @@ define i32 @extract2gep({i32, i32}* %pair, i32* %P) {
; The load + extractvalue should be converted
; to an inbounds gep + smaller load.
; The new load should be in the same spot as the old load.
- %L = load {i32, i32}* %pair
+ %L = load {i32, i32}, {i32, i32}* %pair
store i32 0, i32* %P
br label %loop
@@ -69,12 +69,12 @@ end:
; CHECK-LABEL: define i32 @doubleextract2gep(
; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %arg, i32 0, i32 1, i32 1
-; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32* [[GEP]]
+; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32, i32* [[GEP]]
; CHECK-NEXT: ret i32 [[LOAD]]
define i32 @doubleextract2gep({i32, {i32, i32}}* %arg) {
; The load + extractvalues should be converted
; to a 3-index inbounds gep + smaller load.
- %L = load {i32, {i32, i32}}* %arg
+ %L = load {i32, {i32, i32}}, {i32, {i32, i32}}* %arg
%E1 = extractvalue {i32, {i32, i32}} %L, 1
%E2 = extractvalue {i32, i32} %E1, 1
ret i32 %E2
@@ -88,7 +88,7 @@ define i32 @doubleextract2gep({i32, {i32, i32}}* %arg) {
; CHECK-NEXT: ret
define i32 @nogep-multiuse({i32, i32}* %pair) {
; The load should be left unchanged since both parts are needed.
- %L = load volatile {i32, i32}* %pair
+ %L = load volatile {i32, i32}, {i32, i32}* %pair
%LHS = extractvalue {i32, i32} %L, 0
%RHS = extractvalue {i32, i32} %L, 1
%R = add i32 %LHS, %RHS
@@ -101,7 +101,7 @@ define i32 @nogep-multiuse({i32, i32}* %pair) {
; CHECK-NEXT: ret
define i32 @nogep-volatile({i32, i32}* %pair) {
; The load volatile should be left unchanged.
- %L = load volatile {i32, i32}* %pair
+ %L = load volatile {i32, i32}, {i32, i32}* %pair
%E = extractvalue {i32, i32} %L, 1
ret i32 %E
}
diff --git a/llvm/test/Transforms/InstCombine/fmul.ll b/llvm/test/Transforms/InstCombine/fmul.ll
index a776765ac2c..ac3000fc051 100644
--- a/llvm/test/Transforms/InstCombine/fmul.ll
+++ b/llvm/test/Transforms/InstCombine/fmul.ll
@@ -74,7 +74,7 @@ define float @test7(float %x, float %y) {
; Don't crash when attempting to cast a constant FMul to an instruction.
define void @test8(i32* %inout) {
entry:
- %0 = load i32* %inout, align 4
+ %0 = load i32, i32* %inout, align 4
%conv = uitofp i32 %0 to float
%vecinit = insertelement <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>, float %conv, i32 3
%sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %vecinit
diff --git a/llvm/test/Transforms/InstCombine/fold-vector-zero.ll b/llvm/test/Transforms/InstCombine/fold-vector-zero.ll
index e1d86b6cd07..bf661df7795 100644
--- a/llvm/test/Transforms/InstCombine/fold-vector-zero.ll
+++ b/llvm/test/Transforms/InstCombine/fold-vector-zero.ll
@@ -9,7 +9,7 @@ bb30:
%l0 = phi i64 [ -2222, %bb8 ], [ %r23, %bb30 ]
%r2 = add i64 %s0, %B
%r3 = inttoptr i64 %r2 to <2 x double>*
- %r4 = load <2 x double>* %r3, align 8
+ %r4 = load <2 x double>, <2 x double>* %r3, align 8
%r6 = bitcast <2 x double> %r4 to <2 x i64>
%r7 = bitcast <2 x double> zeroinitializer to <2 x i64>
%r8 = insertelement <2 x i64> undef, i64 9223372036854775807, i32 0
diff --git a/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll b/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
index 84ba236e7ed..7106933d2bd 100644
--- a/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
+++ b/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
@@ -13,11 +13,11 @@ define void @bork() nounwind {
entry:
%color = alloca %struct.NSArray*
%color.466 = alloca %struct.NSObject*
- %tmp103 = load %struct.NSArray** %color, align 4
+ %tmp103 = load %struct.NSArray*, %struct.NSArray** %color, align 4
%tmp103104 = getelementptr %struct.NSArray, %struct.NSArray* %tmp103, i32 0, i32 0
store %struct.NSObject* %tmp103104, %struct.NSObject** %color.466, align 4
- %tmp105 = load %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_81", align 4
- %tmp106 = load %struct.NSObject** %color.466, align 4
+ %tmp105 = load %struct.objc_selector*, %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_81", align 4
+ %tmp106 = load %struct.NSObject*, %struct.NSObject** %color.466, align 4
%tmp107 = call float bitcast (void (%struct.NSObject*, ...)* @objc_msgSend_fpret to float (%struct.NSObject*, %struct.objc_selector*)*)( %struct.NSObject* %tmp106, %struct.objc_selector* %tmp105 ) nounwind
br label %exit
diff --git a/llvm/test/Transforms/InstCombine/fpextend.ll b/llvm/test/Transforms/InstCombine/fpextend.ll
index 8640cd2b6f5..775caadb45a 100644
--- a/llvm/test/Transforms/InstCombine/fpextend.ll
+++ b/llvm/test/Transforms/InstCombine/fpextend.ll
@@ -5,7 +5,7 @@
define void @test() nounwind {
entry:
- %tmp = load float* @X, align 4 ; <float> [#uses=1]
+ %tmp = load float, float* @X, align 4 ; <float> [#uses=1]
%tmp1 = fpext float %tmp to double ; <double> [#uses=1]
%tmp3 = fadd double %tmp1, 0.000000e+00 ; <double> [#uses=1]
%tmp34 = fptrunc double %tmp3 to float ; <float> [#uses=1]
@@ -15,9 +15,9 @@ entry:
define void @test2() nounwind {
entry:
- %tmp = load float* @X, align 4 ; <float> [#uses=1]
+ %tmp = load float, float* @X, align 4 ; <float> [#uses=1]
%tmp1 = fpext float %tmp to double ; <double> [#uses=1]
- %tmp2 = load float* @Y, align 4 ; <float> [#uses=1]
+ %tmp2 = load float, float* @Y, align 4 ; <float> [#uses=1]
%tmp23 = fpext float %tmp2 to double ; <double> [#uses=1]
%tmp5 = fmul double %tmp1, %tmp23 ; <double> [#uses=1]
%tmp56 = fptrunc double %tmp5 to float ; <float> [#uses=1]
@@ -27,9 +27,9 @@ entry:
define void @test3() nounwind {
entry:
- %tmp = load float* @X, align 4 ; <float> [#uses=1]
+ %tmp = load float, float* @X, align 4 ; <float> [#uses=1]
%tmp1 = fpext float %tmp to double ; <double> [#uses=1]
- %tmp2 = load float* @Y, align 4 ; <float> [#uses=1]
+ %tmp2 = load float, float* @Y, align 4 ; <float> [#uses=1]
%tmp23 = fpext float %tmp2 to double ; <double> [#uses=1]
%tmp5 = fdiv double %tmp1, %tmp23 ; <double> [#uses=1]
%tmp56 = fptrunc double %tmp5 to float ; <float> [#uses=1]
@@ -39,7 +39,7 @@ entry:
define void @test4() nounwind {
entry:
- %tmp = load float* @X, align 4 ; <float> [#uses=1]
+ %tmp = load float, float* @X, align 4 ; <float> [#uses=1]
%tmp1 = fpext float %tmp to double ; <double> [#uses=1]
%tmp2 = fsub double -0.000000e+00, %tmp1 ; <double> [#uses=1]
%tmp34 = fptrunc double %tmp2 to float ; <float> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/gc.relocate.ll b/llvm/test/Transforms/InstCombine/gc.relocate.ll
index d10ef5fcfa2..8fbb752f891 100644
--- a/llvm/test/Transforms/InstCombine/gc.relocate.ll
+++ b/llvm/test/Transforms/InstCombine/gc.relocate.ll
@@ -13,7 +13,7 @@ define i32 addrspace(1)* @deref(i32 addrspace(1)* dereferenceable(8) %dparam) {
; CHECK-LABEL: @deref
; CHECK: call dereferenceable(8)
entry:
- %load = load i32 addrspace(1)* %dparam
+ %load = load i32, i32 addrspace(1)* %dparam
%tok = tail call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 addrspace(1)* %dparam)
%relocate = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %tok, i32 4, i32 4)
ret i32 addrspace(1)* %relocate
diff --git a/llvm/test/Transforms/InstCombine/gep-addrspace.ll b/llvm/test/Transforms/InstCombine/gep-addrspace.ll
index 093954e0e66..aa46ea67130 100644
--- a/llvm/test/Transforms/InstCombine/gep-addrspace.ll
+++ b/llvm/test/Transforms/InstCombine/gep-addrspace.ll
@@ -12,7 +12,7 @@ ST:
%B = addrspacecast %myStruct addrspace(1)* %A to %myStruct*
%C = getelementptr inbounds %myStruct, %myStruct* %B, i32 0, i32 1
%D = getelementptr inbounds [3 x float], [3 x float]* %C, i32 0, i32 2
- %E = load float* %D, align 4
+ %E = load float, float* %D, align 4
%F = fsub float %E, undef
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/gep-sext.ll b/llvm/test/Transforms/InstCombine/gep-sext.ll
index 2715c9d6957..36e2aeff02c 100644
--- a/llvm/test/Transforms/InstCombine/gep-sext.ll
+++ b/llvm/test/Transforms/InstCombine/gep-sext.ll
@@ -10,7 +10,7 @@ define void @test(i32* %p, i32 %index) {
; CHECK-NEXT: %1 = sext i32 %index to i64
; CHECK-NEXT: %addr = getelementptr i32, i32* %p, i64 %1
%addr = getelementptr i32, i32* %p, i32 %index
- %val = load i32* %addr
+ %val = load i32, i32* %addr
call void @use(i32 %val)
ret void
}
@@ -21,7 +21,7 @@ define void @test2(i32* %p, i32 %index) {
; CHECK-NEXT: %addr = getelementptr i32, i32* %p, i64 %i
%i = zext i32 %index to i64
%addr = getelementptr i32, i32* %p, i64 %i
- %val = load i32* %addr
+ %val = load i32, i32* %addr
call void @use(i32 %val)
ret void
}
@@ -33,9 +33,9 @@ define void @test3(i32* %p, i32 %index) {
; CHECK-NOT: sext
%addr_begin = getelementptr i32, i32* %p, i64 40
%addr_fixed = getelementptr i32, i32* %addr_begin, i64 48
- %val_fixed = load i32* %addr_fixed, !range !0
+ %val_fixed = load i32, i32* %addr_fixed, !range !0
%addr = getelementptr i32, i32* %addr_begin, i32 %val_fixed
- %val = load i32* %addr
+ %val = load i32, i32* %addr
call void @use(i32 %val)
ret void
}
@@ -46,10 +46,10 @@ define void @test4(i32* %p, i32 %index) {
; CHECK-NOT: sext
%addr_begin = getelementptr i32, i32* %p, i64 40
%addr_fixed = getelementptr i32, i32* %addr_begin, i64 48
- %val_fixed = load i32* %addr_fixed, !range !0
+ %val_fixed = load i32, i32* %addr_fixed, !range !0
%i = sext i32 %val_fixed to i64
%addr = getelementptr i32, i32* %addr_begin, i64 %i
- %val = load i32* %addr
+ %val = load i32, i32* %addr
call void @use(i32 %val)
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/gepphigep.ll b/llvm/test/Transforms/InstCombine/gepphigep.ll
index 7cd22b9e34c..7da11868de7 100644
--- a/llvm/test/Transforms/InstCombine/gepphigep.ll
+++ b/llvm/test/Transforms/InstCombine/gepphigep.ll
@@ -8,7 +8,7 @@
define i32 @test1(%struct1* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19) {
bb:
%tmp = getelementptr inbounds %struct1, %struct1* %dm, i64 0, i32 0
- %tmp1 = load %struct2** %tmp, align 8
+ %tmp1 = load %struct2*, %struct2** %tmp, align 8
br i1 %tmp4, label %bb1, label %bb2
bb1:
@@ -26,7 +26,7 @@ bb2:
bb3:
%phi = phi %struct2* [ %tmp10, %bb1 ], [ %tmp20, %bb2 ]
%tmp24 = getelementptr inbounds %struct2, %struct2* %phi, i64 0, i32 1
- %tmp25 = load i32* %tmp24, align 4
+ %tmp25 = load i32, i32* %tmp24, align 4
ret i32 %tmp25
; CHECK-LABEL: @test1(
@@ -40,7 +40,7 @@ bb3:
define i32 @test2(%struct1* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19) {
bb:
%tmp = getelementptr inbounds %struct1, %struct1* %dm, i64 0, i32 0
- %tmp1 = load %struct2** %tmp, align 8
+ %tmp1 = load %struct2*, %struct2** %tmp, align 8
%tmp10 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp9
%tmp11 = getelementptr inbounds %struct2, %struct2* %tmp10, i64 0, i32 0
store i32 0, i32* %tmp11, align 4
@@ -48,7 +48,7 @@ bb:
%tmp21 = getelementptr inbounds %struct2, %struct2* %tmp20, i64 0, i32 0
store i32 0, i32* %tmp21, align 4
%tmp24 = getelementptr inbounds %struct2, %struct2* %tmp10, i64 0, i32 1
- %tmp25 = load i32* %tmp24, align 4
+ %tmp25 = load i32, i32* %tmp24, align 4
ret i32 %tmp25
; CHECK-LABEL: @test2(
@@ -87,7 +87,7 @@ bb5:
%tmp27 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) catch i8* bitcast (i8** @_ZTIi to i8*)
%tmp34 = getelementptr inbounds %struct4, %struct4* %phi, i64 %tmp21, i32 1
%tmp35 = getelementptr inbounds %struct2, %struct2* %tmp34, i64 0, i32 1
- %tmp25 = load i32* %tmp35, align 4
+ %tmp25 = load i32, i32* %tmp35, align 4
ret i32 %tmp25
; CHECK-LABEL: @test3(
diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
index 2cc703786df..ebc3ffca292 100644
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -155,7 +155,7 @@ entry:
%new_a = bitcast %struct.B* %g4 to %struct.A*
%g5 = getelementptr %struct.A, %struct.A* %new_a, i32 0, i32 1
- %a_a = load i32* %g5, align 4
+ %a_a = load i32, i32* %g5, align 4
ret i32 %a_a
; CHECK-LABEL: @test12(
; CHECK: getelementptr %struct.A, %struct.A* %a, i64 0, i32 1
@@ -363,7 +363,7 @@ define i32 @test21() {
%pbob1 = alloca %intstruct
%pbob2 = getelementptr %intstruct, %intstruct* %pbob1
%pbobel = getelementptr %intstruct, %intstruct* %pbob2, i64 0, i32 0
- %rval = load i32* %pbobel
+ %rval = load i32, i32* %pbobel
ret i32 %rval
; CHECK-LABEL: @test21(
; CHECK: getelementptr %intstruct, %intstruct* %pbob1, i64 0, i32 0
@@ -395,8 +395,8 @@ define i1 @test23() {
define void @test25() {
entry:
%tmp = getelementptr { i64, i64, i64, i64 }, { i64, i64, i64, i64 }* null, i32 0, i32 3 ; <i64*> [#uses=1]
- %tmp.upgrd.1 = load i64* %tmp ; <i64> [#uses=1]
- %tmp8.ui = load i64* null ; <i64> [#uses=1]
+ %tmp.upgrd.1 = load i64, i64* %tmp ; <i64> [#uses=1]
+ %tmp8.ui = load i64, i64* null ; <i64> [#uses=1]
%tmp8 = bitcast i64 %tmp8.ui to i64 ; <i64> [#uses=1]
%tmp9 = and i64 %tmp8, %tmp.upgrd.1 ; <i64> [#uses=1]
%sext = trunc i64 %tmp9 to i32 ; <i32> [#uses=1]
@@ -427,14 +427,14 @@ define i1 @test26(i8* %arr) {
define i32 @test27(%struct.compat_siginfo* %to, %struct.siginfo_t* %from) {
entry:
%from_addr = alloca %struct.siginfo_t*
- %tmp344 = load %struct.siginfo_t** %from_addr, align 8
+ %tmp344 = load %struct.siginfo_t*, %struct.siginfo_t** %from_addr, align 8
%tmp345 = getelementptr %struct.siginfo_t, %struct.siginfo_t* %tmp344, i32 0, i32 3
%tmp346 = getelementptr { { i32, i32, [0 x i8], %struct.sigval_t, i32 }, [88 x i8] }, { { i32, i32, [0 x i8], %struct.sigval_t, i32 }, [88 x i8] }* %tmp345, i32 0, i32 0
%tmp346347 = bitcast { i32, i32, [0 x i8], %struct.sigval_t, i32 }* %tmp346 to { i32, i32, %struct.sigval_t }*
%tmp348 = getelementptr { i32, i32, %struct.sigval_t }, { i32, i32, %struct.sigval_t }* %tmp346347, i32 0, i32 2
%tmp349 = getelementptr %struct.sigval_t, %struct.sigval_t* %tmp348, i32 0, i32 0
%tmp349350 = bitcast i8** %tmp349 to i32*
- %tmp351 = load i32* %tmp349350, align 8
+ %tmp351 = load i32, i32* %tmp349350, align 8
%tmp360 = call i32 asm sideeffect "...",
"=r,ir,*m,i,0,~{dirflag},~{fpsr},~{flags}"( i32 %tmp351,
%struct.__large_struct* null, i32 -14, i32 0 )
@@ -482,9 +482,9 @@ declare i32 @printf(i8*, ...)
%T = type <{ i64, i64, i64 }>
define i32 @test29(i8* %start, i32 %X) nounwind {
entry:
- %tmp3 = load i64* null
+ %tmp3 = load i64, i64* null
%add.ptr = getelementptr i8, i8* %start, i64 %tmp3
- %tmp158 = load i32* null
+ %tmp158 = load i32, i32* null
%add.ptr159 = getelementptr %T, %T* null, i32 %tmp158
%add.ptr209 = getelementptr i8, i8* %start, i64 0
%add.ptr212 = getelementptr i8, i8* %add.ptr209, i32 %X
@@ -507,7 +507,7 @@ entry:
%1 = bitcast i32* %0 to [0 x i32]*
call void @test30f(i32* %0) nounwind
%2 = getelementptr [0 x i32], [0 x i32]* %1, i32 0, i32 %m
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
ret i32 %3
; CHECK-LABEL: @test30(
; CHECK: getelementptr i32
@@ -537,7 +537,7 @@ define i8* @test32(i8* %v) {
%E = bitcast i8* %D to i8**
store i8* %v, i8** %E
%F = getelementptr [4 x i8*], [4 x i8*]* %A, i32 0, i32 2
- %G = load i8** %F
+ %G = load i8*, i8** %F
ret i8* %G
; CHECK-LABEL: @test32(
; CHECK: %D = getelementptr [4 x i8*], [4 x i8*]* %A, i64 0, i64 1
@@ -599,7 +599,7 @@ entry:
%B = getelementptr %T2, %T2* %A, i64 0, i32 0
store i64 %V, i64* %mrv_gep
- %C = load i8** %B, align 8
+ %C = load i8*, i8** %B, align 8
ret i8* %C
; CHECK-LABEL: @test34(
; CHECK: %[[C:.*]] = inttoptr i64 %V to i8*
@@ -695,7 +695,7 @@ declare void @three_gep_h(%three_gep_t2*)
define void @test39(%struct.ham* %arg, i8 %arg1) nounwind {
%tmp = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0, i32 2
- %tmp2 = load %struct.zot** %tmp, align 8
+ %tmp2 = load %struct.zot*, %struct.zot** %tmp, align 8
%tmp3 = bitcast %struct.zot* %tmp2 to i8*
%tmp4 = getelementptr inbounds i8, i8* %tmp3, i64 -8
store i8 %arg1, i8* %tmp4, align 8
@@ -723,7 +723,7 @@ define i8 @test_gep_bitcast_as1(i32 addrspace(1)* %arr, i16 %N) {
%cast = bitcast i32 addrspace(1)* %arr to i8 addrspace(1)*
%V = mul i16 %N, 4
%t = getelementptr i8, i8 addrspace(1)* %cast, i16 %V
- %x = load i8 addrspace(1)* %t
+ %x = load i8, i8 addrspace(1)* %t
ret i8 %x
}
@@ -735,7 +735,7 @@ define i64 @test_gep_bitcast_array_same_size_element([100 x double]* %arr, i64 %
%cast = bitcast [100 x double]* %arr to i64*
%V = mul i64 %N, 8
%t = getelementptr i64, i64* %cast, i64 %V
- %x = load i64* %t
+ %x = load i64, i64* %t
ret i64 %x
}
@@ -745,11 +745,11 @@ define i64 @test_gep_bitcast_array_same_size_element_addrspacecast([100 x double
; CHECK: getelementptr [100 x double], [100 x double]* %arr, i64 0, i64 %V
; CHECK-NEXT: bitcast double*
; CHECK-NEXT: %t = addrspacecast i64*
-; CHECK: load i64 addrspace(3)* %t
+; CHECK: load i64, i64 addrspace(3)* %t
%cast = addrspacecast [100 x double]* %arr to i64 addrspace(3)*
%V = mul i64 %N, 8
%t = getelementptr i64, i64 addrspace(3)* %cast, i64 %V
- %x = load i64 addrspace(3)* %t
+ %x = load i64, i64 addrspace(3)* %t
ret i64 %x
}
@@ -761,7 +761,7 @@ define i8 @test_gep_bitcast_array_different_size_element([100 x double]* %arr, i
%cast = bitcast [100 x double]* %arr to i8*
%V = mul i64 %N, 8
%t = getelementptr i8, i8* %cast, i64 %V
- %x = load i8* %t
+ %x = load i8, i8* %t
ret i8 %x
}
@@ -772,7 +772,7 @@ define i64 @test_gep_bitcast_array_same_size_element_as1([100 x double] addrspac
%cast = bitcast [100 x double] addrspace(1)* %arr to i64 addrspace(1)*
%V = mul i16 %N, 8
%t = getelementptr i64, i64 addrspace(1)* %cast, i16 %V
- %x = load i64 addrspace(1)* %t
+ %x = load i64, i64 addrspace(1)* %t
ret i64 %x
}
@@ -783,7 +783,7 @@ define i8 @test_gep_bitcast_array_different_size_element_as1([100 x double] addr
%cast = bitcast [100 x double] addrspace(1)* %arr to i8 addrspace(1)*
%V = mul i16 %N, 8
%t = getelementptr i8, i8 addrspace(1)* %cast, i16 %V
- %x = load i8 addrspace(1)* %t
+ %x = load i8, i8 addrspace(1)* %t
ret i8 %x
}
diff --git a/llvm/test/Transforms/InstCombine/icmp-range.ll b/llvm/test/Transforms/InstCombine/icmp-range.ll
index 0911ab03c60..041adf76b5e 100644
--- a/llvm/test/Transforms/InstCombine/icmp-range.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-range.ll
@@ -6,14 +6,14 @@
define i1 @test_nonzero(i32* nocapture readonly %arg) {
; CHECK-LABEL:test_nonzero
; CHECK: ret i1 true
- %val = load i32* %arg, !range !0
+ %val = load i32, i32* %arg, !range !0
%rval = icmp ne i32 %val, 0
ret i1 %rval
}
define i1 @test_nonzero2(i32* nocapture readonly %arg) {
; CHECK-LABEL:test_nonzero2
; CHECK: ret i1 false
- %val = load i32* %arg, !range !0
+ %val = load i32, i32* %arg, !range !0
%rval = icmp eq i32 %val, 0
ret i1 %rval
}
@@ -23,7 +23,7 @@ define i1 @test_nonzero3(i32* nocapture readonly %arg) {
; CHECK-LABEL: test_nonzero3
; Check that this does not trigger - it wouldn't be legal
; CHECK: icmp
- %val = load i32* %arg, !range !1
+ %val = load i32, i32* %arg, !range !1
%rval = icmp ne i32 %val, 0
ret i1 %rval
}
@@ -32,7 +32,7 @@ define i1 @test_nonzero3(i32* nocapture readonly %arg) {
define i1 @test_nonzero4(i8* nocapture readonly %arg) {
; CHECK-LABEL: test_nonzero4
; CHECK: ret i1 false
- %val = load i8* %arg, !range !2
+ %val = load i8, i8* %arg, !range !2
%rval = icmp ne i8 %val, 0
ret i1 %rval
}
@@ -40,7 +40,7 @@ define i1 @test_nonzero4(i8* nocapture readonly %arg) {
define i1 @test_nonzero5(i8* nocapture readonly %arg) {
; CHECK-LABEL: test_nonzero5
; CHECK: ret i1 false
- %val = load i8* %arg, !range !2
+ %val = load i8, i8* %arg, !range !2
%rval = icmp ugt i8 %val, 0
ret i1 %rval
}
@@ -49,7 +49,7 @@ define i1 @test_nonzero5(i8* nocapture readonly %arg) {
define i1 @test_nonzero6(i8* %argw) {
; CHECK-LABEL: test_nonzero6
; CHECK: icmp ne i8 %val, 0
- %val = load i8* %argw, !range !3
+ %val = load i8, i8* %argw, !range !3
%rval = icmp sgt i8 %val, 0
ret i1 %rval
}
diff --git a/llvm/test/Transforms/InstCombine/invariant.ll b/llvm/test/Transforms/InstCombine/invariant.ll
index 38323802269..246f9e64041 100644
--- a/llvm/test/Transforms/InstCombine/invariant.ll
+++ b/llvm/test/Transforms/InstCombine/invariant.ll
@@ -11,6 +11,6 @@ define i8 @f() {
%i = call {}* @llvm.invariant.start(i64 1, i8* %a) ; <{}*> [#uses=0]
; CHECK: call {}* @llvm.invariant.start
call void @g(i8* %a)
- %r = load i8* %a ; <i8> [#uses=1]
+ %r = load i8, i8* %a ; <i8> [#uses=1]
ret i8 %r
}
diff --git a/llvm/test/Transforms/InstCombine/known_align.ll b/llvm/test/Transforms/InstCombine/known_align.ll
index 0249951966d..670011fb60b 100644
--- a/llvm/test/Transforms/InstCombine/known_align.ll
+++ b/llvm/test/Transforms/InstCombine/known_align.ll
@@ -11,17 +11,17 @@ entry:
%tmp = alloca i32, align 4 ; <i32*> [#uses=2]
%tmp1 = alloca i32, align 4 ; <i32*> [#uses=3]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp3 = load i32* getelementptr (%struct.p* @t, i32 0, i32 1), align 1 ; <i32> [#uses=1]
+ %tmp3 = load i32, i32* getelementptr (%struct.p* @t, i32 0, i32 1), align 1 ; <i32> [#uses=1]
store i32 %tmp3, i32* %tmp1, align 4
- %tmp5 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
+ %tmp5 = load i32, i32* %tmp1, align 4 ; <i32> [#uses=1]
store i32 %tmp5, i32* getelementptr (%struct.p* @u, i32 0, i32 1), align 1
- %tmp6 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
+ %tmp6 = load i32, i32* %tmp1, align 4 ; <i32> [#uses=1]
store i32 %tmp6, i32* %tmp, align 4
- %tmp7 = load i32* %tmp, align 4 ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
store i32 %tmp7, i32* %retval, align 4
br label %return
return: ; preds = %entry
- %retval8 = load i32* %retval ; <i32> [#uses=1]
+ %retval8 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval8
}
diff --git a/llvm/test/Transforms/InstCombine/load-cmp.ll b/llvm/test/Transforms/InstCombine/load-cmp.ll
index 8b4c63a4b44..7f4e2806c80 100644
--- a/llvm/test/Transforms/InstCombine/load-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/load-cmp.ll
@@ -22,7 +22,7 @@
define i1 @test1(i32 %X) {
%P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
- %Q = load i16* %P
+ %Q = load i16, i16* %P
%R = icmp eq i16 %Q, 0
ret i1 %R
; NODL-LABEL: @test1(
@@ -36,7 +36,7 @@ define i1 @test1(i32 %X) {
define i1 @test1_noinbounds(i32 %X) {
%P = getelementptr [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
- %Q = load i16* %P
+ %Q = load i16, i16* %P
%R = icmp eq i16 %Q, 0
ret i1 %R
; NODL-LABEL: @test1_noinbounds(
@@ -49,7 +49,7 @@ define i1 @test1_noinbounds(i32 %X) {
define i1 @test1_noinbounds_i64(i64 %X) {
%P = getelementptr [10 x i16], [10 x i16]* @G16, i64 0, i64 %X
- %Q = load i16* %P
+ %Q = load i16, i16* %P
%R = icmp eq i16 %Q, 0
ret i1 %R
; NODL-LABEL: @test1_noinbounds_i64(
@@ -62,7 +62,7 @@ define i1 @test1_noinbounds_i64(i64 %X) {
define i1 @test1_noinbounds_as1(i32 %x) {
%p = getelementptr [10 x i16], [10 x i16] addrspace(1)* @G16_as1, i16 0, i32 %x
- %q = load i16 addrspace(1)* %p
+ %q = load i16, i16 addrspace(1)* %p
%r = icmp eq i16 %q, 0
ret i1 %r
@@ -74,7 +74,7 @@ define i1 @test1_noinbounds_as1(i32 %x) {
define i1 @test2(i32 %X) {
%P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
- %Q = load i16* %P
+ %Q = load i16, i16* %P
%R = icmp slt i16 %Q, 85
ret i1 %R
; NODL-LABEL: @test2(
@@ -84,7 +84,7 @@ define i1 @test2(i32 %X) {
define i1 @test3(i32 %X) {
%P = getelementptr inbounds [6 x double], [6 x double]* @GD, i32 0, i32 %X
- %Q = load double* %P
+ %Q = load double, double* %P
%R = fcmp oeq double %Q, 1.0
ret i1 %R
; NODL-LABEL: @test3(
@@ -99,7 +99,7 @@ define i1 @test3(i32 %X) {
define i1 @test4(i32 %X) {
%P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
- %Q = load i16* %P
+ %Q = load i16, i16* %P
%R = icmp sle i16 %Q, 73
ret i1 %R
; NODL-LABEL: @test4(
@@ -117,7 +117,7 @@ define i1 @test4(i32 %X) {
define i1 @test4_i16(i16 %X) {
%P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i16 %X
- %Q = load i16* %P
+ %Q = load i16, i16* %P
%R = icmp sle i16 %Q, 73
ret i1 %R
@@ -137,7 +137,7 @@ define i1 @test4_i16(i16 %X) {
define i1 @test5(i32 %X) {
%P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
- %Q = load i16* %P
+ %Q = load i16, i16* %P
%R = icmp eq i16 %Q, 69
ret i1 %R
; NODL-LABEL: @test5(
@@ -155,7 +155,7 @@ define i1 @test5(i32 %X) {
define i1 @test6(i32 %X) {
%P = getelementptr inbounds [6 x double], [6 x double]* @GD, i32 0, i32 %X
- %Q = load double* %P
+ %Q = load double, double* %P
%R = fcmp ogt double %Q, 0.0
ret i1 %R
; NODL-LABEL: @test6(
@@ -171,7 +171,7 @@ define i1 @test6(i32 %X) {
define i1 @test7(i32 %X) {
%P = getelementptr inbounds [6 x double], [6 x double]* @GD, i32 0, i32 %X
- %Q = load double* %P
+ %Q = load double, double* %P
%R = fcmp olt double %Q, 0.0
ret i1 %R
; NODL-LABEL: @test7(
@@ -187,7 +187,7 @@ define i1 @test7(i32 %X) {
define i1 @test8(i32 %X) {
%P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
- %Q = load i16* %P
+ %Q = load i16, i16* %P
%R = and i16 %Q, 3
%S = icmp eq i16 %R, 0
ret i1 %S
@@ -211,7 +211,7 @@ define i1 @test8(i32 %X) {
define i1 @test9(i32 %X) {
%P = getelementptr inbounds [4 x { i32, i32 } ], [4 x { i32, i32 } ]* @GA, i32 0, i32 %X, i32 1
- %Q = load i32* %P
+ %Q = load i32, i32* %P
%R = icmp eq i32 %Q, 1
ret i1 %R
; NODL-LABEL: @test9(
@@ -232,7 +232,7 @@ define i1 @test10_struct(i32 %x) {
; P32-LABEL: @test10_struct(
; P32: ret i1 false
%p = getelementptr inbounds %Foo, %Foo* @GS, i32 %x, i32 0
- %q = load i32* %p
+ %q = load i32, i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
@@ -244,7 +244,7 @@ define i1 @test10_struct_noinbounds(i32 %x) {
; P32-LABEL: @test10_struct_noinbounds(
; P32: getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
%p = getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
- %q = load i32* %p
+ %q = load i32, i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
@@ -258,7 +258,7 @@ define i1 @test10_struct_i16(i16 %x){
; P32-LABEL: @test10_struct_i16(
; P32: ret i1 false
%p = getelementptr inbounds %Foo, %Foo* @GS, i16 %x, i32 0
- %q = load i32* %p
+ %q = load i32, i32* %p
%r = icmp eq i32 %q, 0
ret i1 %r
}
@@ -272,7 +272,7 @@ define i1 @test10_struct_i64(i64 %x){
; P32-LABEL: @test10_struct_i64(
; P32: ret i1 false
%p = getelementptr inbounds %Foo, %Foo* @GS, i64 %x, i32 0
- %q = load i32* %p
+ %q = load i32, i32* %p
%r = icmp eq i32 %q, 0
ret i1 %r
}
@@ -285,7 +285,7 @@ define i1 @test10_struct_noinbounds_i16(i16 %x) {
; P32: %1 = sext i16 %x to i32
; P32: getelementptr %Foo, %Foo* @GS, i32 %1, i32 0
%p = getelementptr %Foo, %Foo* @GS, i16 %x, i32 0
- %q = load i32* %p
+ %q = load i32, i32* %p
%r = icmp eq i32 %q, 0
ret i1 %r
}
@@ -299,7 +299,7 @@ define i1 @test10_struct_arr(i32 %x) {
; P32-NEXT: %r = icmp ne i32 %x, 1
; P32-NEXT: ret i1 %r
%p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
- %q = load i32* %p
+ %q = load i32, i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
@@ -311,7 +311,7 @@ define i1 @test10_struct_arr_noinbounds(i32 %x) {
; P32-LABEL: @test10_struct_arr_noinbounds(
; P32-NEXT %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
%p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
- %q = load i32* %p
+ %q = load i32, i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
@@ -325,7 +325,7 @@ define i1 @test10_struct_arr_i16(i16 %x) {
; P32-NEXT: %r = icmp ne i16 %x, 1
; P32-NEXT: ret i1 %r
%p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i16 0, i16 %x, i32 2
- %q = load i32* %p
+ %q = load i32, i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
@@ -340,7 +340,7 @@ define i1 @test10_struct_arr_i64(i64 %x) {
; P32-NEXT: %r = icmp ne i32 %1, 1
; P32-NEXT: ret i1 %r
%p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i64 0, i64 %x, i32 2
- %q = load i32* %p
+ %q = load i32, i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
@@ -352,7 +352,7 @@ define i1 @test10_struct_arr_noinbounds_i16(i16 %x) {
; P32-LABEL: @test10_struct_arr_noinbounds_i16(
; P32-NEXT: %r = icmp ne i16 %x, 1
%p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i16 %x, i32 2
- %q = load i32* %p
+ %q = load i32, i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
@@ -366,7 +366,7 @@ define i1 @test10_struct_arr_noinbounds_i64(i64 %x) {
; P32: %r = icmp ne i32 %1, 1
; P32-NEXT: ret i1 %r
%p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i64 %x, i32 2
- %q = load i32* %p
+ %q = load i32, i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
}
diff --git a/llvm/test/Transforms/InstCombine/load-select.ll b/llvm/test/Transforms/InstCombine/load-select.ll
index e8cbad335de..e9dfaa164fb 100644
--- a/llvm/test/Transforms/InstCombine/load-select.ll
+++ b/llvm/test/Transforms/InstCombine/load-select.ll
@@ -11,6 +11,6 @@ define i32 @b(i32 %y) nounwind readonly {
entry:
%0 = icmp eq i32 %y, 0 ; <i1> [#uses=1]
%storemerge = select i1 %0, i32* getelementptr inbounds ([2 x i32]* @a, i32 0, i32 1), i32* getelementptr inbounds ([2 x i32]* @a, i32 0, i32 0) ; <i32*> [#uses=1]
- %1 = load i32* %storemerge, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* %storemerge, align 4 ; <i32> [#uses=1]
ret i32 %1
}
diff --git a/llvm/test/Transforms/InstCombine/load.ll b/llvm/test/Transforms/InstCombine/load.ll
index 3f8ed561e6f..2cc55e99fd2 100644
--- a/llvm/test/Transforms/InstCombine/load.ll
+++ b/llvm/test/Transforms/InstCombine/load.ll
@@ -16,7 +16,7 @@ target datalayout = "e-m:e-p:64:64:64-i64:64-f80:128-n8:16:32:64-S128"
; CHECK-LABEL: @test1(
; CHECK-NOT: load
define i32 @test1() {
- %B = load i32* @X ; <i32> [#uses=1]
+ %B = load i32, i32* @X ; <i32> [#uses=1]
ret i32 %B
}
@@ -24,7 +24,7 @@ define i32 @test1() {
; CHECK-NOT: load
define float @test2() {
%A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 1, i32 1 ; <float*> [#uses=1]
- %B = load float* %A ; <float> [#uses=1]
+ %B = load float, float* %A ; <float> [#uses=1]
ret float %B
}
@@ -32,7 +32,7 @@ define float @test2() {
; CHECK-NOT: load
define i32 @test3() {
%A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 0, i32 0 ; <i32*> [#uses=1]
- %B = load i32* %A ; <i32> [#uses=1]
+ %B = load i32, i32* %A ; <i32> [#uses=1]
ret i32 %B
}
@@ -40,7 +40,7 @@ define i32 @test3() {
; CHECK-NOT: load
define i32 @test4() {
%A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Z, i64 0, i64 1, i32 0 ; <i32*> [#uses=1]
- %B = load i32* %A ; <i32> [#uses=1]
+ %B = load i32, i32* %A ; <i32> [#uses=1]
ret i32 %B
}
@@ -48,7 +48,7 @@ define i32 @test4() {
; CHECK-NOT: load
define i32 @test5(i1 %C) {
%Y = select i1 %C, i32* @X, i32* @X2 ; <i32*> [#uses=1]
- %Z = load i32* %Y ; <i32> [#uses=1]
+ %Z = load i32, i32* %Y ; <i32> [#uses=1]
ret i32 %Z
}
@@ -56,7 +56,7 @@ define i32 @test5(i1 %C) {
; CHECK-NOT: load
define i32 @test7(i32 %X) {
%V = getelementptr i32, i32* null, i32 %X ; <i32*> [#uses=1]
- %R = load i32* %V ; <i32> [#uses=1]
+ %R = load i32, i32* %V ; <i32> [#uses=1]
ret i32 %R
}
@@ -64,15 +64,15 @@ define i32 @test7(i32 %X) {
; CHECK-NOT: load
define i32 @test8(i32* %P) {
store i32 1, i32* %P
- %X = load i32* %P ; <i32> [#uses=1]
+ %X = load i32, i32* %P ; <i32> [#uses=1]
ret i32 %X
}
; CHECK-LABEL: @test9(
; CHECK-NOT: load
define i32 @test9(i32* %P) {
- %X = load i32* %P ; <i32> [#uses=1]
- %Y = load i32* %P ; <i32> [#uses=1]
+ %X = load i32, i32* %P ; <i32> [#uses=1]
+ %Y = load i32, i32* %P ; <i32> [#uses=1]
%Z = sub i32 %X, %Y ; <i32> [#uses=1]
ret i32 %Z
}
@@ -89,7 +89,7 @@ F: ; preds = %0
store i32 0, i32* %P
br label %C
C: ; preds = %F, %T
- %V = load i32* %P ; <i32> [#uses=1]
+ %V = load i32, i32* %P ; <i32> [#uses=1]
ret i32 %V
}
@@ -99,7 +99,7 @@ define double @test11(double* %p) {
%t0 = getelementptr double, double* %p, i32 1
store double 2.0, double* %t0
%t1 = getelementptr double, double* %p, i32 1
- %x = load double* %t1
+ %x = load double, double* %t1
ret double %x
}
@@ -110,14 +110,14 @@ define i32 @test12(i32* %P) {
store i32 123, i32* %A
; Cast the result of the load not the source
%Q = bitcast i32* %A to i32*
- %V = load i32* %Q
+ %V = load i32, i32* %Q
ret i32 %V
}
; CHECK-LABEL: @test13(
; CHECK-NOT: load
define <16 x i8> @test13(<2 x i64> %x) {
- %tmp = load <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*)
+ %tmp = load <16 x i8>, <16 x i8>* bitcast ([4 x i32]* @GLOBAL to <16 x i8>*)
ret <16 x i8> %tmp
}
@@ -128,13 +128,13 @@ define i8 @test14(i8 %x, i32 %y) {
; those confuse the analysis into thinking that the second store does not alias
; the first.
; CHECK-LABEL: @test14(
-; CHECK: %[[R:.*]] = load i8*
+; CHECK: %[[R:.*]] = load i8, i8*
; CHECK-NEXT: ret i8 %[[R]]
%a = alloca i32
%a.i8 = bitcast i32* %a to i8*
store i8 %x, i8* %a.i8
store i32 %y, i32* %a
- %r = load i8* %a.i8
+ %r = load i8, i8* %a.i8
ret i8 %r
}
@@ -143,12 +143,12 @@ define i8 @test14(i8 %x, i32 %y) {
define i8 @test15(i8 %x, i32 %y) {
; Same test as @test14 essentially, but using a global instead of an alloca.
; CHECK-LABEL: @test15(
-; CHECK: %[[R:.*]] = load i8*
+; CHECK: %[[R:.*]] = load i8, i8*
; CHECK-NEXT: ret i8 %[[R]]
%g.i8 = bitcast i32* @test15_global to i8*
store i8 %x, i8* %g.i8
store i32 %y, i32* @test15_global
- %r = load i8* %g.i8
+ %r = load i8, i8* %g.i8
ret i8 %r
}
@@ -156,12 +156,12 @@ define void @test16(i8* %x, i8* %a, i8* %b, i8* %c) {
; Check that we canonicalize loads which are only stored to use integer types
; when there is a valid integer type.
; CHECK-LABEL: @test16(
-; CHECK: %[[L1:.*]] = load i32*
+; CHECK: %[[L1:.*]] = load i32, i32*
; CHECK-NOT: load
; CHECK: store i32 %[[L1]], i32*
; CHECK: store i32 %[[L1]], i32*
; CHECK-NOT: store
-; CHECK: %[[L1:.*]] = load i32*
+; CHECK: %[[L1:.*]] = load i32, i32*
; CHECK-NOT: load
; CHECK: store i32 %[[L1]], i32*
; CHECK: store i32 %[[L1]], i32*
@@ -174,11 +174,11 @@ entry:
%b.cast = bitcast i8* %b to float*
%c.cast = bitcast i8* %c to i32*
- %x1 = load float* %x.cast
+ %x1 = load float, float* %x.cast
store float %x1, float* %a.cast
store float %x1, float* %b.cast
- %x2 = load float* %x.cast
+ %x2 = load float, float* %x.cast
store float %x2, float* %b.cast
%x2.cast = bitcast float %x2 to i32
store i32 %x2.cast, i32* %c.cast
@@ -192,11 +192,11 @@ define void @test17(i8** %x, i8 %y) {
; than the value.
;
; CHECK-LABEL: @test17(
-; CHECK: %[[L:.*]] = load i8**
+; CHECK: %[[L:.*]] = load i8*, i8**
; CHECK: store i8 %y, i8* %[[L]]
entry:
- %x.load = load i8** %x
+ %x.load = load i8*, i8** %x
store i8 %y, i8* %x.load
ret void
diff --git a/llvm/test/Transforms/InstCombine/load3.ll b/llvm/test/Transforms/InstCombine/load3.ll
index bcec75f5529..00f4eafedb9 100644
--- a/llvm/test/Transforms/InstCombine/load3.ll
+++ b/llvm/test/Transforms/InstCombine/load3.ll
@@ -6,9 +6,9 @@ target triple = "i386-apple-macosx10.0.0"
define i32 @test1(i32* %p) {
%t0 = getelementptr i32, i32* %p, i32 1
- %y = load i32* %t0
+ %y = load i32, i32* %t0
%t1 = getelementptr i32, i32* %p, i32 1
- %x = load i32* %t1
+ %x = load i32, i32* %t1
%a = sub i32 %y, %x
ret i32 %a
; CHECK-LABEL: @test1(
@@ -19,7 +19,7 @@ define i32 @test1(i32* %p) {
; PR7429
@.str = private constant [4 x i8] c"XYZ\00"
define float @test2() {
- %tmp = load float* bitcast ([4 x i8]* @.str to float*), align 1
+ %tmp = load float, float* bitcast ([4 x i8]* @.str to float*), align 1
ret float %tmp
; CHECK-LABEL: @test2(
@@ -37,7 +37,7 @@ align 4
; PR14986
define void @test3() nounwind {
; This is a weird way of computing zero.
- %l = load i32* getelementptr ([36 x i32]* @expect32, i32 29826161, i32 28), align 4
+ %l = load i32, i32* getelementptr ([36 x i32]* @expect32, i32 29826161, i32 28), align 4
store i32 %l, i32* getelementptr ([36 x i32]* @rslts32, i32 29826161, i32 28), align 4
ret void
diff --git a/llvm/test/Transforms/InstCombine/loadstore-alignment.ll b/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
index 66ee4d21651..4afa82dcb4c 100644
--- a/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
+++ b/llvm/test/Transforms/InstCombine/loadstore-alignment.ll
@@ -10,7 +10,7 @@ target datalayout = "E-p:64:64:64-p1:64:64:64-p2:32:32:32-a0:0:8-f32:32:32-f64:6
; CHECK: , align 16
define <2 x i64> @static_hem() {
%t = getelementptr <2 x i64>, <2 x i64>* @x, i32 7
- %tmp1 = load <2 x i64>* %t, align 1
+ %tmp1 = load <2 x i64>, <2 x i64>* %t, align 1
ret <2 x i64> %tmp1
}
@@ -19,7 +19,7 @@ define <2 x i64> @static_hem() {
define <2 x i64> @static_hem_addrspacecast() {
%t = getelementptr <2 x i64>, <2 x i64>* @x, i32 7
%t.asc = addrspacecast <2 x i64>* %t to <2 x i64> addrspace(1)*
- %tmp1 = load <2 x i64> addrspace(1)* %t.asc, align 1
+ %tmp1 = load <2 x i64>, <2 x i64> addrspace(1)* %t.asc, align 1
ret <2 x i64> %tmp1
}
@@ -28,7 +28,7 @@ define <2 x i64> @static_hem_addrspacecast() {
define <2 x i64> @static_hem_addrspacecast_smaller_ptr() {
%t = getelementptr <2 x i64>, <2 x i64>* @x, i32 7
%t.asc = addrspacecast <2 x i64>* %t to <2 x i64> addrspace(2)*
- %tmp1 = load <2 x i64> addrspace(2)* %t.asc, align 1
+ %tmp1 = load <2 x i64>, <2 x i64> addrspace(2)* %t.asc, align 1
ret <2 x i64> %tmp1
}
@@ -37,7 +37,7 @@ define <2 x i64> @static_hem_addrspacecast_smaller_ptr() {
define <2 x i64> @static_hem_addrspacecast_larger_ptr() {
%t = getelementptr <2 x i64>, <2 x i64> addrspace(2)* @x.as2, i32 7
%t.asc = addrspacecast <2 x i64> addrspace(2)* %t to <2 x i64> addrspace(1)*
- %tmp1 = load <2 x i64> addrspace(1)* %t.asc, align 1
+ %tmp1 = load <2 x i64>, <2 x i64> addrspace(1)* %t.asc, align 1
ret <2 x i64> %tmp1
}
@@ -45,7 +45,7 @@ define <2 x i64> @static_hem_addrspacecast_larger_ptr() {
; CHECK: , align 16
define <2 x i64> @hem(i32 %i) {
%t = getelementptr <2 x i64>, <2 x i64>* @x, i32 %i
- %tmp1 = load <2 x i64>* %t, align 1
+ %tmp1 = load <2 x i64>, <2 x i64>* %t, align 1
ret <2 x i64> %tmp1
}
@@ -53,14 +53,14 @@ define <2 x i64> @hem(i32 %i) {
; CHECK: , align 16
define <2 x i64> @hem_2d(i32 %i, i32 %j) {
%t = getelementptr [13 x <2 x i64>], [13 x <2 x i64>]* @xx, i32 %i, i32 %j
- %tmp1 = load <2 x i64>* %t, align 1
+ %tmp1 = load <2 x i64>, <2 x i64>* %t, align 1
ret <2 x i64> %tmp1
}
; CHECK-LABEL: @foo(
; CHECK: , align 16
define <2 x i64> @foo() {
- %tmp1 = load <2 x i64>* @x, align 1
+ %tmp1 = load <2 x i64>, <2 x i64>* @x, align 1
ret <2 x i64> %tmp1
}
@@ -70,7 +70,7 @@ define <2 x i64> @foo() {
define <2 x i64> @bar() {
%t = alloca <2 x i64>
call void @kip(<2 x i64>* %t)
- %tmp1 = load <2 x i64>* %t, align 1
+ %tmp1 = load <2 x i64>, <2 x i64>* %t, align 1
ret <2 x i64> %tmp1
}
diff --git a/llvm/test/Transforms/InstCombine/loadstore-metadata.ll b/llvm/test/Transforms/InstCombine/loadstore-metadata.ll
index d0ec7853a43..a30c0bc852e 100644
--- a/llvm/test/Transforms/InstCombine/loadstore-metadata.ll
+++ b/llvm/test/Transforms/InstCombine/loadstore-metadata.ll
@@ -5,9 +5,9 @@ target datalayout = "e-m:e-p:64:64:64-i64:64-f80:128-n8:16:32:64-S128"
define i32 @test_load_cast_combine_tbaa(float* %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves TBAA.
; CHECK-LABEL: @test_load_cast_combine_tbaa(
-; CHECK: load i32* %{{.*}}, !tbaa !0
+; CHECK: load i32, i32* %{{.*}}, !tbaa !0
entry:
- %l = load float* %ptr, !tbaa !0
+ %l = load float, float* %ptr, !tbaa !0
%c = bitcast float %l to i32
ret i32 %c
}
@@ -15,9 +15,9 @@ entry:
define i32 @test_load_cast_combine_noalias(float* %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves no-alias metadata.
; CHECK-LABEL: @test_load_cast_combine_noalias(
-; CHECK: load i32* %{{.*}}, !alias.scope !2, !noalias !1
+; CHECK: load i32, i32* %{{.*}}, !alias.scope !2, !noalias !1
entry:
- %l = load float* %ptr, !alias.scope !2, !noalias !1
+ %l = load float, float* %ptr, !alias.scope !2, !noalias !1
%c = bitcast float %l to i32
ret i32 %c
}
@@ -27,11 +27,11 @@ define float @test_load_cast_combine_range(i32* %ptr) {
; would be nice to preserve or update it somehow but this is hard when moving
; between types.
; CHECK-LABEL: @test_load_cast_combine_range(
-; CHECK: load float* %{{.*}}
+; CHECK: load float, float* %{{.*}}
; CHECK-NOT: !range
; CHECK: ret float
entry:
- %l = load i32* %ptr, !range !5
+ %l = load i32, i32* %ptr, !range !5
%c = bitcast i32 %l to float
ret float %c
}
@@ -39,9 +39,9 @@ entry:
define i32 @test_load_cast_combine_invariant(float* %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves invariant metadata.
; CHECK-LABEL: @test_load_cast_combine_invariant(
-; CHECK: load i32* %{{.*}}, !invariant.load !3
+; CHECK: load i32, i32* %{{.*}}, !invariant.load !3
entry:
- %l = load float* %ptr, !invariant.load !3
+ %l = load float, float* %ptr, !invariant.load !3
%c = bitcast float %l to i32
ret i32 %c
}
@@ -50,9 +50,9 @@ define i32 @test_load_cast_combine_nontemporal(float* %ptr) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves nontemporal
; metadata.
; CHECK-LABEL: @test_load_cast_combine_nontemporal(
-; CHECK: load i32* %{{.*}}, !nontemporal !4
+; CHECK: load i32, i32* %{{.*}}, !nontemporal !4
entry:
- %l = load float* %ptr, !nontemporal !4
+ %l = load float, float* %ptr, !nontemporal !4
%c = bitcast float %l to i32
ret i32 %c
}
@@ -61,7 +61,7 @@ define void @test_load_cast_combine_loop(float* %src, i32* %dst, i32 %n) {
; Ensure (cast (load (...))) -> (load (cast (...))) preserves loop access
; metadata.
; CHECK-LABEL: @test_load_cast_combine_loop(
-; CHECK: load i32* %{{.*}}, !llvm.mem.parallel_loop_access !1
+; CHECK: load i32, i32* %{{.*}}, !llvm.mem.parallel_loop_access !1
entry:
br label %loop
@@ -69,7 +69,7 @@ loop:
%i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
%src.gep = getelementptr inbounds float, float* %src, i32 %i
%dst.gep = getelementptr inbounds i32, i32* %dst, i32 %i
- %l = load float* %src.gep, !llvm.mem.parallel_loop_access !1
+ %l = load float, float* %src.gep, !llvm.mem.parallel_loop_access !1
%c = bitcast float %l to i32
store i32 %c, i32* %dst.gep
%i.next = add i32 %i, 1
@@ -93,11 +93,11 @@ define void @test_load_cast_combine_nonnull(float** %ptr) {
; file, and no LABEL lines are to be added after this point.
;
; CHECK-LABEL: @test_load_cast_combine_nonnull(
-; CHECK: %[[V:.*]] = load i64* %{{.*}}, !range ![[MD:[0-9]+]]
+; CHECK: %[[V:.*]] = load i64, i64* %{{.*}}, !range ![[MD:[0-9]+]]
; CHECK-NOT: !nonnull
; CHECK: store i64 %[[V]], i64*
entry:
- %p = load float** %ptr, !nonnull !3
+ %p = load float*, float** %ptr, !nonnull !3
%gep = getelementptr float*, float** %ptr, i32 42
store float* %p, float** %gep
ret void
diff --git a/llvm/test/Transforms/InstCombine/lshr-phi.ll b/llvm/test/Transforms/InstCombine/lshr-phi.ll
index ffc5754e637..79dc8b33192 100644
--- a/llvm/test/Transforms/InstCombine/lshr-phi.ll
+++ b/llvm/test/Transforms/InstCombine/lshr-phi.ll
@@ -8,7 +8,7 @@
define i32 @hash_string(i8* nocapture %key) nounwind readonly {
entry:
- %t0 = load i8* %key, align 1 ; <i8> [#uses=1]
+ %t0 = load i8, i8* %key, align 1 ; <i8> [#uses=1]
%t1 = icmp eq i8 %t0, 0 ; <i1> [#uses=1]
br i1 %t1, label %bb2, label %bb
@@ -19,13 +19,13 @@ bb: ; preds = %bb, %entry
%t2 = shl i32 %k.04, 1 ; <i32> [#uses=1]
%t3 = lshr i32 %k.04, 14 ; <i32> [#uses=1]
%t4 = add i32 %t2, %t3 ; <i32> [#uses=1]
- %t5 = load i8* %cp.05, align 1 ; <i8> [#uses=1]
+ %t5 = load i8, i8* %cp.05, align 1 ; <i8> [#uses=1]
%t6 = sext i8 %t5 to i32 ; <i32> [#uses=1]
%t7 = xor i32 %t6, %t4 ; <i32> [#uses=1]
%t8 = and i32 %t7, 16383 ; <i32> [#uses=2]
%tmp = add i64 %indvar, 1 ; <i64> [#uses=2]
%scevgep = getelementptr i8, i8* %key, i64 %tmp ; <i8*> [#uses=1]
- %t9 = load i8* %scevgep, align 1 ; <i8> [#uses=1]
+ %t9 = load i8, i8* %scevgep, align 1 ; <i8> [#uses=1]
%t10 = icmp eq i8 %t9, 0 ; <i1> [#uses=1]
br i1 %t10, label %bb2, label %bb
diff --git a/llvm/test/Transforms/InstCombine/malloc-free-delete.ll b/llvm/test/Transforms/InstCombine/malloc-free-delete.ll
index 765c8c35c92..399b4749da5 100644
--- a/llvm/test/Transforms/InstCombine/malloc-free-delete.ll
+++ b/llvm/test/Transforms/InstCombine/malloc-free-delete.ll
@@ -5,7 +5,7 @@ define i32 @main(i32 %argc, i8** %argv) {
%c_19 = alloca i8*
%malloc_206 = tail call i8* @malloc(i32 mul (i32 ptrtoint (i8* getelementptr (i8* null, i32 1) to i32), i32 10))
store i8* %malloc_206, i8** %c_19
- %tmp_207 = load i8** %c_19
+ %tmp_207 = load i8*, i8** %c_19
tail call void @free(i8* %tmp_207)
ret i32 0
; CHECK-NEXT: ret i32 0
diff --git a/llvm/test/Transforms/InstCombine/mem-gep-zidx.ll b/llvm/test/Transforms/InstCombine/mem-gep-zidx.ll
index ddc6be3068b..cf021b13370 100644
--- a/llvm/test/Transforms/InstCombine/mem-gep-zidx.ll
+++ b/llvm/test/Transforms/InstCombine/mem-gep-zidx.ll
@@ -9,7 +9,7 @@ define signext i32 @test1(i32 signext %x) #0 {
entry:
%idxprom = sext i32 %x to i64
%arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* @f.a, i64 0, i64 %idxprom
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
ret i32 %0
; CHECK-LABEL: @test1
@@ -37,7 +37,7 @@ entry:
%idxprom = sext i32 %x to i64
%p = select i1 %y, [1 x i32]* @f.a, [1 x i32]* @f.b
%arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* %p, i64 0, i64 %idxprom
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
ret i32 %0
; CHECK-LABEL: @test3
diff --git a/llvm/test/Transforms/InstCombine/memcmp-1.ll b/llvm/test/Transforms/InstCombine/memcmp-1.ll
index 5fcd603fb79..db15bd66b71 100644
--- a/llvm/test/Transforms/InstCombine/memcmp-1.ll
+++ b/llvm/test/Transforms/InstCombine/memcmp-1.ll
@@ -33,9 +33,9 @@ define i32 @test_simplify2(i8* %mem1, i8* %mem2) {
define i32 @test_simplify3(i8* %mem1, i8* %mem2) {
; CHECK-LABEL: @test_simplify3(
%ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 1)
-; CHECK: [[LOAD1:%[a-z]+]] = load i8* %mem1, align 1
+; CHECK: [[LOAD1:%[a-z]+]] = load i8, i8* %mem1, align 1
; CHECK: [[ZEXT1:%[a-z]+]] = zext i8 [[LOAD1]] to i32
-; CHECK: [[LOAD2:%[a-z]+]] = load i8* %mem2, align 1
+; CHECK: [[LOAD2:%[a-z]+]] = load i8, i8* %mem2, align 1
; CHECK: [[ZEXT2:%[a-z]+]] = zext i8 [[LOAD2]] to i32
; CHECK: [[RET:%[a-z]+]] = sub nsw i32 [[ZEXT1]], [[ZEXT2]]
ret i32 %ret
diff --git a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
index 8efb2a3ab63..cfd2398cf73 100644
--- a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
+++ b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll
@@ -15,22 +15,22 @@ entry:
%tmp3 = shl i32 %hash, 2 ; <i32> [#uses=1]
%tmp5 = and i32 %tmp3, 124 ; <i32> [#uses=4]
%tmp753 = getelementptr [128 x float], [128 x float]* %lookupTable, i32 0, i32 %tmp5 ; <float*> [#uses=1]
- %tmp9 = load float* %tmp753 ; <float> [#uses=1]
+ %tmp9 = load float, float* %tmp753 ; <float> [#uses=1]
%tmp11 = fmul float %tmp9, %x ; <float> [#uses=1]
%tmp13 = fadd float %tmp11, 0.000000e+00 ; <float> [#uses=1]
%tmp17.sum52 = or i32 %tmp5, 1 ; <i32> [#uses=1]
%tmp1851 = getelementptr [128 x float], [128 x float]* %lookupTable, i32 0, i32 %tmp17.sum52 ; <float*> [#uses=1]
- %tmp19 = load float* %tmp1851 ; <float> [#uses=1]
+ %tmp19 = load float, float* %tmp1851 ; <float> [#uses=1]
%tmp21 = fmul float %tmp19, %y ; <float> [#uses=1]
%tmp23 = fadd float %tmp21, %tmp13 ; <float> [#uses=1]
%tmp27.sum50 = or i32 %tmp5, 2 ; <i32> [#uses=1]
%tmp2849 = getelementptr [128 x float], [128 x float]* %lookupTable, i32 0, i32 %tmp27.sum50 ; <float*> [#uses=1]
- %tmp29 = load float* %tmp2849 ; <float> [#uses=1]
+ %tmp29 = load float, float* %tmp2849 ; <float> [#uses=1]
%tmp31 = fmul float %tmp29, %z ; <float> [#uses=1]
%tmp33 = fadd float %tmp31, %tmp23 ; <float> [#uses=1]
%tmp37.sum48 = or i32 %tmp5, 3 ; <i32> [#uses=1]
%tmp3847 = getelementptr [128 x float], [128 x float]* %lookupTable, i32 0, i32 %tmp37.sum48 ; <float*> [#uses=1]
- %tmp39 = load float* %tmp3847 ; <float> [#uses=1]
+ %tmp39 = load float, float* %tmp3847 ; <float> [#uses=1]
%tmp41 = fmul float %tmp39, %w ; <float> [#uses=1]
%tmp43 = fadd float %tmp41, %tmp33 ; <float> [#uses=1]
ret float %tmp43
diff --git a/llvm/test/Transforms/InstCombine/merge-icmp.ll b/llvm/test/Transforms/InstCombine/merge-icmp.ll
index b021fe0429a..6a65b5befa3 100644
--- a/llvm/test/Transforms/InstCombine/merge-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/merge-icmp.ll
@@ -1,7 +1,7 @@
; RUN: opt -S -instcombine < %s | FileCheck %s
define i1 @test1(i16* %x) {
- %load = load i16* %x, align 4
+ %load = load i16, i16* %x, align 4
%trunc = trunc i16 %load to i8
%cmp1 = icmp eq i8 %trunc, 127
%and = and i16 %load, -256
@@ -15,7 +15,7 @@ define i1 @test1(i16* %x) {
}
define i1 @test2(i16* %x) {
- %load = load i16* %x, align 4
+ %load = load i16, i16* %x, align 4
%and = and i16 %load, -256
%cmp1 = icmp eq i16 %and, 32512
%trunc = trunc i16 %load to i8
diff --git a/llvm/test/Transforms/InstCombine/mul.ll b/llvm/test/Transforms/InstCombine/mul.ll
index 4d1e6c700bd..de09f129cdf 100644
--- a/llvm/test/Transforms/InstCombine/mul.ll
+++ b/llvm/test/Transforms/InstCombine/mul.ll
@@ -107,7 +107,7 @@ define i32 @test12(i32 %a, i32 %b) {
; PR2642
define internal void @test13(<4 x float>*) {
; CHECK-LABEL: @test13(
- load <4 x float>* %0, align 1
+ load <4 x float>, <4 x float>* %0, align 1
fmul <4 x float> %2, < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >
store <4 x float> %3, <4 x float>* %0, align 1
ret void
diff --git a/llvm/test/Transforms/InstCombine/multi-size-address-space-pointer.ll b/llvm/test/Transforms/InstCombine/multi-size-address-space-pointer.ll
index a24925394d9..4e5b2100f34 100644
--- a/llvm/test/Transforms/InstCombine/multi-size-address-space-pointer.ll
+++ b/llvm/test/Transforms/InstCombine/multi-size-address-space-pointer.ll
@@ -6,7 +6,7 @@ define i32 @test_as0(i32 addrspace(0)* %a) {
; CHECK-LABEL: @test_as0(
; CHECK: %arrayidx = getelementptr i32, i32* %a, i32 1
%arrayidx = getelementptr i32, i32 addrspace(0)* %a, i64 1
- %y = load i32 addrspace(0)* %arrayidx, align 4
+ %y = load i32, i32 addrspace(0)* %arrayidx, align 4
ret i32 %y
}
@@ -14,7 +14,7 @@ define i32 @test_as1(i32 addrspace(1)* %a) {
; CHECK-LABEL: @test_as1(
; CHECK: %arrayidx = getelementptr i32, i32 addrspace(1)* %a, i64 1
%arrayidx = getelementptr i32, i32 addrspace(1)* %a, i32 1
- %y = load i32 addrspace(1)* %arrayidx, align 4
+ %y = load i32, i32 addrspace(1)* %arrayidx, align 4
ret i32 %y
}
@@ -22,7 +22,7 @@ define i32 @test_as2(i32 addrspace(2)* %a) {
; CHECK-LABEL: @test_as2(
; CHECK: %arrayidx = getelementptr i32, i32 addrspace(2)* %a, i8 1
%arrayidx = getelementptr i32, i32 addrspace(2)* %a, i32 1
- %y = load i32 addrspace(2)* %arrayidx, align 4
+ %y = load i32, i32 addrspace(2)* %arrayidx, align 4
ret i32 %y
}
@@ -30,17 +30,17 @@ define i32 @test_as3(i32 addrspace(3)* %a) {
; CHECK-LABEL: @test_as3(
; CHECK: %arrayidx = getelementptr i32, i32 addrspace(3)* %a, i16 1
%arrayidx = getelementptr i32, i32 addrspace(3)* %a, i32 1
- %y = load i32 addrspace(3)* %arrayidx, align 4
+ %y = load i32, i32 addrspace(3)* %arrayidx, align 4
ret i32 %y
}
define i32 @test_combine_ptrtoint(i32 addrspace(2)* %a) {
; CHECK-LABEL: @test_combine_ptrtoint(
-; CHECK-NEXT: %y = load i32 addrspace(2)* %a
+; CHECK-NEXT: %y = load i32, i32 addrspace(2)* %a
; CHECK-NEXT: ret i32 %y
%cast = ptrtoint i32 addrspace(2)* %a to i8
%castback = inttoptr i8 %cast to i32 addrspace(2)*
- %y = load i32 addrspace(2)* %castback, align 4
+ %y = load i32, i32 addrspace(2)* %castback, align 4
ret i32 %y
}
@@ -55,12 +55,12 @@ define i8 @test_combine_inttoptr(i8 %a) {
define i32 @test_combine_vector_ptrtoint(<2 x i32 addrspace(2)*> %a) {
; CHECK-LABEL: @test_combine_vector_ptrtoint(
; CHECK-NEXT: %p = extractelement <2 x i32 addrspace(2)*> %a, i32 0
-; CHECK-NEXT: %y = load i32 addrspace(2)* %p, align 4
+; CHECK-NEXT: %y = load i32, i32 addrspace(2)* %p, align 4
; CHECK-NEXT: ret i32 %y
%cast = ptrtoint <2 x i32 addrspace(2)*> %a to <2 x i8>
%castback = inttoptr <2 x i8> %cast to <2 x i32 addrspace(2)*>
%p = extractelement <2 x i32 addrspace(2)*> %castback, i32 0
- %y = load i32 addrspace(2)* %p, align 4
+ %y = load i32, i32 addrspace(2)* %p, align 4
ret i32 %y
}
diff --git a/llvm/test/Transforms/InstCombine/no-negzero.ll b/llvm/test/Transforms/InstCombine/no-negzero.ll
index 4ed28360344..07e68251b5a 100644
--- a/llvm/test/Transforms/InstCombine/no-negzero.ll
+++ b/llvm/test/Transforms/InstCombine/no-negzero.ll
@@ -14,17 +14,17 @@ entry:
%0 = alloca double, align 8 ; <double*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store double %x, double* %x_addr
- %1 = load double* %x_addr, align 8 ; <double> [#uses=1]
+ %1 = load double, double* %x_addr, align 8 ; <double> [#uses=1]
%2 = call double @fabs(double %1) nounwind readnone ; <double> [#uses=1]
%3 = call double @sqrt(double %2) nounwind readonly ; <double> [#uses=1]
%4 = fadd double %3, 0.000000e+00 ; <double> [#uses=1]
store double %4, double* %0, align 8
- %5 = load double* %0, align 8 ; <double> [#uses=1]
+ %5 = load double, double* %0, align 8 ; <double> [#uses=1]
store double %5, double* %retval, align 8
br label %return
return: ; preds = %entry
- %retval1 = load double* %retval ; <double> [#uses=1]
+ %retval1 = load double, double* %retval ; <double> [#uses=1]
ret double %retval1
}
diff --git a/llvm/test/Transforms/InstCombine/obfuscated_splat.ll b/llvm/test/Transforms/InstCombine/obfuscated_splat.ll
index fa9cb423d02..c37456cc9cf 100644
--- a/llvm/test/Transforms/InstCombine/obfuscated_splat.ll
+++ b/llvm/test/Transforms/InstCombine/obfuscated_splat.ll
@@ -1,7 +1,7 @@
; RUN: opt -instcombine -S < %s | FileCheck %s
define void @test(<4 x float> *%in_ptr, <4 x float> *%out_ptr) {
- %A = load <4 x float>* %in_ptr, align 16
+ %A = load <4 x float>, <4 x float>* %in_ptr, align 16
%B = shufflevector <4 x float> %A, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 undef, i32 undef>
%C = shufflevector <4 x float> %B, <4 x float> %A, <4 x i32> <i32 0, i32 1, i32 4, i32 undef>
%D = shufflevector <4 x float> %C, <4 x float> %A, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
diff --git a/llvm/test/Transforms/InstCombine/objsize.ll b/llvm/test/Transforms/InstCombine/objsize.ll
index 38dd33d3e83..d531ba2efe4 100644
--- a/llvm/test/Transforms/InstCombine/objsize.ll
+++ b/llvm/test/Transforms/InstCombine/objsize.ll
@@ -22,11 +22,11 @@ entry:
br i1 %cmp, label %cond.true, label %cond.false
cond.true:
- %1 = load i8** %retval
+ %1 = load i8*, i8** %retval
ret i8* %1
cond.false:
- %2 = load i8** %retval
+ %2 = load i8*, i8** %retval
ret i8* %2
}
@@ -126,7 +126,7 @@ define i8* @test5(i32 %n) nounwind ssp {
entry:
%0 = tail call noalias i8* @malloc(i32 20) nounwind
%1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %0, i1 false)
- %2 = load i8** @s, align 8
+ %2 = load i8*, i8** @s, align 8
; CHECK-NOT: @llvm.objectsize
; CHECK: @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 10, i32 1, i1 false)
%3 = tail call i8* @__memcpy_chk(i8* %0, i8* %2, i32 10, i32 %1) nounwind
@@ -138,7 +138,7 @@ define void @test6(i32 %n) nounwind ssp {
entry:
%0 = tail call noalias i8* @malloc(i32 20) nounwind
%1 = tail call i32 @llvm.objectsize.i32.p0i8(i8* %0, i1 false)
- %2 = load i8** @s, align 8
+ %2 = load i8*, i8** @s, align 8
; CHECK-NOT: @llvm.objectsize
; CHECK: @__memcpy_chk(i8* %0, i8* %1, i32 30, i32 20)
%3 = tail call i8* @__memcpy_chk(i8* %0, i8* %2, i32 30, i32 %1) nounwind
diff --git a/llvm/test/Transforms/InstCombine/odr-linkage.ll b/llvm/test/Transforms/InstCombine/odr-linkage.ll
index 2ce62468dc7..73675efb08b 100644
--- a/llvm/test/Transforms/InstCombine/odr-linkage.ll
+++ b/llvm/test/Transforms/InstCombine/odr-linkage.ll
@@ -6,10 +6,10 @@
@g4 = internal constant i32 4
define i32 @test() {
- %A = load i32* @g1
- %B = load i32* @g2
- %C = load i32* @g3
- %D = load i32* @g4
+ %A = load i32, i32* @g1
+ %B = load i32, i32* @g2
+ %C = load i32, i32* @g3
+ %D = load i32, i32* @g4
%a = add i32 %A, %B
%b = add i32 %a, %C
diff --git a/llvm/test/Transforms/InstCombine/or.ll b/llvm/test/Transforms/InstCombine/or.ll
index f604bafcc33..b91a5954d97 100644
--- a/llvm/test/Transforms/InstCombine/or.ll
+++ b/llvm/test/Transforms/InstCombine/or.ll
@@ -394,7 +394,7 @@ define i32 @test37(i32* %xp, i32 %y) {
; CHECK: select i1 %tobool, i32 -1, i32 %x
%tobool = icmp ne i32 %y, 0
%sext = sext i1 %tobool to i32
- %x = load i32* %xp
+ %x = load i32, i32* %xp
%or = or i32 %sext, %x
ret i32 %or
}
@@ -404,7 +404,7 @@ define i32 @test38(i32* %xp, i32 %y) {
; CHECK: select i1 %tobool, i32 -1, i32 %x
%tobool = icmp ne i32 %y, 0
%sext = sext i1 %tobool to i32
- %x = load i32* %xp
+ %x = load i32, i32* %xp
%or = or i32 %x, %sext
ret i32 %or
}
diff --git a/llvm/test/Transforms/InstCombine/phi-merge-gep.ll b/llvm/test/Transforms/InstCombine/phi-merge-gep.ll
index 138e8687ec5..b548e51a56a 100644
--- a/llvm/test/Transforms/InstCombine/phi-merge-gep.ll
+++ b/llvm/test/Transforms/InstCombine/phi-merge-gep.ll
@@ -34,12 +34,12 @@ entry:
br label %bb13
bb: ; preds = %bb13
- %24 = load float* %A0r.0, align 4 ; <float> [#uses=1]
- %25 = load float* %A0i.0, align 4 ; <float> [#uses=1]
- %26 = load float* %A1r.0, align 4 ; <float> [#uses=2]
- %27 = load float* %A1i.0, align 4 ; <float> [#uses=2]
- %28 = load float* %A2r.0, align 4 ; <float> [#uses=2]
- %29 = load float* %A2i.0, align 4 ; <float> [#uses=2]
+ %24 = load float, float* %A0r.0, align 4 ; <float> [#uses=1]
+ %25 = load float, float* %A0i.0, align 4 ; <float> [#uses=1]
+ %26 = load float, float* %A1r.0, align 4 ; <float> [#uses=2]
+ %27 = load float, float* %A1i.0, align 4 ; <float> [#uses=2]
+ %28 = load float, float* %A2r.0, align 4 ; <float> [#uses=2]
+ %29 = load float, float* %A2i.0, align 4 ; <float> [#uses=2]
%30 = fadd float %26, %28 ; <float> [#uses=2]
%31 = fadd float %27, %29 ; <float> [#uses=2]
%32 = fsub float %26, %28 ; <float> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/phi.ll b/llvm/test/Transforms/InstCombine/phi.ll
index 6574164c7fc..54cc4cfe459 100644
--- a/llvm/test/Transforms/InstCombine/phi.ll
+++ b/llvm/test/Transforms/InstCombine/phi.ll
@@ -149,11 +149,11 @@ entry:
br i1 %c, label %bb1, label %bb
bb:
- %C = load i32* %B, align 1
+ %C = load i32, i32* %B, align 1
br label %bb2
bb1:
- %D = load i32* %A, align 1
+ %D = load i32, i32* %A, align 1
br label %bb2
bb2:
@@ -162,7 +162,7 @@ bb2:
; CHECK-LABEL: @test9(
; CHECK: bb2:
; CHECK-NEXT: phi i32* [ %B, %bb ], [ %A, %bb1 ]
-; CHECK-NEXT: %E = load i32* %{{[^,]*}}, align 1
+; CHECK-NEXT: %E = load i32, i32* %{{[^,]*}}, align 1
; CHECK-NEXT: ret i32 %E
}
@@ -173,11 +173,11 @@ entry:
br i1 %c, label %bb1, label %bb
bb:
- %C = load i32* %B, align 16
+ %C = load i32, i32* %B, align 16
br label %bb2
bb1:
- %D = load i32* %A, align 32
+ %D = load i32, i32* %A, align 32
br label %bb2
bb2:
@@ -186,7 +186,7 @@ bb2:
; CHECK-LABEL: @test10(
; CHECK: bb2:
; CHECK-NEXT: phi i32* [ %B, %bb ], [ %A, %bb1 ]
-; CHECK-NEXT: %E = load i32* %{{[^,]*}}, align 16
+; CHECK-NEXT: %E = load i32, i32* %{{[^,]*}}, align 16
; CHECK-NEXT: ret i32 %E
}
@@ -375,30 +375,30 @@ entry:
store i32 %flag, i32* %flag.addr
store i32* %pointer2, i32** %pointer2.addr
store i32 10, i32* %res
- %tmp = load i32* %flag.addr ; <i32> [#uses=1]
+ %tmp = load i32, i32* %flag.addr ; <i32> [#uses=1]
%tobool = icmp ne i32 %tmp, 0 ; <i1> [#uses=1]
br i1 %tobool, label %if.then, label %if.else
return: ; preds = %if.end
- %tmp7 = load i32* %retval ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %tmp7
if.end: ; preds = %if.else, %if.then
- %tmp6 = load i32* %res ; <i32> [#uses=1]
+ %tmp6 = load i32, i32* %res ; <i32> [#uses=1]
store i32 %tmp6, i32* %retval
br label %return
if.then: ; preds = %entry
- %tmp1 = load i32 addrspace(1)** %pointer1.addr ; <i32 addrspace(1)*>
+ %tmp1 = load i32 addrspace(1)*, i32 addrspace(1)** %pointer1.addr ; <i32 addrspace(1)*>
%arrayidx = getelementptr i32, i32 addrspace(1)* %tmp1, i32 0 ; <i32 addrspace(1)*> [#uses=1]
- %tmp2 = load i32 addrspace(1)* %arrayidx ; <i32> [#uses=1]
+ %tmp2 = load i32, i32 addrspace(1)* %arrayidx ; <i32> [#uses=1]
store i32 %tmp2, i32* %res
br label %if.end
if.else: ; preds = %entry
- %tmp3 = load i32** %pointer2.addr ; <i32*> [#uses=1]
+ %tmp3 = load i32*, i32** %pointer2.addr ; <i32*> [#uses=1]
%arrayidx4 = getelementptr i32, i32* %tmp3, i32 0 ; <i32*> [#uses=1]
- %tmp5 = load i32* %arrayidx4 ; <i32> [#uses=1]
+ %tmp5 = load i32, i32* %arrayidx4 ; <i32> [#uses=1]
store i32 %tmp5, i32* %res
br label %if.end
}
diff --git a/llvm/test/Transforms/InstCombine/pr12251.ll b/llvm/test/Transforms/InstCombine/pr12251.ll
index 8c382bb104a..7197bda2e54 100644
--- a/llvm/test/Transforms/InstCombine/pr12251.ll
+++ b/llvm/test/Transforms/InstCombine/pr12251.ll
@@ -2,13 +2,13 @@
define zeroext i1 @_Z3fooPb(i8* nocapture %x) {
entry:
- %a = load i8* %x, align 1, !range !0
+ %a = load i8, i8* %x, align 1, !range !0
%b = and i8 %a, 1
%tobool = icmp ne i8 %b, 0
ret i1 %tobool
}
-; CHECK: %a = load i8* %x, align 1, !range !0
+; CHECK: %a = load i8, i8* %x, align 1, !range !0
; CHECK-NEXT: %tobool = icmp ne i8 %a, 0
; CHECK-NEXT: ret i1 %tobool
diff --git a/llvm/test/Transforms/InstCombine/pr2645-0.ll b/llvm/test/Transforms/InstCombine/pr2645-0.ll
index e2977c626f6..21bfa64a860 100644
--- a/llvm/test/Transforms/InstCombine/pr2645-0.ll
+++ b/llvm/test/Transforms/InstCombine/pr2645-0.ll
@@ -6,10 +6,10 @@
define internal void @""(i8*) {
; <label>:1
bitcast i8* %0 to i32* ; <i32*>:2 [#uses=1]
- load i32* %2, align 1 ; <i32>:3 [#uses=1]
+ load i32, i32* %2, align 1 ; <i32>:3 [#uses=1]
getelementptr i8, i8* %0, i32 4 ; <i8*>:4 [#uses=1]
bitcast i8* %4 to i32* ; <i32*>:5 [#uses=1]
- load i32* %5, align 1 ; <i32>:6 [#uses=1]
+ load i32, i32* %5, align 1 ; <i32>:6 [#uses=1]
br label %7
; <label>:7 ; preds = %9, %1
diff --git a/llvm/test/Transforms/InstCombine/pr2645-1.ll b/llvm/test/Transforms/InstCombine/pr2645-1.ll
index 2818c07dada..2986d21866b 100644
--- a/llvm/test/Transforms/InstCombine/pr2645-1.ll
+++ b/llvm/test/Transforms/InstCombine/pr2645-1.ll
@@ -15,7 +15,7 @@ define internal void @""(i8*, i32, i8*) {
; <label>:6 ; preds = %4
%7 = getelementptr i8, i8* %2, i32 %.0 ; <i8*> [#uses=1]
%8 = bitcast i8* %7 to <4 x i16>* ; <<4 x i16>*> [#uses=1]
- %9 = load <4 x i16>* %8, align 1 ; <<4 x i16>> [#uses=1]
+ %9 = load <4 x i16>, <4 x i16>* %8, align 1 ; <<4 x i16>> [#uses=1]
%10 = bitcast <4 x i16> %9 to <1 x i64> ; <<1 x i64>> [#uses=1]
%11 = call <2 x i64> @foo(<1 x i64> %10)
; <<2 x i64>> [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/select-cmp-br.ll b/llvm/test/Transforms/InstCombine/select-cmp-br.ll
index c4086cd8a2c..1dc7e153f5f 100644
--- a/llvm/test/Transforms/InstCombine/select-cmp-br.ll
+++ b/llvm/test/Transforms/InstCombine/select-cmp-br.ll
@@ -10,12 +10,12 @@ declare void @foobar()
define void @test1(%C*) {
entry:
%1 = getelementptr inbounds %C, %C* %0, i64 0, i32 0, i32 0
- %m = load i64** %1, align 8
+ %m = load i64*, i64** %1, align 8
%2 = getelementptr inbounds %C, %C* %0, i64 1, i32 0, i32 0
- %n = load i64** %2, align 8
+ %n = load i64*, i64** %2, align 8
%3 = getelementptr inbounds i64, i64* %m, i64 9
%4 = bitcast i64* %3 to i64 (%C*)**
- %5 = load i64 (%C*)** %4, align 8
+ %5 = load i64 (%C*)*, i64 (%C*)** %4, align 8
%6 = icmp eq i64* %m, %n
%7 = select i1 %6, %C* %0, %C* null
%8 = icmp eq %C* %7, null
@@ -41,12 +41,12 @@ entry:
define void @test2(%C*) {
entry:
%1 = getelementptr inbounds %C, %C* %0, i64 0, i32 0, i32 0
- %m = load i64** %1, align 8
+ %m = load i64*, i64** %1, align 8
%2 = getelementptr inbounds %C, %C* %0, i64 1, i32 0, i32 0
- %n = load i64** %2, align 8
+ %n = load i64*, i64** %2, align 8
%3 = getelementptr inbounds i64, i64* %m, i64 9
%4 = bitcast i64* %3 to i64 (%C*)**
- %5 = load i64 (%C*)** %4, align 8
+ %5 = load i64 (%C*)*, i64 (%C*)** %4, align 8
%6 = icmp eq i64* %m, %n
%7 = select i1 %6, %C* null, %C* %0
%8 = icmp eq %C* %7, null
@@ -72,12 +72,12 @@ entry:
define void @test3(%C*) {
entry:
%1 = getelementptr inbounds %C, %C* %0, i64 0, i32 0, i32 0
- %m = load i64** %1, align 8
+ %m = load i64*, i64** %1, align 8
%2 = getelementptr inbounds %C, %C* %0, i64 1, i32 0, i32 0
- %n = load i64** %2, align 8
+ %n = load i64*, i64** %2, align 8
%3 = getelementptr inbounds i64, i64* %m, i64 9
%4 = bitcast i64* %3 to i64 (%C*)**
- %5 = load i64 (%C*)** %4, align 8
+ %5 = load i64 (%C*)*, i64 (%C*)** %4, align 8
%6 = icmp eq i64* %m, %n
%7 = select i1 %6, %C* %0, %C* null
%8 = icmp ne %C* %7, null
@@ -103,12 +103,12 @@ entry:
define void @test4(%C*) {
entry:
%1 = getelementptr inbounds %C, %C* %0, i64 0, i32 0, i32 0
- %m = load i64** %1, align 8
+ %m = load i64*, i64** %1, align 8
%2 = getelementptr inbounds %C, %C* %0, i64 1, i32 0, i32 0
- %n = load i64** %2, align 8
+ %n = load i64*, i64** %2, align 8
%3 = getelementptr inbounds i64, i64* %m, i64 9
%4 = bitcast i64* %3 to i64 (%C*)**
- %5 = load i64 (%C*)** %4, align 8
+ %5 = load i64 (%C*)*, i64 (%C*)** %4, align 8
%6 = icmp eq i64* %m, %n
%7 = select i1 %6, %C* null, %C* %0
%8 = icmp ne %C* %7, null
diff --git a/llvm/test/Transforms/InstCombine/select-load-call.ll b/llvm/test/Transforms/InstCombine/select-load-call.ll
index b63468de53e..ad0ef4f85eb 100644
--- a/llvm/test/Transforms/InstCombine/select-load-call.ll
+++ b/llvm/test/Transforms/InstCombine/select-load-call.ll
@@ -10,6 +10,6 @@ define i32 @test(i1 %cond, i32 *%P) {
call void @test2() readonly
%P2 = select i1 %cond, i32 *%P, i32* %A
- %V = load i32* %P2
+ %V = load i32, i32* %P2
ret i32 %V
}
diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index a6a7aa9e3cc..e4cc6f58c05 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -301,30 +301,30 @@ define i32 @test15j(i32 %X) {
define i32 @test16(i1 %C, i32* %P) {
%P2 = select i1 %C, i32* %P, i32* null
- %V = load i32* %P2
+ %V = load i32, i32* %P2
ret i32 %V
; CHECK-LABEL: @test16(
-; CHECK-NEXT: %V = load i32* %P
+; CHECK-NEXT: %V = load i32, i32* %P
; CHECK: ret i32 %V
}
;; It may be legal to load from a null address in a non-zero address space
define i32 @test16_neg(i1 %C, i32 addrspace(1)* %P) {
%P2 = select i1 %C, i32 addrspace(1)* %P, i32 addrspace(1)* null
- %V = load i32 addrspace(1)* %P2
+ %V = load i32, i32 addrspace(1)* %P2
ret i32 %V
; CHECK-LABEL: @test16_neg
; CHECK-NEXT: %P2 = select i1 %C, i32 addrspace(1)* %P, i32 addrspace(1)* null
-; CHECK-NEXT: %V = load i32 addrspace(1)* %P2
+; CHECK-NEXT: %V = load i32, i32 addrspace(1)* %P2
; CHECK: ret i32 %V
}
define i32 @test16_neg2(i1 %C, i32 addrspace(1)* %P) {
%P2 = select i1 %C, i32 addrspace(1)* null, i32 addrspace(1)* %P
- %V = load i32 addrspace(1)* %P2
+ %V = load i32, i32 addrspace(1)* %P2
ret i32 %V
; CHECK-LABEL: @test16_neg2
; CHECK-NEXT: %P2 = select i1 %C, i32 addrspace(1)* null, i32 addrspace(1)* %P
-; CHECK-NEXT: %V = load i32 addrspace(1)* %P2
+; CHECK-NEXT: %V = load i32, i32 addrspace(1)* %P2
; CHECK: ret i32 %V
}
@@ -813,7 +813,7 @@ define i32 @test59(i32 %x, i32 %y) nounwind {
define i1 @test60(i32 %x, i1* %y) nounwind {
%cmp = icmp eq i32 %x, 0
- %load = load i1* %y, align 1
+ %load = load i1, i1* %y, align 1
%cmp1 = icmp slt i32 %x, 1
%sel = select i1 %cmp, i1 %load, i1 %cmp1
ret i1 %sel
@@ -823,7 +823,7 @@ define i1 @test60(i32 %x, i1* %y) nounwind {
@glbl = constant i32 10
define i32 @test61(i32* %ptr) {
- %A = load i32* %ptr
+ %A = load i32, i32* %ptr
%B = icmp eq i32* %ptr, @glbl
%C = select i1 %B, i32 %A, i32 10
ret i32 %C
@@ -1249,11 +1249,11 @@ define i32 @test76(i1 %flag, i32* %x) {
; CHECK-LABEL: @test76(
; CHECK: store i32 0, i32* %x
; CHECK: %[[P:.*]] = select i1 %flag, i32* @under_aligned, i32* %x
-; CHECK: load i32* %[[P]]
+; CHECK: load i32, i32* %[[P]]
store i32 0, i32* %x
%p = select i1 %flag, i32* @under_aligned, i32* %x
- %v = load i32* %p
+ %v = load i32, i32* %p
ret i32 %v
}
@@ -1268,13 +1268,13 @@ define i32 @test77(i1 %flag, i32* %x) {
; CHECK: call void @scribble_on_i32(i32* %[[A]])
; CHECK: store i32 0, i32* %x
; CHECK: %[[P:.*]] = select i1 %flag, i32* %[[A]], i32* %x
-; CHECK: load i32* %[[P]]
+; CHECK: load i32, i32* %[[P]]
%under_aligned = alloca i32, align 1
call void @scribble_on_i32(i32* %under_aligned)
store i32 0, i32* %x
%p = select i1 %flag, i32* %under_aligned, i32* %x
- %v = load i32* %p
+ %v = load i32, i32* %p
ret i32 %v
}
@@ -1282,8 +1282,8 @@ define i32 @test78(i1 %flag, i32* %x, i32* %y, i32* %z) {
; Test that we can speculate the loads around the select even when we can't
; fold the load completely away.
; CHECK-LABEL: @test78(
-; CHECK: %[[V1:.*]] = load i32* %x
-; CHECK-NEXT: %[[V2:.*]] = load i32* %y
+; CHECK: %[[V1:.*]] = load i32, i32* %x
+; CHECK-NEXT: %[[V2:.*]] = load i32, i32* %y
; CHECK-NEXT: %[[S:.*]] = select i1 %flag, i32 %[[V1]], i32 %[[V2]]
; CHECK-NEXT: ret i32 %[[S]]
entry:
@@ -1292,7 +1292,7 @@ entry:
; Block forwarding by storing to %z which could alias either %x or %y.
store i32 42, i32* %z
%p = select i1 %flag, i32* %x, i32* %y
- %v = load i32* %p
+ %v = load i32, i32* %p
ret i32 %v
}
@@ -1300,8 +1300,8 @@ define float @test79(i1 %flag, float* %x, i32* %y, i32* %z) {
; Test that we can speculate the loads around the select even when we can't
; fold the load completely away.
; CHECK-LABEL: @test79(
-; CHECK: %[[V1:.*]] = load float* %x
-; CHECK-NEXT: %[[V2:.*]] = load float* %y
+; CHECK: %[[V1:.*]] = load float, float* %x
+; CHECK-NEXT: %[[V2:.*]] = load float, float* %y
; CHECK-NEXT: %[[S:.*]] = select i1 %flag, float %[[V1]], float %[[V2]]
; CHECK-NEXT: ret float %[[S]]
entry:
@@ -1312,7 +1312,7 @@ entry:
; Block forwarding by storing to %z which could alias either %x or %y.
store i32 42, i32* %z
%p = select i1 %flag, float* %x, float* %y1
- %v = load float* %p
+ %v = load float, float* %p
ret float %v
}
@@ -1322,7 +1322,7 @@ define i32 @test80(i1 %flag) {
; CHECK-LABEL: @test80(
; CHECK: %[[X:.*]] = alloca i32
; CHECK-NEXT: %[[Y:.*]] = alloca i32
-; CHECK: %[[V:.*]] = load i32* %[[X]]
+; CHECK: %[[V:.*]] = load i32, i32* %[[X]]
; CHECK-NEXT: store i32 %[[V]], i32* %[[Y]]
; CHECK-NEXT: ret i32 %[[V]]
entry:
@@ -1330,10 +1330,10 @@ entry:
%y = alloca i32
call void @scribble_on_i32(i32* %x)
call void @scribble_on_i32(i32* %y)
- %tmp = load i32* %x
+ %tmp = load i32, i32* %x
store i32 %tmp, i32* %y
%p = select i1 %flag, i32* %x, i32* %y
- %v = load i32* %p
+ %v = load i32, i32* %p
ret i32 %v
}
@@ -1343,7 +1343,7 @@ define float @test81(i1 %flag) {
; CHECK-LABEL: @test81(
; CHECK: %[[X:.*]] = alloca i32
; CHECK-NEXT: %[[Y:.*]] = alloca i32
-; CHECK: %[[V:.*]] = load i32* %[[X]]
+; CHECK: %[[V:.*]] = load i32, i32* %[[X]]
; CHECK-NEXT: store i32 %[[V]], i32* %[[Y]]
; CHECK-NEXT: %[[C:.*]] = bitcast i32 %[[V]] to float
; CHECK-NEXT: ret float %[[C]]
@@ -1354,10 +1354,10 @@ entry:
%y1 = bitcast i32* %y to float*
call void @scribble_on_i32(i32* %x1)
call void @scribble_on_i32(i32* %y)
- %tmp = load i32* %x1
+ %tmp = load i32, i32* %x1
store i32 %tmp, i32* %y
%p = select i1 %flag, float* %x, float* %y1
- %v = load float* %p
+ %v = load float, float* %p
ret float %v
}
@@ -1369,7 +1369,7 @@ define i32 @test82(i1 %flag) {
; CHECK-NEXT: %[[Y:.*]] = alloca i32
; CHECK-NEXT: %[[X1:.*]] = bitcast float* %[[X]] to i32*
; CHECK-NEXT: %[[Y1:.*]] = bitcast i32* %[[Y]] to float*
-; CHECK: %[[V:.*]] = load float* %[[X]]
+; CHECK: %[[V:.*]] = load float, float* %[[X]]
; CHECK-NEXT: store float %[[V]], float* %[[Y1]]
; CHECK-NEXT: %[[C:.*]] = bitcast float %[[V]] to i32
; CHECK-NEXT: ret i32 %[[C]]
@@ -1380,10 +1380,10 @@ entry:
%y1 = bitcast i32* %y to float*
call void @scribble_on_i32(i32* %x1)
call void @scribble_on_i32(i32* %y)
- %tmp = load float* %x
+ %tmp = load float, float* %x
store float %tmp, float* %y1
%p = select i1 %flag, i32* %x1, i32* %y
- %v = load i32* %p
+ %v = load i32, i32* %p
ret i32 %v
}
@@ -1398,7 +1398,7 @@ define i8* @test83(i1 %flag) {
; CHECK-NEXT: %[[Y:.*]] = alloca i8*
; CHECK-DAG: %[[X2:.*]] = bitcast i8** %[[X]] to i64*
; CHECK-DAG: %[[Y2:.*]] = bitcast i8** %[[Y]] to i64*
-; CHECK: %[[V:.*]] = load i64* %[[X2]]
+; CHECK: %[[V:.*]] = load i64, i64* %[[X2]]
; CHECK-NEXT: store i64 %[[V]], i64* %[[Y2]]
; CHECK-NEXT: %[[C:.*]] = inttoptr i64 %[[V]] to i8*
; CHECK-NEXT: ret i8* %[[S]]
@@ -1409,10 +1409,10 @@ entry:
%y1 = bitcast i64* %y to i8**
call void @scribble_on_i64(i64* %x1)
call void @scribble_on_i64(i64* %y)
- %tmp = load i64* %x1
+ %tmp = load i64, i64* %x1
store i64 %tmp, i64* %y
%p = select i1 %flag, i8** %x, i8** %y1
- %v = load i8** %p
+ %v = load i8*, i8** %p
ret i8* %v
}
@@ -1422,7 +1422,7 @@ define i64 @test84(i1 %flag) {
; CHECK-LABEL: @test84(
; CHECK: %[[X:.*]] = alloca i8*
; CHECK-NEXT: %[[Y:.*]] = alloca i8*
-; CHECK: %[[V:.*]] = load i8** %[[X]]
+; CHECK: %[[V:.*]] = load i8*, i8** %[[X]]
; CHECK-NEXT: store i8* %[[V]], i8** %[[Y]]
; CHECK-NEXT: %[[C:.*]] = ptrtoint i8* %[[V]] to i64
; CHECK-NEXT: ret i64 %[[C]]
@@ -1433,10 +1433,10 @@ entry:
%y1 = bitcast i64* %y to i8**
call void @scribble_on_i64(i64* %x1)
call void @scribble_on_i64(i64* %y)
- %tmp = load i8** %x
+ %tmp = load i8*, i8** %x
store i8* %tmp, i8** %y1
%p = select i1 %flag, i64* %x1, i64* %y
- %v = load i64* %p
+ %v = load i64, i64* %p
ret i64 %v
}
@@ -1445,10 +1445,10 @@ define i8* @test85(i1 %flag) {
; pointer doesn't load all of the stored integer bits. We could fix this, but it
; would require endianness checks and other nastiness.
; CHECK-LABEL: @test85(
-; CHECK: %[[T:.*]] = load i128*
+; CHECK: %[[T:.*]] = load i128, i128*
; CHECK-NEXT: store i128 %[[T]], i128*
-; CHECK-NEXT: %[[X:.*]] = load i8**
-; CHECK-NEXT: %[[Y:.*]] = load i8**
+; CHECK-NEXT: %[[X:.*]] = load i8*, i8**
+; CHECK-NEXT: %[[Y:.*]] = load i8*, i8**
; CHECK-NEXT: %[[V:.*]] = select i1 %flag, i8* %[[X]], i8* %[[Y]]
; CHECK-NEXT: ret i8* %[[V]]
entry:
@@ -1459,10 +1459,10 @@ entry:
%y1 = bitcast i128* %y to i8**
call void @scribble_on_i128(i128* %x2)
call void @scribble_on_i128(i128* %y)
- %tmp = load i128* %x2
+ %tmp = load i128, i128* %x2
store i128 %tmp, i128* %y
%p = select i1 %flag, i8** %x1, i8** %y1
- %v = load i8** %p
+ %v = load i8*, i8** %p
ret i8* %v
}
@@ -1472,10 +1472,10 @@ define i128 @test86(i1 %flag) {
; the bits of the integer.
;
; CHECK-LABEL: @test86(
-; CHECK: %[[T:.*]] = load i8**
+; CHECK: %[[T:.*]] = load i8*, i8**
; CHECK-NEXT: store i8* %[[T]], i8**
-; CHECK-NEXT: %[[X:.*]] = load i128*
-; CHECK-NEXT: %[[Y:.*]] = load i128*
+; CHECK-NEXT: %[[X:.*]] = load i128, i128*
+; CHECK-NEXT: %[[Y:.*]] = load i128, i128*
; CHECK-NEXT: %[[V:.*]] = select i1 %flag, i128 %[[X]], i128 %[[Y]]
; CHECK-NEXT: ret i128 %[[V]]
entry:
@@ -1486,10 +1486,10 @@ entry:
%y1 = bitcast i128* %y to i8**
call void @scribble_on_i128(i128* %x2)
call void @scribble_on_i128(i128* %y)
- %tmp = load i8** %x1
+ %tmp = load i8*, i8** %x1
store i8* %tmp, i8** %y1
%p = select i1 %flag, i128* %x2, i128* %y
- %v = load i128* %p
+ %v = load i128, i128* %p
ret i128 %v
}
diff --git a/llvm/test/Transforms/InstCombine/shufflemask-undef.ll b/llvm/test/Transforms/InstCombine/shufflemask-undef.ll
index 68183b717a2..10509a92941 100644
--- a/llvm/test/Transforms/InstCombine/shufflemask-undef.ll
+++ b/llvm/test/Transforms/InstCombine/shufflemask-undef.ll
@@ -70,7 +70,7 @@ target triple = "i386-apple-darwin9"
define i32 @foo(%struct.State* %dst, <4 x float>* %prgrm, <4 x float>** %buffs, %struct._VMConstants* %cnstn, %struct.PPStreamToken* %pstrm, %struct.PluginBufferData* %gpctx, %struct.VMTextures* %txtrs, %struct.VMGPStack* %gpstk) nounwind {
bb266.i:
getelementptr <4 x float>, <4 x float>* null, i32 11 ; <<4 x float>*>:0 [#uses=1]
- load <4 x float>* %0, align 16 ; <<4 x float>>:1 [#uses=1]
+ load <4 x float>, <4 x float>* %0, align 16 ; <<4 x float>>:1 [#uses=1]
shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> < i32 0, i32 1, i32 1, i32 1 > ; <<4 x float>>:2 [#uses=1]
shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>>:3 [#uses=1]
shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>>:4 [#uses=1]
diff --git a/llvm/test/Transforms/InstCombine/signed-comparison.ll b/llvm/test/Transforms/InstCombine/signed-comparison.ll
index 97d7affc74f..922f4dcb2d4 100644
--- a/llvm/test/Transforms/InstCombine/signed-comparison.ll
+++ b/llvm/test/Transforms/InstCombine/signed-comparison.ll
@@ -13,7 +13,7 @@ bb:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %bb ]
%t0 = and i64 %indvar, 65535
%t1 = getelementptr double, double* %p, i64 %t0
- %t2 = load double* %t1, align 8
+ %t2 = load double, double* %t1, align 8
%t3 = fmul double %t2, 2.2
store double %t3, double* %t1, align 8
%i.04 = trunc i64 %indvar to i16
diff --git a/llvm/test/Transforms/InstCombine/simplify-demanded-bits-pointer.ll b/llvm/test/Transforms/InstCombine/simplify-demanded-bits-pointer.ll
index 6d2193fe448..db8f17917d1 100644
--- a/llvm/test/Transforms/InstCombine/simplify-demanded-bits-pointer.ll
+++ b/llvm/test/Transforms/InstCombine/simplify-demanded-bits-pointer.ll
@@ -29,7 +29,7 @@ bb95: ; preds = %bb88
unreachable
bb107: ; preds = %bb88
- %0 = load i16* undef, align 8 ; <i16> [#uses=1]
+ %0 = load i16, i16* undef, align 8 ; <i16> [#uses=1]
%1 = icmp eq i16 %0, 38 ; <i1> [#uses=1]
%src_eqv_here.0 = select i1 %1, %struct.rtx_def* null, %struct.rtx_def* null ; <%struct.rtx_def*> [#uses=1]
br i1 undef, label %bb127, label %bb125
diff --git a/llvm/test/Transforms/InstCombine/simplify-libcalls.ll b/llvm/test/Transforms/InstCombine/simplify-libcalls.ll
index 9059d8dbb5a..6eb474449c9 100644
--- a/llvm/test/Transforms/InstCombine/simplify-libcalls.ll
+++ b/llvm/test/Transforms/InstCombine/simplify-libcalls.ll
@@ -47,7 +47,7 @@ declare i32 @memcmp(i8*, i8*, i32) nounwind readonly
define i1 @PR2341(i8** %start_addr) {
entry:
- %tmp4 = load i8** %start_addr, align 4 ; <i8*> [#uses=1]
+ %tmp4 = load i8*, i8** %start_addr, align 4 ; <i8*> [#uses=1]
%tmp5 = call i32 @memcmp( i8* %tmp4, i8* getelementptr ([5 x i8]* @_2E_str, i32 0, i32 0), i32 4 ) nounwind readonly ; <i32> [#uses=1]
%tmp6 = icmp eq i32 %tmp5, 0 ; <i1> [#uses=1]
ret i1 %tmp6
diff --git a/llvm/test/Transforms/InstCombine/sincospi.ll b/llvm/test/Transforms/InstCombine/sincospi.ll
index 739827f1962..f49fb35cb76 100644
--- a/llvm/test/Transforms/InstCombine/sincospi.ll
+++ b/llvm/test/Transforms/InstCombine/sincospi.ll
@@ -18,17 +18,17 @@ declare double @__cospi(double %x) #0
@var64 = global double 0.0
define float @test_instbased_f32() {
- %val = load float* @var32
+ %val = load float, float* @var32
%sin = call float @__sinpif(float %val) #0
%cos = call float @__cospif(float %val) #0
%res = fadd float %sin, %cos
ret float %res
-; CHECK-FLOAT-IN-VEC: [[VAL:%[a-z0-9]+]] = load float* @var32
+; CHECK-FLOAT-IN-VEC: [[VAL:%[a-z0-9]+]] = load float, float* @var32
; CHECK-FLOAT-IN-VEC: [[SINCOS:%[a-z0-9]+]] = call <2 x float> @__sincospif_stret(float [[VAL]])
; CHECK-FLOAT-IN-VEC: extractelement <2 x float> [[SINCOS]], i32 0
; CHECK-FLOAT-IN-VEC: extractelement <2 x float> [[SINCOS]], i32 1
-; CHECK: [[VAL:%[a-z0-9]+]] = load float* @var32
+; CHECK: [[VAL:%[a-z0-9]+]] = load float, float* @var32
; CHECK: [[SINCOS:%[a-z0-9]+]] = call { float, float } @__sincospif_stret(float [[VAL]])
; CHECK: extractvalue { float, float } [[SINCOS]], 0
; CHECK: extractvalue { float, float } [[SINCOS]], 1
@@ -55,17 +55,17 @@ define float @test_constant_f32() {
}
define double @test_instbased_f64() {
- %val = load double* @var64
+ %val = load double, double* @var64
%sin = call double @__sinpi(double %val) #0
%cos = call double @__cospi(double %val) #0
%res = fadd double %sin, %cos
ret double %res
-; CHECK-FLOAT-IN-VEC: [[VAL:%[a-z0-9]+]] = load double* @var64
+; CHECK-FLOAT-IN-VEC: [[VAL:%[a-z0-9]+]] = load double, double* @var64
; CHECK-FLOAT-IN-VEC: [[SINCOS:%[a-z0-9]+]] = call { double, double } @__sincospi_stret(double [[VAL]])
; CHECK-FLOAT-IN-VEC: extractvalue { double, double } [[SINCOS]], 0
; CHECK-FLOAT-IN-VEC: extractvalue { double, double } [[SINCOS]], 1
-; CHECK: [[VAL:%[a-z0-9]+]] = load double* @var64
+; CHECK: [[VAL:%[a-z0-9]+]] = load double, double* @var64
; CHECK: [[SINCOS:%[a-z0-9]+]] = call { double, double } @__sincospi_stret(double [[VAL]])
; CHECK: extractvalue { double, double } [[SINCOS]], 0
; CHECK: extractvalue { double, double } [[SINCOS]], 1
diff --git a/llvm/test/Transforms/InstCombine/sqrt.ll b/llvm/test/Transforms/InstCombine/sqrt.ll
index 0c4019d06bf..24c2e00a08d 100644
--- a/llvm/test/Transforms/InstCombine/sqrt.ll
+++ b/llvm/test/Transforms/InstCombine/sqrt.ll
@@ -39,7 +39,7 @@ entry:
; CHECK-NOT: sqrtf(
; CHECK: fptrunc
%arrayidx13 = getelementptr inbounds float, float* %v, i64 2
- %tmp14 = load float* %arrayidx13
+ %tmp14 = load float, float* %arrayidx13
%mul18 = fmul float %tmp14, %tmp14
%add19 = fadd float undef, %mul18
%conv = fpext float %add19 to double
diff --git a/llvm/test/Transforms/InstCombine/store.ll b/llvm/test/Transforms/InstCombine/store.ll
index 1d15d8930f1..c087a733185 100644
--- a/llvm/test/Transforms/InstCombine/store.ll
+++ b/llvm/test/Transforms/InstCombine/store.ll
@@ -12,7 +12,7 @@ define void @test1(i32* %P) {
}
define void @test2(i32* %P) {
- %X = load i32* %P ; <i32> [#uses=1]
+ %X = load i32, i32* %P ; <i32> [#uses=1]
%Y = add i32 %X, 0 ; <i32> [#uses=1]
store i32 %Y, i32* %P
ret void
@@ -36,7 +36,7 @@ Cond2:
br label %Cont
Cont:
- %V = load i32* %A
+ %V = load i32, i32* %A
ret i32 %V
; CHECK-LABEL: @test3(
; CHECK-NOT: alloca
@@ -56,7 +56,7 @@ Cond:
br label %Cont
Cont:
- %V = load i32* %A
+ %V = load i32, i32* %A
ret i32 %V
; CHECK-LABEL: @test4(
; CHECK-NOT: alloca
@@ -92,7 +92,7 @@ entry:
for.cond: ; preds = %for.body, %entry
%storemerge = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load i32* %gi, align 4, !tbaa !0
+ %0 = load i32, i32* %gi, align 4, !tbaa !0
%cmp = icmp slt i32 %0, %n
br i1 %cmp, label %for.body, label %for.end
@@ -100,7 +100,7 @@ for.body: ; preds = %for.cond
%idxprom = sext i32 %0 to i64
%arrayidx = getelementptr inbounds float, float* %a, i64 %idxprom
store float 0.000000e+00, float* %arrayidx, align 4, !tbaa !3
- %1 = load i32* %gi, align 4, !tbaa !0
+ %1 = load i32, i32* %gi, align 4, !tbaa !0
%inc = add nsw i32 %1, 1
store i32 %inc, i32* %gi, align 4, !tbaa !0
br label %for.cond
diff --git a/llvm/test/Transforms/InstCombine/strcmp-1.ll b/llvm/test/Transforms/InstCombine/strcmp-1.ll
index 4d80f9b7e7c..0de26b12dd2 100644
--- a/llvm/test/Transforms/InstCombine/strcmp-1.ll
+++ b/llvm/test/Transforms/InstCombine/strcmp-1.ll
@@ -13,7 +13,7 @@ declare i32 @strcmp(i8*, i8*)
; strcmp("", x) -> -*x
define i32 @test1(i8* %str2) {
; CHECK-LABEL: @test1(
-; CHECK: %strcmpload = load i8* %str
+; CHECK: %strcmpload = load i8, i8* %str
; CHECK: %1 = zext i8 %strcmpload to i32
; CHECK: %2 = sub nsw i32 0, %1
; CHECK: ret i32 %2
@@ -27,7 +27,7 @@ define i32 @test1(i8* %str2) {
; strcmp(x, "") -> *x
define i32 @test2(i8* %str1) {
; CHECK-LABEL: @test2(
-; CHECK: %strcmpload = load i8* %str
+; CHECK: %strcmpload = load i8, i8* %str
; CHECK: %1 = zext i8 %strcmpload to i32
; CHECK: ret i32 %1
diff --git a/llvm/test/Transforms/InstCombine/strncmp-1.ll b/llvm/test/Transforms/InstCombine/strncmp-1.ll
index 78bcc80a03e..a1121821fdf 100644
--- a/llvm/test/Transforms/InstCombine/strncmp-1.ll
+++ b/llvm/test/Transforms/InstCombine/strncmp-1.ll
@@ -13,7 +13,7 @@ declare i32 @strncmp(i8*, i8*, i32)
; strncmp("", x, n) -> -*x
define i32 @test1(i8* %str2) {
; CHECK-LABEL: @test1(
-; CHECK: %strcmpload = load i8* %str
+; CHECK: %strcmpload = load i8, i8* %str
; CHECK: %1 = zext i8 %strcmpload to i32
; CHECK: %2 = sub nsw i32 0, %1
; CHECK: ret i32 %2
@@ -26,7 +26,7 @@ define i32 @test1(i8* %str2) {
; strncmp(x, "", n) -> *x
define i32 @test2(i8* %str1) {
; CHECK-LABEL: @test2(
-; CHECK: %strcmpload = load i8* %str1
+; CHECK: %strcmpload = load i8, i8* %str1
; CHECK: %1 = zext i8 %strcmpload to i32
; CHECK: ret i32 %1
@@ -69,9 +69,9 @@ define i32 @test5() {
; strncmp(x,y,1) -> memcmp(x,y,1)
define i32 @test6(i8* %str1, i8* %str2) {
; CHECK-LABEL: @test6(
-; CHECK: [[LOAD1:%[a-z]+]] = load i8* %str1, align 1
+; CHECK: [[LOAD1:%[a-z]+]] = load i8, i8* %str1, align 1
; CHECK: [[ZEXT1:%[a-z]+]] = zext i8 [[LOAD1]] to i32
-; CHECK: [[LOAD2:%[a-z]+]] = load i8* %str2, align 1
+; CHECK: [[LOAD2:%[a-z]+]] = load i8, i8* %str2, align 1
; CHECK: [[ZEXT2:%[a-z]+]] = zext i8 [[LOAD2]] to i32
; CHECK: [[RET:%[a-z]+]] = sub nsw i32 [[ZEXT1]], [[ZEXT2]]
; CHECK: ret i32 [[RET]]
diff --git a/llvm/test/Transforms/InstCombine/struct-assign-tbaa.ll b/llvm/test/Transforms/InstCombine/struct-assign-tbaa.ll
index 8d42d9e27f3..c75a839f3fb 100644
--- a/llvm/test/Transforms/InstCombine/struct-assign-tbaa.ll
+++ b/llvm/test/Transforms/InstCombine/struct-assign-tbaa.ll
@@ -10,7 +10,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32,
%struct.test1 = type { float }
; CHECK: @test
-; CHECK: %[[LOAD:.*]] = load i32* %{{.*}}, align 4, !tbaa !0
+; CHECK: %[[LOAD:.*]] = load i32, i32* %{{.*}}, align 4, !tbaa !0
; CHECK: store i32 %[[LOAD:.*]], i32* %{{.*}}, align 4, !tbaa !0
; CHECK: ret
define void @test1(%struct.test1* nocapture %a, %struct.test1* nocapture %b) {
@@ -31,7 +31,7 @@ define i32 (i8*, i32*, double*)*** @test2() {
%tmp1 = bitcast %struct.test2* %tmp to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* undef, i64 8, i32 8, i1 false), !tbaa.struct !4
%tmp2 = getelementptr %struct.test2, %struct.test2* %tmp, i32 0, i32 0
- %tmp3 = load i32 (i8*, i32*, double*)*** %tmp2
+ %tmp3 = load i32 (i8*, i32*, double*)**, i32 (i8*, i32*, double*)*** %tmp2
ret i32 (i8*, i32*, double*)*** %tmp2
}
diff --git a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
index 00a029aeab7..8a8b834fa9e 100644
--- a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
+++ b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -110,7 +110,7 @@ define void @vac(<4 x float>* nocapture %a) nounwind {
; CHECK-NOT: load
; CHECK: ret
entry:
- %tmp1 = load <4 x float>* %a ; <<4 x float>> [#uses=1]
+ %tmp1 = load <4 x float>, <4 x float>* %a ; <<4 x float>> [#uses=1]
%vecins = insertelement <4 x float> %tmp1, float 0.000000e+00, i32 0 ; <<4 x float>> [#uses=1]
%vecins4 = insertelement <4 x float> %vecins, float 0.000000e+00, i32 1; <<4 x float>> [#uses=1]
%vecins6 = insertelement <4 x float> %vecins4, float 0.000000e+00, i32 2; <<4 x float>> [#uses=1]
@@ -141,8 +141,8 @@ declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>)
; <rdar://problem/6945110>
define <4 x i32> @kernel3_vertical(<4 x i16> * %src, <8 x i16> * %foo) nounwind {
entry:
- %tmp = load <4 x i16>* %src
- %tmp1 = load <8 x i16>* %foo
+ %tmp = load <4 x i16>, <4 x i16>* %src
+ %tmp1 = load <8 x i16>, <8 x i16>* %foo
; CHECK: %tmp2 = shufflevector
%tmp2 = shufflevector <4 x i16> %tmp, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
; pmovzxwd ignores the upper 64-bits of its input; -instcombine should remove this shuffle:
diff --git a/llvm/test/Transforms/InstCombine/vec_extract_var_elt.ll b/llvm/test/Transforms/InstCombine/vec_extract_var_elt.ll
index f6f9e0134a1..5c0610ff48b 100644
--- a/llvm/test/Transforms/InstCombine/vec_extract_var_elt.ll
+++ b/llvm/test/Transforms/InstCombine/vec_extract_var_elt.ll
@@ -3,7 +3,7 @@
define void @test (float %b, <8 x float> * %p) {
; CHECK: extractelement
; CHECK: fptosi
- %1 = load <8 x float> * %p
+ %1 = load <8 x float> , <8 x float> * %p
%2 = bitcast <8 x float> %1 to <8 x i32>
%3 = bitcast <8 x i32> %2 to <8 x float>
%a = fptosi <8 x float> %3 to <8 x i32>
diff --git a/llvm/test/Transforms/InstCombine/vec_shuffle.ll b/llvm/test/Transforms/InstCombine/vec_shuffle.ll
index eb4e9d6f8c3..164e315c46a 100644
--- a/llvm/test/Transforms/InstCombine/vec_shuffle.ll
+++ b/llvm/test/Transforms/InstCombine/vec_shuffle.ll
@@ -190,11 +190,11 @@ define void @test14(i16 %conv10) {
%tmp = alloca <4 x i16>, align 8
%vecinit6 = insertelement <4 x i16> undef, i16 23, i32 3
store <4 x i16> %vecinit6, <4 x i16>* undef
- %tmp1 = load <4 x i16>* undef
+ %tmp1 = load <4 x i16>, <4 x i16>* undef
%vecinit11 = insertelement <4 x i16> undef, i16 %conv10, i32 3
%div = udiv <4 x i16> %tmp1, %vecinit11
store <4 x i16> %div, <4 x i16>* %tmp
- %tmp4 = load <4 x i16>* %tmp
+ %tmp4 = load <4 x i16>, <4 x i16>* %tmp
%tmp5 = shufflevector <4 x i16> %tmp4, <4 x i16> undef, <2 x i32> <i32 2, i32 0>
%cmp = icmp ule <2 x i16> %tmp5, undef
%sext = sext <2 x i1> %cmp to <2 x i16>
diff --git a/llvm/test/Transforms/InstCombine/volatile_store.ll b/llvm/test/Transforms/InstCombine/volatile_store.ll
index 7cab199a58f..7377b6815e2 100644
--- a/llvm/test/Transforms/InstCombine/volatile_store.ll
+++ b/llvm/test/Transforms/InstCombine/volatile_store.ll
@@ -5,7 +5,7 @@
define void @self_assign_1() {
entry:
- %tmp = load volatile i32* @x ; <i32> [#uses=1]
+ %tmp = load volatile i32, i32* @x ; <i32> [#uses=1]
store volatile i32 %tmp, i32* @x
br label %return
diff --git a/llvm/test/Transforms/InstCombine/vsx-unaligned.ll b/llvm/test/Transforms/InstCombine/vsx-unaligned.ll
index 26e04268f44..ad264fb15b3 100644
--- a/llvm/test/Transforms/InstCombine/vsx-unaligned.ll
+++ b/llvm/test/Transforms/InstCombine/vsx-unaligned.ll
@@ -14,28 +14,28 @@ entry:
%t1 = alloca <4 x float>*, align 8
%t2 = alloca <2 x double>*, align 8
store <4 x float>* @vf, <4 x float>** %t1, align 8
- %0 = load <4 x float>** %t1, align 8
+ %0 = load <4 x float>*, <4 x float>** %t1, align 8
%1 = bitcast <4 x float>* %0 to i8*
%2 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %1)
store <4 x float>* @res_vf, <4 x float>** %t1, align 8
- %3 = load <4 x float>** %t1, align 8
+ %3 = load <4 x float>*, <4 x float>** %t1, align 8
%4 = bitcast <4 x float>* %3 to i8*
call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %2, i8* %4)
store <2 x double>* @vd, <2 x double>** %t2, align 8
- %5 = load <2 x double>** %t2, align 8
+ %5 = load <2 x double>*, <2 x double>** %t2, align 8
%6 = bitcast <2 x double>* %5 to i8*
%7 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %6)
store <2 x double>* @res_vd, <2 x double>** %t2, align 8
- %8 = load <2 x double>** %t2, align 8
+ %8 = load <2 x double>*, <2 x double>** %t2, align 8
%9 = bitcast <2 x double>* %8 to i8*
call void @llvm.ppc.vsx.stxvd2x(<2 x double> %7, i8* %9)
ret void
}
; CHECK-LABEL: @test1
-; CHECK: %0 = load <4 x i32>* bitcast (<4 x float>* @vf to <4 x i32>*), align 1
+; CHECK: %0 = load <4 x i32>, <4 x i32>* bitcast (<4 x float>* @vf to <4 x i32>*), align 1
; CHECK: store <4 x i32> %0, <4 x i32>* bitcast (<4 x float>* @res_vf to <4 x i32>*), align 1
-; CHECK: %1 = load <2 x double>* @vd, align 1
+; CHECK: %1 = load <2 x double>, <2 x double>* @vd, align 1
; CHECK: store <2 x double> %1, <2 x double>* @res_vd, align 1
declare <4 x i32> @llvm.ppc.vsx.lxvw4x(i8*)
diff --git a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll
index cadf664c70b..3a27f9a124c 100644
--- a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll
@@ -11,18 +11,18 @@ entry:
%tmp5 = and i32 %blk_i, 1 ; <i32> [#uses=1]
%tmp6 = or i32 %tmp3, %tmp5 ; <i32> [#uses=1]
%tmp8 = getelementptr %struct.FooBar, %struct.FooBar* %up, i32 0, i32 7 ; <i16*> [#uses=1]
- %tmp9 = load i16* %tmp8, align 1 ; <i16> [#uses=1]
+ %tmp9 = load i16, i16* %tmp8, align 1 ; <i16> [#uses=1]
%tmp910 = zext i16 %tmp9 to i32 ; <i32> [#uses=1]
%tmp12 = getelementptr [4 x i8], [4 x i8]* @some_idx, i32 0, i32 %tmp6 ; <i8*> [#uses=1]
- %tmp13 = load i8* %tmp12, align 1 ; <i8> [#uses=1]
+ %tmp13 = load i8, i8* %tmp12, align 1 ; <i8> [#uses=1]
%tmp1314 = zext i8 %tmp13 to i32 ; <i32> [#uses=1]
%tmp151 = lshr i32 %tmp910, %tmp1314 ; <i32> [#uses=1]
%tmp1516 = trunc i32 %tmp151 to i8 ; <i8> [#uses=1]
%tmp18 = getelementptr %struct.FooBar, %struct.FooBar* %up, i32 0, i32 0 ; <i8*> [#uses=1]
- %tmp19 = load i8* %tmp18, align 1 ; <i8> [#uses=1]
+ %tmp19 = load i8, i8* %tmp18, align 1 ; <i8> [#uses=1]
%tmp22 = and i8 %tmp1516, %tmp19 ; <i8> [#uses=1]
%tmp24 = getelementptr %struct.FooBar, %struct.FooBar* %up, i32 0, i32 0 ; <i8*> [#uses=1]
- %tmp25 = load i8* %tmp24, align 1 ; <i8> [#uses=1]
+ %tmp25 = load i8, i8* %tmp24, align 1 ; <i8> [#uses=1]
%tmp26.mask = and i8 %tmp25, 1 ; <i8> [#uses=1]
%toBool = icmp eq i8 %tmp26.mask, 0 ; <i1> [#uses=1]
%toBool.not = xor i1 %toBool, true ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/InstMerge/ld_hoist1.ll b/llvm/test/Transforms/InstMerge/ld_hoist1.ll
index 571f535a217..74c8900b8ab 100644
--- a/llvm/test/Transforms/InstMerge/ld_hoist1.ll
+++ b/llvm/test/Transforms/InstMerge/ld_hoist1.ll
@@ -16,12 +16,12 @@ for.body.lr.ph: ; preds = %entry
; CHECK-LABEL: for.body
; CHECK: load
; CHECK: %2 = getelementptr inbounds i32, i32* %in, i64 %indvars.iv
-; CHECK: %3 = load i32* %2, align 4
+; CHECK: %3 = load i32, i32* %2, align 4
for.body: ; preds = %for.body.lr.ph, %for.inc
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.inc ]
%arrayidx = getelementptr inbounds i32, i32* %trigger, i64 %indvars.iv
- %1 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %1, 0
br i1 %cmp1, label %if.then, label %if.else
@@ -29,7 +29,7 @@ for.body: ; preds = %for.body.lr.ph, %fo
if.then: ; preds = %for.body
; This load should be hoisted
%arrayidx3 = getelementptr inbounds i32, i32* %in, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%conv = sitofp i32 %2 to float
%add = fadd float %conv, 5.000000e-01
%arrayidx5 = getelementptr inbounds float, float* %out, i64 %indvars.iv
@@ -38,12 +38,12 @@ if.then: ; preds = %for.body
if.else: ; preds = %for.body
%arrayidx7 = getelementptr inbounds float, float* %out, i64 %indvars.iv
- %3 = load float* %arrayidx7, align 4
+ %3 = load float, float* %arrayidx7, align 4
%div = fdiv float %3, 3.000000e+00
store float %div, float* %arrayidx7, align 4
; This load should be hoisted in spite of store
%arrayidx9 = getelementptr inbounds i32, i32* %in, i64 %indvars.iv
- %4 = load i32* %arrayidx9, align 4
+ %4 = load i32, i32* %arrayidx9, align 4
%conv10 = sitofp i32 %4 to float
%add13 = fadd float %div, %conv10
store float %add13, float* %arrayidx7, align 4
diff --git a/llvm/test/Transforms/InstMerge/ld_hoist_st_sink.ll b/llvm/test/Transforms/InstMerge/ld_hoist_st_sink.ll
index 83970275b19..1d3f941882e 100644
--- a/llvm/test/Transforms/InstMerge/ld_hoist_st_sink.ll
+++ b/llvm/test/Transforms/InstMerge/ld_hoist_st_sink.ll
@@ -9,7 +9,7 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
define i64 @foo(%struct.node* nocapture readonly %r) nounwind {
entry:
%node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
- %node.017 = load %struct.node** %node.0.in16, align 8
+ %node.017 = load %struct.node*, %struct.node** %node.0.in16, align 8
%tobool18 = icmp eq %struct.node* %node.017, null
br i1 %tobool18, label %while.end, label %while.body.preheader
@@ -22,23 +22,23 @@ while.body: ; preds = %while.body.preheade
%node.020 = phi %struct.node* [ %node.0, %if.end ], [ %node.017, %while.body.preheader ]
%sum.019 = phi i64 [ %inc, %if.end ], [ 0, %while.body.preheader ]
%orientation = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 4
- %0 = load i64* %orientation, align 8
+ %0 = load i64, i64* %orientation, align 8
%cmp = icmp eq i64 %0, 1
br i1 %cmp, label %if.then, label %if.else
; CHECK: if.then
if.then: ; preds = %while.body
%a = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 5
; CHECK-NOT: load %struct.arc
- %1 = load %struct.arc** %a, align 8
+ %1 = load %struct.arc*, %struct.arc** %a, align 8
%cost = getelementptr inbounds %struct.arc, %struct.arc* %1, i64 0, i32 0
-; CHECK-NOT: load i64*
- %2 = load i64* %cost, align 8
+; CHECK-NOT: load i64, i64*
+ %2 = load i64, i64* %cost, align 8
%pred = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 1
-; CHECK-NOT: load %struct.node**
- %3 = load %struct.node** %pred, align 8
+; CHECK-NOT: load %struct.node*, %struct.node**
+ %3 = load %struct.node*, %struct.node** %pred, align 8
%p = getelementptr inbounds %struct.node, %struct.node* %3, i64 0, i32 6
-; CHECK-NOT: load i64*
- %4 = load i64* %p, align 8
+; CHECK-NOT: load i64, i64*
+ %4 = load i64, i64* %p, align 8
%add = add nsw i64 %4, %2
%p1 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 6
; CHECK-NOT: store i64
@@ -48,17 +48,17 @@ if.then: ; preds = %while.body
; CHECK: if.else
if.else: ; preds = %while.body
%pred2 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 1
-; CHECK-NOT: load %struct.node**
- %5 = load %struct.node** %pred2, align 8
+; CHECK-NOT: load %struct.node*, %struct.node**
+ %5 = load %struct.node*, %struct.node** %pred2, align 8
%p3 = getelementptr inbounds %struct.node, %struct.node* %5, i64 0, i32 6
-; CHECK-NOT: load i64*
- %6 = load i64* %p3, align 8
+; CHECK-NOT: load i64, i64*
+ %6 = load i64, i64* %p3, align 8
%a4 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 5
-; CHECK-NOT: load %struct.arc**
- %7 = load %struct.arc** %a4, align 8
+; CHECK-NOT: load %struct.arc*, %struct.arc**
+ %7 = load %struct.arc*, %struct.arc** %a4, align 8
%cost5 = getelementptr inbounds %struct.arc, %struct.arc* %7, i64 0, i32 0
-; CHECK-NOT: load i64*
- %8 = load i64* %cost5, align 8
+; CHECK-NOT: load i64, i64*
+ %8 = load i64, i64* %cost5, align 8
%sub = sub nsw i64 %6, %8
%p6 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 6
; CHECK-NOT: store i64
@@ -70,7 +70,7 @@ if.end: ; preds = %if.else, %if.then
; CHECK: store
%inc = add nsw i64 %sum.019, 1
%node.0.in = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 2
- %node.0 = load %struct.node** %node.0.in, align 8
+ %node.0 = load %struct.node*, %struct.node** %node.0.in, align 8
%tobool = icmp eq %struct.node* %node.0, null
br i1 %tobool, label %while.end.loopexit, label %while.body
diff --git a/llvm/test/Transforms/InstMerge/st_sink_barrier_call.ll b/llvm/test/Transforms/InstMerge/st_sink_barrier_call.ll
index d0be479a423..cdcc34624d5 100644
--- a/llvm/test/Transforms/InstMerge/st_sink_barrier_call.ll
+++ b/llvm/test/Transforms/InstMerge/st_sink_barrier_call.ll
@@ -11,16 +11,16 @@ declare i32 @foo(i32 %x)
define void @sink_store(%struct.node* nocapture %r, i32 %index) {
entry:
%node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
- %node.017 = load %struct.node** %node.0.in16, align 8
+ %node.017 = load %struct.node*, %struct.node** %node.0.in16, align 8
%index.addr = alloca i32, align 4
store i32 %index, i32* %index.addr, align 4
- %0 = load i32* %index.addr, align 4
+ %0 = load i32, i32* %index.addr, align 4
%cmp = icmp slt i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
; CHECK: if.then
if.then: ; preds = %entry
- %1 = load i32* %index.addr, align 4
+ %1 = load i32, i32* %index.addr, align 4
%p1 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK: store i32
store i32 %1, i32* %p1, align 4
@@ -28,7 +28,7 @@ if.then: ; preds = %entry
; CHECK: if.else
if.else: ; preds = %entry
- %2 = load i32* %index.addr, align 4
+ %2 = load i32, i32* %index.addr, align 4
%add = add nsw i32 %2, 1
%p3 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK: store i32
diff --git a/llvm/test/Transforms/InstMerge/st_sink_bugfix_22613.ll b/llvm/test/Transforms/InstMerge/st_sink_bugfix_22613.ll
index 1f548773811..1ec95f15d13 100644
--- a/llvm/test/Transforms/InstMerge/st_sink_bugfix_22613.ll
+++ b/llvm/test/Transforms/InstMerge/st_sink_bugfix_22613.ll
@@ -26,14 +26,14 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc8, %entry
- %0 = load i32* @d, align 4
+ %0 = load i32, i32* @d, align 4
%cmp = icmp slt i32 %0, 2
br i1 %cmp, label %for.body, label %for.end10
for.body: ; preds = %for.cond
- %1 = load i32* @d, align 4
+ %1 = load i32, i32* @d, align 4
%idxprom = sext i32 %1 to i64
- %2 = load i32* @b, align 4
+ %2 = load i32, i32* @b, align 4
%idxprom1 = sext i32 %2 to i64
%arrayidx = getelementptr inbounds [1 x [3 x i8]], [1 x [3 x i8]]* @f, i32 0, i64 %idxprom1
%arrayidx2 = getelementptr inbounds [3 x i8], [3 x i8]* %arrayidx, i32 0, i64 %idxprom
@@ -42,30 +42,30 @@ for.body: ; preds = %for.cond
br label %for.cond3
for.cond3: ; preds = %for.inc, %for.body
- %3 = load i32* @e, align 4
+ %3 = load i32, i32* @e, align 4
%cmp4 = icmp slt i32 %3, 3
br i1 %cmp4, label %for.body5, label %for.end
for.body5: ; preds = %for.cond3
- %4 = load i32* @c, align 4
+ %4 = load i32, i32* @c, align 4
%tobool = icmp ne i32 %4, 0
br i1 %tobool, label %if.then, label %if.end
if.then: ; preds = %for.body5
- %5 = load i32* @a, align 4
+ %5 = load i32, i32* @a, align 4
%dec = add nsw i32 %5, -1
store i32 %dec, i32* @a, align 4
br label %if.end
if.end: ; preds = %if.then, %for.body5
- %6 = load i32* @e, align 4
+ %6 = load i32, i32* @e, align 4
%idxprom6 = sext i32 %6 to i64
%arrayidx7 = getelementptr inbounds [3 x i8], [3 x i8]* getelementptr inbounds ([1 x [3 x i8]]* @f, i32 0, i64 0), i32 0, i64 %idxprom6
store i8 1, i8* %arrayidx7, align 1
br label %for.inc
for.inc: ; preds = %if.end
- %7 = load i32* @e, align 4
+ %7 = load i32, i32* @e, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* @e, align 4
br label %for.cond3
@@ -74,7 +74,7 @@ for.end: ; preds = %for.cond3
br label %for.inc8
for.inc8: ; preds = %for.end
- %8 = load i32* @d, align 4
+ %8 = load i32, i32* @d, align 4
%inc9 = add nsw i32 %8, 1
store i32 %inc9, i32* @d, align 4
br label %for.cond
@@ -89,7 +89,7 @@ entry:
%retval = alloca i32, align 4
store i32 0, i32* %retval
call void @fn1()
- %0 = load i8* getelementptr inbounds ([1 x [3 x i8]]* @f, i32 0, i64 0, i64 1), align 1
+ %0 = load i8, i8* getelementptr inbounds ([1 x [3 x i8]]* @f, i32 0, i64 0, i64 1), align 1
%conv = sext i8 %0 to i32
%cmp = icmp ne i32 %conv, 1
br i1 %cmp, label %if.then, label %if.end
diff --git a/llvm/test/Transforms/InstMerge/st_sink_no_barrier_call.ll b/llvm/test/Transforms/InstMerge/st_sink_no_barrier_call.ll
index f20bad3e844..0ad90f8581e 100644
--- a/llvm/test/Transforms/InstMerge/st_sink_no_barrier_call.ll
+++ b/llvm/test/Transforms/InstMerge/st_sink_no_barrier_call.ll
@@ -11,16 +11,16 @@ declare i32 @foo(i32 %x) #0
define void @sink_store(%struct.node* nocapture %r, i32 %index) {
entry:
%node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
- %node.017 = load %struct.node** %node.0.in16, align 8
+ %node.017 = load %struct.node*, %struct.node** %node.0.in16, align 8
%index.addr = alloca i32, align 4
store i32 %index, i32* %index.addr, align 4
- %0 = load i32* %index.addr, align 4
+ %0 = load i32, i32* %index.addr, align 4
%cmp = icmp slt i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
; CHECK: if.then
if.then: ; preds = %entry
- %1 = load i32* %index.addr, align 4
+ %1 = load i32, i32* %index.addr, align 4
%p1 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
store i32 %1, i32* %p1, align 4
@@ -28,7 +28,7 @@ if.then: ; preds = %entry
; CHECK: if.else
if.else: ; preds = %entry
- %2 = load i32* %index.addr, align 4
+ %2 = load i32, i32* %index.addr, align 4
%add = add nsw i32 %2, 1
%p3 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
diff --git a/llvm/test/Transforms/InstMerge/st_sink_no_barrier_load.ll b/llvm/test/Transforms/InstMerge/st_sink_no_barrier_load.ll
index 1b9aa96d947..b7236e4c6a0 100644
--- a/llvm/test/Transforms/InstMerge/st_sink_no_barrier_load.ll
+++ b/llvm/test/Transforms/InstMerge/st_sink_no_barrier_load.ll
@@ -9,27 +9,27 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
define void @sink_store(%struct.node* nocapture %r, i32 %index) {
entry:
%node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
- %node.017 = load %struct.node** %node.0.in16, align 8
+ %node.017 = load %struct.node*, %struct.node** %node.0.in16, align 8
%index.addr = alloca i32, align 4
store i32 %index, i32* %index.addr, align 4
- %0 = load i32* %index.addr, align 4
+ %0 = load i32, i32* %index.addr, align 4
%cmp = icmp slt i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
; CHECK: if.then
if.then: ; preds = %entry
- %1 = load i32* %index.addr, align 4
+ %1 = load i32, i32* %index.addr, align 4
%p1 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
store i32 %1, i32* %p1, align 4
%p2 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 5, i32 6
- ; CHECK: load i32*
- %not_barrier = load i32 * %p2, align 4
+ ; CHECK: load i32, i32*
+ %not_barrier = load i32 , i32 * %p2, align 4
br label %if.end
; CHECK: if.else
if.else: ; preds = %entry
- %2 = load i32* %index.addr, align 4
+ %2 = load i32, i32* %index.addr, align 4
%add = add nsw i32 %2, 1
%p3 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
diff --git a/llvm/test/Transforms/InstMerge/st_sink_no_barrier_store.ll b/llvm/test/Transforms/InstMerge/st_sink_no_barrier_store.ll
index 791ccc49e55..e13f28aa5e1 100644
--- a/llvm/test/Transforms/InstMerge/st_sink_no_barrier_store.ll
+++ b/llvm/test/Transforms/InstMerge/st_sink_no_barrier_store.ll
@@ -9,16 +9,16 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
define void @sink_store(%struct.node* nocapture %r, i32 %index) {
entry:
%node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
- %node.017 = load %struct.node** %node.0.in16, align 8
+ %node.017 = load %struct.node*, %struct.node** %node.0.in16, align 8
%index.addr = alloca i32, align 4
store i32 %index, i32* %index.addr, align 4
- %0 = load i32* %index.addr, align 4
+ %0 = load i32, i32* %index.addr, align 4
%cmp = icmp slt i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
; CHECK: if.then
if.then: ; preds = %entry
- %1 = load i32* %index.addr, align 4
+ %1 = load i32, i32* %index.addr, align 4
%p1 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
store i32 %1, i32* %p1, align 4
@@ -26,7 +26,7 @@ if.then: ; preds = %entry
; CHECK: if.else
if.else: ; preds = %entry
- %2 = load i32* %index.addr, align 4
+ %2 = load i32, i32* %index.addr, align 4
%add = add nsw i32 %2, 1
%p2 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
store i32 %add, i32* %p2, align 4
diff --git a/llvm/test/Transforms/InstMerge/st_sink_two_stores.ll b/llvm/test/Transforms/InstMerge/st_sink_two_stores.ll
index b8e2f90261c..5b5582f438c 100644
--- a/llvm/test/Transforms/InstMerge/st_sink_two_stores.ll
+++ b/llvm/test/Transforms/InstMerge/st_sink_two_stores.ll
@@ -9,16 +9,16 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
define void @sink_store(%struct.node* nocapture %r, i32 %index) {
entry:
%node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
- %node.017 = load %struct.node** %node.0.in16, align 8
+ %node.017 = load %struct.node*, %struct.node** %node.0.in16, align 8
%index.addr = alloca i32, align 4
store i32 %index, i32* %index.addr, align 4
- %0 = load i32* %index.addr, align 4
+ %0 = load i32, i32* %index.addr, align 4
%cmp = icmp slt i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
; CHECK: if.then
if.then: ; preds = %entry
- %1 = load i32* %index.addr, align 4
+ %1 = load i32, i32* %index.addr, align 4
%p1 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
store i32 %1, i32* %p1, align 4
@@ -29,7 +29,7 @@ if.then: ; preds = %entry
; CHECK: if.else
if.else: ; preds = %entry
- %2 = load i32* %index.addr, align 4
+ %2 = load i32, i32* %index.addr, align 4
%add = add nsw i32 %2, 1
%p3 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
diff --git a/llvm/test/Transforms/InstMerge/st_sink_with_barrier.ll b/llvm/test/Transforms/InstMerge/st_sink_with_barrier.ll
index 4b302eca6cb..a05ae88e384 100644
--- a/llvm/test/Transforms/InstMerge/st_sink_with_barrier.ll
+++ b/llvm/test/Transforms/InstMerge/st_sink_with_barrier.ll
@@ -8,27 +8,27 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
define void @sink_store(%struct.node* nocapture %r, i32 %index) {
entry:
%node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
- %node.017 = load %struct.node** %node.0.in16, align 8
+ %node.017 = load %struct.node*, %struct.node** %node.0.in16, align 8
%index.addr = alloca i32, align 4
store i32 %index, i32* %index.addr, align 4
- %0 = load i32* %index.addr, align 4
+ %0 = load i32, i32* %index.addr, align 4
%cmp = icmp slt i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
; CHECK: if.then
if.then: ; preds = %entry
- %1 = load i32* %index.addr, align 4
+ %1 = load i32, i32* %index.addr, align 4
%p1 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK: store i32
store i32 %1, i32* %p1, align 4
%p2 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
- ; CHECK: load i32*
- %barrier = load i32 * %p2, align 4
+ ; CHECK: load i32, i32*
+ %barrier = load i32 , i32 * %p2, align 4
br label %if.end
; CHECK: if.else
if.else: ; preds = %entry
- %2 = load i32* %index.addr, align 4
+ %2 = load i32, i32* %index.addr, align 4
%add = add nsw i32 %2, 1
%p3 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK: store i32
diff --git a/llvm/test/Transforms/InstSimplify/call-callconv.ll b/llvm/test/Transforms/InstSimplify/call-callconv.ll
index e475be781db..77015116d88 100644
--- a/llvm/test/Transforms/InstSimplify/call-callconv.ll
+++ b/llvm/test/Transforms/InstSimplify/call-callconv.ll
@@ -42,7 +42,7 @@ define arm_aapcscc zeroext i1 @_strlen2(i8* %str) {
%cmp = icmp ne i32 %call, 0
ret i1 %cmp
-; CHECK: %[[STRLENFIRST:.*]] = load i8* %str
+; CHECK: %[[STRLENFIRST:.*]] = load i8, i8* %str
; CHECK: %[[CMP:.*]] = icmp ne i8 %[[STRLENFIRST]], 0
; CHECK: ret i1 %[[CMP]]
}
diff --git a/llvm/test/Transforms/InstSimplify/compare.ll b/llvm/test/Transforms/InstSimplify/compare.ll
index cbc0125ced0..376f6ea730b 100644
--- a/llvm/test/Transforms/InstSimplify/compare.ll
+++ b/llvm/test/Transforms/InstSimplify/compare.ll
@@ -950,7 +950,7 @@ define i1 @returns_nonnull_as_deref() {
}
define i1 @nonnull_load(i32** %addr) {
- %ptr = load i32** %addr, !nonnull !{}
+ %ptr = load i32*, i32** %addr, !nonnull !{}
%cmp = icmp eq i32* %ptr, null
ret i1 %cmp
; CHECK-LABEL: @nonnull_load
@@ -958,14 +958,14 @@ define i1 @nonnull_load(i32** %addr) {
}
define i1 @nonnull_load_as_outer(i32* addrspace(1)* %addr) {
- %ptr = load i32* addrspace(1)* %addr, !nonnull !{}
+ %ptr = load i32*, i32* addrspace(1)* %addr, !nonnull !{}
%cmp = icmp eq i32* %ptr, null
ret i1 %cmp
; CHECK-LABEL: @nonnull_load_as_outer
; CHECK: ret i1 false
}
define i1 @nonnull_load_as_inner(i32 addrspace(1)** %addr) {
- %ptr = load i32 addrspace(1)** %addr, !nonnull !{}
+ %ptr = load i32 addrspace(1)*, i32 addrspace(1)** %addr, !nonnull !{}
%cmp = icmp eq i32 addrspace(1)* %ptr, null
ret i1 %cmp
; CHECK-LABEL: @nonnull_load_as_inner
diff --git a/llvm/test/Transforms/InstSimplify/load.ll b/llvm/test/Transforms/InstSimplify/load.ll
index 92953cd0ebf..ab87d4b9c53 100644
--- a/llvm/test/Transforms/InstSimplify/load.ll
+++ b/llvm/test/Transforms/InstSimplify/load.ll
@@ -6,14 +6,14 @@
define i32 @crash_on_zeroinit() {
; CHECK-LABEL: @crash_on_zeroinit
; CHECK: ret i32 0
- %load = load i32* bitcast ({}* @zeroinit to i32*)
+ %load = load i32, i32* bitcast ({}* @zeroinit to i32*)
ret i32 %load
}
define i32 @crash_on_undef() {
; CHECK-LABEL: @crash_on_undef
; CHECK: ret i32 undef
- %load = load i32* bitcast ({}* @undef to i32*)
+ %load = load i32, i32* bitcast ({}* @undef to i32*)
ret i32 %load
}
diff --git a/llvm/test/Transforms/InstSimplify/vector_ptr_bitcast.ll b/llvm/test/Transforms/InstSimplify/vector_ptr_bitcast.ll
index 607892ae74d..97c8343bfb9 100644
--- a/llvm/test/Transforms/InstSimplify/vector_ptr_bitcast.ll
+++ b/llvm/test/Transforms/InstSimplify/vector_ptr_bitcast.ll
@@ -14,7 +14,7 @@ target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
align 8
define i64 @fn() {
- %x = load <2 x i8*>* bitcast (%mst* @a to <2 x i8*>*), align 8
+ %x = load <2 x i8*>, <2 x i8*>* bitcast (%mst* @a to <2 x i8*>*), align 8
%b = extractelement <2 x i8*> %x, i32 0
%c = ptrtoint i8* %b to i64
; CHECK-LABEL: @fn
@@ -23,7 +23,7 @@ define i64 @fn() {
}
define i64 @fn2() {
- %x = load <4 x i32*>* bitcast (%mst2* @b to <4 x i32*>*), align 8
+ %x = load <4 x i32*>, <4 x i32*>* bitcast (%mst2* @b to <4 x i32*>*), align 8
%b = extractelement <4 x i32*> %x, i32 0
%c = extractelement <4 x i32*> %x, i32 3
%d = ptrtoint i32* %b to i64
diff --git a/llvm/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll b/llvm/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll
index 16523886a06..c50b6fc61c8 100644
--- a/llvm/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll
+++ b/llvm/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll
@@ -10,7 +10,7 @@
; CHECK: @C = internal alias i32* @A
define i32 @main() {
- %tmp = load i32* @C
+ %tmp = load i32, i32* @C
ret i32 %tmp
}
diff --git a/llvm/test/Transforms/JumpThreading/2010-08-26-and.ll b/llvm/test/Transforms/JumpThreading/2010-08-26-and.ll
index 694bc8f8421..b6e19cbdbc0 100644
--- a/llvm/test/Transforms/JumpThreading/2010-08-26-and.ll
+++ b/llvm/test/Transforms/JumpThreading/2010-08-26-and.ll
@@ -30,7 +30,7 @@ land.lhs.true.i: ; preds = %_ZN12StringSwitchI5
%indvar = phi i64 [ 0, %bb.nph ], [ %tmp146, %_ZN12StringSwitchI5ColorE4CaseILj7EEERS1_RAT__KcRKS0_.exit134 ] ; <i64> [#uses=1]
%tmp146 = add i64 %indvar, 1 ; <i64> [#uses=3]
%arrayidx = getelementptr i8*, i8** %argv, i64 %tmp146 ; <i8**> [#uses=1]
- %tmp6 = load i8** %arrayidx, align 8 ; <i8*> [#uses=8]
+ %tmp6 = load i8*, i8** %arrayidx, align 8 ; <i8*> [#uses=8]
%call.i.i = call i64 @strlen(i8* %tmp6) nounwind ; <i64> [#uses=1]
%conv.i.i = trunc i64 %call.i.i to i32 ; <i32> [#uses=6]\
; CHECK: switch i32 %conv.i.i
diff --git a/llvm/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll b/llvm/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll
index 86a1321c354..ea9cc7f8fc6 100644
--- a/llvm/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll
+++ b/llvm/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll
@@ -15,7 +15,7 @@ for.cond1177:
br i1 %cmp1179, label %for.cond1177, label %land.rhs1320
land.rhs1320:
- %tmp1324 = load volatile i64* getelementptr inbounds (%0* @g_338, i64 0, i32 2), align 1
+ %tmp1324 = load volatile i64, i64* getelementptr inbounds (%0* @g_338, i64 0, i32 2), align 1
br label %if.end.i
if.end.i:
diff --git a/llvm/test/Transforms/JumpThreading/crash.ll b/llvm/test/Transforms/JumpThreading/crash.ll
index 2fe87464c11..900a7738d3b 100644
--- a/llvm/test/Transforms/JumpThreading/crash.ll
+++ b/llvm/test/Transforms/JumpThreading/crash.ll
@@ -356,7 +356,7 @@ B2:
br label %BrBlock
BrBlock:
- %L = load i32* %P
+ %L = load i32, i32* %P
%C = icmp eq i32 %L, 42
br i1 %C, label %T, label %F
diff --git a/llvm/test/Transforms/JumpThreading/landing-pad.ll b/llvm/test/Transforms/JumpThreading/landing-pad.ll
index 0237e74f571..b25f5fd9481 100644
--- a/llvm/test/Transforms/JumpThreading/landing-pad.ll
+++ b/llvm/test/Transforms/JumpThreading/landing-pad.ll
@@ -13,8 +13,8 @@
define void @_ZN15EditCommandImpl5applyEv(%class.E* %this) uwtable align 2 {
entry:
%0 = bitcast %class.E* %this to void (%class.E*)***
- %vtable = load void (%class.E*)*** %0, align 8
- %1 = load void (%class.E*)** %vtable, align 8
+ %vtable = load void (%class.E*)**, void (%class.E*)*** %0, align 8
+ %1 = load void (%class.E*)*, void (%class.E*)** %vtable, align 8
call void %1(%class.E* %this)
ret void
}
@@ -53,7 +53,7 @@ _ZN1DC1Ev.exit: ; preds = %entry
store i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*]* @_ZTV1D, i64 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8
%_ref.i.i.i = getelementptr inbounds i8, i8* %call, i64 8
%1 = bitcast i8* %_ref.i.i.i to i32*
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%inc.i.i.i = add nsw i32 %2, 1
store i32 %inc.i.i.i, i32* %1, align 4
%3 = bitcast i8* %call to %class.D*
@@ -76,7 +76,7 @@ lpad: ; preds = %entry
lpad1: ; preds = %_ZN1DC1Ev.exit, %_ZN15EditCommandImpl5applyEv.exit
%5 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
cleanup
- %6 = load i32* %1, align 4
+ %6 = load i32, i32* %1, align 4
%tobool.i.i.i = icmp eq i32 %6, 0
br i1 %tobool.i.i.i, label %_ZN1BI1DED1Ev.exit, label %if.then.i.i.i
@@ -101,7 +101,7 @@ entry:
%m_ptr.i = getelementptr inbounds %class.B, %class.B* %this, i64 0, i32 0
store %class.D* %p1, %class.D** %m_ptr.i, align 8
%_ref.i.i = getelementptr inbounds %class.D, %class.D* %p1, i64 0, i32 0, i32 0, i32 1, i32 0, i32 0
- %0 = load i32* %_ref.i.i, align 4
+ %0 = load i32, i32* %_ref.i.i, align 4
%inc.i.i = add nsw i32 %0, 1
store i32 %inc.i.i, i32* %_ref.i.i, align 4
ret void
@@ -116,7 +116,7 @@ declare void @_ZdlPv()
define %class.D* @_ZN1BI1DEptEv(%class.B* nocapture readonly %this) nounwind readonly uwtable align 2 {
entry:
%m_ptr = getelementptr inbounds %class.B, %class.B* %this, i64 0, i32 0
- %0 = load %class.D** %m_ptr, align 8
+ %0 = load %class.D*, %class.D** %m_ptr, align 8
ret %class.D* %0
}
@@ -125,9 +125,9 @@ declare void @_ZN1D16deleteKeyPressedEv()
define void @_ZN1BI1DED1Ev(%class.B* nocapture readonly %this) unnamed_addr uwtable align 2 {
entry:
%m_ptr.i = getelementptr inbounds %class.B, %class.B* %this, i64 0, i32 0
- %0 = load %class.D** %m_ptr.i, align 8
+ %0 = load %class.D*, %class.D** %m_ptr.i, align 8
%_ref.i.i = getelementptr inbounds %class.D, %class.D* %0, i64 0, i32 0, i32 0, i32 1, i32 0, i32 0
- %1 = load i32* %_ref.i.i, align 4
+ %1 = load i32, i32* %_ref.i.i, align 4
%tobool.i.i = icmp eq i32 %1, 0
br i1 %tobool.i.i, label %_ZN1BI1DED2Ev.exit, label %if.then.i.i
@@ -147,9 +147,9 @@ declare hidden void @__clang_call_terminate()
define void @_ZN1BI1DED2Ev(%class.B* nocapture readonly %this) unnamed_addr uwtable align 2 {
entry:
%m_ptr = getelementptr inbounds %class.B, %class.B* %this, i64 0, i32 0
- %0 = load %class.D** %m_ptr, align 8
+ %0 = load %class.D*, %class.D** %m_ptr, align 8
%_ref.i = getelementptr inbounds %class.D, %class.D* %0, i64 0, i32 0, i32 0, i32 1, i32 0, i32 0
- %1 = load i32* %_ref.i, align 4
+ %1 = load i32, i32* %_ref.i, align 4
%tobool.i = icmp eq i32 %1, 0
br i1 %tobool.i, label %_ZN1AI1CE5derefEv.exit, label %if.then.i
@@ -167,7 +167,7 @@ _ZN1AI1CE5derefEv.exit: ; preds = %entry, %if.then.i
define void @_ZN1AI1CE5derefEv(%class.A* nocapture readonly %this) nounwind uwtable align 2 {
entry:
%_ref = getelementptr inbounds %class.A, %class.A* %this, i64 0, i32 0
- %0 = load i32* %_ref, align 4
+ %0 = load i32, i32* %_ref, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %if.then
@@ -187,7 +187,7 @@ entry:
%m_ptr = getelementptr inbounds %class.B, %class.B* %this, i64 0, i32 0
store %class.D* %p1, %class.D** %m_ptr, align 8
%_ref.i = getelementptr inbounds %class.D, %class.D* %p1, i64 0, i32 0, i32 0, i32 1, i32 0, i32 0
- %0 = load i32* %_ref.i, align 4
+ %0 = load i32, i32* %_ref.i, align 4
%inc.i = add nsw i32 %0, 1
store i32 %inc.i, i32* %_ref.i, align 4
ret void
@@ -196,7 +196,7 @@ entry:
define void @_ZN1AI1CE3refEv(%class.A* nocapture %this) nounwind uwtable align 2 {
entry:
%_ref = getelementptr inbounds %class.A, %class.A* %this, i64 0, i32 0
- %0 = load i32* %_ref, align 4
+ %0 = load i32, i32* %_ref, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %_ref, align 4
ret void
diff --git a/llvm/test/Transforms/JumpThreading/lvi-load.ll b/llvm/test/Transforms/JumpThreading/lvi-load.ll
index d2c4fa4b116..8c993aedfc3 100644
--- a/llvm/test/Transforms/JumpThreading/lvi-load.ll
+++ b/llvm/test/Transforms/JumpThreading/lvi-load.ll
@@ -17,7 +17,7 @@ target triple = "x86_64-apple-darwin10.4"
define zeroext i8 @_Z3fooPN4llvm5ValueE(%"struct.llvm::Value"* %V) ssp {
entry:
%0 = getelementptr inbounds %"struct.llvm::Value", %"struct.llvm::Value"* %V, i64 0, i32 1 ; <i8*> [#uses=1]
- %1 = load i8* %0, align 8 ; <i8> [#uses=2]
+ %1 = load i8, i8* %0, align 8 ; <i8> [#uses=2]
%2 = icmp ugt i8 %1, 20 ; <i1> [#uses=1]
br i1 %2, label %bb.i, label %bb2
diff --git a/llvm/test/Transforms/JumpThreading/or-undef.ll b/llvm/test/Transforms/JumpThreading/or-undef.ll
index 6311b6df437..b55bdddc196 100644
--- a/llvm/test/Transforms/JumpThreading/or-undef.ll
+++ b/llvm/test/Transforms/JumpThreading/or-undef.ll
@@ -32,13 +32,13 @@ bb2: ; preds = %bb1, %bb, %entry
br i1 %tmp7, label %bb7, label %bb5
bb5: ; preds = %bb2
- %tmp8 = load i8** %argv.0, align 8 ; <i8*> [#uses=1]
+ %tmp8 = load i8*, i8** %argv.0, align 8 ; <i8*> [#uses=1]
%tmp9 = icmp eq i8* %tmp8, null ; <i1> [#uses=1]
br i1 %tmp9, label %bb7, label %bb6
bb6: ; preds = %bb5
- %tmp10 = load i8** %argv.0, align 8 ; <i8*> [#uses=1]
- %tmp11 = load i8* %tmp10, align 1 ; <i8> [#uses=1]
+ %tmp10 = load i8*, i8** %argv.0, align 8 ; <i8*> [#uses=1]
+ %tmp11 = load i8, i8* %tmp10, align 1 ; <i8> [#uses=1]
%tmp12 = icmp eq i8 %tmp11, 0 ; <i1> [#uses=1]
br i1 %tmp12, label %bb7, label %bb8
@@ -47,7 +47,7 @@ bb7: ; preds = %bb6, %bb5, %bb2
br label %bb9
bb8: ; preds = %bb6
- %tmp13 = load i8** %argv.0, align 8 ; <i8*> [#uses=1]
+ %tmp13 = load i8*, i8** %argv.0, align 8 ; <i8*> [#uses=1]
%tmp14 = call i64 @f5(i8* %tmp13) nounwind ; <i64> [#uses=0]
br label %bb9
diff --git a/llvm/test/Transforms/JumpThreading/phi-eq.ll b/llvm/test/Transforms/JumpThreading/phi-eq.ll
index 78351ad76ad..f158d7e0dac 100644
--- a/llvm/test/Transforms/JumpThreading/phi-eq.ll
+++ b/llvm/test/Transforms/JumpThreading/phi-eq.ll
@@ -98,33 +98,33 @@ sw.default.i5: ; preds = %get_filter_list.exi
get_filter_list.exit6: ; preds = %sw.bb3.i4, %sw.bb2.i3, %sw.bb1.i2, %sw.bb.i1
%1 = phi %struct._GList** [ @display_edited_filters, %sw.bb3.i4 ], [ @capture_edited_filters, %sw.bb2.i3 ], [ @display_filters, %sw.bb1.i2 ], [ @capture_filters, %sw.bb.i1 ]
; CHECK: %2 = load
- %2 = load %struct._GList** %1, align 8
+ %2 = load %struct._GList*, %struct._GList** %1, align 8
; We should have jump-threading insert an additional load here for the value
; coming out of the first switch, which is picked up by a subsequent phi
-; CHECK: %.pr = load %struct._GList** %0
+; CHECK: %.pr = load %struct._GList*, %struct._GList** %0
; CHECK-NEXT: br label %while.cond
br label %while.cond
; CHECK: while.cond
while.cond: ; preds = %while.body, %get_filter_list.exit6
; CHECK: {{= phi .*%.pr}}
- %3 = load %struct._GList** %0, align 8
+ %3 = load %struct._GList*, %struct._GList** %0, align 8
; CHECK: tobool
%tobool = icmp ne %struct._GList* %3, null
br i1 %tobool, label %while.body, label %while.end
while.body: ; preds = %while.cond
- %4 = load %struct._GList** %0, align 8
- %5 = load %struct._GList** %0, align 8
+ %4 = load %struct._GList*, %struct._GList** %0, align 8
+ %5 = load %struct._GList*, %struct._GList** %0, align 8
%call2 = call %struct._GList* @g_list_first(%struct._GList* %5)
%data.i = getelementptr inbounds %struct._GList, %struct._GList* %call2, i32 0, i32 0
- %6 = load i8** %data.i, align 8
+ %6 = load i8*, i8** %data.i, align 8
%7 = bitcast i8* %6 to %struct.filter_def*
%name.i = getelementptr inbounds %struct.filter_def, %struct.filter_def* %7, i32 0, i32 0
- %8 = load i8** %name.i, align 8
+ %8 = load i8*, i8** %name.i, align 8
call void @g_free(i8* %8) nounwind
%strval.i = getelementptr inbounds %struct.filter_def, %struct.filter_def* %7, i32 0, i32 1
- %9 = load i8** %strval.i, align 8
+ %9 = load i8*, i8** %strval.i, align 8
call void @g_free(i8* %9) nounwind
%10 = bitcast %struct.filter_def* %7 to i8*
call void @g_free(i8* %10) nounwind
@@ -136,7 +136,7 @@ while.end: ; preds = %while.cond
br label %do.body4
do.body4: ; preds = %while.end
- %11 = load %struct._GList** %0, align 8
+ %11 = load %struct._GList*, %struct._GList** %0, align 8
%call5 = call i32 @g_list_length(%struct._GList* %11)
%cmp6 = icmp eq i32 %call5, 0
br i1 %cmp6, label %if.then7, label %if.else8
@@ -161,13 +161,13 @@ while.cond11: ; preds = %cond.end, %do.end10
while.body13: ; preds = %while.cond11
%data = getelementptr inbounds %struct._GList, %struct._GList* %cond10, i32 0, i32 0
- %12 = load i8** %data, align 8
+ %12 = load i8*, i8** %data, align 8
%13 = bitcast i8* %12 to %struct.filter_def*
- %14 = load %struct._GList** %0, align 8
+ %14 = load %struct._GList*, %struct._GList** %0, align 8
%name = getelementptr inbounds %struct.filter_def, %struct.filter_def* %13, i32 0, i32 0
- %15 = load i8** %name, align 8
+ %15 = load i8*, i8** %name, align 8
%strval = getelementptr inbounds %struct.filter_def, %struct.filter_def* %13, i32 0, i32 1
- %16 = load i8** %strval, align 8
+ %16 = load i8*, i8** %strval, align 8
%call.i7 = call noalias i8* @g_malloc(i64 16) nounwind
%17 = bitcast i8* %call.i7 to %struct.filter_def*
%call1.i = call noalias i8* @g_strdup(i8* %15) nounwind
@@ -184,7 +184,7 @@ while.body13: ; preds = %while.cond11
cond.true: ; preds = %while.body13
%next = getelementptr inbounds %struct._GList, %struct._GList* %cond10, i32 0, i32 1
- %19 = load %struct._GList** %next, align 8
+ %19 = load %struct._GList*, %struct._GList** %next, align 8
br label %cond.end
cond.false: ; preds = %while.body13
diff --git a/llvm/test/Transforms/JumpThreading/select.ll b/llvm/test/Transforms/JumpThreading/select.ll
index 545e86c082f..d0df7725f72 100644
--- a/llvm/test/Transforms/JumpThreading/select.ll
+++ b/llvm/test/Transforms/JumpThreading/select.ll
@@ -135,7 +135,7 @@ L4:
; CHECK: icmp
define void @test_switch_default(i32* nocapture %status) nounwind {
entry:
- %0 = load i32* %status, align 4
+ %0 = load i32, i32* %status, align 4
switch i32 %0, label %L2 [
i32 5061, label %L1
i32 0, label %L2
@@ -146,7 +146,7 @@ L1:
br label %L2
L2:
- %1 = load i32* %status, align 4
+ %1 = load i32, i32* %status, align 4
%cmp57.i = icmp eq i32 %1, 0
br i1 %cmp57.i, label %L3, label %L4
diff --git a/llvm/test/Transforms/JumpThreading/thread-loads.ll b/llvm/test/Transforms/JumpThreading/thread-loads.ll
index 4351f991810..008eac73210 100644
--- a/llvm/test/Transforms/JumpThreading/thread-loads.ll
+++ b/llvm/test/Transforms/JumpThreading/thread-loads.ll
@@ -21,7 +21,7 @@ bb: ; preds = %entry
bb1: ; preds = %entry, %bb
%res.0 = phi i32 [ 1, %bb ], [ 0, %entry ] ; <i32> [#uses=2]
- %2 = load i32* %P, align 4 ; <i32> [#uses=1]
+ %2 = load i32, i32* %P, align 4 ; <i32> [#uses=1]
%3 = icmp sgt i32 %2, 36 ; <i1> [#uses=1]
br i1 %3, label %bb3, label %bb2
@@ -60,7 +60,7 @@ bb: ; preds = %entry
bb1: ; preds = %entry, %bb
%res.0 = phi i32 [ 1, %bb ], [ 0, %entry ]
- %2 = load i32* %P, align 4, !tbaa !0
+ %2 = load i32, i32* %P, align 4, !tbaa !0
%3 = icmp sgt i32 %2, 36
br i1 %3, label %bb3, label %bb2
@@ -83,16 +83,16 @@ define i32 @test3(i8** %x, i1 %f) {
; CHECK-LABEL: @test3(
entry:
%0 = bitcast i8** %x to i32**
- %1 = load i32** %0, align 8
+ %1 = load i32*, i32** %0, align 8
br i1 %f, label %if.end57, label %if.then56
-; CHECK: %[[LOAD:.*]] = load i32**
+; CHECK: %[[LOAD:.*]] = load i32*, i32**
; CHECK: %[[CAST:.*]] = bitcast i32* %[[LOAD]] to i8*
if.then56:
br label %if.end57
if.end57:
- %2 = load i8** %x, align 8
+ %2 = load i8*, i8** %x, align 8
%tobool59 = icmp eq i8* %2, null
br i1 %tobool59, label %return, label %if.then60
; CHECK: %[[PHI:.*]] = phi i8* [ %[[CAST]], %[[PRED:[^ ]+]] ], [ %[[CAST]], %[[PRED]] ]
diff --git a/llvm/test/Transforms/LCSSA/2006-06-03-IncorrectIDFPhis.ll b/llvm/test/Transforms/LCSSA/2006-06-03-IncorrectIDFPhis.ll
index 1e4661164c1..a6abfa5f3c4 100644
--- a/llvm/test/Transforms/LCSSA/2006-06-03-IncorrectIDFPhis.ll
+++ b/llvm/test/Transforms/LCSSA/2006-06-03-IncorrectIDFPhis.ll
@@ -13,7 +13,7 @@ then: ; preds = %no_exit
%tmp.20 = getelementptr %struct.SetJmpMapEntry, %struct.SetJmpMapEntry* %SJE.0.0, i32 0, i32 1 ; <i32*> [#uses=0]
ret void
endif: ; preds = %no_exit
- %tmp.24 = load %struct.SetJmpMapEntry** null ; <%struct.SetJmpMapEntry*> [#uses=1]
+ %tmp.24 = load %struct.SetJmpMapEntry*, %struct.SetJmpMapEntry** null ; <%struct.SetJmpMapEntry*> [#uses=1]
br i1 false, label %UnifiedReturnBlock, label %no_exit
UnifiedReturnBlock: ; preds = %endif, %entry
ret void
diff --git a/llvm/test/Transforms/LCSSA/2006-07-09-NoDominator.ll b/llvm/test/Transforms/LCSSA/2006-07-09-NoDominator.ll
index bffd3deb605..bc3d150fbdf 100644
--- a/llvm/test/Transforms/LCSSA/2006-07-09-NoDominator.ll
+++ b/llvm/test/Transforms/LCSSA/2006-07-09-NoDominator.ll
@@ -14,7 +14,7 @@ then: ; preds = %no_exit
%tmp.21 = getelementptr %struct.SetJmpMapEntry, %struct.SetJmpMapEntry* %SJE.0, i32 0, i32 1 ; <i32*> [#uses=0]
br label %return
endif: ; preds = %no_exit
- %tmp.25 = load %struct.SetJmpMapEntry** null ; <%struct.SetJmpMapEntry*> [#uses=1]
+ %tmp.25 = load %struct.SetJmpMapEntry*, %struct.SetJmpMapEntry** null ; <%struct.SetJmpMapEntry*> [#uses=1]
br label %loopentry
loopexit: ; preds = %loopentry
br label %return
diff --git a/llvm/test/Transforms/LCSSA/2007-07-12-LICM-2.ll b/llvm/test/Transforms/LCSSA/2007-07-12-LICM-2.ll
index 2c5815cef33..f5d3f7e2b1c 100644
--- a/llvm/test/Transforms/LCSSA/2007-07-12-LICM-2.ll
+++ b/llvm/test/Transforms/LCSSA/2007-07-12-LICM-2.ll
@@ -4,7 +4,7 @@ entry:
br label %bb7
bb7: ; preds = %bb7, %entry
- %tmp39 = load <4 x float>* null ; <<4 x float>> [#uses=1]
+ %tmp39 = load <4 x float>, <4 x float>* null ; <<4 x float>> [#uses=1]
%tmp40 = fadd <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=1]
%tmp43 = fadd <4 x float> %tmp40, < float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 2.000000e+00 > ; <<4 x float>> [#uses=1]
%tmp46 = fadd <4 x float> %tmp43, < float 3.000000e+00, float 0.000000e+00, float 2.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
diff --git a/llvm/test/Transforms/LCSSA/2007-07-12-LICM-3.ll b/llvm/test/Transforms/LCSSA/2007-07-12-LICM-3.ll
index 7e0d3c63b13..689b6eaff8e 100644
--- a/llvm/test/Transforms/LCSSA/2007-07-12-LICM-3.ll
+++ b/llvm/test/Transforms/LCSSA/2007-07-12-LICM-3.ll
@@ -8,7 +8,7 @@ bb: ; preds = %bb56, %entry
br label %bb7
bb7: ; preds = %bb7, %bb
- %tmp39 = load <4 x float>* null ; <<4 x float>> [#uses=1]
+ %tmp39 = load <4 x float>, <4 x float>* null ; <<4 x float>> [#uses=1]
%tmp40 = fadd <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=1]
%tmp43 = fadd <4 x float> %tmp40, < float 1.000000e+00, float 1.000000e+00, float 0.000000e+00, float 2.000000e+00 > ; <<4 x float>> [#uses=1]
%tmp46 = fadd <4 x float> %tmp43, < float 3.000000e+00, float 0.000000e+00, float 2.000000e+00, float 4.000000e+00 > ; <<4 x float>> [#uses=1]
diff --git a/llvm/test/Transforms/LCSSA/2007-07-12-LICM.ll b/llvm/test/Transforms/LCSSA/2007-07-12-LICM.ll
index 8c07aa2ec35..f1785edc2af 100644
--- a/llvm/test/Transforms/LCSSA/2007-07-12-LICM.ll
+++ b/llvm/test/Transforms/LCSSA/2007-07-12-LICM.ll
@@ -4,7 +4,7 @@ entry:
br label %bb7
bb7: ; preds = %bb7, %entry
- %tmp39 = load <4 x float>* null ; <<4 x float>> [#uses=1]
+ %tmp39 = load <4 x float>, <4 x float>* null ; <<4 x float>> [#uses=1]
%tmp40 = fadd <4 x float> %tmp39, < float 2.000000e+00, float 3.000000e+00, float 1.000000e+00, float 0.000000e+00 > ; <<4 x float>> [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* null
br i1 false, label %bb7, label %bb56
diff --git a/llvm/test/Transforms/LCSSA/unreachable-use.ll b/llvm/test/Transforms/LCSSA/unreachable-use.ll
index 2ea7aeb7edf..c9e456c1747 100644
--- a/llvm/test/Transforms/LCSSA/unreachable-use.ll
+++ b/llvm/test/Transforms/LCSSA/unreachable-use.ll
@@ -4,7 +4,7 @@
; LCSSA doesn't need to transform uses in blocks not reachable
; from the entry block.
-; CHECK: %tmp33 = load i1** %tmp
+; CHECK: %tmp33 = load i1*, i1** %tmp
define fastcc void @dfs() nounwind {
bb:
@@ -21,7 +21,7 @@ bb15:
br label %bb44
bb32:
- %tmp33 = load i1** %tmp, align 8
+ %tmp33 = load i1*, i1** %tmp, align 8
br label %bb45
bb45:
diff --git a/llvm/test/Transforms/LICM/2003-05-02-LoadHoist.ll b/llvm/test/Transforms/LICM/2003-05-02-LoadHoist.ll
index 71d3e789aa0..2f94dff4efd 100644
--- a/llvm/test/Transforms/LICM/2003-05-02-LoadHoist.ll
+++ b/llvm/test/Transforms/LICM/2003-05-02-LoadHoist.ll
@@ -10,12 +10,12 @@
declare void @foo()
define i32 @test(i1 %c) {
- %A = load i32* @X ; <i32> [#uses=1]
+ %A = load i32, i32* @X ; <i32> [#uses=1]
br label %Loop
Loop: ; preds = %Loop, %0
call void @foo( )
;; Should not hoist this load!
- %B = load i32* @X ; <i32> [#uses=1]
+ %B = load i32, i32* @X ; <i32> [#uses=1]
br i1 %c, label %Loop, label %Out
Out: ; preds = %Loop
%C = sub i32 %A, %B ; <i32> [#uses=1]
diff --git a/llvm/test/Transforms/LICM/2004-09-14-AliasAnalysisInvalidate.ll b/llvm/test/Transforms/LICM/2004-09-14-AliasAnalysisInvalidate.ll
index 16f4fed34ec..73862db6981 100644
--- a/llvm/test/Transforms/LICM/2004-09-14-AliasAnalysisInvalidate.ll
+++ b/llvm/test/Transforms/LICM/2004-09-14-AliasAnalysisInvalidate.ll
@@ -7,8 +7,8 @@ define void @test() {
Outer: ; preds = %Next, %0
br label %Inner
Inner: ; preds = %Inner, %Outer
- %tmp.114.i.i.i = load i8** @PL_regcomp_parse ; <i8*> [#uses=1]
- %tmp.115.i.i.i = load i8* %tmp.114.i.i.i ; <i8> [#uses=0]
+ %tmp.114.i.i.i = load i8*, i8** @PL_regcomp_parse ; <i8*> [#uses=1]
+ %tmp.115.i.i.i = load i8, i8* %tmp.114.i.i.i ; <i8> [#uses=0]
store i8* null, i8** @PL_regcomp_parse
br i1 false, label %Inner, label %Next
Next: ; preds = %Inner
diff --git a/llvm/test/Transforms/LICM/2007-05-22-VolatileSink.ll b/llvm/test/Transforms/LICM/2007-05-22-VolatileSink.ll
index a05b6342a11..f5ce86b7245 100644
--- a/llvm/test/Transforms/LICM/2007-05-22-VolatileSink.ll
+++ b/llvm/test/Transforms/LICM/2007-05-22-VolatileSink.ll
@@ -10,7 +10,7 @@ entry:
br label %bb6
bb: ; preds = %bb6
- %tmp2 = load volatile i32* %DataIn ; <i32> [#uses=1]
+ %tmp2 = load volatile i32, i32* %DataIn ; <i32> [#uses=1]
%tmp3 = getelementptr [64 x i32], [64 x i32]* %buffer, i32 0, i32 %i.0 ; <i32*> [#uses=1]
store i32 %tmp2, i32* %tmp3
%tmp5 = add i32 %i.0, 1 ; <i32> [#uses=1]
@@ -27,7 +27,7 @@ bb12: ; preds = %bb22
%tmp14 = mul i32 %j.1, 8 ; <i32> [#uses=1]
%tmp16 = add i32 %tmp14, %i.1 ; <i32> [#uses=1]
%tmp17 = getelementptr [64 x i32], [64 x i32]* %buffer, i32 0, i32 %tmp16 ; <i32*> [#uses=1]
- %tmp18 = load i32* %tmp17 ; <i32> [#uses=1]
+ %tmp18 = load i32, i32* %tmp17 ; <i32> [#uses=1]
store volatile i32 %tmp18, i32* %DataOut
%tmp21 = add i32 %j.1, 1 ; <i32> [#uses=1]
br label %bb22
diff --git a/llvm/test/Transforms/LICM/2007-07-30-AliasSet.ll b/llvm/test/Transforms/LICM/2007-07-30-AliasSet.ll
index 7585fb6ac6f..3e4fbb0a06e 100644
--- a/llvm/test/Transforms/LICM/2007-07-30-AliasSet.ll
+++ b/llvm/test/Transforms/LICM/2007-07-30-AliasSet.ll
@@ -18,7 +18,7 @@ bb63.outer: ; preds = %bb73, %bb28
bb35: ; preds = %cond_next60, %bb63.outer
%window.34 = phi i32 [ %tmp62, %cond_next60 ], [ 0, %bb63.outer ] ; <i32> [#uses=1]
%tmp44 = getelementptr [4 x i32], [4 x i32]* null, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp46 = load i32* %tmp44, align 4 ; <i32> [#uses=0]
+ %tmp46 = load i32, i32* %tmp44, align 4 ; <i32> [#uses=0]
br i1 false, label %cond_true50, label %cond_next60
cond_true50: ; preds = %bb35
diff --git a/llvm/test/Transforms/LICM/2008-07-22-LoadGlobalConstant.ll b/llvm/test/Transforms/LICM/2008-07-22-LoadGlobalConstant.ll
index f6ab77d1206..a715af032a9 100644
--- a/llvm/test/Transforms/LICM/2008-07-22-LoadGlobalConstant.ll
+++ b/llvm/test/Transforms/LICM/2008-07-22-LoadGlobalConstant.ll
@@ -6,7 +6,7 @@ define void @test(i32 %count) {
entry:
br label %forcond
-; CHECK: %tmp3 = load float** @a
+; CHECK: %tmp3 = load float*, float** @a
; CHECK: br label %forcond
forcond:
@@ -19,7 +19,7 @@ forcond:
; CHECK: br i1 %cmp, label %forbody, label %afterfor
forbody:
- %tmp3 = load float** @a
+ %tmp3 = load float*, float** @a
%arrayidx = getelementptr float, float* %tmp3, i32 %i.0
%tmp7 = uitofp i32 %i.0 to float
store float %tmp7, float* %arrayidx
diff --git a/llvm/test/Transforms/LICM/2009-12-10-LICM-Indbr-Crash.ll b/llvm/test/Transforms/LICM/2009-12-10-LICM-Indbr-Crash.ll
index e3cdbb3d2a4..1b3ff5bbb31 100644
--- a/llvm/test/Transforms/LICM/2009-12-10-LICM-Indbr-Crash.ll
+++ b/llvm/test/Transforms/LICM/2009-12-10-LICM-Indbr-Crash.ll
@@ -12,7 +12,7 @@ define void @foo (i8* %v)
indirectbr i8* undef, [label %preheader, label %stuff]
stuff:
- %0 = load i8* undef, align 1
+ %0 = load i8, i8* undef, align 1
br label %loop
return:
diff --git a/llvm/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll b/llvm/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll
index 2bbc6ab0414..b462885e5fb 100644
--- a/llvm/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll
+++ b/llvm/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll
@@ -15,11 +15,11 @@ for.body4.lr.ph:
br label %for.body4
; CHECK: for.body4:
-; CHECK: load volatile i16* @g_39
+; CHECK: load volatile i16, i16* @g_39
for.body4:
%l_612.11 = phi i32* [ undef, %for.body4.lr.ph ], [ %call19, %for.body4 ]
- %tmp7 = load volatile i16* @g_39, align 2
+ %tmp7 = load volatile i16, i16* @g_39, align 2
%call = call i32** @func_108(i32*** undef)
%call19 = call i32* @func_84(i32** %call)
br i1 false, label %for.body4, label %for.cond.loopexit
diff --git a/llvm/test/Transforms/LICM/2011-04-06-PromoteResultOfPromotion.ll b/llvm/test/Transforms/LICM/2011-04-06-PromoteResultOfPromotion.ll
index 5587142dcf7..370491eab98 100644
--- a/llvm/test/Transforms/LICM/2011-04-06-PromoteResultOfPromotion.ll
+++ b/llvm/test/Transforms/LICM/2011-04-06-PromoteResultOfPromotion.ll
@@ -8,7 +8,7 @@ define void @f() nounwind {
; CHECK: entry:
; CHECK: alloca [9 x i16]
-; CHECK: load i32* @g_58
+; CHECK: load i32, i32* @g_58
; CHECK: br label %for.body
entry:
@@ -18,8 +18,8 @@ entry:
for.body: ; preds = %entry, %for.inc
%inc12 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
store i32* @g_58, i32** @g_116, align 8, !tbaa !0
- %tmp2 = load i32** @g_116, align 8, !tbaa !0
- %tmp3 = load i32* %tmp2, !tbaa !4
+ %tmp2 = load i32*, i32** @g_116, align 8, !tbaa !0
+ %tmp3 = load i32, i32* %tmp2, !tbaa !4
%or = or i32 %tmp3, 10
store i32 %or, i32* %tmp2, !tbaa !4
%inc = add nsw i32 %inc12, 1
diff --git a/llvm/test/Transforms/LICM/2011-04-09-RAUW-AST.ll b/llvm/test/Transforms/LICM/2011-04-09-RAUW-AST.ll
index 4285bd19e5f..501191726e5 100644
--- a/llvm/test/Transforms/LICM/2011-04-09-RAUW-AST.ll
+++ b/llvm/test/Transforms/LICM/2011-04-09-RAUW-AST.ll
@@ -6,7 +6,7 @@
define i32 @main() nounwind {
entry:
- %tmp = load i32* @g_3, align 4
+ %tmp = load i32, i32* @g_3, align 4
%tobool = icmp eq i32 %tmp, 0
br i1 %tobool, label %for.cond, label %if.then
@@ -40,7 +40,7 @@ for.inc10: ; preds = %for.cond4
br label %for.cond
for.end13: ; preds = %for.cond
- %tmp14 = load i32* @g_3, align 4
+ %tmp14 = load i32, i32* @g_3, align 4
%call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %tmp14) nounwind
ret i32 0
}
diff --git a/llvm/test/Transforms/LICM/PR21582.ll b/llvm/test/Transforms/LICM/PR21582.ll
index c918d1af013..5664f2e2b6e 100644
--- a/llvm/test/Transforms/LICM/PR21582.ll
+++ b/llvm/test/Transforms/LICM/PR21582.ll
@@ -20,7 +20,7 @@ for.body.preheader: ; preds = %for.cond
for.body: ; preds = %for.body, %for.body.preheader
%g.15 = phi i32 [ undef, %for.body ], [ 0, %for.body.preheader ]
%arrayidx2 = getelementptr inbounds i32, i32* @fn3.i, i64 0
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%call = call i32 @g()
br i1 false, label %for.body, label %for.end.loopexit
diff --git a/llvm/test/Transforms/LICM/atomics.ll b/llvm/test/Transforms/LICM/atomics.ll
index acf605d2dad..4fe197abf5d 100644
--- a/llvm/test/Transforms/LICM/atomics.ll
+++ b/llvm/test/Transforms/LICM/atomics.ll
@@ -7,7 +7,7 @@ entry:
loop:
%i = phi i32 [ %inc, %loop ], [ 0, %entry ]
- %val = load atomic i32* %y unordered, align 4
+ %val = load atomic i32, i32* %y unordered, align 4
%inc = add nsw i32 %i, 1
%exitcond = icmp eq i32 %inc, %val
br i1 %exitcond, label %end, label %loop
@@ -27,7 +27,7 @@ entry:
br label %loop
loop:
- %val = load atomic i32* %y monotonic, align 4
+ %val = load atomic i32, i32* %y monotonic, align 4
%exitcond = icmp ne i32 %val, 0
br i1 %exitcond, label %end, label %loop
@@ -47,15 +47,15 @@ entry:
br label %loop
loop:
- %vala = load atomic i32* %y monotonic, align 4
- %valb = load atomic i32* %x unordered, align 4
+ %vala = load atomic i32, i32* %y monotonic, align 4
+ %valb = load atomic i32, i32* %x unordered, align 4
%exitcond = icmp ne i32 %vala, %valb
br i1 %exitcond, label %end, label %loop
end:
ret i32 %vala
; CHECK-LABEL: define i32 @test3(
-; CHECK: load atomic i32* %x unordered
+; CHECK: load atomic i32, i32* %x unordered
; CHECK-NEXT: br label %loop
}
@@ -66,7 +66,7 @@ entry:
br label %loop
loop:
- %vala = load atomic i32* %y monotonic, align 4
+ %vala = load atomic i32, i32* %y monotonic, align 4
store atomic i32 %vala, i32* %x unordered, align 4
%exitcond = icmp ne i32 %vala, 0
br i1 %exitcond, label %end, label %loop
@@ -74,6 +74,6 @@ loop:
end:
ret i32 %vala
; CHECK-LABEL: define i32 @test4(
-; CHECK: load atomic i32* %y monotonic
+; CHECK: load atomic i32, i32* %y monotonic
; CHECK-NEXT: store atomic
}
diff --git a/llvm/test/Transforms/LICM/constexpr.ll b/llvm/test/Transforms/LICM/constexpr.ll
index f78878716aa..506721f25f8 100644
--- a/llvm/test/Transforms/LICM/constexpr.ll
+++ b/llvm/test/Transforms/LICM/constexpr.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-pc-windows-msvc"
; CHECK-LABEL: @bar
; CHECK: entry:
-; CHECK: load i64* bitcast (i32** @in to i64*)
+; CHECK: load i64, i64* bitcast (i32** @in to i64*)
; CHECK: do.body:
; CHECK-NOT: load
@@ -24,11 +24,11 @@ do.body: ; preds = %l2, %entry
br i1 %c, label %l1, label %do.body.l2_crit_edge
do.body.l2_crit_edge: ; preds = %do.body
- %inval.pre = load i32** @in, align 8
+ %inval.pre = load i32*, i32** @in, align 8
br label %l2
l1: ; preds = %do.body
- %v1 = load i64* bitcast (i32** @in to i64*), align 8
+ %v1 = load i64, i64* bitcast (i32** @in to i64*), align 8
store i64 %v1, i64* bitcast (i32** @out to i64*), align 8
%0 = inttoptr i64 %v1 to i32*
br label %l2
diff --git a/llvm/test/Transforms/LICM/crash.ll b/llvm/test/Transforms/LICM/crash.ll
index 8dfa5869a20..7fa41157338 100644
--- a/llvm/test/Transforms/LICM/crash.ll
+++ b/llvm/test/Transforms/LICM/crash.ll
@@ -12,7 +12,7 @@ entry:
for.body: ; preds = %for.cond, %bb.nph
store i8 0, i8* @g_12, align 1
- %tmp6 = load i8* @g_12, align 1
+ %tmp6 = load i8, i8* @g_12, align 1
br label %for.cond
for.cond: ; preds = %for.body
@@ -34,7 +34,7 @@ entry:
br label %for.body
for.body: ; preds = %for.body, %entry
- %tmp7 = load i32* @g_8, align 4
+ %tmp7 = load i32, i32* @g_8, align 4
store i32* @g_8, i32** undef, align 16
store i32 undef, i32* @g_8, align 4
br label %for.body
@@ -48,7 +48,7 @@ entry:
for.cond: ; preds = %for.cond, %entry
%tmp1 = getelementptr { i32*}, { i32*}* %__first, i32 0, i32 0
- %tmp2 = load i32** %tmp1, align 4
+ %tmp2 = load i32*, i32** %tmp1, align 4
%call = tail call i32* @test3helper(i32* %tmp2)
%tmp3 = getelementptr { i32*}, { i32*}* %__first, i32 0, i32 0
store i32* %call, i32** %tmp3, align 4
diff --git a/llvm/test/Transforms/LICM/hoist-bitcast-load.ll b/llvm/test/Transforms/LICM/hoist-bitcast-load.ll
index 4a5874f278d..47c474c17dd 100644
--- a/llvm/test/Transforms/LICM/hoist-bitcast-load.ll
+++ b/llvm/test/Transforms/LICM/hoist-bitcast-load.ll
@@ -4,7 +4,7 @@ target triple = "x86_64-unknown-linux-gnu"
; Make sure the basic alloca pointer hoisting works:
; CHECK-LABEL: @test1
-; CHECK: load i32* %c, align 4
+; CHECK: load i32, i32* %c, align 4
; CHECK: for.body:
; Function Attrs: nounwind uwtable
@@ -17,14 +17,14 @@ entry:
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
- %1 = load i32* %c, align 4
+ %1 = load i32, i32* %c, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
br label %for.inc
@@ -42,7 +42,7 @@ for.end: ; preds = %for.inc, %entry
; Make sure the basic alloca pointer hoisting works through a bitcast to a
; pointer to a smaller type:
; CHECK-LABEL: @test2
-; CHECK: load i32* %c, align 4
+; CHECK: load i32, i32* %c, align 4
; CHECK: for.body:
; Function Attrs: nounwind uwtable
@@ -56,14 +56,14 @@ entry:
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
- %1 = load i32* %c, align 4
+ %1 = load i32, i32* %c, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
br label %for.inc
@@ -80,7 +80,7 @@ for.end: ; preds = %for.inc, %entry
; Make sure the basic alloca pointer hoisting works through an addrspacecast
; CHECK-LABEL: @test2_addrspacecast
-; CHECK: load i32 addrspace(1)* %c, align 4
+; CHECK: load i32, i32 addrspace(1)* %c, align 4
; CHECK: for.body:
; Function Attrs: nounwind uwtable
@@ -94,14 +94,14 @@ entry:
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %indvars.iv
- %0 = load i32 addrspace(1)* %arrayidx, align 4
+ %0 = load i32, i32 addrspace(1)* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
- %1 = load i32 addrspace(1)* %c, align 4
+ %1 = load i32, i32 addrspace(1)* %c, align 4
%arrayidx3 = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %indvars.iv
- %2 = load i32 addrspace(1)* %arrayidx3, align 4
+ %2 = load i32, i32 addrspace(1)* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32 addrspace(1)* %arrayidx, align 4
br label %for.inc
@@ -119,7 +119,7 @@ for.end: ; preds = %for.inc, %entry
; Make sure the basic alloca pointer hoisting works through a bitcast to a
; pointer to a smaller type (where the bitcast also needs to be hoisted):
; CHECK-LABEL: @test3
-; CHECK: load i32* %c, align 4
+; CHECK: load i32, i32* %c, align 4
; CHECK: for.body:
; Function Attrs: nounwind uwtable
@@ -132,15 +132,15 @@ entry:
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
%c = bitcast i64* %ca to i32*
- %1 = load i32* %c, align 4
+ %1 = load i32, i32* %c, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
br label %for.inc
@@ -159,7 +159,7 @@ for.end: ; preds = %for.inc, %entry
; to a pointer to a larger type:
; CHECK-LABEL: @test4
; CHECK: for.body:
-; CHECK: load i32* %c, align 4
+; CHECK: load i32, i32* %c, align 4
; Function Attrs: nounwind uwtable
define void @test4(i32* nocapture %a, i32* nocapture readonly %b, i32 %n) #0 {
@@ -172,14 +172,14 @@ entry:
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
- %1 = load i32* %c, align 4
+ %1 = load i32, i32* %c, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
br label %for.inc
@@ -197,7 +197,7 @@ for.end: ; preds = %for.inc, %entry
; Don't crash on bitcasts to unsized types.
; CHECK-LABEL: @test5
; CHECK: for.body:
-; CHECK: load i32* %c, align 4
+; CHECK: load i32, i32* %c, align 4
%atype = type opaque
@@ -213,14 +213,14 @@ entry:
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
- %1 = load i32* %c, align 4
+ %1 = load i32, i32* %c, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
br label %for.inc
diff --git a/llvm/test/Transforms/LICM/hoist-deref-load.ll b/llvm/test/Transforms/LICM/hoist-deref-load.ll
index fc4efb2385a..4d0ae452921 100644
--- a/llvm/test/Transforms/LICM/hoist-deref-load.ll
+++ b/llvm/test/Transforms/LICM/hoist-deref-load.ll
@@ -12,7 +12,7 @@ target triple = "x86_64-unknown-linux-gnu"
; because the dereferenceable attribute is on %c.
; CHECK-LABEL: @test1
-; CHECK: load i32* %c, align 4
+; CHECK: load i32, i32* %c, align 4
; CHECK: for.body:
define void @test1(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly nonnull dereferenceable(4) %c, i32 %n) #0 {
@@ -23,14 +23,14 @@ entry:
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
- %1 = load i32* %c, align 4
+ %1 = load i32, i32* %c, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
br label %for.inc
@@ -50,7 +50,7 @@ for.end: ; preds = %for.inc, %entry
; CHECK-LABEL: @test2
; CHECK: if.then:
-; CHECK: load i32* %c, align 4
+; CHECK: load i32, i32* %c, align 4
define void @test2(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly nonnull %c, i32 %n) #0 {
entry:
@@ -60,14 +60,14 @@ entry:
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
- %1 = load i32* %c, align 4
+ %1 = load i32, i32* %c, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
br label %for.inc
@@ -92,7 +92,7 @@ for.end: ; preds = %for.inc, %entry
; because the dereferenceable attribute is on %c.
; CHECK-LABEL: @test3
-; CHECK: load i32* %c2, align 4
+; CHECK: load i32, i32* %c2, align 4
; CHECK: for.body:
define void @test3(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly dereferenceable(12) %c, i32 %n) #0 {
@@ -103,15 +103,15 @@ entry:
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
%c2 = getelementptr inbounds i32, i32* %c, i64 2
- %1 = load i32* %c2, align 4
+ %1 = load i32, i32* %c2, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
br label %for.inc
@@ -131,7 +131,7 @@ for.end: ; preds = %for.inc, %entry
; CHECK-LABEL: @test4
; CHECK: if.then:
-; CHECK: load i32* %c2, align 4
+; CHECK: load i32, i32* %c2, align 4
define void @test4(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly dereferenceable(11) %c, i32 %n) #0 {
entry:
@@ -141,15 +141,15 @@ entry:
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
%c2 = getelementptr inbounds i32, i32* %c, i64 2
- %1 = load i32* %c2, align 4
+ %1 = load i32, i32* %c2, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
br label %for.inc
diff --git a/llvm/test/Transforms/LICM/hoist-invariant-load.ll b/llvm/test/Transforms/LICM/hoist-invariant-load.ll
index 59904bad4ae..5d96896a891 100644
--- a/llvm/test/Transforms/LICM/hoist-invariant-load.ll
+++ b/llvm/test/Transforms/LICM/hoist-invariant-load.ll
@@ -15,18 +15,18 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp ult i32 %0, 10000
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i8** %x.addr, align 8
- %2 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", !invariant.load !0
+ %1 = load i8*, i8** %x.addr, align 8
+ %2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", !invariant.load !0
%call = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %1, i8* %2)
br label %for.inc
for.inc: ; preds = %for.body
- %3 = load i32* %i, align 4
+ %3 = load i32, i32* %i, align 4
%inc = add i32 %3, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
diff --git a/llvm/test/Transforms/LICM/hoisting.ll b/llvm/test/Transforms/LICM/hoisting.ll
index b4d297ac27b..8609407cc59 100644
--- a/llvm/test/Transforms/LICM/hoisting.ll
+++ b/llvm/test/Transforms/LICM/hoisting.ll
@@ -8,7 +8,7 @@ declare void @foo()
; potentially trapping instructions when they are not guaranteed to execute.
define i32 @test1(i1 %c) {
; CHECK-LABEL: @test1(
- %A = load i32* @X ; <i32> [#uses=2]
+ %A = load i32, i32* @X ; <i32> [#uses=2]
br label %Loop
Loop: ; preds = %LoopTail, %0
call void @foo( )
@@ -35,9 +35,9 @@ declare void @foo2(i32) nounwind
;; It is ok and desirable to hoist this potentially trapping instruction.
define i32 @test2(i1 %c) {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: load i32* @X
+; CHECK-NEXT: load i32, i32* @X
; CHECK-NEXT: %B = sdiv i32 4, %A
- %A = load i32* @X ; <i32> [#uses=2]
+ %A = load i32, i32* @X ; <i32> [#uses=2]
br label %Loop
Loop:
;; Should have hoisted this div!
@@ -54,7 +54,7 @@ Out: ; preds = %Loop
define i32 @test3(i1 %c) {
; CHECK-LABEL: define i32 @test3(
; CHECK: call void @foo2(i32 6)
- %A = load i32* @X ; <i32> [#uses=2]
+ %A = load i32, i32* @X ; <i32> [#uses=2]
br label %Loop
Loop:
%B = add i32 4, 2 ; <i32> [#uses=2]
diff --git a/llvm/test/Transforms/LICM/lcssa-ssa-promoter.ll b/llvm/test/Transforms/LICM/lcssa-ssa-promoter.ll
index 5df3ef12181..b0cae8772f3 100644
--- a/llvm/test/Transforms/LICM/lcssa-ssa-promoter.ll
+++ b/llvm/test/Transforms/LICM/lcssa-ssa-promoter.ll
@@ -44,7 +44,7 @@ inner.body.rhs:
; CHECK-NEXT: br label %inner.latch
inner.latch:
- %y_val = load i32* @y, align 4
+ %y_val = load i32, i32* @y, align 4
%icmp = icmp eq i32 %y_val, 0
br i1 %icmp, label %inner.exit, label %inner.header
; CHECK: inner.latch:
diff --git a/llvm/test/Transforms/LICM/scalar-promote-memmodel.ll b/llvm/test/Transforms/LICM/scalar-promote-memmodel.ll
index 23d70f51351..3603c25ba23 100644
--- a/llvm/test/Transforms/LICM/scalar-promote-memmodel.ll
+++ b/llvm/test/Transforms/LICM/scalar-promote-memmodel.ll
@@ -19,12 +19,12 @@ for.body: ; preds = %for.cond
br i1 %tobool, label %for.inc, label %if.then
if.then: ; preds = %for.body
- %tmp3 = load i32* @g, align 4
+ %tmp3 = load i32, i32* @g, align 4
%inc = add nsw i32 %tmp3, 1
store i32 %inc, i32* @g, align 4
br label %for.inc
-; CHECK: load i32*
+; CHECK: load i32, i32*
; CHECK-NEXT: add
; CHECK-NEXT: store i32
diff --git a/llvm/test/Transforms/LICM/scalar_promote.ll b/llvm/test/Transforms/LICM/scalar_promote.ll
index a49b98065ac..584d69a0886 100644
--- a/llvm/test/Transforms/LICM/scalar_promote.ll
+++ b/llvm/test/Transforms/LICM/scalar_promote.ll
@@ -8,13 +8,13 @@ Entry:
br label %Loop
; CHECK-LABEL: @test1(
; CHECK: Entry:
-; CHECK-NEXT: load i32* @X
+; CHECK-NEXT: load i32, i32* @X
; CHECK-NEXT: br label %Loop
Loop: ; preds = %Loop, %0
%j = phi i32 [ 0, %Entry ], [ %Next, %Loop ] ; <i32> [#uses=1]
- %x = load i32* @X ; <i32> [#uses=1]
+ %x = load i32, i32* @X ; <i32> [#uses=1]
%x2 = add i32 %x, 1 ; <i32> [#uses=1]
store i32 %x2, i32* @X
%Next = add i32 %j, 1 ; <i32> [#uses=2]
@@ -35,12 +35,12 @@ Entry:
br label %Loop
; CHECK-LABEL: @test2(
; CHECK: Entry:
-; CHECK-NEXT: %.promoted = load i32* getelementptr inbounds (i32* @X, i64 1)
+; CHECK-NEXT: %.promoted = load i32, i32* getelementptr inbounds (i32* @X, i64 1)
; CHECK-NEXT: br label %Loop
Loop: ; preds = %Loop, %0
%X1 = getelementptr i32, i32* @X, i64 1 ; <i32*> [#uses=1]
- %A = load i32* %X1 ; <i32> [#uses=1]
+ %A = load i32, i32* %X1 ; <i32> [#uses=1]
%V = add i32 %A, 1 ; <i32> [#uses=1]
%X2 = getelementptr i32, i32* @X, i64 1 ; <i32*> [#uses=1]
store i32 %V, i32* %X2
@@ -61,7 +61,7 @@ define void @test3(i32 %i) {
br label %Loop
Loop:
; Should not promote this to a register
- %x = load volatile i32* @X
+ %x = load volatile i32, i32* @X
%x2 = add i32 %x, 1
store i32 %x2, i32* @X
br i1 true, label %Out, label %Loop
@@ -88,7 +88,7 @@ loop:
subloop:
%count = phi i8 [ 0, %loop ], [ %nextcount, %subloop ]
- %offsetx2 = load i8** %handle2
+ %offsetx2 = load i8*, i8** %handle2
store i8 %n, i8* %offsetx2
%newoffsetx2 = getelementptr i8, i8* %offsetx2, i64 -1
store i8* %newoffsetx2, i8** %handle2
@@ -105,14 +105,14 @@ subloop:
; CHECK: br i1
innerexit:
- %offsetx1 = load i8** %handle1
- %val = load i8* %offsetx1
+ %offsetx1 = load i8*, i8** %handle1
+ %val = load i8, i8* %offsetx1
%cond = icmp eq i8 %val, %n
br i1 %cond, label %exit, label %loop
; Should not have promoted offsetx1 loads.
; CHECK: innerexit:
-; CHECK: %val = load i8* %offsetx1
+; CHECK: %val = load i8, i8* %offsetx1
; CHECK: %cond = icmp eq i8 %val, %n
; CHECK: br i1 %cond, label %exit, label %loop
@@ -125,13 +125,13 @@ Entry:
br label %Loop
; CHECK-LABEL: @test5(
; CHECK: Entry:
-; CHECK-NEXT: load i32* @X
+; CHECK-NEXT: load i32, i32* @X
; CHECK-NEXT: br label %Loop
Loop: ; preds = %Loop, %0
%j = phi i32 [ 0, %Entry ], [ %Next, %Loop ] ; <i32> [#uses=1]
- %x = load i32* @X ; <i32> [#uses=1]
+ %x = load i32, i32* @X ; <i32> [#uses=1]
%x2 = add i32 %x, 1 ; <i32> [#uses=1]
store i32 %x2, i32* @X
@@ -166,7 +166,7 @@ for.body: ; preds = %for.body.lr.ph, %fo
%idxprom = sext i32 %storemerge2 to i64
%arrayidx = getelementptr inbounds float, float* %a, i64 %idxprom
store float 0.000000e+00, float* %arrayidx, align 4, !tbaa !3
- %0 = load i32* %gi, align 4, !tbaa !0
+ %0 = load i32, i32* %gi, align 4, !tbaa !0
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %gi, align 4, !tbaa !0
%cmp = icmp slt i32 %inc, %n
@@ -179,7 +179,7 @@ for.end: ; preds = %for.cond.for.end_cr
ret void
; CHECK: for.body.lr.ph:
-; CHECK-NEXT: %gi.promoted = load i32* %gi, align 4, !tbaa !0
+; CHECK-NEXT: %gi.promoted = load i32, i32* %gi, align 4, !tbaa !0
; CHECK: for.cond.for.end_crit_edge:
; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i32 [ %inc
; CHECK-NEXT: store i32 %[[LCSSAPHI]], i32* %gi, align 4, !tbaa !0
diff --git a/llvm/test/Transforms/LICM/sinking.ll b/llvm/test/Transforms/LICM/sinking.ll
index a3df8192528..02bf5846a64 100644
--- a/llvm/test/Transforms/LICM/sinking.ll
+++ b/llvm/test/Transforms/LICM/sinking.ll
@@ -93,7 +93,7 @@ Entry:
br label %Loop
Loop: ; preds = %Loop, %Entry
%N_addr.0.pn = phi i32 [ %dec, %Loop ], [ %N, %Entry ]
- %tmp.6 = load i32* @X ; <i32> [#uses=1]
+ %tmp.6 = load i32, i32* @X ; <i32> [#uses=1]
%dec = add i32 %N_addr.0.pn, -1 ; <i32> [#uses=1]
%tmp.1 = icmp ne i32 %N_addr.0.pn, 1 ; <i1> [#uses=1]
br i1 %tmp.1, label %Loop, label %Out
@@ -101,7 +101,7 @@ Out: ; preds = %Loop
ret i32 %tmp.6
; CHECK-LABEL: @test5(
; CHECK: Out:
-; CHECK-NEXT: %tmp.6.le = load i32* @X
+; CHECK-NEXT: %tmp.6.le = load i32, i32* @X
; CHECK-NEXT: ret i32 %tmp.6.le
}
@@ -119,14 +119,14 @@ define i32 @test6() {
br label %Loop
Loop:
%dead = getelementptr %Ty, %Ty* @X2, i64 0, i32 0
- %sunk2 = load i32* %dead
+ %sunk2 = load i32, i32* %dead
br i1 false, label %Loop, label %Out
Out: ; preds = %Loop
ret i32 %sunk2
; CHECK-LABEL: @test6(
; CHECK: Out:
; CHECK-NEXT: %dead.le = getelementptr %Ty, %Ty* @X2, i64 0, i32 0
-; CHECK-NEXT: %sunk2.le = load i32* %dead.le
+; CHECK-NEXT: %sunk2.le = load i32, i32* %dead.le
; CHECK-NEXT: ret i32 %sunk2.le
}
@@ -174,7 +174,7 @@ Entry:
Loop: ; preds = %Cont, %Entry
br i1 %C1, label %Cont, label %exit1
Cont: ; preds = %Loop
- %X = load i32* %P ; <i32> [#uses=2]
+ %X = load i32, i32* %P ; <i32> [#uses=2]
store i32 %X, i32* %Q
%V = add i32 %X, 1 ; <i32> [#uses=1]
br i1 %C2, label %Loop, label %exit2
@@ -265,32 +265,32 @@ l1.header:
br label %l2.header
l2.header:
- %x0 = load i1* %c, align 4
+ %x0 = load i1, i1* %c, align 4
br i1 %x0, label %l1.latch, label %l3.preheader
l3.preheader:
br label %l3.header
l3.header:
- %x1 = load i1* %d, align 4
+ %x1 = load i1, i1* %d, align 4
br i1 %x1, label %l2.latch, label %l4.preheader
l4.preheader:
br label %l4.header
l4.header:
- %x2 = load i1* %a
+ %x2 = load i1, i1* %a
br i1 %x2, label %l3.latch, label %l4.body
l4.body:
call void @f(i32* %arrayidx.i)
- %x3 = load i1* %b
+ %x3 = load i1, i1* %b
%l = trunc i64 %iv to i32
br i1 %x3, label %l4.latch, label %exit
l4.latch:
call void @g()
- %x4 = load i1* %b, align 4
+ %x4 = load i1, i1* %b, align 4
br i1 %x4, label %l4.header, label %exit
l3.latch:
diff --git a/llvm/test/Transforms/LICM/speculate.ll b/llvm/test/Transforms/LICM/speculate.ll
index 4e933adf6e1..91b5a25ac0f 100644
--- a/llvm/test/Transforms/LICM/speculate.ll
+++ b/llvm/test/Transforms/LICM/speculate.ll
@@ -13,7 +13,7 @@ entry:
for.body: ; preds = %entry, %for.inc
%i.02 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %p, i64 %i.02
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.inc, label %if.then
@@ -45,7 +45,7 @@ entry:
for.body: ; preds = %entry, %for.inc
%i.02 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %p, i64 %i.02
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.inc, label %if.then
@@ -79,7 +79,7 @@ entry:
for.body: ; preds = %entry, %for.inc
%i.02 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %p, i64 %i.02
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.inc, label %if.then
@@ -112,7 +112,7 @@ entry:
for.body: ; preds = %entry, %for.inc
%i.02 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %p, i64 %i.02
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.inc, label %if.then
@@ -145,7 +145,7 @@ entry:
for.body: ; preds = %entry, %for.inc
%i.02 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %p, i64 %i.02
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.inc, label %if.then
diff --git a/llvm/test/Transforms/LICM/volatile-alias.ll b/llvm/test/Transforms/LICM/volatile-alias.ll
index df7f0a931eb..fda930df933 100644
--- a/llvm/test/Transforms/LICM/volatile-alias.ll
+++ b/llvm/test/Transforms/LICM/volatile-alias.ll
@@ -2,9 +2,9 @@
; The objects *p and *q are aliased to each other, but even though *q is
; volatile, *p can be considered invariant in the loop. Check if it is moved
; out of the loop.
-; CHECK: load i32* %p
+; CHECK: load i32, i32* %p
; CHECK: for.body:
-; CHECK: load volatile i32* %q
+; CHECK: load volatile i32, i32* %q
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -24,30 +24,30 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
- %1 = load i32* %n.addr, align 4
+ %0 = load i32, i32* %i, align 4
+ %1 = load i32, i32* %n.addr, align 4
%cmp = icmp slt i32 %0, %1
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %2 = load i32** %p.addr, align 8
- %3 = load i32* %2, align 4
- %4 = load i32** %q.addr, align 8
- %5 = load volatile i32* %4, align 4
+ %2 = load i32*, i32** %p.addr, align 8
+ %3 = load i32, i32* %2, align 4
+ %4 = load i32*, i32** %q.addr, align 8
+ %5 = load volatile i32, i32* %4, align 4
%add = add nsw i32 %3, %5
- %6 = load i32* %s, align 4
+ %6 = load i32, i32* %s, align 4
%add1 = add nsw i32 %6, %add
store i32 %add1, i32* %s, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %7 = load i32* %i, align 4
+ %7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %8 = load i32* %s, align 4
+ %8 = load i32, i32* %s, align 4
ret i32 %8
}
diff --git a/llvm/test/Transforms/LoadCombine/load-combine-aa.ll b/llvm/test/Transforms/LoadCombine/load-combine-aa.ll
index 714e13d92e6..fc639c0bc05 100644
--- a/llvm/test/Transforms/LoadCombine/load-combine-aa.ll
+++ b/llvm/test/Transforms/LoadCombine/load-combine-aa.ll
@@ -5,14 +5,14 @@ target triple = "x86_64-unknown-linux-gnu"
define i64 @test1(i32* nocapture readonly noalias %a, i32* nocapture readonly noalias %b) {
; CHECK-LABEL: @test1
-; CHECK: load i64*
+; CHECK: load i64, i64*
; CHECK: ret i64
- %load1 = load i32* %a, align 4
+ %load1 = load i32, i32* %a, align 4
%conv = zext i32 %load1 to i64
%arrayidx1 = getelementptr inbounds i32, i32* %a, i64 1
store i32 %load1, i32* %b, align 4
- %load2 = load i32* %arrayidx1, align 4
+ %load2 = load i32, i32* %arrayidx1, align 4
%conv2 = zext i32 %load2 to i64
%shl = shl nuw i64 %conv2, 32
%add = or i64 %shl, %conv
@@ -22,15 +22,15 @@ define i64 @test1(i32* nocapture readonly noalias %a, i32* nocapture readonly no
define i64 @test2(i32* nocapture readonly %a, i32* nocapture readonly %b) {
; CHECK-LABEL: @test2
-; CHECK: load i32*
-; CHECK: load i32*
+; CHECK: load i32, i32*
+; CHECK: load i32, i32*
; CHECK: ret i64
- %load1 = load i32* %a, align 4
+ %load1 = load i32, i32* %a, align 4
%conv = zext i32 %load1 to i64
%arrayidx1 = getelementptr inbounds i32, i32* %a, i64 1
store i32 %load1, i32* %b, align 4
- %load2 = load i32* %arrayidx1, align 4
+ %load2 = load i32, i32* %arrayidx1, align 4
%conv2 = zext i32 %load2 to i64
%shl = shl nuw i64 %conv2, 32
%add = or i64 %shl, %conv
diff --git a/llvm/test/Transforms/LoadCombine/load-combine-assume.ll b/llvm/test/Transforms/LoadCombine/load-combine-assume.ll
index ff0a0d318d4..2d6d160f12f 100644
--- a/llvm/test/Transforms/LoadCombine/load-combine-assume.ll
+++ b/llvm/test/Transforms/LoadCombine/load-combine-assume.ll
@@ -8,14 +8,14 @@ declare void @llvm.assume(i1) nounwind
define i64 @test1(i32* nocapture readonly %a, i1 %b) {
; CHECK-LABEL: @test1
-; CHECK-DAG: load i64* %1, align 4
+; CHECK-DAG: load i64, i64* %1, align 4
; CHECK-DAG: tail call void @llvm.assume(i1 %b)
; CHECK: ret i64
- %load1 = load i32* %a, align 4
+ %load1 = load i32, i32* %a, align 4
%conv = zext i32 %load1 to i64
%arrayidx1 = getelementptr inbounds i32, i32* %a, i64 1
- %load2 = load i32* %arrayidx1, align 4
+ %load2 = load i32, i32* %arrayidx1, align 4
tail call void @llvm.assume(i1 %b)
%conv2 = zext i32 %load2 to i64
%shl = shl nuw i64 %conv2, 32
@@ -27,15 +27,15 @@ define i64 @test1(i32* nocapture readonly %a, i1 %b) {
define i64 @test2(i32* nocapture readonly %a, i1 %b) {
; CHECK-LABEL: @test2
-; CHECK-DAG: load i64* %1, align 4
+; CHECK-DAG: load i64, i64* %1, align 4
; CHECK-DAG: tail call void @llvm.assume(i1 %b)
; CHECK: ret i64
- %load1 = load i32* %a, align 4
+ %load1 = load i32, i32* %a, align 4
%conv = zext i32 %load1 to i64
%arrayidx1 = getelementptr inbounds i32, i32* %a, i64 1
tail call void @llvm.assume(i1 %b)
- %load2 = load i32* %arrayidx1, align 4
+ %load2 = load i32, i32* %arrayidx1, align 4
%conv2 = zext i32 %load2 to i64
%shl = shl nuw i64 %conv2, 32
%add = or i64 %shl, %conv
diff --git a/llvm/test/Transforms/LoadCombine/load-combine.ll b/llvm/test/Transforms/LoadCombine/load-combine.ll
index e0e3c5416e3..d5068787639 100644
--- a/llvm/test/Transforms/LoadCombine/load-combine.ll
+++ b/llvm/test/Transforms/LoadCombine/load-combine.ll
@@ -6,138 +6,138 @@ target triple = "x86_64-unknown-linux-gnu"
; Combine read from char* idiom.
define i64 @LoadU64_x64_0(i64* %pData) {
%1 = bitcast i64* %pData to i8*
- %2 = load i8* %1, align 1
+ %2 = load i8, i8* %1, align 1
%3 = zext i8 %2 to i64
%4 = shl nuw i64 %3, 56
%5 = getelementptr inbounds i8, i8* %1, i64 1
- %6 = load i8* %5, align 1
+ %6 = load i8, i8* %5, align 1
%7 = zext i8 %6 to i64
%8 = shl nuw nsw i64 %7, 48
%9 = or i64 %8, %4
%10 = getelementptr inbounds i8, i8* %1, i64 2
- %11 = load i8* %10, align 1
+ %11 = load i8, i8* %10, align 1
%12 = zext i8 %11 to i64
%13 = shl nuw nsw i64 %12, 40
%14 = or i64 %9, %13
%15 = getelementptr inbounds i8, i8* %1, i64 3
- %16 = load i8* %15, align 1
+ %16 = load i8, i8* %15, align 1
%17 = zext i8 %16 to i64
%18 = shl nuw nsw i64 %17, 32
%19 = or i64 %14, %18
%20 = getelementptr inbounds i8, i8* %1, i64 4
- %21 = load i8* %20, align 1
+ %21 = load i8, i8* %20, align 1
%22 = zext i8 %21 to i64
%23 = shl nuw nsw i64 %22, 24
%24 = or i64 %19, %23
%25 = getelementptr inbounds i8, i8* %1, i64 5
- %26 = load i8* %25, align 1
+ %26 = load i8, i8* %25, align 1
%27 = zext i8 %26 to i64
%28 = shl nuw nsw i64 %27, 16
%29 = or i64 %24, %28
%30 = getelementptr inbounds i8, i8* %1, i64 6
- %31 = load i8* %30, align 1
+ %31 = load i8, i8* %30, align 1
%32 = zext i8 %31 to i64
%33 = shl nuw nsw i64 %32, 8
%34 = or i64 %29, %33
%35 = getelementptr inbounds i8, i8* %1, i64 7
- %36 = load i8* %35, align 1
+ %36 = load i8, i8* %35, align 1
%37 = zext i8 %36 to i64
%38 = or i64 %34, %37
ret i64 %38
; CHECK-LABEL: @LoadU64_x64_0(
-; CHECK: load i64* %{{.*}}, align 1
+; CHECK: load i64, i64* %{{.*}}, align 1
; CHECK-NOT: load
}
; Combine simple adjacent loads.
define i32 @"2xi16_i32"(i16* %x) {
- %1 = load i16* %x, align 2
+ %1 = load i16, i16* %x, align 2
%2 = getelementptr inbounds i16, i16* %x, i64 1
- %3 = load i16* %2, align 2
+ %3 = load i16, i16* %2, align 2
%4 = zext i16 %3 to i32
%5 = shl nuw i32 %4, 16
%6 = zext i16 %1 to i32
%7 = or i32 %5, %6
ret i32 %7
; CHECK-LABEL: @"2xi16_i32"(
-; CHECK: load i32* %{{.*}}, align 2
+; CHECK: load i32, i32* %{{.*}}, align 2
; CHECK-NOT: load
}
; Don't combine loads across stores.
define i32 @"2xi16_i32_store"(i16* %x, i16* %y) {
- %1 = load i16* %x, align 2
+ %1 = load i16, i16* %x, align 2
store i16 0, i16* %y, align 2
%2 = getelementptr inbounds i16, i16* %x, i64 1
- %3 = load i16* %2, align 2
+ %3 = load i16, i16* %2, align 2
%4 = zext i16 %3 to i32
%5 = shl nuw i32 %4, 16
%6 = zext i16 %1 to i32
%7 = or i32 %5, %6
ret i32 %7
; CHECK-LABEL: @"2xi16_i32_store"(
-; CHECK: load i16* %{{.*}}, align 2
+; CHECK: load i16, i16* %{{.*}}, align 2
; CHECK: store
-; CHECK: load i16* %{{.*}}, align 2
+; CHECK: load i16, i16* %{{.*}}, align 2
}
; Don't combine loads with a gap.
define i32 @"2xi16_i32_gap"(i16* %x) {
- %1 = load i16* %x, align 2
+ %1 = load i16, i16* %x, align 2
%2 = getelementptr inbounds i16, i16* %x, i64 2
- %3 = load i16* %2, align 2
+ %3 = load i16, i16* %2, align 2
%4 = zext i16 %3 to i32
%5 = shl nuw i32 %4, 16
%6 = zext i16 %1 to i32
%7 = or i32 %5, %6
ret i32 %7
; CHECK-LABEL: @"2xi16_i32_gap"(
-; CHECK: load i16* %{{.*}}, align 2
-; CHECK: load i16* %{{.*}}, align 2
+; CHECK: load i16, i16* %{{.*}}, align 2
+; CHECK: load i16, i16* %{{.*}}, align 2
}
; Combine out of order loads.
define i32 @"2xi16_i32_order"(i16* %x) {
%1 = getelementptr inbounds i16, i16* %x, i64 1
- %2 = load i16* %1, align 2
+ %2 = load i16, i16* %1, align 2
%3 = zext i16 %2 to i32
- %4 = load i16* %x, align 2
+ %4 = load i16, i16* %x, align 2
%5 = shl nuw i32 %3, 16
%6 = zext i16 %4 to i32
%7 = or i32 %5, %6
ret i32 %7
; CHECK-LABEL: @"2xi16_i32_order"(
-; CHECK: load i32* %{{.*}}, align 2
+; CHECK: load i32, i32* %{{.*}}, align 2
; CHECK-NOT: load
}
; Overlapping loads.
define i32 @"2xi16_i32_overlap"(i8* %x) {
%1 = bitcast i8* %x to i16*
- %2 = load i16* %1, align 2
+ %2 = load i16, i16* %1, align 2
%3 = getelementptr inbounds i8, i8* %x, i64 1
%4 = bitcast i8* %3 to i16*
- %5 = load i16* %4, align 2
+ %5 = load i16, i16* %4, align 2
%6 = zext i16 %5 to i32
%7 = shl nuw i32 %6, 16
%8 = zext i16 %2 to i32
%9 = or i32 %7, %8
ret i32 %9
; CHECK-LABEL: @"2xi16_i32_overlap"(
-; CHECK: load i16* %{{.*}}, align 2
-; CHECK: load i16* %{{.*}}, align 2
+; CHECK: load i16, i16* %{{.*}}, align 2
+; CHECK: load i16, i16* %{{.*}}, align 2
}
; Combine valid alignments.
define i64 @"2xi16_i64_align"(i8* %x) {
%1 = bitcast i8* %x to i32*
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr inbounds i8, i8* %x, i64 4
%4 = bitcast i8* %3 to i16*
- %5 = load i16* %4, align 2
+ %5 = load i16, i16* %4, align 2
%6 = getelementptr inbounds i8, i8* %x, i64 6
%7 = bitcast i8* %6 to i16*
- %8 = load i16* %7, align 2
+ %8 = load i16, i16* %7, align 2
%9 = zext i16 %8 to i64
%10 = shl nuw i64 %9, 48
%11 = zext i16 %5 to i64
@@ -147,44 +147,44 @@ define i64 @"2xi16_i64_align"(i8* %x) {
%15 = or i64 %14, %10
ret i64 %15
; CHECK-LABEL: @"2xi16_i64_align"(
-; CHECK: load i64* %{{.*}}, align 4
+; CHECK: load i64, i64* %{{.*}}, align 4
}
; Non power of two.
define i64 @"2xi16_i64_npo2"(i8* %x) {
- %1 = load i8* %x, align 1
+ %1 = load i8, i8* %x, align 1
%2 = zext i8 %1 to i64
%3 = getelementptr inbounds i8, i8* %x, i64 1
- %4 = load i8* %3, align 1
+ %4 = load i8, i8* %3, align 1
%5 = zext i8 %4 to i64
%6 = shl nuw nsw i64 %5, 8
%7 = or i64 %6, %2
%8 = getelementptr inbounds i8, i8* %x, i64 2
- %9 = load i8* %8, align 1
+ %9 = load i8, i8* %8, align 1
%10 = zext i8 %9 to i64
%11 = shl nuw nsw i64 %10, 16
%12 = or i64 %11, %7
%13 = getelementptr inbounds i8, i8* %x, i64 3
- %14 = load i8* %13, align 1
+ %14 = load i8, i8* %13, align 1
%15 = zext i8 %14 to i64
%16 = shl nuw nsw i64 %15, 24
%17 = or i64 %16, %12
%18 = getelementptr inbounds i8, i8* %x, i64 4
- %19 = load i8* %18, align 1
+ %19 = load i8, i8* %18, align 1
%20 = zext i8 %19 to i64
%21 = shl nuw nsw i64 %20, 32
%22 = or i64 %21, %17
%23 = getelementptr inbounds i8, i8* %x, i64 5
- %24 = load i8* %23, align 1
+ %24 = load i8, i8* %23, align 1
%25 = zext i8 %24 to i64
%26 = shl nuw nsw i64 %25, 40
%27 = or i64 %26, %22
%28 = getelementptr inbounds i8, i8* %x, i64 6
- %29 = load i8* %28, align 1
+ %29 = load i8, i8* %28, align 1
%30 = zext i8 %29 to i64
%31 = shl nuw nsw i64 %30, 48
%32 = or i64 %31, %27
ret i64 %32
; CHECK-LABEL: @"2xi16_i64_npo2"(
-; CHECK: load i32* %{{.*}}, align 1
+; CHECK: load i32, i32* %{{.*}}, align 1
}
diff --git a/llvm/test/Transforms/LoopDeletion/2008-05-06-Phi.ll b/llvm/test/Transforms/LoopDeletion/2008-05-06-Phi.ll
index 32e1eef18f1..fcf5ede76b7 100644
--- a/llvm/test/Transforms/LoopDeletion/2008-05-06-Phi.ll
+++ b/llvm/test/Transforms/LoopDeletion/2008-05-06-Phi.ll
@@ -45,14 +45,14 @@ entry:
define internal fastcc void @encodeMainData(%struct.lame_global_flags* %gfp, [2 x [576 x i32]]* %l3_enc, %struct.III_side_info_t* %si, [2 x %struct.III_scalefac_t]* %scalefac) nounwind {
entry:
%tmp69 = getelementptr %struct.lame_global_flags, %struct.lame_global_flags* %gfp, i32 0, i32 43 ; <i32*> [#uses=1]
- %tmp70 = load i32* %tmp69, align 4 ; <i32> [#uses=1]
+ %tmp70 = load i32, i32* %tmp69, align 4 ; <i32> [#uses=1]
%tmp71 = icmp eq i32 %tmp70, 1 ; <i1> [#uses=1]
br i1 %tmp71, label %bb352, label %bb498
bb113: ; preds = %bb132
%tmp123 = getelementptr [2 x %struct.III_scalefac_t], [2 x %struct.III_scalefac_t]* %scalefac, i32 0, i32 0, i32 1, i32 %sfb.0, i32 %window.0 ; <i32*> [#uses=1]
- %tmp124 = load i32* %tmp123, align 4 ; <i32> [#uses=1]
- %tmp126 = load %struct.BF_PartHolder** %tmp80, align 4 ; <%struct.BF_PartHolder*> [#uses=1]
+ %tmp124 = load i32, i32* %tmp123, align 4 ; <i32> [#uses=1]
+ %tmp126 = load %struct.BF_PartHolder*, %struct.BF_PartHolder** %tmp80, align 4 ; <%struct.BF_PartHolder*> [#uses=1]
%tmp128 = call %struct.BF_PartHolder* @BF_addEntry( %struct.BF_PartHolder* %tmp126, i32 %tmp124, i32 %tmp93 ) nounwind ; <%struct.BF_PartHolder*> [#uses=1]
store %struct.BF_PartHolder* %tmp128, %struct.BF_PartHolder** %tmp80, align 4
%tmp131 = add i32 %window.0, 1 ; <i32> [#uses=1]
@@ -73,7 +73,7 @@ bb140: ; preds = %bb341, %bb137
br i1 %tmp142, label %bb132, label %bb174
bb166: ; preds = %bb174
- %tmp160 = load %struct.BF_PartHolder** %tmp80, align 4 ; <%struct.BF_PartHolder*> [#uses=1]
+ %tmp160 = load %struct.BF_PartHolder*, %struct.BF_PartHolder** %tmp80, align 4 ; <%struct.BF_PartHolder*> [#uses=1]
%tmp162 = call %struct.BF_PartHolder* @BF_addEntry( %struct.BF_PartHolder* %tmp160, i32 0, i32 0 ) nounwind ; <%struct.BF_PartHolder*> [#uses=0]
unreachable
@@ -84,7 +84,7 @@ bb174: ; preds = %bb140
bb341: ; preds = %bb352, %bb174
%tmp80 = getelementptr [2 x [2 x %struct.BF_PartHolder*]], [2 x [2 x %struct.BF_PartHolder*]]* @scaleFactorsPH, i32 0, i32 0, i32 0 ; <%struct.BF_PartHolder**> [#uses=3]
%tmp92 = getelementptr [16 x i32], [16 x i32]* @slen1_tab, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp93 = load i32* %tmp92, align 4 ; <i32> [#uses=1]
+ %tmp93 = load i32, i32* %tmp92, align 4 ; <i32> [#uses=1]
br label %bb140
bb352: ; preds = %entry
diff --git a/llvm/test/Transforms/LoopIdiom/basic-address-space.ll b/llvm/test/Transforms/LoopIdiom/basic-address-space.ll
index b6caa7246d2..3ab874c4d1e 100644
--- a/llvm/test/Transforms/LoopIdiom/basic-address-space.ll
+++ b/llvm/test/Transforms/LoopIdiom/basic-address-space.ll
@@ -75,7 +75,7 @@ for.body: ; preds = %for.inc, %for.body.
%add = add nsw i32 %tmp5, 4
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds [7 x i32], [7 x i32] addrspace(2)* @g_50, i32 0, i64 %idxprom
- %tmp2 = load i32 addrspace(2)* %arrayidx, align 4
+ %tmp2 = load i32, i32 addrspace(2)* %arrayidx, align 4
%add4 = add nsw i32 %tmp5, 5
%idxprom5 = sext i32 %add4 to i64
%arrayidx6 = getelementptr inbounds [7 x i32], [7 x i32] addrspace(2)* @g_50, i32 0, i64 %idxprom5
@@ -85,7 +85,7 @@ for.body: ; preds = %for.inc, %for.body.
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.inc
- %tmp8 = load i32 addrspace(2)* getelementptr inbounds ([7 x i32] addrspace(2)* @g_50, i32 0, i64 6), align 4
+ %tmp8 = load i32, i32 addrspace(2)* getelementptr inbounds ([7 x i32] addrspace(2)* @g_50, i32 0, i64 6), align 4
ret i32 %tmp8
}
diff --git a/llvm/test/Transforms/LoopIdiom/basic.ll b/llvm/test/Transforms/LoopIdiom/basic.ll
index 7344f5dc355..c0aafc20844 100644
--- a/llvm/test/Transforms/LoopIdiom/basic.ll
+++ b/llvm/test/Transforms/LoopIdiom/basic.ll
@@ -150,7 +150,7 @@ for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
%I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
%DestI = getelementptr i8, i8* %Dest, i64 %indvar
- %V = load i8* %I.0.014, align 1
+ %V = load i8, i8* %I.0.014, align 1
store i8 %V, i8* %DestI, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
@@ -223,7 +223,7 @@ for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
%I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
%DestI = getelementptr i8, i8* %Dest, i64 %indvar
- %V = load i8* %I.0.014, align 1
+ %V = load i8, i8* %I.0.014, align 1
store i8 %V, i8* %DestI, align 1
;; This store can clobber the input.
@@ -363,7 +363,7 @@ for.body: ; preds = %for.inc, %for.body.
%add = add nsw i32 %tmp5, 4
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 %idxprom
- %tmp2 = load i32* %arrayidx, align 4
+ %tmp2 = load i32, i32* %arrayidx, align 4
%add4 = add nsw i32 %tmp5, 5
%idxprom5 = sext i32 %add4 to i64
%arrayidx6 = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 %idxprom5
@@ -373,7 +373,7 @@ for.body: ; preds = %for.inc, %for.body.
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.inc
- %tmp8 = load i32* getelementptr inbounds ([7 x i32]* @g_50, i32 0, i64 6), align 4
+ %tmp8 = load i32, i32* getelementptr inbounds ([7 x i32]* @g_50, i32 0, i64 6), align 4
ret i32 %tmp8
; CHECK-LABEL: @test14(
; CHECK: for.body:
@@ -404,7 +404,7 @@ entry:
while.body:
%phi.ptr = phi i32* [ %s, %entry ], [ %next.ptr, %while.body ]
%src.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1
- %val = load i32* %src.ptr, align 4
+ %val = load i32, i32* %src.ptr, align 4
; CHECK: load
%dst.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 0
store i32 %val, i32* %dst.ptr, align 4
diff --git a/llvm/test/Transforms/LoopIdiom/scev-invalidation.ll b/llvm/test/Transforms/LoopIdiom/scev-invalidation.ll
index 1e72e0ba45e..2fe8a3023c4 100644
--- a/llvm/test/Transforms/LoopIdiom/scev-invalidation.ll
+++ b/llvm/test/Transforms/LoopIdiom/scev-invalidation.ll
@@ -11,7 +11,7 @@ for.cond: ; preds = %for.inc, %entry
%backslashes.0 = phi i32 [ undef, %entry ], [ %backslashes.2, %for.inc ]
%p.0 = phi i8* [ undef, %entry ], [ %incdec.ptr3, %for.inc ]
%q.0 = phi i8* [ undef, %entry ], [ %q.2, %for.inc ]
- %0 = load i8* %p.0, align 1
+ %0 = load i8, i8* %p.0, align 1
switch i8 %0, label %while.cond.preheader [
i8 0, label %for.cond4.preheader
i8 92, label %for.inc
diff --git a/llvm/test/Transforms/LoopReroll/basic.ll b/llvm/test/Transforms/LoopReroll/basic.ll
index 4ae78270eb8..16a6dc81af7 100644
--- a/llvm/test/Transforms/LoopReroll/basic.ll
+++ b/llvm/test/Transforms/LoopReroll/basic.ll
@@ -158,42 +158,42 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%mul = fmul float %0, %alpha
%arrayidx2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%add = fadd float %1, %mul
store float %add, float* %arrayidx2, align 4
%2 = add nsw i64 %indvars.iv, 1
%arrayidx5 = getelementptr inbounds float, float* %b, i64 %2
- %3 = load float* %arrayidx5, align 4
+ %3 = load float, float* %arrayidx5, align 4
%mul6 = fmul float %3, %alpha
%arrayidx9 = getelementptr inbounds float, float* %a, i64 %2
- %4 = load float* %arrayidx9, align 4
+ %4 = load float, float* %arrayidx9, align 4
%add10 = fadd float %4, %mul6
store float %add10, float* %arrayidx9, align 4
%5 = add nsw i64 %indvars.iv, 2
%arrayidx13 = getelementptr inbounds float, float* %b, i64 %5
- %6 = load float* %arrayidx13, align 4
+ %6 = load float, float* %arrayidx13, align 4
%mul14 = fmul float %6, %alpha
%arrayidx17 = getelementptr inbounds float, float* %a, i64 %5
- %7 = load float* %arrayidx17, align 4
+ %7 = load float, float* %arrayidx17, align 4
%add18 = fadd float %7, %mul14
store float %add18, float* %arrayidx17, align 4
%8 = add nsw i64 %indvars.iv, 3
%arrayidx21 = getelementptr inbounds float, float* %b, i64 %8
- %9 = load float* %arrayidx21, align 4
+ %9 = load float, float* %arrayidx21, align 4
%mul22 = fmul float %9, %alpha
%arrayidx25 = getelementptr inbounds float, float* %a, i64 %8
- %10 = load float* %arrayidx25, align 4
+ %10 = load float, float* %arrayidx25, align 4
%add26 = fadd float %10, %mul22
store float %add26, float* %arrayidx25, align 4
%11 = add nsw i64 %indvars.iv, 4
%arrayidx29 = getelementptr inbounds float, float* %b, i64 %11
- %12 = load float* %arrayidx29, align 4
+ %12 = load float, float* %arrayidx29, align 4
%mul30 = fmul float %12, %alpha
%arrayidx33 = getelementptr inbounds float, float* %a, i64 %11
- %13 = load float* %arrayidx33, align 4
+ %13 = load float, float* %arrayidx33, align 4
%add34 = fadd float %13, %mul30
store float %add34, float* %arrayidx33, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 5
@@ -206,10 +206,10 @@ for.body: ; preds = %entry, %for.body
; CHECK: for.body:
; CHECK: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
; CHECK: %arrayidx = getelementptr inbounds float, float* %b, i64 %indvar
-; CHECK: %0 = load float* %arrayidx, align 4
+; CHECK: %0 = load float, float* %arrayidx, align 4
; CHECK: %mul = fmul float %0, %alpha
; CHECK: %arrayidx2 = getelementptr inbounds float, float* %a, i64 %indvar
-; CHECK: %1 = load float* %arrayidx2, align 4
+; CHECK: %1 = load float, float* %arrayidx2, align 4
; CHECK: %add = fadd float %1, %mul
; CHECK: store float %add, float* %arrayidx2, align 4
; CHECK: %indvar.next = add i64 %indvar, 1
@@ -240,57 +240,57 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %ip, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%idxprom1 = sext i32 %0 to i64
%arrayidx2 = getelementptr inbounds float, float* %b, i64 %idxprom1
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%mul = fmul float %1, %alpha
%arrayidx4 = getelementptr inbounds float, float* %a, i64 %indvars.iv
- %2 = load float* %arrayidx4, align 4
+ %2 = load float, float* %arrayidx4, align 4
%add = fadd float %2, %mul
store float %add, float* %arrayidx4, align 4
%3 = add nsw i64 %indvars.iv, 1
%arrayidx7 = getelementptr inbounds i32, i32* %ip, i64 %3
- %4 = load i32* %arrayidx7, align 4
+ %4 = load i32, i32* %arrayidx7, align 4
%idxprom8 = sext i32 %4 to i64
%arrayidx9 = getelementptr inbounds float, float* %b, i64 %idxprom8
- %5 = load float* %arrayidx9, align 4
+ %5 = load float, float* %arrayidx9, align 4
%mul10 = fmul float %5, %alpha
%arrayidx13 = getelementptr inbounds float, float* %a, i64 %3
- %6 = load float* %arrayidx13, align 4
+ %6 = load float, float* %arrayidx13, align 4
%add14 = fadd float %6, %mul10
store float %add14, float* %arrayidx13, align 4
%7 = add nsw i64 %indvars.iv, 2
%arrayidx17 = getelementptr inbounds i32, i32* %ip, i64 %7
- %8 = load i32* %arrayidx17, align 4
+ %8 = load i32, i32* %arrayidx17, align 4
%idxprom18 = sext i32 %8 to i64
%arrayidx19 = getelementptr inbounds float, float* %b, i64 %idxprom18
- %9 = load float* %arrayidx19, align 4
+ %9 = load float, float* %arrayidx19, align 4
%mul20 = fmul float %9, %alpha
%arrayidx23 = getelementptr inbounds float, float* %a, i64 %7
- %10 = load float* %arrayidx23, align 4
+ %10 = load float, float* %arrayidx23, align 4
%add24 = fadd float %10, %mul20
store float %add24, float* %arrayidx23, align 4
%11 = add nsw i64 %indvars.iv, 3
%arrayidx27 = getelementptr inbounds i32, i32* %ip, i64 %11
- %12 = load i32* %arrayidx27, align 4
+ %12 = load i32, i32* %arrayidx27, align 4
%idxprom28 = sext i32 %12 to i64
%arrayidx29 = getelementptr inbounds float, float* %b, i64 %idxprom28
- %13 = load float* %arrayidx29, align 4
+ %13 = load float, float* %arrayidx29, align 4
%mul30 = fmul float %13, %alpha
%arrayidx33 = getelementptr inbounds float, float* %a, i64 %11
- %14 = load float* %arrayidx33, align 4
+ %14 = load float, float* %arrayidx33, align 4
%add34 = fadd float %14, %mul30
store float %add34, float* %arrayidx33, align 4
%15 = add nsw i64 %indvars.iv, 4
%arrayidx37 = getelementptr inbounds i32, i32* %ip, i64 %15
- %16 = load i32* %arrayidx37, align 4
+ %16 = load i32, i32* %arrayidx37, align 4
%idxprom38 = sext i32 %16 to i64
%arrayidx39 = getelementptr inbounds float, float* %b, i64 %idxprom38
- %17 = load float* %arrayidx39, align 4
+ %17 = load float, float* %arrayidx39, align 4
%mul40 = fmul float %17, %alpha
%arrayidx43 = getelementptr inbounds float, float* %a, i64 %15
- %18 = load float* %arrayidx43, align 4
+ %18 = load float, float* %arrayidx43, align 4
%add44 = fadd float %18, %mul40
store float %add44, float* %arrayidx43, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 5
@@ -303,13 +303,13 @@ for.body: ; preds = %entry, %for.body
; CHECK: for.body:
; CHECK: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
; CHECK: %arrayidx = getelementptr inbounds i32, i32* %ip, i64 %indvar
-; CHECK: %0 = load i32* %arrayidx, align 4
+; CHECK: %0 = load i32, i32* %arrayidx, align 4
; CHECK: %idxprom1 = sext i32 %0 to i64
; CHECK: %arrayidx2 = getelementptr inbounds float, float* %b, i64 %idxprom1
-; CHECK: %1 = load float* %arrayidx2, align 4
+; CHECK: %1 = load float, float* %arrayidx2, align 4
; CHECK: %mul = fmul float %1, %alpha
; CHECK: %arrayidx4 = getelementptr inbounds float, float* %a, i64 %indvar
-; CHECK: %2 = load float* %arrayidx4, align 4
+; CHECK: %2 = load float, float* %arrayidx4, align 4
; CHECK: %add = fadd float %2, %mul
; CHECK: store float %add, float* %arrayidx4, align 4
; CHECK: %indvar.next = add i64 %indvar, 1
diff --git a/llvm/test/Transforms/LoopReroll/nonconst_lb.ll b/llvm/test/Transforms/LoopReroll/nonconst_lb.ll
index 45a67016d50..5effa42d07c 100644
--- a/llvm/test/Transforms/LoopReroll/nonconst_lb.ll
+++ b/llvm/test/Transforms/LoopReroll/nonconst_lb.ll
@@ -18,25 +18,25 @@ entry:
for.body: ; preds = %entry, %for.body
%i.035 = phi i32 [ %add18, %for.body ], [ %m, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %B, i32 %i.035
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%mul = shl nsw i32 %0, 2
%arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %i.035
store i32 %mul, i32* %arrayidx2, align 4
%add3 = add nsw i32 %i.035, 1
%arrayidx4 = getelementptr inbounds i32, i32* %B, i32 %add3
- %1 = load i32* %arrayidx4, align 4
+ %1 = load i32, i32* %arrayidx4, align 4
%mul5 = shl nsw i32 %1, 2
%arrayidx7 = getelementptr inbounds i32, i32* %A, i32 %add3
store i32 %mul5, i32* %arrayidx7, align 4
%add8 = add nsw i32 %i.035, 2
%arrayidx9 = getelementptr inbounds i32, i32* %B, i32 %add8
- %2 = load i32* %arrayidx9, align 4
+ %2 = load i32, i32* %arrayidx9, align 4
%mul10 = shl nsw i32 %2, 2
%arrayidx12 = getelementptr inbounds i32, i32* %A, i32 %add8
store i32 %mul10, i32* %arrayidx12, align 4
%add13 = add nsw i32 %i.035, 3
%arrayidx14 = getelementptr inbounds i32, i32* %B, i32 %add13
- %3 = load i32* %arrayidx14, align 4
+ %3 = load i32, i32* %arrayidx14, align 4
%mul15 = shl nsw i32 %3, 2
%arrayidx17 = getelementptr inbounds i32, i32* %A, i32 %add13
store i32 %mul15, i32* %arrayidx17, align 4
@@ -61,7 +61,7 @@ for.end: ; preds = %for.body, %entry
; CHECK: %indvar = phi i32 [ %indvar.next, %for.body ], [ 0, %for.body.preheader ]
; CHECK: %6 = add i32 %m, %indvar
; CHECK: %arrayidx = getelementptr inbounds i32, i32* %B, i32 %6
-; CHECK: %7 = load i32* %arrayidx, align 4
+; CHECK: %7 = load i32, i32* %arrayidx, align 4
; CHECK: %mul = shl nsw i32 %7, 2
; CHECK: %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %6
; CHECK: store i32 %mul, i32* %arrayidx2, align 4
@@ -89,33 +89,33 @@ entry:
for.body: ; preds = %entry, %for.body
%i.056 = phi i32 [ %add27, %for.body ], [ %rem, %entry ]
%arrayidx = getelementptr inbounds float, float* %dy, i32 %i.056
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds float, float* %dx, i32 %i.056
- %1 = load float* %arrayidx1, align 4
+ %1 = load float, float* %arrayidx1, align 4
%mul = fmul float %1, %da
%add = fadd float %0, %mul
store float %add, float* %arrayidx, align 4
%add3 = add nsw i32 %i.056, 1
%arrayidx4 = getelementptr inbounds float, float* %dy, i32 %add3
- %2 = load float* %arrayidx4, align 4
+ %2 = load float, float* %arrayidx4, align 4
%arrayidx6 = getelementptr inbounds float, float* %dx, i32 %add3
- %3 = load float* %arrayidx6, align 4
+ %3 = load float, float* %arrayidx6, align 4
%mul7 = fmul float %3, %da
%add8 = fadd float %2, %mul7
store float %add8, float* %arrayidx4, align 4
%add11 = add nsw i32 %i.056, 2
%arrayidx12 = getelementptr inbounds float, float* %dy, i32 %add11
- %4 = load float* %arrayidx12, align 4
+ %4 = load float, float* %arrayidx12, align 4
%arrayidx14 = getelementptr inbounds float, float* %dx, i32 %add11
- %5 = load float* %arrayidx14, align 4
+ %5 = load float, float* %arrayidx14, align 4
%mul15 = fmul float %5, %da
%add16 = fadd float %4, %mul15
store float %add16, float* %arrayidx12, align 4
%add19 = add nsw i32 %i.056, 3
%arrayidx20 = getelementptr inbounds float, float* %dy, i32 %add19
- %6 = load float* %arrayidx20, align 4
+ %6 = load float, float* %arrayidx20, align 4
%arrayidx22 = getelementptr inbounds float, float* %dx, i32 %add19
- %7 = load float* %arrayidx22, align 4
+ %7 = load float, float* %arrayidx22, align 4
%mul23 = fmul float %7, %da
%add24 = fadd float %6, %mul23
store float %add24, float* %arrayidx20, align 4
@@ -141,9 +141,9 @@ for.end: ; preds = %for.body, %entry
; CHECK: %indvar = phi i32 [ %indvar.next, %for.body ], [ 0, %for.body.preheader ]
; CHECK: %6 = add i32 %rem, %indvar
; CHECK: %arrayidx = getelementptr inbounds float, float* %dy, i32 %6
-; CHECK: %7 = load float* %arrayidx, align 4
+; CHECK: %7 = load float, float* %arrayidx, align 4
; CHECK: %arrayidx1 = getelementptr inbounds float, float* %dx, i32 %6
-; CHECK: %8 = load float* %arrayidx1, align 4
+; CHECK: %8 = load float, float* %arrayidx1, align 4
; CHECK: %mul = fmul float %8, %da
; CHECK: %add = fadd float %7, %mul
; CHECK: store float %add, float* %arrayidx, align 4
diff --git a/llvm/test/Transforms/LoopReroll/reduction.ll b/llvm/test/Transforms/LoopReroll/reduction.ll
index 559f4566e5b..191518514b8 100644
--- a/llvm/test/Transforms/LoopReroll/reduction.ll
+++ b/llvm/test/Transforms/LoopReroll/reduction.ll
@@ -10,19 +10,19 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.029 = phi i32 [ 0, %entry ], [ %add12, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.029
%1 = or i64 %indvars.iv, 1
%arrayidx3 = getelementptr inbounds i32, i32* %x, i64 %1
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%add4 = add nsw i32 %add, %2
%3 = or i64 %indvars.iv, 2
%arrayidx7 = getelementptr inbounds i32, i32* %x, i64 %3
- %4 = load i32* %arrayidx7, align 4
+ %4 = load i32, i32* %arrayidx7, align 4
%add8 = add nsw i32 %add4, %4
%5 = or i64 %indvars.iv, 3
%arrayidx11 = getelementptr inbounds i32, i32* %x, i64 %5
- %6 = load i32* %arrayidx11, align 4
+ %6 = load i32, i32* %arrayidx11, align 4
%add12 = add nsw i32 %add8, %6
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
%7 = trunc i64 %indvars.iv.next to i32
@@ -35,7 +35,7 @@ for.body: ; preds = %entry, %for.body
; CHECK: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
; CHECK: %r.029 = phi i32 [ 0, %entry ], [ %add, %for.body ]
; CHECK: %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvar
-; CHECK: %0 = load i32* %arrayidx, align 4
+; CHECK: %0 = load i32, i32* %arrayidx, align 4
; CHECK: %add = add nsw i32 %0, %r.029
; CHECK: %indvar.next = add i64 %indvar, 1
; CHECK: %exitcond = icmp eq i64 %indvar, 399
@@ -55,19 +55,19 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.029 = phi float [ 0.0, %entry ], [ %add12, %for.body ]
%arrayidx = getelementptr inbounds float, float* %x, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%add = fadd float %0, %r.029
%1 = or i64 %indvars.iv, 1
%arrayidx3 = getelementptr inbounds float, float* %x, i64 %1
- %2 = load float* %arrayidx3, align 4
+ %2 = load float, float* %arrayidx3, align 4
%add4 = fadd float %add, %2
%3 = or i64 %indvars.iv, 2
%arrayidx7 = getelementptr inbounds float, float* %x, i64 %3
- %4 = load float* %arrayidx7, align 4
+ %4 = load float, float* %arrayidx7, align 4
%add8 = fadd float %add4, %4
%5 = or i64 %indvars.iv, 3
%arrayidx11 = getelementptr inbounds float, float* %x, i64 %5
- %6 = load float* %arrayidx11, align 4
+ %6 = load float, float* %arrayidx11, align 4
%add12 = fadd float %add8, %6
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
%7 = trunc i64 %indvars.iv.next to i32
@@ -80,7 +80,7 @@ for.body: ; preds = %entry, %for.body
; CHECK: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
; CHECK: %r.029 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
; CHECK: %arrayidx = getelementptr inbounds float, float* %x, i64 %indvar
-; CHECK: %0 = load float* %arrayidx, align 4
+; CHECK: %0 = load float, float* %arrayidx, align 4
; CHECK: %add = fadd float %0, %r.029
; CHECK: %indvar.next = add i64 %indvar, 1
; CHECK: %exitcond = icmp eq i64 %indvar, 399
@@ -100,19 +100,19 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.029 = phi i32 [ 0, %entry ], [ %add12, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %0
%1 = or i64 %indvars.iv, 1
%arrayidx3 = getelementptr inbounds i32, i32* %x, i64 %1
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%add4 = add nsw i32 %add, %2
%3 = or i64 %indvars.iv, 2
%arrayidx7 = getelementptr inbounds i32, i32* %x, i64 %3
- %4 = load i32* %arrayidx7, align 4
+ %4 = load i32, i32* %arrayidx7, align 4
%add8 = add nsw i32 %add4, %4
%5 = or i64 %indvars.iv, 3
%arrayidx11 = getelementptr inbounds i32, i32* %x, i64 %5
- %6 = load i32* %arrayidx11, align 4
+ %6 = load i32, i32* %arrayidx11, align 4
%add12 = add nsw i32 %add8, %6
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
%7 = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopRotate/PhiRename-1.ll b/llvm/test/Transforms/LoopRotate/PhiRename-1.ll
index 098fd98275c..8ec0fbf789d 100644
--- a/llvm/test/Transforms/LoopRotate/PhiRename-1.ll
+++ b/llvm/test/Transforms/LoopRotate/PhiRename-1.ll
@@ -36,59 +36,59 @@ entry:
%op = alloca %struct.operator*, align 4 ; <%struct.operator**> [#uses=3]
store i32 %arity, i32* %arity_addr
store i32 0, i32* %c
- %tmp1 = load %struct.list** @operators ; <%struct.list*> [#uses=1]
+ %tmp1 = load %struct.list*, %struct.list** @operators ; <%struct.list*> [#uses=1]
store %struct.list* %tmp1, %struct.list** %l
br label %bb21
bb: ; preds = %bb21
%tmp3 = getelementptr %struct.list, %struct.list* %tmp22, i32 0, i32 0 ; <i8**> [#uses=1]
- %tmp4 = load i8** %tmp3 ; <i8*> [#uses=1]
+ %tmp4 = load i8*, i8** %tmp3 ; <i8*> [#uses=1]
%tmp45 = bitcast i8* %tmp4 to %struct.operator* ; <%struct.operator*> [#uses=1]
store %struct.operator* %tmp45, %struct.operator** %op
- %tmp6 = load %struct.operator** %op ; <%struct.operator*> [#uses=1]
+ %tmp6 = load %struct.operator*, %struct.operator** %op ; <%struct.operator*> [#uses=1]
%tmp7 = getelementptr %struct.operator, %struct.operator* %tmp6, i32 0, i32 5 ; <i32*> [#uses=1]
- %tmp8 = load i32* %tmp7 ; <i32> [#uses=1]
- %tmp9 = load i32* %arity_addr ; <i32> [#uses=1]
+ %tmp8 = load i32, i32* %tmp7 ; <i32> [#uses=1]
+ %tmp9 = load i32, i32* %arity_addr ; <i32> [#uses=1]
icmp eq i32 %tmp8, %tmp9 ; <i1>:0 [#uses=1]
zext i1 %0 to i8 ; <i8>:1 [#uses=1]
icmp ne i8 %1, 0 ; <i1>:2 [#uses=1]
br i1 %2, label %cond_true, label %cond_next
cond_true: ; preds = %bb
- %tmp10 = load %struct.operator** %op ; <%struct.operator*> [#uses=1]
+ %tmp10 = load %struct.operator*, %struct.operator** %op ; <%struct.operator*> [#uses=1]
%tmp11 = getelementptr %struct.operator, %struct.operator* %tmp10, i32 0, i32 2 ; <i32*> [#uses=1]
- %tmp12 = load i32* %tmp11 ; <i32> [#uses=1]
- %tmp13 = load %struct.FILE** @outfile ; <%struct.FILE*> [#uses=1]
+ %tmp12 = load i32, i32* %tmp11 ; <i32> [#uses=1]
+ %tmp13 = load %struct.FILE*, %struct.FILE** @outfile ; <%struct.FILE*> [#uses=1]
%tmp14 = getelementptr [11 x i8], [11 x i8]* @str1, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp15 = call i32 (%struct.FILE*, i8*, ...)* @fprintf( %struct.FILE* %tmp13, i8* %tmp14, i32 %tmp12 ) ; <i32> [#uses=0]
- %tmp16 = load i32* %c ; <i32> [#uses=1]
+ %tmp16 = load i32, i32* %c ; <i32> [#uses=1]
%tmp17 = add i32 %tmp16, 1 ; <i32> [#uses=1]
store i32 %tmp17, i32* %c
br label %cond_next
cond_next: ; preds = %cond_true, %bb
%tmp19 = getelementptr %struct.list, %struct.list* %tmp22, i32 0, i32 1 ; <%struct.list**> [#uses=1]
- %tmp20 = load %struct.list** %tmp19 ; <%struct.list*> [#uses=1]
+ %tmp20 = load %struct.list*, %struct.list** %tmp19 ; <%struct.list*> [#uses=1]
store %struct.list* %tmp20, %struct.list** %l
br label %bb21
bb21: ; preds = %cond_next, %entry
%l.in = phi %struct.list** [ @operators, %entry ], [ %tmp19, %cond_next ]
- %tmp22 = load %struct.list** %l.in ; <%struct.list*> [#uses=1]
+ %tmp22 = load %struct.list*, %struct.list** %l.in ; <%struct.list*> [#uses=1]
icmp ne %struct.list* %tmp22, null ; <i1>:3 [#uses=1]
zext i1 %3 to i8 ; <i8>:4 [#uses=1]
icmp ne i8 %4, 0 ; <i1>:5 [#uses=1]
br i1 %5, label %bb, label %bb23
bb23: ; preds = %bb21
- %tmp24 = load i32* %c ; <i32> [#uses=1]
+ %tmp24 = load i32, i32* %c ; <i32> [#uses=1]
store i32 %tmp24, i32* %tmp
- %tmp25 = load i32* %tmp ; <i32> [#uses=1]
+ %tmp25 = load i32, i32* %tmp ; <i32> [#uses=1]
store i32 %tmp25, i32* %retval
br label %return
return: ; preds = %bb23
- %retval26 = load i32* %retval ; <i32> [#uses=1]
+ %retval26 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval26
}
diff --git a/llvm/test/Transforms/LoopRotate/alloca.ll b/llvm/test/Transforms/LoopRotate/alloca.ll
index fd217ea8dcf..bbcfb39c372 100644
--- a/llvm/test/Transforms/LoopRotate/alloca.ll
+++ b/llvm/test/Transforms/LoopRotate/alloca.ll
@@ -14,7 +14,7 @@ declare void @use(i8*)
define void @test() {
entry:
- %end = load i16* @e
+ %end = load i16, i16* @e
br label %loop
loop:
diff --git a/llvm/test/Transforms/LoopRotate/dbgvalue.ll b/llvm/test/Transforms/LoopRotate/dbgvalue.ll
index 1b9e9bfe621..1d6a864c1d5 100644
--- a/llvm/test/Transforms/LoopRotate/dbgvalue.ll
+++ b/llvm/test/Transforms/LoopRotate/dbgvalue.ll
@@ -61,12 +61,12 @@ for.cond:
br i1 %cmp, label %for.end, label %for.body
for.body:
- %0 = load i64* @channelColumns, align 8
+ %0 = load i64, i64* @channelColumns, align 8
%mul = mul i64 %0, %row
%add = add i64 %mul, %i.0
- %1 = load i8** @horzPlane, align 8
+ %1 = load i8*, i8** @horzPlane, align 8
%arrayidx = getelementptr inbounds i8, i8* %1, i64 %add
- %2 = load i8* %arrayidx, align 1
+ %2 = load i8, i8* %arrayidx, align 1
%tobool = icmp eq i8 %2, 0
br i1 %tobool, label %for.inc, label %for.end
diff --git a/llvm/test/Transforms/LoopRotate/indirectbr.ll b/llvm/test/Transforms/LoopRotate/indirectbr.ll
index 9c82aa88346..2ccc54697ae 100644
--- a/llvm/test/Transforms/LoopRotate/indirectbr.ll
+++ b/llvm/test/Transforms/LoopRotate/indirectbr.ll
@@ -12,7 +12,7 @@ while.body: ; preds = %while.cond
br label %indirectgoto
run_opcode: ; preds = %indirectgoto
- %tmp276 = load i8* undef ; <i8> [#uses=1]
+ %tmp276 = load i8, i8* undef ; <i8> [#uses=1]
br label %indirectgoto
if.else295: ; preds = %divide_late
diff --git a/llvm/test/Transforms/LoopRotate/multiple-exits.ll b/llvm/test/Transforms/LoopRotate/multiple-exits.ll
index e97d3df2ae8..f31ed7f1c5b 100644
--- a/llvm/test/Transforms/LoopRotate/multiple-exits.ll
+++ b/llvm/test/Transforms/LoopRotate/multiple-exits.ll
@@ -22,7 +22,7 @@ for.cond1: ; preds = %for.cond, %land.rhs
land.rhs: ; preds = %for.cond1
%conv = zext i32 %i.1 to i64
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* %a, i64 0, i64 %conv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add i32 %0, %sum.1
%cmp4 = icmp ugt i32 %add, 1000
%inc = add i32 %i.1, 1
diff --git a/llvm/test/Transforms/LoopRotate/nosimplifylatch.ll b/llvm/test/Transforms/LoopRotate/nosimplifylatch.ll
index da3ec553114..07ff6643c6d 100644
--- a/llvm/test/Transforms/LoopRotate/nosimplifylatch.ll
+++ b/llvm/test/Transforms/LoopRotate/nosimplifylatch.ll
@@ -18,8 +18,8 @@ for.cond: ; preds = %for.inc, %entry
br i1 %lnot.i, label %for.end, label %for.body
for.body: ; preds = %for.cond
- %0 = load i32* %coerce.val.ip9, align 4
- %1 = load i32* %__value_, align 4
+ %0 = load i32, i32* %coerce.val.ip9, align 4
+ %1 = load i32, i32* %__value_, align 4
%cmp = icmp eq i32 %0, %1
br i1 %cmp, label %for.end, label %for.inc
diff --git a/llvm/test/Transforms/LoopRotate/phi-duplicate.ll b/llvm/test/Transforms/LoopRotate/phi-duplicate.ll
index 88170d6bc60..46ee5961ba5 100644
--- a/llvm/test/Transforms/LoopRotate/phi-duplicate.ll
+++ b/llvm/test/Transforms/LoopRotate/phi-duplicate.ll
@@ -14,10 +14,10 @@ for.cond: ; preds = %for.body, %entry
for.body: ; preds = %for.cond
%arrayidx = getelementptr inbounds double, double* %G, i64 %j.0 ; <double*> [#uses=1]
- %tmp3 = load double* %arrayidx ; <double> [#uses=1]
+ %tmp3 = load double, double* %arrayidx ; <double> [#uses=1]
%sub = sub i64 %j.0, 1 ; <i64> [#uses=1]
%arrayidx6 = getelementptr inbounds double, double* %G, i64 %sub ; <double*> [#uses=1]
- %tmp7 = load double* %arrayidx6 ; <double> [#uses=1]
+ %tmp7 = load double, double* %arrayidx6 ; <double> [#uses=1]
%add = fadd double %tmp3, %tmp7 ; <double> [#uses=1]
%arrayidx10 = getelementptr inbounds double, double* %G, i64 %j.0 ; <double*> [#uses=1]
store double %add, double* %arrayidx10
diff --git a/llvm/test/Transforms/LoopRotate/simplifylatch.ll b/llvm/test/Transforms/LoopRotate/simplifylatch.ll
index 4bb3c79fb18..215622fe302 100644
--- a/llvm/test/Transforms/LoopRotate/simplifylatch.ll
+++ b/llvm/test/Transforms/LoopRotate/simplifylatch.ll
@@ -14,7 +14,7 @@ bb: ; preds = %bb4, %entry
%mode.0 = phi i8 [ 0, %entry ], [ %indvar.next, %bb4 ] ; <i8> [#uses=4]
zext i8 %mode.0 to i32 ; <i32>:1 [#uses=1]
getelementptr [4 x i32], [4 x i32]* @mode_table, i32 0, i32 %1 ; <i32*>:2 [#uses=1]
- load i32* %2, align 4 ; <i32>:3 [#uses=1]
+ load i32, i32* %2, align 4 ; <i32>:3 [#uses=1]
icmp eq i32 %3, %0 ; <i1>:4 [#uses=1]
br i1 %4, label %bb1, label %bb2
@@ -40,7 +40,7 @@ declare void @raise_exception() noreturn
;CHECK: for.body.lr.ph:
;CHECK-NEXT: %arrayidx1 = getelementptr inbounds i8, i8* %CurPtr, i64 0
-;CHECK-NEXT: %0 = load i8* %arrayidx1, align 1
+;CHECK-NEXT: %0 = load i8, i8* %arrayidx1, align 1
;CHECK-NEXT: %conv2 = sext i8 %0 to i32
;CHECK-NEXT: br label %for.body
@@ -56,10 +56,10 @@ for.cond: ; preds = %for.inc, %entry
for.body: ; preds = %for.cond
%idxprom = zext i32 %i.0 to i64
%arrayidx = getelementptr inbounds i8, i8* %CurPtr, i64 %idxprom
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
%conv = sext i8 %0 to i32
%arrayidx1 = getelementptr inbounds i8, i8* %CurPtr, i64 0
- %1 = load i8* %arrayidx1, align 1
+ %1 = load i8, i8* %arrayidx1, align 1
%conv2 = sext i8 %1 to i32
%cmp3 = icmp ne i32 %conv, %conv2
br i1 %cmp3, label %return, label %for.inc
diff --git a/llvm/test/Transforms/LoopSimplify/2003-08-15-PreheadersFail.ll b/llvm/test/Transforms/LoopSimplify/2003-08-15-PreheadersFail.ll
index 772cbb3cd3b..c33652896c4 100644
--- a/llvm/test/Transforms/LoopSimplify/2003-08-15-PreheadersFail.ll
+++ b/llvm/test/Transforms/LoopSimplify/2003-08-15-PreheadersFail.ll
@@ -9,28 +9,28 @@ define i32 @_yylex() {
br label %loopentry.0
loopentry.0: ; preds = %else.26, %0
store i32* getelementptr ([16386 x i32]* @yy_state_buf, i64 0, i64 0), i32** @yy_state_ptr
- %tmp.35 = load i32** @yy_state_ptr ; <i32*> [#uses=2]
+ %tmp.35 = load i32*, i32** @yy_state_ptr ; <i32*> [#uses=2]
%inc.0 = getelementptr i32, i32* %tmp.35, i64 1 ; <i32*> [#uses=1]
store i32* %inc.0, i32** @yy_state_ptr
- %tmp.36 = load i32* null ; <i32> [#uses=1]
+ %tmp.36 = load i32, i32* null ; <i32> [#uses=1]
store i32 %tmp.36, i32* %tmp.35
br label %loopexit.2
loopexit.2: ; preds = %else.26, %loopexit.2, %loopentry.0
store i8* null, i8** null
- %tmp.91 = load i32* null ; <i32> [#uses=1]
+ %tmp.91 = load i32, i32* null ; <i32> [#uses=1]
%tmp.92 = sext i32 %tmp.91 to i64 ; <i64> [#uses=1]
%tmp.93 = getelementptr [787 x i16], [787 x i16]* @yy_base, i64 0, i64 %tmp.92 ; <i16*> [#uses=1]
- %tmp.94 = load i16* %tmp.93 ; <i16> [#uses=1]
+ %tmp.94 = load i16, i16* %tmp.93 ; <i16> [#uses=1]
%tmp.95 = icmp ne i16 %tmp.94, 4394 ; <i1> [#uses=1]
br i1 %tmp.95, label %loopexit.2, label %yy_find_action
yy_find_action: ; preds = %else.26, %loopexit.2
br label %loopentry.3
loopentry.3: ; preds = %then.9, %shortcirc_done.0, %yy_find_action
- %tmp.105 = load i32* @yy_lp ; <i32> [#uses=1]
+ %tmp.105 = load i32, i32* @yy_lp ; <i32> [#uses=1]
%tmp.106 = icmp ne i32 %tmp.105, 0 ; <i1> [#uses=1]
br i1 %tmp.106, label %shortcirc_next.0, label %shortcirc_done.0
shortcirc_next.0: ; preds = %loopentry.3
- %tmp.114 = load i16* null ; <i16> [#uses=1]
+ %tmp.114 = load i16, i16* null ; <i16> [#uses=1]
%tmp.115 = sext i16 %tmp.114 to i32 ; <i32> [#uses=1]
%tmp.116 = icmp slt i32 0, %tmp.115 ; <i1> [#uses=1]
br label %shortcirc_done.0
@@ -38,7 +38,7 @@ shortcirc_done.0: ; preds = %shortcirc_next.0, %loopentry.3
%shortcirc_val.0 = phi i1 [ false, %loopentry.3 ], [ %tmp.116, %shortcirc_next.0 ] ; <i1> [#uses=1]
br i1 %shortcirc_val.0, label %else.0, label %loopentry.3
else.0: ; preds = %shortcirc_done.0
- %tmp.144 = load i32* null ; <i32> [#uses=1]
+ %tmp.144 = load i32, i32* null ; <i32> [#uses=1]
%tmp.145 = and i32 %tmp.144, 8192 ; <i32> [#uses=1]
%tmp.146 = icmp ne i32 %tmp.145, 0 ; <i1> [#uses=1]
br i1 %tmp.146, label %then.9, label %else.26
diff --git a/llvm/test/Transforms/LoopSimplify/2003-12-10-ExitBlocksProblem.ll b/llvm/test/Transforms/LoopSimplify/2003-12-10-ExitBlocksProblem.ll
index fb39f05c6db..32b632220d3 100644
--- a/llvm/test/Transforms/LoopSimplify/2003-12-10-ExitBlocksProblem.ll
+++ b/llvm/test/Transforms/LoopSimplify/2003-12-10-ExitBlocksProblem.ll
@@ -14,15 +14,15 @@ entry:
store i32 123, i32* @G
br label %loopentry.i
loopentry.i: ; preds = %endif.1.i, %entry
- %tmp.0.i = load i32* @G ; <i32> [#uses=1]
+ %tmp.0.i = load i32, i32* @G ; <i32> [#uses=1]
%tmp.1.i = icmp eq i32 %tmp.0.i, 123 ; <i1> [#uses=1]
br i1 %tmp.1.i, label %Out.i, label %endif.0.i
endif.0.i: ; preds = %loopentry.i
- %tmp.3.i = load i32* @G ; <i32> [#uses=1]
+ %tmp.3.i = load i32, i32* @G ; <i32> [#uses=1]
%tmp.4.i = icmp eq i32 %tmp.3.i, 126 ; <i1> [#uses=1]
br i1 %tmp.4.i, label %ExitBlock.i, label %endif.1.i
endif.1.i: ; preds = %endif.0.i
- %tmp.6.i = load i32* @G ; <i32> [#uses=1]
+ %tmp.6.i = load i32, i32* @G ; <i32> [#uses=1]
%inc.i = add i32 %tmp.6.i, 1 ; <i32> [#uses=1]
store i32 %inc.i, i32* @G
br label %loopentry.i
@@ -30,7 +30,7 @@ Out.i: ; preds = %loopentry.i
store i32 0, i32* @G
br label %ExitBlock.i
ExitBlock.i: ; preds = %Out.i, %endif.0.i
- %tmp.7.i = load i32* @G ; <i32> [#uses=1]
+ %tmp.7.i = load i32, i32* @G ; <i32> [#uses=1]
ret i32 %tmp.7.i
}
diff --git a/llvm/test/Transforms/LoopSimplify/ashr-crash.ll b/llvm/test/Transforms/LoopSimplify/ashr-crash.ll
index c58903d49d5..b5cc1449cec 100644
--- a/llvm/test/Transforms/LoopSimplify/ashr-crash.ll
+++ b/llvm/test/Transforms/LoopSimplify/ashr-crash.ll
@@ -51,10 +51,10 @@ for.cond1: ; preds = %for.cond, %for.body
br i1 %cmp2, label %for.body3, label %for.inc7
for.body3: ; preds = %for.cond1
- %0 = load i32* @c, align 4
+ %0 = load i32, i32* @c, align 4
%cmp4 = icmp sge i32 %storemerge1, %0
%conv = zext i1 %cmp4 to i32
- %1 = load i32* @d, align 4
+ %1 = load i32, i32* @d, align 4
%add = add nsw i32 %conv, %1
%sext = shl i32 %add, 16
%conv6 = ashr exact i32 %sext, 16
@@ -63,7 +63,7 @@ for.body3: ; preds = %for.cond1
br label %for.cond1
for.inc7: ; preds = %for.cond1
- %2 = load i32* @d, align 4
+ %2 = load i32, i32* @d, align 4
%inc8 = add nsw i32 %2, 1
br label %for.cond
diff --git a/llvm/test/Transforms/LoopSimplify/merge-exits.ll b/llvm/test/Transforms/LoopSimplify/merge-exits.ll
index 0e1f0be87ce..5cdf8148778 100644
--- a/llvm/test/Transforms/LoopSimplify/merge-exits.ll
+++ b/llvm/test/Transforms/LoopSimplify/merge-exits.ll
@@ -15,18 +15,18 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define float @test1(float* %pTmp1, float* %peakWeight, i32 %bandEdgeIndex) nounwind {
entry:
- %t0 = load float* %peakWeight, align 4
+ %t0 = load float, float* %peakWeight, align 4
br label %bb1
bb: ; preds = %bb2
%t1 = sext i32 %hiPart.0 to i64
%t2 = getelementptr float, float* %pTmp1, i64 %t1
- %t3 = load float* %t2, align 4
+ %t3 = load float, float* %t2, align 4
%t4 = fadd float %t3, %distERBhi.0
%t5 = add i32 %hiPart.0, 1
%t6 = sext i32 %t5 to i64
%t7 = getelementptr float, float* %peakWeight, i64 %t6
- %t8 = load float* %t7, align 4
+ %t8 = load float, float* %t7, align 4
%t9 = fadd float %t8, %peakCount.0
br label %bb1
diff --git a/llvm/test/Transforms/LoopSimplify/phi-node-simplify.ll b/llvm/test/Transforms/LoopSimplify/phi-node-simplify.ll
index 52b7aa58aef..676f467e1b4 100644
--- a/llvm/test/Transforms/LoopSimplify/phi-node-simplify.ll
+++ b/llvm/test/Transforms/LoopSimplify/phi-node-simplify.ll
@@ -28,13 +28,13 @@ no_exit.2: ; preds = %loopexit.2, %no_exit.2, %loopentry.1
%b.1.4 = getelementptr i32, i32* %b.1.4.ph, i64 %gep.upgrd.3 ; <i32*> [#uses=1]
%inc.0.rec = add i32 %b.1.4.rec, 1 ; <i32> [#uses=2]
%inc.0 = getelementptr i32, i32* %a.0.4.ph, i32 %inc.0.rec ; <i32*> [#uses=2]
- %tmp.13 = load i32* %a.0.4 ; <i32> [#uses=1]
+ %tmp.13 = load i32, i32* %a.0.4 ; <i32> [#uses=1]
%inc.1 = getelementptr i32, i32* %b.1.4.ph, i32 %inc.0.rec ; <i32*> [#uses=1]
- %tmp.15 = load i32* %b.1.4 ; <i32> [#uses=1]
- %tmp.18 = load i32* %c.2.4 ; <i32> [#uses=1]
+ %tmp.15 = load i32, i32* %b.1.4 ; <i32> [#uses=1]
+ %tmp.18 = load i32, i32* %c.2.4 ; <i32> [#uses=1]
%tmp.16 = mul i32 %tmp.15, %tmp.13 ; <i32> [#uses=1]
%tmp.19 = mul i32 %tmp.16, %tmp.18 ; <i32> [#uses=1]
- %tmp.20 = load i32* @Z ; <i32> [#uses=1]
+ %tmp.20 = load i32, i32* @Z ; <i32> [#uses=1]
%tmp.21 = add i32 %tmp.19, %tmp.20 ; <i32> [#uses=1]
store i32 %tmp.21, i32* @Z
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
diff --git a/llvm/test/Transforms/LoopSimplify/preserve-scev.ll b/llvm/test/Transforms/LoopSimplify/preserve-scev.ll
index 89626b2af51..bc6d35c3d72 100644
--- a/llvm/test/Transforms/LoopSimplify/preserve-scev.ll
+++ b/llvm/test/Transforms/LoopSimplify/preserve-scev.ll
@@ -15,7 +15,7 @@ for.cond: ; preds = %if.then5, %if.end,
%0 = phi i32 [ 0, %entry ], [ %add, %if.end ], [ %add, %if.then5 ]
%add = add i32 %0, 1
%cmp = icmp slt i32 %0, 1
- %tmp1 = load i32* @maxStat, align 4
+ %tmp1 = load i32, i32* @maxStat, align 4
br i1 %cmp, label %for.body, label %for.cond14.preheader
for.cond14.preheader: ; preds = %for.cond
@@ -39,7 +39,7 @@ for.body18: ; preds = %for.body18, %for.co
%i13.027 = phi i32 [ %1, %for.body18 ], [ 0, %for.cond14.preheader ]
call void @foo() nounwind
%1 = add nsw i32 %i13.027, 1
- %tmp16 = load i32* @maxStat, align 4
+ %tmp16 = load i32, i32* @maxStat, align 4
%cmp17 = icmp slt i32 %1, %tmp16
br i1 %cmp17, label %for.body18, label %return
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2005-08-15-AddRecIV.ll b/llvm/test/Transforms/LoopStrengthReduce/2005-08-15-AddRecIV.ll
index f13483cfd56..7ee1e63ced0 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2005-08-15-AddRecIV.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2005-08-15-AddRecIV.ll
@@ -48,7 +48,7 @@ loopexit.3: ; preds = %loopentry.3
loopentry.4: ; preds = %loopentry.4, %loopexit.3
%indvar340 = phi i32 [ 0, %loopexit.3 ], [ %indvar.next341, %loopentry.4 ] ; <i32> [#uses=2]
%tmp. = add i32 %indvar340, %indvar342 ; <i32> [#uses=1]
- %tmp.526 = load i32** null ; <i32*> [#uses=1]
+ %tmp.526 = load i32*, i32** null ; <i32*> [#uses=1]
%gep.upgrd.1 = zext i32 %tmp. to i64 ; <i64> [#uses=1]
%tmp.528 = getelementptr i32, i32* %tmp.526, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
store i32 0, i32* %tmp.528
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2005-08-17-OutOfLoopVariant.ll b/llvm/test/Transforms/LoopStrengthReduce/2005-08-17-OutOfLoopVariant.ll
index f1c523ae6c6..3e52dff2a87 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2005-08-17-OutOfLoopVariant.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2005-08-17-OutOfLoopVariant.ll
@@ -6,7 +6,7 @@ loopentry.1.outer:
br label %loopentry.1
loopentry.1: ; preds = %loopentry.1, %loopentry.1.outer
%i.3 = phi i32 [ 0, %loopentry.1.outer ], [ %i.3.be, %loopentry.1 ] ; <i32> [#uses=2]
- %tmp.390 = load i32* null ; <i32> [#uses=1]
+ %tmp.390 = load i32, i32* null ; <i32> [#uses=1]
%tmp.392 = mul i32 %tmp.390, %j.2.1.ph ; <i32> [#uses=1]
%tmp.394 = add i32 %tmp.392, %i.3 ; <i32> [#uses=1]
%i.3.be = add i32 %i.3, 1 ; <i32> [#uses=1]
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll b/llvm/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll
index ce56bd31018..11b3171886e 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll
@@ -22,7 +22,7 @@ return: ; preds = %bb
define i32 @main() nounwind {
entry:
tail call void @func_1( ) nounwind
- load volatile i16* @g_3, align 2 ; <i16>:0 [#uses=1]
+ load volatile i16, i16* @g_3, align 2 ; <i16>:0 [#uses=1]
zext i16 %0 to i32 ; <i32>:1 [#uses=1]
tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), i32 %1 ) nounwind ; <i32>:2 [#uses=0]
ret i32 0
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2008-09-09-Overflow.ll b/llvm/test/Transforms/LoopStrengthReduce/2008-09-09-Overflow.ll
index 1ee6b5cdf18..5fb157b2070 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2008-09-09-Overflow.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2008-09-09-Overflow.ll
@@ -21,7 +21,7 @@ bb: ; preds = %bb, %entry
%indvar = phi i16 [ 0, %entry ], [ %indvar.next, %bb ] ; <i16> [#uses=2]
%tmp = sub i16 0, %indvar ; <i16> [#uses=1]
%tmp27 = trunc i16 %tmp to i8 ; <i8> [#uses=1]
- load i32* @g_19, align 4 ; <i32>:0 [#uses=2]
+ load i32, i32* @g_19, align 4 ; <i32>:0 [#uses=2]
add i32 %0, 1 ; <i32>:1 [#uses=1]
store i32 %1, i32* @g_19, align 4
trunc i32 %0 to i8 ; <i8>:2 [#uses=1]
@@ -40,7 +40,7 @@ return: ; preds = %bb
define i32 @main() nounwind {
entry:
tail call void @func_1( ) nounwind
- load i32* @g_19, align 4 ; <i32>:0 [#uses=1]
+ load i32, i32* @g_19, align 4 ; <i32>:0 [#uses=1]
tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), i32 %0 ) nounwind ; <i32>:1 [#uses=0]
ret i32 0
}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2009-01-13-nonconstant-stride-outside-loop.ll b/llvm/test/Transforms/LoopStrengthReduce/2009-01-13-nonconstant-stride-outside-loop.ll
index b2cf818dc45..69e2b9842d3 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2009-01-13-nonconstant-stride-outside-loop.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2009-01-13-nonconstant-stride-outside-loop.ll
@@ -26,12 +26,12 @@ bb1.i: ; preds = %bb.i2, %entry
br i1 %0, label %bb2.i3, label %nactive_heaps.exit
bb2.i3: ; preds = %bb1.i
- %1 = load %struct.obj** null, align 4 ; <%struct.obj*> [#uses=1]
+ %1 = load %struct.obj*, %struct.obj** null, align 4 ; <%struct.obj*> [#uses=1]
%2 = icmp eq %struct.obj* %1, null ; <i1> [#uses=1]
br i1 %2, label %nactive_heaps.exit, label %bb.i2
nactive_heaps.exit: ; preds = %bb2.i3, %bb1.i
- %3 = load i32* @heap_size, align 4 ; <i32> [#uses=1]
+ %3 = load i32, i32* @heap_size, align 4 ; <i32> [#uses=1]
%4 = mul i32 %3, %m.0.i ; <i32> [#uses=1]
%5 = sub i32 %4, 0 ; <i32> [#uses=1]
%6 = tail call i32 (i8*, i8*, ...)* @sprintf(i8* null, i8* getelementptr ([39 x i8]* @"\01LC85", i32 0, i32 0), i32 %m.0.i, i32 0, i32 %5, i32 0) nounwind ; <i32> [#uses=0]
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2009-04-28-no-reduce-mul.ll b/llvm/test/Transforms/LoopStrengthReduce/2009-04-28-no-reduce-mul.ll
index a0daed50e23..5b5d2cdf3a6 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2009-04-28-no-reduce-mul.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2009-04-28-no-reduce-mul.ll
@@ -33,7 +33,7 @@ bb4: ; preds = %bb2
br i1 %exitcond10, label %bb5, label %bb2
bb5: ; preds = %bb4
- %4 = load i32* getelementptr ([32 x [256 x i32]]* @table, i32 0, i32 9, i32 132), align 16 ; <i32> [#uses=1]
+ %4 = load i32, i32* getelementptr ([32 x [256 x i32]]* @table, i32 0, i32 9, i32 132), align 16 ; <i32> [#uses=1]
%5 = icmp eq i32 %4, -1116 ; <i1> [#uses=1]
br i1 %5, label %bb7, label %bb6
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2011-07-19-CritEdgeBreakCrash.ll b/llvm/test/Transforms/LoopStrengthReduce/2011-07-19-CritEdgeBreakCrash.ll
index e0f6879bffd..cf549fc38ac 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2011-07-19-CritEdgeBreakCrash.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2011-07-19-CritEdgeBreakCrash.ll
@@ -29,7 +29,7 @@ while.cond.i: ; preds = %while.body.i15795,
br i1 %boo2, label %indirectgoto, label %while.body.i15795
while.body.i15795: ; preds = %while.cond.i
- %tmp20.i = load i64* %incdec.ptr.i15793, align 8
+ %tmp20.i = load i64, i64* %incdec.ptr.i15793, align 8
%boo1 = call i1 @foo()
br i1 %boo1, label %while.cond.i, label %body_failed
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll b/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
index 950d8e20173..4388a334ddb 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
@@ -37,8 +37,8 @@ while.body: ; preds = %while.body.lr.ph, %
%B.addr.04 = phi float* [ %B, %while.body.lr.ph ], [ %add.ptr3, %while.body ]
%N.addr.03 = phi i32 [ %N, %while.body.lr.ph ], [ %sub, %while.body ]
%Sum0.02 = phi float [ 0.000000e+00, %while.body.lr.ph ], [ %add, %while.body ]
- %0 = load float* %A.addr.05, align 4
- %1 = load float* %B.addr.04, align 4
+ %0 = load float, float* %A.addr.05, align 4
+ %1 = load float, float* %B.addr.04, align 4
%mul = fmul float %0, %1
%add = fadd float %Sum0.02, %mul
%add.ptr = getelementptr inbounds float, float* %A.addr.05, i64 %idx.ext
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll b/llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll
index 8dac9829799..317b0b0b293 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll
@@ -33,7 +33,7 @@ for.body43:
%bf.459 = phi i32 [ %inc44, %for.body43 ], [ %t1, %for.body7 ]
%inc44 = add nsw i32 %bf.459, 1
%arrayidx45 = getelementptr inbounds [121 x i32], [121 x i32]* @b, i32 0, i32 %bf.459
- %t2 = load i32* %arrayidx45, align 4
+ %t2 = load i32, i32* %arrayidx45, align 4
br label %for.body43
}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2012-03-15-nopreheader.ll b/llvm/test/Transforms/LoopStrengthReduce/2012-03-15-nopreheader.ll
index 2a723c25afa..62064cb7881 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2012-03-15-nopreheader.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2012-03-15-nopreheader.ll
@@ -35,7 +35,7 @@ for.cond: ; preds = %for.inc, %lor.lhs.f
%indvar65 = phi i64 [ %indvar.next66, %for.inc ], [ 0, %lor.lhs.false184 ], [ 0, %if.end152 ]
%tmp128 = add i64 %0, %indvar65
%s.4 = getelementptr i8, i8* %cmd, i64 %tmp128
- %tmp195 = load i8* %s.4, align 1
+ %tmp195 = load i8, i8* %s.4, align 1
indirectbr i8* undef, [label %return, label %land.rhs198]
land.rhs198: ; preds = %for.cond
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2012-03-26-constexpr.ll b/llvm/test/Transforms/LoopStrengthReduce/2012-03-26-constexpr.ll
index 1baf2654807..ce6161cb1b4 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2012-03-26-constexpr.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2012-03-26-constexpr.ll
@@ -11,9 +11,9 @@ target datalayout = "e-p:64:64:64-n32:64"
define internal fastcc void @someFunction(%struct.this_structure_s.0.5* nocapture %scratch, i32 %stage, i32 %cbSize) nounwind {
entry:
%0 = getelementptr inbounds %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 4, i32 %stage
- %1 = load i8** %0, align 4
+ %1 = load i8*, i8** %0, align 4
%2 = getelementptr inbounds %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 5, i32 %stage
- %3 = load i8** %2, align 4
+ %3 = load i8*, i8** %2, align 4
%4 = getelementptr inbounds %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 2, i32 0, i32 0
%tmp11 = shl i32 %stage, 1
%tmp1325 = or i32 %tmp11, 1
@@ -31,9 +31,9 @@ __label_D_1608: ; preds = %__label_D_1608, %en
%scevgep10 = getelementptr i32, i32* %4, i32 %tmp928
%scevgep12 = getelementptr %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 9, i32 %tmp11, i32 %i.12
%scevgep14 = getelementptr %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 9, i32 %tmp1325, i32 %i.12
- %5 = load i8* %scevgep12, align 1
+ %5 = load i8, i8* %scevgep12, align 1
%6 = sext i8 %5 to i32
- %7 = load i8* %scevgep14, align 1
+ %7 = load i8, i8* %scevgep14, align 1
%8 = sext i8 %7 to i32
store i32 0, i32* %lvar_g.13, align 4
store i32 %8, i32* %scevgep, align 4
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll b/llvm/test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll
index 301c845fd4a..80095c3ac15 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll
@@ -19,10 +19,10 @@ entry:
%l_2 = alloca [1 x i32], align 4
%arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* %l_2, i64 0, i64 0
store i32 0, i32* %arrayidx, align 4
- %tmp = load i32* @g_3, align 4
+ %tmp = load i32, i32* @g_3, align 4
%idxprom = sext i32 %tmp to i64
%arrayidx1 = getelementptr inbounds [1 x i32], [1 x i32]* %l_2, i64 0, i64 %idxprom
- %tmp1 = load i32* %arrayidx1, align 4
+ %tmp1 = load i32, i32* %arrayidx1, align 4
%conv.i.i = and i32 %tmp1, 65535
%tobool.i.i.i = icmp ne i32 %tmp, 0
br label %codeRepl
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2012-07-18-LimitReassociate.ll b/llvm/test/Transforms/LoopStrengthReduce/2012-07-18-LimitReassociate.ll
index 3030d3d85d5..7cac15f0ec7 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2012-07-18-LimitReassociate.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2012-07-18-LimitReassociate.ll
@@ -77,7 +77,7 @@ bb17: ; preds = %bb26, %bb15
bb22: ; preds = %bb17
%tmp23 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 3, i64 0
- %tmp24 = load i8* %tmp23, align 1
+ %tmp24 = load i8, i8* %tmp23, align 1
%tmp25 = icmp eq i8 %tmp24, 58
br i1 %tmp25, label %bb30, label %bb26
@@ -123,7 +123,7 @@ bb43: ; preds = %bb52, %bb41
bb48: ; preds = %bb43
%tmp49 = add i64 %tmp44, %tmp37
- %tmp50 = load i8* undef, align 1
+ %tmp50 = load i8, i8* undef, align 1
%tmp51 = icmp eq i8 %tmp50, 58
br i1 %tmp51, label %bb55, label %bb52
@@ -166,11 +166,11 @@ bb68: ; preds = %bb59
bb69: ; preds = %bb68
tail call void (...)* @snork(i8* getelementptr inbounds ([52 x i8]* @global1, i64 0, i64 0), i32 2071) nounwind
- %tmp70 = load i32* getelementptr inbounds (%struct.snork* @global, i64 0, i32 2), align 4
+ %tmp70 = load i32, i32* getelementptr inbounds (%struct.snork* @global, i64 0, i32 2), align 4
unreachable
bb71: ; preds = %bb68
- %tmp72 = load i32* getelementptr inbounds (%struct.snork* @global, i64 0, i32 4), align 4
+ %tmp72 = load i32, i32* getelementptr inbounds (%struct.snork* @global, i64 0, i32 4), align 4
%tmp73 = icmp eq i32 undef, 0
br i1 %tmp73, label %bb247, label %bb74
@@ -462,7 +462,7 @@ bb221: ; preds = %bb230, %bb219
bb226: ; preds = %bb221
%tmp227 = add i64 %tmp222, %tmp216
%tmp228 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 3, i64 %tmp227
- %tmp229 = load i8* %tmp228, align 1
+ %tmp229 = load i8, i8* %tmp228, align 1
br i1 false, label %bb233, label %bb230
bb230: ; preds = %bb226
diff --git a/llvm/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll b/llvm/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll
index 2c2e0a4772f..dcd068191e1 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll
@@ -73,7 +73,7 @@ bb61: ; preds = %bb63, %bb58
bb63: ; preds = %bb61
%tmp64 = getelementptr inbounds i8, i8* %tmp3, i64 %i.0.i
- %tmp65 = load i8* %tmp64, align 1
+ %tmp65 = load i8, i8* %tmp64, align 1
%tmp67 = add i64 %i.0.i, 1
br i1 undef, label %_ZNK4llvm9StringRef4findEcm.exit.loopexit, label %bb61
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-memcpy.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-memcpy.ll
index c0ebc97b304..2120b2a3bfe 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-memcpy.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-memcpy.ll
@@ -20,7 +20,7 @@ while.body: ; preds = %while.body, %entry
%pDst.05 = phi i64* [ inttoptr (i64 6442450944 to i64*), %entry ], [ %incdec.ptr1, %while.body ]
%pSrc.04 = phi i64* [ inttoptr (i64 4294967296 to i64*), %entry ], [ %incdec.ptr, %while.body ]
%incdec.ptr = getelementptr inbounds i64, i64* %pSrc.04, i64 1
- %tmp = load volatile i64* %pSrc.04, align 8
+ %tmp = load volatile i64, i64* %pSrc.04, align 8
%incdec.ptr1 = getelementptr inbounds i64, i64* %pDst.05, i64 1
store volatile i64 %tmp, i64* %pDst.05, align 8
%sub = add i64 %len.06, -8
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll
index 217896e55c6..c877ace7abf 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll
@@ -11,7 +11,7 @@ target triple = "arm64-apple-ios"
define void @do_integer_add(i64 %iterations, i8* nocapture readonly %cookie) {
entry:
%N = bitcast i8* %cookie to i32*
- %0 = load i32* %N, align 4
+ %0 = load i32, i32* %N, align 4
%add = add nsw i32 %0, 57
%cmp56 = icmp eq i64 %iterations, 0
br i1 %cmp56, label %while.end, label %for.cond.preheader.preheader
diff --git a/llvm/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll b/llvm/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
index bc654d39f34..56ff69c977e 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
@@ -50,7 +50,7 @@ declare %s* @getstruct() nounwind
; CHECK: ldr{{.*}}lsl #2
define i32 @main() nounwind ssp {
entry:
- %v0 = load i32* @ncol, align 4
+ %v0 = load i32, i32* @ncol, align 4
%v1 = tail call i32* @getptr() nounwind
%cmp10.i = icmp eq i32 %v0, 0
br label %while.cond.outer
@@ -64,12 +64,12 @@ while.cond:
br label %while.body
while.body:
- %v3 = load i32* @ncol, align 4
+ %v3 = load i32, i32* @ncol, align 4
br label %end_of_chain
end_of_chain:
%state.i = getelementptr inbounds %s, %s* %call18, i32 0, i32 0
- %v4 = load i32** %state.i, align 4
+ %v4 = load i32*, i32** %state.i, align 4
br label %while.cond.i.i
while.cond.i.i:
@@ -80,9 +80,9 @@ while.cond.i.i:
land.rhs.i.i:
%arrayidx.i.i = getelementptr inbounds i32, i32* %v4, i32 %dec.i.i
- %v5 = load i32* %arrayidx.i.i, align 4
+ %v5 = load i32, i32* %arrayidx.i.i, align 4
%arrayidx1.i.i = getelementptr inbounds i32, i32* %v1, i32 %dec.i.i
- %v6 = load i32* %arrayidx1.i.i, align 4
+ %v6 = load i32, i32* %arrayidx1.i.i, align 4
%cmp.i.i = icmp eq i32 %v5, %v6
br i1 %cmp.i.i, label %while.cond.i.i, label %equal_data.exit.i
diff --git a/llvm/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll b/llvm/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
index 235394c741d..2ad6c2ea52d 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
@@ -18,13 +18,13 @@ entry:
loop:
%iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
%s = phi i32 [ 0, %entry ], [ %s4, %loop ]
- %v = load i32* %iv
+ %v = load i32, i32* %iv
%iv1 = getelementptr inbounds i32, i32* %iv, i32 %x
- %v1 = load i32* %iv1
+ %v1 = load i32, i32* %iv1
%iv2 = getelementptr inbounds i32, i32* %iv1, i32 %x
- %v2 = load i32* %iv2
+ %v2 = load i32, i32* %iv2
%iv3 = getelementptr inbounds i32, i32* %iv2, i32 %x
- %v3 = load i32* %iv3
+ %v3 = load i32, i32* %iv3
%s1 = add i32 %s, %v
%s2 = add i32 %s1, %v1
%s3 = add i32 %s2, %v2
@@ -52,13 +52,13 @@ entry:
loop:
%iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
%s = phi i32 [ 0, %entry ], [ %s4, %loop ]
- %v = load i32* %iv
+ %v = load i32, i32* %iv
%iv1 = getelementptr inbounds i32, i32* %iv, i32 %x
- %v1 = load i32* %iv1
+ %v1 = load i32, i32* %iv1
%iv2 = getelementptr inbounds i32, i32* %iv1, i32 %x
- %v2 = load i32* %iv2
+ %v2 = load i32, i32* %iv2
%iv3 = getelementptr inbounds i32, i32* %iv2, i32 %x
- %v3 = load i32* %iv3
+ %v3 = load i32, i32* %iv3
%s1 = add i32 %s, %v
%s2 = add i32 %s1, %v1
%s3 = add i32 %s2, %v2
@@ -103,19 +103,19 @@ for.body: ; preds = %for.body.lr.ph, %fo
%i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%res.addr.09 = phi i32* [ %res, %for.body.lr.ph ], [ %add.ptr7, %for.body ]
%0 = bitcast i8* %main.addr.011 to i32*
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%add.ptr = getelementptr inbounds i8, i8* %main.addr.011, i32 %main_stride
%2 = bitcast i8* %add.ptr to i32*
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%add.ptr1 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr.sum
%4 = bitcast i8* %add.ptr1 to i32*
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%add.ptr2 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr1.sum
%6 = bitcast i8* %add.ptr2 to i32*
- %7 = load i32* %6, align 4
+ %7 = load i32, i32* %6, align 4
%add.ptr3 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr4.sum
%8 = bitcast i8* %add.ptr3 to i32*
- %9 = load i32* %8, align 4
+ %9 = load i32, i32* %8, align 4
%add = add i32 %3, %1
%add4 = add i32 %add, %5
%add5 = add i32 %add4, %7
@@ -147,10 +147,10 @@ entry:
for.body: ; preds = %for.body, %entry
%i.07 = phi i32 [ 0, %entry ], [ %inc.3, %for.body ]
%arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.07
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
%conv5 = zext i8 %0 to i32
%arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.07
- %1 = load i8* %arrayidx1, align 1
+ %1 = load i8, i8* %arrayidx1, align 1
%conv26 = zext i8 %1 to i32
%add = add nsw i32 %conv26, %conv5
%conv3 = trunc i32 %add to i8
@@ -158,10 +158,10 @@ for.body: ; preds = %for.body, %entry
store i8 %conv3, i8* %arrayidx4, align 1
%inc1 = or i32 %i.07, 1
%arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %inc1
- %2 = load i8* %arrayidx.1, align 1
+ %2 = load i8, i8* %arrayidx.1, align 1
%conv5.1 = zext i8 %2 to i32
%arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %inc1
- %3 = load i8* %arrayidx1.1, align 1
+ %3 = load i8, i8* %arrayidx1.1, align 1
%conv26.1 = zext i8 %3 to i32
%add.1 = add nsw i32 %conv26.1, %conv5.1
%conv3.1 = trunc i32 %add.1 to i8
@@ -169,10 +169,10 @@ for.body: ; preds = %for.body, %entry
store i8 %conv3.1, i8* %arrayidx4.1, align 1
%inc.12 = or i32 %i.07, 2
%arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %inc.12
- %4 = load i8* %arrayidx.2, align 1
+ %4 = load i8, i8* %arrayidx.2, align 1
%conv5.2 = zext i8 %4 to i32
%arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %inc.12
- %5 = load i8* %arrayidx1.2, align 1
+ %5 = load i8, i8* %arrayidx1.2, align 1
%conv26.2 = zext i8 %5 to i32
%add.2 = add nsw i32 %conv26.2, %conv5.2
%conv3.2 = trunc i32 %add.2 to i8
@@ -180,10 +180,10 @@ for.body: ; preds = %for.body, %entry
store i8 %conv3.2, i8* %arrayidx4.2, align 1
%inc.23 = or i32 %i.07, 3
%arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %inc.23
- %6 = load i8* %arrayidx.3, align 1
+ %6 = load i8, i8* %arrayidx.3, align 1
%conv5.3 = zext i8 %6 to i32
%arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %inc.23
- %7 = load i8* %arrayidx1.3, align 1
+ %7 = load i8, i8* %arrayidx1.3, align 1
%conv26.3 = zext i8 %7 to i32
%add.3 = add nsw i32 %conv26.3, %conv5.3
%conv3.3 = trunc i32 %add.3 to i8
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/2011-12-04-loserreg.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/2011-12-04-loserreg.ll
index 091e76f505c..862fff29cef 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/2011-12-04-loserreg.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/2011-12-04-loserreg.ll
@@ -30,26 +30,26 @@ for.body: ; preds = %entry, %for.body
%dp.036 = phi i32* [ %add.ptr, %for.body ], [ %destrow, %entry ]
%p.035 = phi float* [ %incdec.ptr4, %for.body ], [ %srcrow, %entry ]
%incdec.ptr = getelementptr inbounds float, float* %p.035, i64 1
- %0 = load float* %incdec.ptr, align 4
+ %0 = load float, float* %incdec.ptr, align 4
%incdec.ptr2 = getelementptr inbounds float, float* %p.035, i64 2
- %1 = load float* %incdec.ptr2, align 4
+ %1 = load float, float* %incdec.ptr2, align 4
%incdec.ptr3 = getelementptr inbounds float, float* %p.035, i64 3
- %2 = load float* %incdec.ptr3, align 4
+ %2 = load float, float* %incdec.ptr3, align 4
%incdec.ptr4 = getelementptr inbounds float, float* %p.035, i64 4
- %3 = load float* %incdec.ptr4, align 4
- %4 = load i32* %dp.036, align 4
+ %3 = load float, float* %incdec.ptr4, align 4
+ %4 = load i32, i32* %dp.036, align 4
%conv5 = fptoui float %0 to i32
%or = or i32 %4, %conv5
%arrayidx6 = getelementptr inbounds i32, i32* %dp.036, i64 1
- %5 = load i32* %arrayidx6, align 4
+ %5 = load i32, i32* %arrayidx6, align 4
%conv7 = fptoui float %1 to i32
%or8 = or i32 %5, %conv7
%arrayidx9 = getelementptr inbounds i32, i32* %dp.036, i64 2
- %6 = load i32* %arrayidx9, align 4
+ %6 = load i32, i32* %arrayidx9, align 4
%conv10 = fptoui float %2 to i32
%or11 = or i32 %6, %conv10
%arrayidx12 = getelementptr inbounds i32, i32* %dp.036, i64 3
- %7 = load i32* %arrayidx12, align 4
+ %7 = load i32, i32* %arrayidx12, align 4
%conv13 = fptoui float %3 to i32
%or14 = or i32 %7, %conv13
store i32 %or, i32* %dp.036, align 4
@@ -77,8 +77,8 @@ for.body23: ; preds = %for.body23, %for.bo
%dp.132 = phi i32* [ %add.ptr, %for.body23.lr.ph ], [ %incdec.ptr28, %for.body23 ]
%p.131 = phi float* [ %incdec.ptr4, %for.body23.lr.ph ], [ %incdec.ptr24, %for.body23 ]
%incdec.ptr24 = getelementptr inbounds float, float* %p.131, i64 1
- %9 = load float* %incdec.ptr24, align 4
- %10 = load i32* %dp.132, align 4
+ %9 = load float, float* %incdec.ptr24, align 4
+ %10 = load i32, i32* %dp.132, align 4
%conv25 = fptoui float %9 to i32
%or26 = or i32 %10, %conv25
store i32 %or26, i32* %dp.132, align 4
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll
index 33f2a658947..b52700f4728 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll
@@ -28,7 +28,7 @@ for.body.i: ; preds = %for.body.i, %while.
%indvars.iv.i = phi i64 [ 0, %while.body.i ], [ %indvars.iv.next.i, %for.body.i ]
%add.ptr.sum = add i64 %add.ptr.sum.i, %indvars.iv.i
%arrayidx22.i = getelementptr inbounds i8, i8* %base, i64 %add.ptr.sum
- %0 = load i8* %arrayidx22.i, align 1
+ %0 = load i8, i8* %arrayidx22.i, align 1
%indvars.iv.next.i = add i64 %indvars.iv.i, 1
%cmp = call i1 @check() nounwind
br i1 %cmp, label %for.end.i, label %for.body.i
@@ -69,14 +69,14 @@ entry:
for.cond468: ; preds = %if.then477, %entry
%indvars.iv1163 = phi i64 [ %indvars.iv.next1164, %if.then477 ], [ 1, %entry ]
%k.0.in = phi i32* [ %last, %if.then477 ], [ getelementptr inbounds ([5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 0, i32 2), %entry ]
- %k.0 = load i32* %k.0.in, align 4
+ %k.0 = load i32, i32* %k.0.in, align 4
%0 = trunc i64 %indvars.iv1163 to i32
%cmp469 = icmp slt i32 %0, %n
br i1 %cmp469, label %for.body471, label %for.inc498
for.body471: ; preds = %for.cond468
%first = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 %indvars.iv1163, i32 1
- %1 = load i32* %first, align 4
+ %1 = load i32, i32* %first, align 4
br i1 undef, label %if.then477, label %for.inc498
if.then477: ; preds = %for.body471
@@ -119,7 +119,7 @@ for.body3.us.i: ; preds = %meshBB, %for.body3.
%1 = trunc i64 %0 to i32
%mul.i.us.i = mul nsw i32 0, %1
%arrayidx5.us.i = getelementptr inbounds double, double* %u, i64 %indvars.iv.i.SV.phi
- %2 = load double* %arrayidx5.us.i, align 8
+ %2 = load double, double* %arrayidx5.us.i, align 8
%indvars.iv.next.i = add i64 %indvars.iv.i.SV.phi, 1
br i1 undef, label %for.inc8.us.i, label %meshBB
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index c01cab0e043..c1099b23dcf 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -29,13 +29,13 @@ entry:
loop:
%iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
%s = phi i32 [ 0, %entry ], [ %s4, %loop ]
- %v = load i32* %iv
+ %v = load i32, i32* %iv
%iv1 = getelementptr inbounds i32, i32* %iv, i32 %x
- %v1 = load i32* %iv1
+ %v1 = load i32, i32* %iv1
%iv2 = getelementptr inbounds i32, i32* %iv1, i32 %x
- %v2 = load i32* %iv2
+ %v2 = load i32, i32* %iv2
%iv3 = getelementptr inbounds i32, i32* %iv2, i32 %x
- %v3 = load i32* %iv3
+ %v3 = load i32, i32* %iv3
%s1 = add i32 %s, %v
%s2 = add i32 %s1, %v1
%s3 = add i32 %s2, %v2
@@ -71,13 +71,13 @@ entry:
loop:
%iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
%s = phi i32 [ 0, %entry ], [ %s4, %loop ]
- %v = load i32* %iv
+ %v = load i32, i32* %iv
%iv1 = getelementptr inbounds i32, i32* %iv, i32 %x
- %v1 = load i32* %iv1
+ %v1 = load i32, i32* %iv1
%iv2 = getelementptr inbounds i32, i32* %iv1, i32 %x
- %v2 = load i32* %iv2
+ %v2 = load i32, i32* %iv2
%iv3 = getelementptr inbounds i32, i32* %iv2, i32 %x
- %v3 = load i32* %iv3
+ %v3 = load i32, i32* %iv3
%s1 = add i32 %s, %v
%s2 = add i32 %s1, %v1
%s3 = add i32 %s2, %v2
@@ -126,19 +126,19 @@ for.body: ; preds = %for.body.lr.ph, %fo
%i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%res.addr.09 = phi i32* [ %res, %for.body.lr.ph ], [ %add.ptr7, %for.body ]
%0 = bitcast i8* %main.addr.011 to i32*
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%add.ptr = getelementptr inbounds i8, i8* %main.addr.011, i32 %main_stride
%2 = bitcast i8* %add.ptr to i32*
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%add.ptr1 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr.sum
%4 = bitcast i8* %add.ptr1 to i32*
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%add.ptr2 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr1.sum
%6 = bitcast i8* %add.ptr2 to i32*
- %7 = load i32* %6, align 4
+ %7 = load i32, i32* %6, align 4
%add.ptr3 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr4.sum
%8 = bitcast i8* %add.ptr3 to i32*
- %9 = load i32* %8, align 4
+ %9 = load i32, i32* %8, align 4
%add = add i32 %3, %1
%add4 = add i32 %add, %5
%add5 = add i32 %add4, %7
@@ -173,10 +173,10 @@ entry:
for.body: ; preds = %for.body, %entry
%i.07 = phi i32 [ 0, %entry ], [ %inc.3, %for.body ]
%arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.07
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
%conv5 = zext i8 %0 to i32
%arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.07
- %1 = load i8* %arrayidx1, align 1
+ %1 = load i8, i8* %arrayidx1, align 1
%conv26 = zext i8 %1 to i32
%add = add nsw i32 %conv26, %conv5
%conv3 = trunc i32 %add to i8
@@ -184,10 +184,10 @@ for.body: ; preds = %for.body, %entry
store i8 %conv3, i8* %arrayidx4, align 1
%inc1 = or i32 %i.07, 1
%arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %inc1
- %2 = load i8* %arrayidx.1, align 1
+ %2 = load i8, i8* %arrayidx.1, align 1
%conv5.1 = zext i8 %2 to i32
%arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %inc1
- %3 = load i8* %arrayidx1.1, align 1
+ %3 = load i8, i8* %arrayidx1.1, align 1
%conv26.1 = zext i8 %3 to i32
%add.1 = add nsw i32 %conv26.1, %conv5.1
%conv3.1 = trunc i32 %add.1 to i8
@@ -195,10 +195,10 @@ for.body: ; preds = %for.body, %entry
store i8 %conv3.1, i8* %arrayidx4.1, align 1
%inc.12 = or i32 %i.07, 2
%arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %inc.12
- %4 = load i8* %arrayidx.2, align 1
+ %4 = load i8, i8* %arrayidx.2, align 1
%conv5.2 = zext i8 %4 to i32
%arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %inc.12
- %5 = load i8* %arrayidx1.2, align 1
+ %5 = load i8, i8* %arrayidx1.2, align 1
%conv26.2 = zext i8 %5 to i32
%add.2 = add nsw i32 %conv26.2, %conv5.2
%conv3.2 = trunc i32 %add.2 to i8
@@ -206,10 +206,10 @@ for.body: ; preds = %for.body, %entry
store i8 %conv3.2, i8* %arrayidx4.2, align 1
%inc.23 = or i32 %i.07, 3
%arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %inc.23
- %6 = load i8* %arrayidx.3, align 1
+ %6 = load i8, i8* %arrayidx.3, align 1
%conv5.3 = zext i8 %6 to i32
%arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %inc.23
- %7 = load i8* %arrayidx1.3, align 1
+ %7 = load i8, i8* %arrayidx1.3, align 1
%conv26.3 = zext i8 %7 to i32
%add.3 = add nsw i32 %conv26.3, %conv5.3
%conv3.3 = trunc i32 %add.3 to i8
@@ -291,7 +291,7 @@ for.body82.us:
%dest = phi i8* [ %dest0, %entry ], [ %incdec.ptr91.us, %for.body82.us ]
%source = phi i8* [ %source0, %entry ], [ %add.ptr83.us, %for.body82.us ]
%0 = bitcast i8* %source to i32*
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%trunc = trunc i32 %1 to i8
%add.ptr83.us = getelementptr inbounds i8, i8* %source, i32 4
%incdec.ptr91.us = getelementptr inbounds i8, i8* %dest, i32 1
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-stress-X86.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-stress-X86.ll
index f4807c5ba0a..7925bf01020 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-stress-X86.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-stress-X86.ll
@@ -33,10 +33,10 @@ entry:
for.body: ; preds = %entry, %for.body.3
%i.09 = phi i32 [ %add5.3, %for.body.3 ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.09
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
%conv6 = zext i8 %0 to i32
%arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.09
- %1 = load i8* %arrayidx1, align 1
+ %1 = load i8, i8* %arrayidx1, align 1
%conv27 = zext i8 %1 to i32
%add = add nsw i32 %conv27, %conv6
%conv3 = trunc i32 %add to i8
@@ -51,10 +51,10 @@ for.end: ; preds = %for.body, %for.body
for.body.1: ; preds = %for.body
%arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %add5
- %2 = load i8* %arrayidx.1, align 1
+ %2 = load i8, i8* %arrayidx.1, align 1
%conv6.1 = zext i8 %2 to i32
%arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %add5
- %3 = load i8* %arrayidx1.1, align 1
+ %3 = load i8, i8* %arrayidx1.1, align 1
%conv27.1 = zext i8 %3 to i32
%add.1 = add nsw i32 %conv27.1, %conv6.1
%conv3.1 = trunc i32 %add.1 to i8
@@ -66,10 +66,10 @@ for.body.1: ; preds = %for.body
for.body.2: ; preds = %for.body.1
%arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %add5.1
- %4 = load i8* %arrayidx.2, align 1
+ %4 = load i8, i8* %arrayidx.2, align 1
%conv6.2 = zext i8 %4 to i32
%arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %add5.1
- %5 = load i8* %arrayidx1.2, align 1
+ %5 = load i8, i8* %arrayidx1.2, align 1
%conv27.2 = zext i8 %5 to i32
%add.2 = add nsw i32 %conv27.2, %conv6.2
%conv3.2 = trunc i32 %add.2 to i8
@@ -81,10 +81,10 @@ for.body.2: ; preds = %for.body.1
for.body.3: ; preds = %for.body.2
%arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %add5.2
- %6 = load i8* %arrayidx.3, align 1
+ %6 = load i8, i8* %arrayidx.3, align 1
%conv6.3 = zext i8 %6 to i32
%arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %add5.2
- %7 = load i8* %arrayidx1.3, align 1
+ %7 = load i8, i8* %arrayidx1.3, align 1
%conv27.3 = zext i8 %7 to i32
%add.3 = add nsw i32 %conv27.3, %conv6.3
%conv3.3 = trunc i32 %add.3 to i8
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/no_superflous_induction_vars.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/no_superflous_induction_vars.ll
index 20af5487d10..a6613c53d78 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/no_superflous_induction_vars.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/no_superflous_induction_vars.ll
@@ -40,7 +40,7 @@ vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ %0, %for.body14.lr.ph ]
%4 = getelementptr inbounds i8, i8* %rowsptr, i64 %index
%5 = bitcast i8* %4 to <4 x i8>*
- %wide.load = load <4 x i8>* %5, align 1
+ %wide.load = load <4 x i8>, <4 x i8>* %5, align 1
%index.next = add i64 %index, 8
%6 = icmp eq i64 %index.next, %end.idx.rnd.down
br i1 %6, label %for.end24, label %vector.body
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll
index e7ebaa8ea61..2be2762c508 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll
@@ -25,9 +25,9 @@ target triple = "x86_64-apple-macosx10.9.0"
define i32 @main() #0 {
entry:
store i8 0, i8* @h, align 1
- %0 = load i32* @j, align 4
+ %0 = load i32, i32* @j, align 4
%tobool.i = icmp eq i32 %0, 0
- %1 = load i32* @d, align 4
+ %1 = load i32, i32* @d, align 4
%cmp3 = icmp sgt i32 %1, -1
%.lobit = lshr i32 %1, 31
%.lobit.not = xor i32 %.lobit, 1
diff --git a/llvm/test/Transforms/LoopStrengthReduce/addrec-gep-address-space.ll b/llvm/test/Transforms/LoopStrengthReduce/addrec-gep-address-space.ll
index 834b0407722..5650f81a308 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/addrec-gep-address-space.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/addrec-gep-address-space.ll
@@ -1,6 +1,6 @@
; RUN: opt < %s -loop-reduce -S | FileCheck %s
; CHECK: bb1:
-; CHECK: load double addrspace(1)* [[IV:%[^,]+]]
+; CHECK: load double, double addrspace(1)* [[IV:%[^,]+]]
; CHECK: store double {{.*}}, double addrspace(1)* [[IV]]
; CHECK-NOT: cast
@@ -37,7 +37,7 @@ bb1: ; preds = %bb2, %bb.nph
%tmp4 = add i64 %j.01, %tmp2 ; <i64> [#uses=1]
%z0 = add i64 %tmp3, 5203
%tmp5 = getelementptr double, double addrspace(1)* %p, i64 %z0 ; <double addrspace(1)*> [#uses=1]
- %tmp6 = load double addrspace(1)* %tmp5, align 8 ; <double> [#uses=1]
+ %tmp6 = load double, double addrspace(1)* %tmp5, align 8 ; <double> [#uses=1]
%tmp7 = fdiv double %tmp6, 2.100000e+00 ; <double> [#uses=1]
%z1 = add i64 %tmp4, 5203
%tmp8 = getelementptr double, double addrspace(1)* %p, i64 %z1 ; <double addrspace(1)*> [#uses=1]
diff --git a/llvm/test/Transforms/LoopStrengthReduce/addrec-gep.ll b/llvm/test/Transforms/LoopStrengthReduce/addrec-gep.ll
index 114a181a359..6919a33d8a2 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/addrec-gep.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/addrec-gep.ll
@@ -1,6 +1,6 @@
; RUN: opt < %s -loop-reduce -S | FileCheck %s
; CHECK: bb1:
-; CHECK: load double* [[IV:%[^,]+]]
+; CHECK: load double, double* [[IV:%[^,]+]]
; CHECK: store double {{.*}}, double* [[IV]]
; CHECK: getelementptr double, double*
; CHECK-NOT: cast
@@ -31,7 +31,7 @@ bb1: ; preds = %bb2, %bb.nph
%tmp4 = add i64 %j.01, %tmp2 ; <i64> [#uses=1]
%z0 = add i64 %tmp3, 5203
%tmp5 = getelementptr double, double* %p, i64 %z0 ; <double*> [#uses=1]
- %tmp6 = load double* %tmp5, align 8 ; <double> [#uses=1]
+ %tmp6 = load double, double* %tmp5, align 8 ; <double> [#uses=1]
%tmp7 = fdiv double %tmp6, 2.100000e+00 ; <double> [#uses=1]
%z1 = add i64 %tmp4, 5203
%tmp8 = getelementptr double, double* %p, i64 %z1 ; <double*> [#uses=1]
diff --git a/llvm/test/Transforms/LoopStrengthReduce/address-space-loop.ll b/llvm/test/Transforms/LoopStrengthReduce/address-space-loop.ll
index 3ae5f325a7d..57ba665b5f4 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/address-space-loop.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/address-space-loop.ll
@@ -36,7 +36,7 @@ bb10: ; preds = %bb9
; CHECK: bb14:
; CHECK-NEXT: store i8 undef, i8 addrspace(1)* [[SCEVGEP]]
-; CHECK-NEXT: %t6 = load float addrspace(1)* addrspace(1)* undef
+; CHECK-NEXT: %t6 = load float addrspace(1)*, float addrspace(1)* addrspace(1)* undef
; Fold %t3's add within the address.
; CHECK-NEXT: [[SCEVGEP1:%[^ ]+]] = getelementptr float, float addrspace(1)* %t6, i16 4
; CHECK-NEXT: [[SCEVGEP2:%[^ ]+]] = bitcast float addrspace(1)* [[SCEVGEP1]] to i8 addrspace(1)*
@@ -47,7 +47,7 @@ bb10: ; preds = %bb9
bb14: ; preds = %bb14, %bb10
%t2 = getelementptr inbounds i8, i8 addrspace(1)* undef, i16 %t4 ; <i8*> [#uses=1]
store i8 undef, i8 addrspace(1)* %t2
- %t6 = load float addrspace(1)* addrspace(1)* undef
+ %t6 = load float addrspace(1)*, float addrspace(1)* addrspace(1)* undef
%t8 = bitcast float addrspace(1)* %t6 to i8 addrspace(1)* ; <i8*> [#uses=1]
%t9 = getelementptr inbounds i8, i8 addrspace(1)* %t8, i16 %t3 ; <i8*> [#uses=1]
store i8 undef, i8 addrspace(1)* %t9
diff --git a/llvm/test/Transforms/LoopStrengthReduce/dont_reverse.ll b/llvm/test/Transforms/LoopStrengthReduce/dont_reverse.ll
index d65213d06af..4809def86ae 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/dont_reverse.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/dont_reverse.ll
@@ -11,7 +11,7 @@ entry:
bb8:
%indvar34 = phi i32 [ 0, %entry ], [ %indvar.next35, %bb8 ]
%indvar3451 = trunc i32 %indvar34 to i2
- %xmp4344 = load i2* %p
+ %xmp4344 = load i2, i2* %p
%xmp104 = icmp eq i2 %indvar3451, %xmp4344
%indvar.next35 = add i32 %indvar34, 1
br i1 %xmp104, label %bb10, label %bb8
diff --git a/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll b/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
index 3cffa65d6fb..092b274bfc0 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
@@ -35,7 +35,7 @@ do.body: ; preds = %do.body, %entry
%div = udiv i32 %i.addr.0, 10
%idxprom = zext i32 %rem to i64
%arrayidx = getelementptr inbounds [37 x i8], [37 x i8]* @.str, i64 0, i64 %idxprom
- %tmp5 = load i8* %arrayidx, align 1
+ %tmp5 = load i8, i8* %arrayidx, align 1
%conv = sext i8 %tmp5 to i16
store i16 %conv, i16* %incdec.ptr, align 2
%1 = icmp ugt i32 %i.addr.0, 9
@@ -59,9 +59,9 @@ do.end: ; preds = %do.body
br i1 %cmp2740, label %for.end, label %for.body.lr.ph
for.body.lr.ph: ; preds = %do.end
- %tmp16 = load i32* %mLength, align 4
+ %tmp16 = load i32, i32* %mLength, align 4
%mBegin = getelementptr inbounds %struct.Vector2, %struct.Vector2* %result, i64 0, i32 0
- %tmp14 = load i16** %mBegin, align 8
+ %tmp14 = load i16*, i16** %mBegin, align 8
%tmp48 = zext i32 %tmp16 to i64
br label %for.body
@@ -73,7 +73,7 @@ for.body: ; preds = %for.body, %for.body
%incdec.ptr32 = getelementptr [33 x i16], [33 x i16]* %buffer, i64 1, i64 %tmp47
%tmp49 = add i64 %tmp48, %indvar
%dst.041 = getelementptr i16, i16* %tmp14, i64 %tmp49
- %tmp29 = load i16* %p.042, align 2
+ %tmp29 = load i16, i16* %p.042, align 2
store i16 %tmp29, i16* %dst.041, align 2
%cmp27 = icmp eq i16* %incdec.ptr32, %add.ptr22
%indvar.next = add i64 %indvar, 1
@@ -83,7 +83,7 @@ for.end.loopexit: ; preds = %for.body
br label %for.end
for.end: ; preds = %for.end.loopexit, %do.end
- %tmp38 = load i32* %mLength, align 4
+ %tmp38 = load i32, i32* %mLength, align 4
%add = add i32 %tmp38, %conv11
store i32 %add, i32* %mLength, align 4
ret void
diff --git a/llvm/test/Transforms/LoopStrengthReduce/pr12691.ll b/llvm/test/Transforms/LoopStrengthReduce/pr12691.ll
index 8399434343e..dfc1343912c 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/pr12691.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/pr12691.ll
@@ -13,13 +13,13 @@ for.cond:
br i1 %tobool, label %for.cond, label %for.end
for.end:
-; CHECK: %tmp1 = load i32* @d, align 4
-; CHECK-NEXT: %tmp2 = load i32* @d, align 4
+; CHECK: %tmp1 = load i32, i32* @d, align 4
+; CHECK-NEXT: %tmp2 = load i32, i32* @d, align 4
; CHECK-NEXT: %0 = sub i32 %tmp1, %tmp2
- %tmp1 = load i32* @d, align 4
+ %tmp1 = load i32, i32* @d, align 4
%add = add nsw i32 %tmp1, %g.0
- %tmp2 = load i32* @d, align 4
+ %tmp2 = load i32, i32* @d, align 4
%tobool26 = icmp eq i32 %x, 0
br i1 %tobool26, label %for.end5, label %for.body.lr.ph
diff --git a/llvm/test/Transforms/LoopStrengthReduce/pr18165.ll b/llvm/test/Transforms/LoopStrengthReduce/pr18165.ll
index cc878c48c4b..5eb1b982cdf 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/pr18165.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/pr18165.ll
@@ -20,10 +20,10 @@ target triple = "x86_64-apple-macosx10.9.0"
; Function Attrs: nounwind optsize ssp uwtable
define i32 @main() #0 {
entry:
- %0 = load i32* getelementptr inbounds (%struct.anon* @a, i64 0, i32 0), align 4, !tbaa !1
+ %0 = load i32, i32* getelementptr inbounds (%struct.anon* @a, i64 0, i32 0), align 4, !tbaa !1
%tobool7.i = icmp eq i32 %0, 0
- %.promoted.i = load i32* getelementptr inbounds (%struct.anon* @a, i64 0, i32 2), align 4, !tbaa !6
- %f.promoted.i = load i32* @f, align 4, !tbaa !7
+ %.promoted.i = load i32, i32* getelementptr inbounds (%struct.anon* @a, i64 0, i32 2), align 4, !tbaa !6
+ %f.promoted.i = load i32, i32* @f, align 4, !tbaa !7
br label %for.body6.i.outer
for.body6.i.outer: ; preds = %entry, %lor.end.i
@@ -42,7 +42,7 @@ if.end9.i: ; preds = %for.body6.i.outer
br i1 %tobool12.i, label %lor.rhs.i, label %lor.end.i
lor.rhs.i: ; preds = %if.end9.i
- %1 = load i32* @b, align 4, !tbaa !7
+ %1 = load i32, i32* @b, align 4, !tbaa !7
%dec.i = add nsw i32 %1, -1
store i32 %dec.i, i32* @b, align 4, !tbaa !7
%tobool13.i = icmp ne i32 %1, 0
@@ -63,7 +63,7 @@ fn1.exit: ; preds = %lor.end.i
store i32 %or15.i, i32* @f, align 4, !tbaa !7
store i32 %add.i, i32* getelementptr inbounds (%struct.anon* @e, i64 0, i32 1), align 4, !tbaa !8
store i32 0, i32* @h, align 4, !tbaa !7
- %3 = load i32* @b, align 4, !tbaa !7
+ %3 = load i32, i32* @b, align 4, !tbaa !7
%call1 = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %3) #2
ret i32 0
}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/pr2570.ll b/llvm/test/Transforms/LoopStrengthReduce/pr2570.ll
index 7b569713a93..671ffde671f 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/pr2570.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/pr2570.ll
@@ -23,7 +23,7 @@ define i32 @func_44(i32 %p_45, i32 %p_46, i16 zeroext %p_48, i32 %p_49, i8 zero
entry:
tail call i32 @func_116( i8 zeroext 2 ) nounwind ; <i32>:0 [#uses=0]
tail call i32 @func_63( i16 signext 2 ) nounwind ; <i32>:1 [#uses=1]
- load i16* @g_39, align 2 ; <i16>:2 [#uses=1]
+ load i16, i16* @g_39, align 2 ; <i16>:2 [#uses=1]
tail call i32 @func_63( i16 signext %2 ) nounwind ; <i32>:3 [#uses=1]
trunc i32 %3 to i16 ; <i16>:4 [#uses=1]
and i16 %4, 1 ; <i16>:5 [#uses=1]
@@ -32,10 +32,10 @@ entry:
tail call i32 @func_74( i16 zeroext %5, i8 zeroext %6, i16 zeroext %7, i16 zeroext 0 ) nounwind ; <i32>:8 [#uses=0]
tail call i32 @func_124( i32 544824386 ) nounwind ; <i32>:9 [#uses=0]
zext i8 %p_50 to i32 ; <i32>:10 [#uses=1]
- load i32* @g_43, align 4 ; <i32>:11 [#uses=1]
+ load i32, i32* @g_43, align 4 ; <i32>:11 [#uses=1]
icmp sle i32 %10, %11 ; <i1>:12 [#uses=1]
zext i1 %12 to i32 ; <i32>:13 [#uses=2]
- load i8* @g_247, align 1 ; <i8>:14 [#uses=1]
+ load i8, i8* @g_247, align 1 ; <i8>:14 [#uses=1]
trunc i32 %p_45 to i16 ; <i16>:15 [#uses=1]
zext i8 %14 to i16 ; <i16>:16 [#uses=1]
tail call i32 @func_74( i16 zeroext %15, i8 zeroext 0, i16 zeroext %16, i16 zeroext 23618 ) nounwind ; <i32>:17 [#uses=4]
@@ -72,7 +72,7 @@ bb172: ; preds = %bb168, %bb162
icmp eq i32 %.0343, 0 ; <i1>:30 [#uses=1]
%.0341 = select i1 %30, i32 1, i32 %.0343 ; <i32> [#uses=1]
urem i32 %23, %.0341 ; <i32>:31 [#uses=1]
- load i32* @g_137, align 4 ; <i32>:32 [#uses=4]
+ load i32, i32* @g_137, align 4 ; <i32>:32 [#uses=4]
icmp slt i32 %32, 0 ; <i1>:33 [#uses=1]
br i1 %33, label %bb202, label %bb198
@@ -119,13 +119,13 @@ bb222: ; preds = %bb215, %bb205
bb223: ; preds = %bb222, %bb215
%iftmp.437.0 = phi i32 [ 0, %bb222 ], [ 1, %bb215 ] ; <i32> [#uses=1]
- load i32* @g_91, align 4 ; <i32>:55 [#uses=3]
+ load i32, i32* @g_91, align 4 ; <i32>:55 [#uses=3]
tail call i32 @func_103( i16 zeroext 4 ) nounwind ; <i32>:56 [#uses=0]
tail call i32 @func_112( i32 0, i16 zeroext -31374 ) nounwind ; <i32>:57 [#uses=0]
- load i32* @g_197, align 4 ; <i32>:58 [#uses=1]
+ load i32, i32* @g_197, align 4 ; <i32>:58 [#uses=1]
tail call i32 @func_124( i32 28156 ) nounwind ; <i32>:59 [#uses=1]
- load i32* @g_260, align 4 ; <i32>:60 [#uses=1]
- load i32* @g_43, align 4 ; <i32>:61 [#uses=1]
+ load i32, i32* @g_260, align 4 ; <i32>:60 [#uses=1]
+ load i32, i32* @g_43, align 4 ; <i32>:61 [#uses=1]
xor i32 %61, %60 ; <i32>:62 [#uses=1]
mul i32 %62, %59 ; <i32>:63 [#uses=1]
trunc i32 %63 to i8 ; <i8>:64 [#uses=1]
@@ -138,7 +138,7 @@ bb223: ; preds = %bb222, %bb215
%or.cond352 = or i1 %70, %67 ; <i1> [#uses=1]
select i1 %or.cond352, i32 0, i32 %55 ; <i32>:71 [#uses=1]
%.353 = ashr i32 %66, %71 ; <i32> [#uses=2]
- load i16* @g_221, align 2 ; <i16>:72 [#uses=1]
+ load i16, i16* @g_221, align 2 ; <i16>:72 [#uses=1]
zext i16 %72 to i32 ; <i32>:73 [#uses=1]
icmp ugt i32 %.353, 31 ; <i1>:74 [#uses=1]
select i1 %74, i32 0, i32 %.353 ; <i32>:75 [#uses=1]
@@ -146,7 +146,7 @@ bb223: ; preds = %bb222, %bb215
add i32 %.0323, %iftmp.437.0 ; <i32>:76 [#uses=1]
and i32 %48, 255 ; <i32>:77 [#uses=2]
add i32 %77, 2042556439 ; <i32>:78 [#uses=1]
- load i32* @g_207, align 4 ; <i32>:79 [#uses=2]
+ load i32, i32* @g_207, align 4 ; <i32>:79 [#uses=2]
icmp ugt i32 %79, 31 ; <i1>:80 [#uses=1]
select i1 %80, i32 0, i32 %79 ; <i32>:81 [#uses=1]
%.0320 = lshr i32 %77, %81 ; <i32> [#uses=1]
@@ -154,7 +154,7 @@ bb223: ; preds = %bb222, %bb215
zext i1 %82 to i8 ; <i8>:83 [#uses=1]
tail call i32 @func_25( i8 zeroext %83 ) nounwind ; <i32>:84 [#uses=1]
xor i32 %84, 1 ; <i32>:85 [#uses=1]
- load i32* @g_197, align 4 ; <i32>:86 [#uses=1]
+ load i32, i32* @g_197, align 4 ; <i32>:86 [#uses=1]
add i32 %86, 1 ; <i32>:87 [#uses=1]
add i32 %87, %85 ; <i32>:88 [#uses=1]
icmp ugt i32 %76, %88 ; <i1>:89 [#uses=1]
@@ -163,22 +163,22 @@ bb223: ; preds = %bb222, %bb215
bb241: ; preds = %bb223
store i16 -9, i16* @g_221, align 2
udiv i32 %p_52, 1538244727 ; <i32>:90 [#uses=1]
- load i32* @g_207, align 4 ; <i32>:91 [#uses=1]
+ load i32, i32* @g_207, align 4 ; <i32>:91 [#uses=1]
sub i32 %91, %90 ; <i32>:92 [#uses=1]
- load i32* @g_14, align 4 ; <i32>:93 [#uses=1]
+ load i32, i32* @g_14, align 4 ; <i32>:93 [#uses=1]
trunc i32 %93 to i16 ; <i16>:94 [#uses=1]
trunc i32 %p_46 to i16 ; <i16>:95 [#uses=2]
sub i16 %94, %95 ; <i16>:96 [#uses=1]
- load i32* @g_197, align 4 ; <i32>:97 [#uses=1]
+ load i32, i32* @g_197, align 4 ; <i32>:97 [#uses=1]
trunc i32 %97 to i16 ; <i16>:98 [#uses=1]
tail call i32 @func_55( i32 -346178830, i16 zeroext %98, i16 zeroext %95 ) nounwind ; <i32>:99 [#uses=0]
zext i16 %p_48 to i32 ; <i32>:100 [#uses=1]
- load i8* @g_247, align 1 ; <i8>:101 [#uses=1]
+ load i8, i8* @g_247, align 1 ; <i8>:101 [#uses=1]
zext i8 %101 to i32 ; <i32>:102 [#uses=1]
sub i32 %100, %102 ; <i32>:103 [#uses=1]
tail call i32 @func_55( i32 %103, i16 zeroext -2972, i16 zeroext %96 ) nounwind ; <i32>:104 [#uses=0]
xor i32 %92, 2968 ; <i32>:105 [#uses=1]
- load i32* @g_197, align 4 ; <i32>:106 [#uses=1]
+ load i32, i32* @g_197, align 4 ; <i32>:106 [#uses=1]
icmp ugt i32 %105, %106 ; <i1>:107 [#uses=1]
zext i1 %107 to i32 ; <i32>:108 [#uses=1]
store i32 %108, i32* @g_33, align 4
@@ -195,12 +195,12 @@ bb248: ; preds = %bb284, %bb241
bb272.thread: ; preds = %bb248
store i32 1, i32* @g_82
- load i16* @g_267, align 2 ; <i16>:111 [#uses=1]
+ load i16, i16* @g_267, align 2 ; <i16>:111 [#uses=1]
icmp eq i16 %111, 0 ; <i1>:112 [#uses=1]
br i1 %112, label %bb311.loopexit.split, label %bb268
bb255.thread: ; preds = %bb248
- load i32* @g_260, align 4 ; <i32>:113 [#uses=1]
+ load i32, i32* @g_260, align 4 ; <i32>:113 [#uses=1]
sub i32 %113, %p_52 ; <i32>:114 [#uses=1]
and i32 %114, -20753 ; <i32>:115 [#uses=1]
icmp ne i32 %115, 0 ; <i1>:116 [#uses=1]
@@ -237,7 +237,7 @@ bb284: ; preds = %bb279, %bb276, %bb255.thread
%p_49_addr.0 = phi i32 [ %p_49_addr.1.reg2mem.0, %bb279 ], [ %p_49_addr.1.reg2mem.0, %bb276 ], [ 0, %bb255.thread ] ; <i32> [#uses=1]
%p_48_addr.1 = phi i16 [ %124, %bb279 ], [ %118, %bb276 ], [ %p_48_addr.2.reg2mem.0, %bb255.thread ] ; <i16> [#uses=1]
%p_45_addr.0 = phi i32 [ %p_45_addr.1.reg2mem.0, %bb279 ], [ %p_45_addr.1.reg2mem.0, %bb276 ], [ 8, %bb255.thread ] ; <i32> [#uses=3]
- load i32* @g_43, align 4 ; <i32>:125 [#uses=1]
+ load i32, i32* @g_43, align 4 ; <i32>:125 [#uses=1]
trunc i32 %125 to i8 ; <i8>:126 [#uses=1]
tail call i32 @func_116( i8 zeroext %126 ) nounwind ; <i32>:127 [#uses=0]
lshr i32 65255, %p_45_addr.0 ; <i32>:128 [#uses=1]
@@ -245,7 +245,7 @@ bb284: ; preds = %bb279, %bb276, %bb255.thread
%.op = lshr i32 %128, 31 ; <i32> [#uses=1]
%.op.op = xor i32 %.op, 1 ; <i32> [#uses=1]
%.354..lobit.not = select i1 %129, i32 1, i32 %.op.op ; <i32> [#uses=1]
- load i16* @g_39, align 2 ; <i16>:130 [#uses=1]
+ load i16, i16* @g_39, align 2 ; <i16>:130 [#uses=1]
zext i16 %130 to i32 ; <i32>:131 [#uses=1]
icmp slt i32 %.354..lobit.not, %131 ; <i1>:132 [#uses=1]
zext i1 %132 to i32 ; <i32>:133 [#uses=1]
diff --git a/llvm/test/Transforms/LoopStrengthReduce/pr3086.ll b/llvm/test/Transforms/LoopStrengthReduce/pr3086.ll
index 085cbca4a57..187c14f900a 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/pr3086.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/pr3086.ll
@@ -10,7 +10,7 @@ entry:
br label %bb11
bb5: ; preds = %bb9
- %0 = load %struct.Lit** %storemerge, align 8 ; <%struct.Lit*> [#uses=0]
+ %0 = load %struct.Lit*, %struct.Lit** %storemerge, align 8 ; <%struct.Lit*> [#uses=0]
%indvar.next8 = add i64 %storemerge.rec, 1 ; <i64> [#uses=1]
br label %bb9
@@ -21,7 +21,7 @@ bb9: ; preds = %bb22, %bb5
br i1 %1, label %bb5, label %bb22
bb11: ; preds = %bb22, %entry
- %2 = load %struct.Cls** null, align 8 ; <%struct.Cls*> [#uses=0]
+ %2 = load %struct.Cls*, %struct.Cls** null, align 8 ; <%struct.Cls*> [#uses=0]
br label %bb22
bb22: ; preds = %bb11, %bb9
diff --git a/llvm/test/Transforms/LoopStrengthReduce/pr3399.ll b/llvm/test/Transforms/LoopStrengthReduce/pr3399.ll
index 26c5002fdec..1037768f95c 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/pr3399.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/pr3399.ll
@@ -13,7 +13,7 @@ bb: ; preds = %bb5, %bb5.thread
bb1: ; preds = %bb
%l_2.0.reg2mem.0 = sub i32 0, %indvar ; <i32> [#uses=1]
- %0 = load volatile i32* @g_53, align 4 ; <i32> [#uses=1]
+ %0 = load volatile i32, i32* @g_53, align 4 ; <i32> [#uses=1]
%1 = trunc i32 %l_2.0.reg2mem.0 to i16 ; <i16> [#uses=1]
%2 = trunc i32 %0 to i16 ; <i16> [#uses=1]
%3 = mul i16 %2, %1 ; <i16> [#uses=1]
diff --git a/llvm/test/Transforms/LoopStrengthReduce/pr3571.ll b/llvm/test/Transforms/LoopStrengthReduce/pr3571.ll
index a23e4db4970..1615a818771 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/pr3571.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/pr3571.ll
@@ -12,7 +12,7 @@ bb.i: ; preds = %_ZNK11QModelIndex7isValidEv.exit.i
_ZNK11QModelIndex7isValidEv.exit.i: ; preds = %bb.i, %entry
%result.0.i = phi i32 [ 0, %entry ], [ %indvar.next, %bb.i ] ; <i32> [#uses=2]
- %0 = load i32** null, align 4 ; <%struct.QAbstractItemDelegate*> [#uses=0]
+ %0 = load i32*, i32** null, align 4 ; <%struct.QAbstractItemDelegate*> [#uses=0]
br i1 false, label %_ZN18qdesigner_internalL5levelEP18QAbstractItemModelRK11QModelIndex.exit, label %bb.i
_ZN18qdesigner_internalL5levelEP18QAbstractItemModelRK11QModelIndex.exit: ; preds = %_ZNK11QModelIndex7isValidEv.exit.i
diff --git a/llvm/test/Transforms/LoopStrengthReduce/share_code_in_preheader.ll b/llvm/test/Transforms/LoopStrengthReduce/share_code_in_preheader.ll
index 22f5c5082cf..1035ce19ff1 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/share_code_in_preheader.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/share_code_in_preheader.ll
@@ -13,9 +13,9 @@ no_exit: ; preds = %no_exit, %entry
%N_addr.0.0 = sub i32 %N.s, %indvar ; <i32> [#uses=1]
%tmp.8 = add i32 %N_addr.0.0, %tmp.6 ; <i32> [#uses=2]
%tmp.9 = getelementptr i8, i8* %A, i32 %tmp.8 ; <i8*> [#uses=1]
- %tmp.10 = load i8* %tmp.9 ; <i8> [#uses=1]
+ %tmp.10 = load i8, i8* %tmp.9 ; <i8> [#uses=1]
%tmp.17 = getelementptr i8, i8* %B, i32 %tmp.8 ; <i8*> [#uses=1]
- %tmp.18 = load i8* %tmp.17 ; <i8> [#uses=1]
+ %tmp.18 = load i8, i8* %tmp.17 ; <i8> [#uses=1]
%tmp.19 = sub i8 %tmp.10, %tmp.18 ; <i8> [#uses=1]
%tmp.21 = add i8 %tmp.19, %Sum.0.0 ; <i8> [#uses=2]
%indvar.next = add i32 %indvar.ui, 1 ; <i32> [#uses=2]
diff --git a/llvm/test/Transforms/LoopStrengthReduce/uglygep-address-space.ll b/llvm/test/Transforms/LoopStrengthReduce/uglygep-address-space.ll
index 6ac842d1db2..a81e314bad8 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/uglygep-address-space.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/uglygep-address-space.ll
@@ -36,7 +36,7 @@ bb10: ; preds = %bb9
; CHECK: bb14:
; CHECK-NEXT: store i8 undef, i8 addrspace(1)* [[SCEVGEP]]
-; CHECK-NEXT: %t6 = load float addrspace(1)* addrspace(1)* undef
+; CHECK-NEXT: %t6 = load float addrspace(1)*, float addrspace(1)* addrspace(1)* undef
; Fold %t3's add within the address.
; CHECK-NEXT: [[SCEVGEP1:%[^ ]+]] = getelementptr float, float addrspace(1)* %t6, i16 4
; CHECK-NEXT: [[SCEVGEP2:%[^ ]+]] = bitcast float addrspace(1)* [[SCEVGEP1]] to i8 addrspace(1)*
@@ -47,7 +47,7 @@ bb10: ; preds = %bb9
bb14: ; preds = %bb14, %bb10
%t2 = getelementptr inbounds i8, i8 addrspace(1)* undef, i16 %t4 ; <i8*> [#uses=1]
store i8 undef, i8 addrspace(1)* %t2
- %t6 = load float addrspace(1)* addrspace(1)* undef
+ %t6 = load float addrspace(1)*, float addrspace(1)* addrspace(1)* undef
%t8 = bitcast float addrspace(1)* %t6 to i8 addrspace(1)* ; <i8*> [#uses=1]
%t9 = getelementptr inbounds i8, i8 addrspace(1)* %t8, i16 %t3 ; <i8*> [#uses=1]
store i8 undef, i8 addrspace(1)* %t9
diff --git a/llvm/test/Transforms/LoopStrengthReduce/uglygep.ll b/llvm/test/Transforms/LoopStrengthReduce/uglygep.ll
index b1d9d692692..430127b3cd5 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/uglygep.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/uglygep.ll
@@ -33,7 +33,7 @@ bb10: ; preds = %bb9
; CHECK: bb14:
; CHECK-NEXT: store i8 undef, i8* [[SCEVGEP]]
-; CHECK-NEXT: %t6 = load float** undef
+; CHECK-NEXT: %t6 = load float*, float** undef
; Fold %t3's add within the address.
; CHECK-NEXT: [[SCEVGEP1:%[^ ]+]] = getelementptr float, float* %t6, i64 4
; CHECK-NEXT: [[SCEVGEP2:%[^ ]+]] = bitcast float* [[SCEVGEP1]] to i8*
@@ -44,7 +44,7 @@ bb10: ; preds = %bb9
bb14: ; preds = %bb14, %bb10
%t2 = getelementptr inbounds i8, i8* undef, i64 %t4 ; <i8*> [#uses=1]
store i8 undef, i8* %t2
- %t6 = load float** undef
+ %t6 = load float*, float** undef
%t8 = bitcast float* %t6 to i8* ; <i8*> [#uses=1]
%t9 = getelementptr inbounds i8, i8* %t8, i64 %t3 ; <i8*> [#uses=1]
store i8 undef, i8* %t9
diff --git a/llvm/test/Transforms/LoopUnroll/2011-08-08-PhiUpdate.ll b/llvm/test/Transforms/LoopUnroll/2011-08-08-PhiUpdate.ll
index c5a7becdd3c..a87b16a28b7 100644
--- a/llvm/test/Transforms/LoopUnroll/2011-08-08-PhiUpdate.ll
+++ b/llvm/test/Transforms/LoopUnroll/2011-08-08-PhiUpdate.ll
@@ -47,7 +47,7 @@ bb: ; preds = %bb.nph, %bb1
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %bb1 ] ; <i64> [#uses=2]
%s.01 = phi i32 [ 0, %bb.nph ], [ %2, %bb1 ] ; <i32> [#uses=1]
%scevgep = getelementptr i32, i32* %p, i64 %indvar ; <i32*> [#uses=1]
- %1 = load i32* %scevgep, align 1 ; <i32> [#uses=1]
+ %1 = load i32, i32* %scevgep, align 1 ; <i32> [#uses=1]
%2 = add nsw i32 %1, %s.01 ; <i32> [#uses=2]
br label %bb1
@@ -84,7 +84,7 @@ do.body: ; preds = %do.cond, %if.end
br i1 %cond2, label %exit, label %do.cond
exit: ; preds = %do.body
- %tmp7.i = load i32* undef, align 8
+ %tmp7.i = load i32, i32* undef, align 8
br i1 undef, label %do.cond, label %land.lhs.true
land.lhs.true: ; preds = %exit
diff --git a/llvm/test/Transforms/LoopUnroll/2011-08-09-IVSimplify.ll b/llvm/test/Transforms/LoopUnroll/2011-08-09-IVSimplify.ll
index 2e85d0dd026..0b484095650 100644
--- a/llvm/test/Transforms/LoopUnroll/2011-08-09-IVSimplify.ll
+++ b/llvm/test/Transforms/LoopUnroll/2011-08-09-IVSimplify.ll
@@ -29,7 +29,7 @@ while.body:
%rem = and i32 %bit_addr.addr.01, 31
%shl = shl i32 1, %rem
%arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
- %tmp6 = load i32* %arrayidx, align 4
+ %tmp6 = load i32, i32* %arrayidx, align 4
%xor = xor i32 %tmp6, %shl
store i32 %xor, i32* %arrayidx, align 4
%inc = add i32 %bit_addr.addr.01, 1
diff --git a/llvm/test/Transforms/LoopUnroll/2011-10-01-NoopTrunc.ll b/llvm/test/Transforms/LoopUnroll/2011-10-01-NoopTrunc.ll
index c77832d8976..5f9eec72ba5 100644
--- a/llvm/test/Transforms/LoopUnroll/2011-10-01-NoopTrunc.ll
+++ b/llvm/test/Transforms/LoopUnroll/2011-10-01-NoopTrunc.ll
@@ -22,7 +22,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.02 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i8, i8* %arr, i64 %indvars.iv
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
%conv = sext i8 %0 to i32
%add = add nsw i32 %conv, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopUnroll/PowerPC/a2-unrolling.ll b/llvm/test/Transforms/LoopUnroll/PowerPC/a2-unrolling.ll
index 23290236e22..e9aa1acd5fe 100644
--- a/llvm/test/Transforms/LoopUnroll/PowerPC/a2-unrolling.ll
+++ b/llvm/test/Transforms/LoopUnroll/PowerPC/a2-unrolling.ll
@@ -28,7 +28,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopUnroll/X86/partial.ll b/llvm/test/Transforms/LoopUnroll/X86/partial.ll
index bb8a04396f1..4566f792deb 100644
--- a/llvm/test/Transforms/LoopUnroll/X86/partial.ll
+++ b/llvm/test/Transforms/LoopUnroll/X86/partial.ll
@@ -11,11 +11,11 @@ vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%0 = getelementptr inbounds double, double* %b, i64 %index
%1 = bitcast double* %0 to <2 x double>*
- %wide.load = load <2 x double>* %1, align 8
+ %wide.load = load <2 x double>, <2 x double>* %1, align 8
%.sum9 = or i64 %index, 2
%2 = getelementptr double, double* %b, i64 %.sum9
%3 = bitcast double* %2 to <2 x double>*
- %wide.load8 = load <2 x double>* %3, align 8
+ %wide.load8 = load <2 x double>, <2 x double>* %3, align 8
%4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
%5 = fadd <2 x double> %wide.load8, <double 1.000000e+00, double 1.000000e+00>
%6 = getelementptr inbounds double, double* %a, i64 %index
@@ -47,7 +47,7 @@ vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%v0 = getelementptr inbounds double, double* %b, i64 %index
%v1 = bitcast double* %v0 to <2 x double>*
- %wide.load = load <2 x double>* %v1, align 8
+ %wide.load = load <2 x double>, <2 x double>* %v1, align 8
%v4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
%v5 = fmul <2 x double> %v4, <double 8.000000e+00, double 8.000000e+00>
%v6 = getelementptr inbounds double, double* %a, i64 %index
@@ -85,17 +85,17 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%reduction.026 = phi i16 [ %add14, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i16, i16* %arr, i64 %indvars.iv
- %0 = load i16* %arrayidx, align 2
+ %0 = load i16, i16* %arrayidx, align 2
%add = add i16 %0, %reduction.026
%sext = mul i64 %indvars.iv, 12884901888
%idxprom3 = ashr exact i64 %sext, 32
%arrayidx4 = getelementptr inbounds i16, i16* %arr, i64 %idxprom3
- %1 = load i16* %arrayidx4, align 2
+ %1 = load i16, i16* %arrayidx4, align 2
%add7 = add i16 %add, %1
%sext28 = mul i64 %indvars.iv, 21474836480
%idxprom10 = ashr exact i64 %sext28, 32
%arrayidx11 = getelementptr inbounds i16, i16* %arr, i64 %idxprom10
- %2 = load i16* %arrayidx11, align 2
+ %2 = load i16, i16* %arrayidx11, align 2
%add14 = add i16 %add7, %2
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopUnroll/ephemeral.ll b/llvm/test/Transforms/LoopUnroll/ephemeral.ll
index 4190520bd2f..d16eba7a964 100644
--- a/llvm/test/Transforms/LoopUnroll/ephemeral.ll
+++ b/llvm/test/Transforms/LoopUnroll/ephemeral.ll
@@ -13,7 +13,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.01 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
; This loop will be completely unrolled, even with these extra instructions,
; but only because they're ephemeral (and, thus, free).
diff --git a/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll b/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll
index 458828f17c8..a9104adeb97 100644
--- a/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll
+++ b/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll
@@ -47,9 +47,9 @@ loop: ; preds = %loop, %entry
%iv = phi i64 [ 0, %entry ], [ %inc, %loop ]
%r = phi i32 [ 0, %entry ], [ %add, %loop ]
%arrayidx = getelementptr inbounds i32, i32* %src, i64 %iv
- %src_element = load i32* %arrayidx, align 4
+ %src_element = load i32, i32* %arrayidx, align 4
%array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
- %const_array_element = load i32* %array_const_idx, align 4
+ %const_array_element = load i32, i32* %array_const_idx, align 4
%mul = mul nsw i32 %src_element, %const_array_element
%add = add nsw i32 %mul, %r
%inc = add nuw nsw i64 %iv, 1
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop.ll
index ff63d54e16a..3bec939178a 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-loop.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-loop.ll
@@ -21,7 +21,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -48,7 +48,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.01 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.01
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -97,7 +97,7 @@ for.body: ; preds = %for.body, %entry
%len.addr.04 = phi i32 [ %sub, %for.body ], [ %len, %entry ]
%res.03 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%incdec.ptr = getelementptr inbounds i16, i16* %p.addr.05, i64 1
- %0 = load i16* %p.addr.05, align 2
+ %0 = load i16, i16* %p.addr.05, align 2
%conv = zext i16 %0 to i32
%add = add i32 %conv, %res.03
%sub = add nsw i32 %len.addr.04, -2
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop1.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop1.ll
index e2fc012b871..7684e394290 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-loop1.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-loop1.ll
@@ -17,7 +17,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop2.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop2.ll
index 7dc466b934e..7c6bb969055 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-loop2.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-loop2.ll
@@ -17,7 +17,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop3.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop3.ll
index 2bbea1b22c3..fd13ebfa0b8 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-loop3.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-loop3.ll
@@ -24,7 +24,7 @@ for.body3: ; preds = %for.cond1.preheader
%sum.19 = phi i32 [ %add4, %for.body3 ], [ %sum.012, %for.cond1.preheader ]
%0 = add nsw i64 %indvars.iv, %indvars.iv16
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %0
- %1 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx, align 4
%add4 = add nsw i32 %1, %sum.19
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopUnroll/scevunroll.ll b/llvm/test/Transforms/LoopUnroll/scevunroll.ll
index e018878aac2..a5c9a6efacf 100644
--- a/llvm/test/Transforms/LoopUnroll/scevunroll.ll
+++ b/llvm/test/Transforms/LoopUnroll/scevunroll.ll
@@ -20,7 +20,7 @@ while.body:
%sum = phi i32 [ 0, %entry ], [ %sum.next, %while.body ]
%iv.next = add i64 %iv, -1
%adr = getelementptr inbounds i32, i32* %base, i64 %iv.next
- %tmp = load i32* %adr, align 8
+ %tmp = load i32, i32* %adr, align 8
%sum.next = add i32 %sum, %tmp
%iv.narrow = trunc i64 %iv.next to i32
%cmp.i65 = icmp sgt i32 %iv.narrow, 0
@@ -47,7 +47,7 @@ loop:
%iv = phi i64 [ 0, %entry ], [ %inc, %tail ]
%s = phi i64 [ 0, %entry ], [ %s.next, %tail ]
%adr = getelementptr i64, i64* %base, i64 %iv
- %val = load i64* %adr
+ %val = load i64, i64* %adr
%s.next = add i64 %s, %val
%inc = add i64 %iv, 1
%cmp = icmp ne i64 %inc, 4
@@ -68,7 +68,7 @@ exit2:
;
; CHECK-LABEL: @multiExit(
; CHECK: getelementptr i32, i32* %base, i32 10
-; CHECK-NEXT: load i32*
+; CHECK-NEXT: load i32, i32*
; CHECK: br i1 false, label %l2.10, label %exit1
; CHECK: l2.10:
; CHECK-NOT: br
@@ -82,7 +82,7 @@ l1:
%inc1 = add i32 %iv1, 1
%inc2 = add i32 %iv2, 1
%adr = getelementptr i32, i32* %base, i32 %iv1
- %val = load i32* %adr
+ %val = load i32, i32* %adr
%cmp1 = icmp slt i32 %iv1, 5
br i1 %cmp1, label %l2, label %exit1
l2:
@@ -113,7 +113,7 @@ l1:
%inc1 = add i32 %iv1, 1
%inc2 = add i32 %iv2, 1
%adr = getelementptr i32, i32* %base, i32 %iv1
- %val = load i32* %adr
+ %val = load i32, i32* %adr
%cmp1 = icmp slt i32 %iv1, 5
br i1 %cmp1, label %l2, label %exit1
l2:
diff --git a/llvm/test/Transforms/LoopUnroll/shifted-tripcount.ll b/llvm/test/Transforms/LoopUnroll/shifted-tripcount.ll
index fb3d857b374..4c216983af5 100644
--- a/llvm/test/Transforms/LoopUnroll/shifted-tripcount.ll
+++ b/llvm/test/Transforms/LoopUnroll/shifted-tripcount.ll
@@ -16,8 +16,8 @@ for.body: ; preds = %entry, %for.body
%arrayidx7 = getelementptr double, double* %p, i64 %i.013 ; <double*> [#uses=2]
%tmp16 = add i64 %i.013, 1 ; <i64> [#uses=3]
%arrayidx = getelementptr double, double* %p, i64 %tmp16 ; <double*> [#uses=1]
- %tmp4 = load double* %arrayidx ; <double> [#uses=1]
- %tmp8 = load double* %arrayidx7 ; <double> [#uses=1]
+ %tmp4 = load double, double* %arrayidx ; <double> [#uses=1]
+ %tmp8 = load double, double* %arrayidx7 ; <double> [#uses=1]
%mul9 = fmul double %tmp8, %tmp4 ; <double> [#uses=1]
store double %mul9, double* %arrayidx7
%exitcond = icmp eq i64 %tmp16, %mul10 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll b/llvm/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll
index e185ddd51fb..dc812fb4065 100644
--- a/llvm/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll
+++ b/llvm/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll
@@ -20,7 +20,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -52,7 +52,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -79,7 +79,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -111,7 +111,7 @@ entry:
for.body3: ; preds = %for.body3, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body3 ]
%arrayidx = getelementptr inbounds i32, i32* %List, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add4 = add nsw i32 %0, 10
store i32 %add4, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -125,7 +125,7 @@ for.body3.1: ; preds = %for.body3.1.prehead
%indvars.iv.1 = phi i64 [ %1, %for.body3.1 ], [ 0, %for.body3.1.preheader ]
%1 = add nsw i64 %indvars.iv.1, 1
%arrayidx.1 = getelementptr inbounds i32, i32* %List, i64 %1
- %2 = load i32* %arrayidx.1, align 4
+ %2 = load i32, i32* %arrayidx.1, align 4
%add4.1 = add nsw i32 %2, 10
store i32 %add4.1, i32* %arrayidx.1, align 4
%exitcond.1 = icmp eq i64 %1, 4
diff --git a/llvm/test/Transforms/LoopUnroll/unroll-pragmas.ll b/llvm/test/Transforms/LoopUnroll/unroll-pragmas.ll
index 3840f0b02f2..1354181becd 100644
--- a/llvm/test/Transforms/LoopUnroll/unroll-pragmas.ll
+++ b/llvm/test/Transforms/LoopUnroll/unroll-pragmas.ll
@@ -20,7 +20,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -44,7 +44,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -72,7 +72,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -95,7 +95,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -125,7 +125,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -154,7 +154,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -191,7 +191,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -218,7 +218,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -246,7 +246,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopUnswitch/2008-06-17-DomFrontier.ll b/llvm/test/Transforms/LoopUnswitch/2008-06-17-DomFrontier.ll
index f74054a0589..d606ea9c6d2 100644
--- a/llvm/test/Transforms/LoopUnswitch/2008-06-17-DomFrontier.ll
+++ b/llvm/test/Transforms/LoopUnswitch/2008-06-17-DomFrontier.ll
@@ -17,6 +17,6 @@ bb36: ; preds = %bb19
store i16 0, i16* @g_56, align 2
br i1 false, label %bb44, label %bb3
bb44: ; preds = %bb44, %bb36
- %tmp46 = load i16* @g_56, align 2 ; <i16> [#uses=0]
+ %tmp46 = load i16, i16* @g_56, align 2 ; <i16> [#uses=0]
br i1 false, label %bb, label %bb44
}
diff --git a/llvm/test/Transforms/LoopUnswitch/2010-11-18-LCSSA.ll b/llvm/test/Transforms/LoopUnswitch/2010-11-18-LCSSA.ll
index a976d18d444..3b89fa91583 100644
--- a/llvm/test/Transforms/LoopUnswitch/2010-11-18-LCSSA.ll
+++ b/llvm/test/Transforms/LoopUnswitch/2010-11-18-LCSSA.ll
@@ -7,7 +7,7 @@ entry:
br i1 true, label %for.end12, label %bb.nph
bb.nph: ; preds = %entry
- %g_38.promoted = load i32* @g_38
+ %g_38.promoted = load i32, i32* @g_38
br label %for.body
for.body: ; preds = %for.cond, %bb.nph
diff --git a/llvm/test/Transforms/LoopUnswitch/2011-09-26-EHCrash.ll b/llvm/test/Transforms/LoopUnswitch/2011-09-26-EHCrash.ll
index 5717dd89e2f..0b7f91fee15 100644
--- a/llvm/test/Transforms/LoopUnswitch/2011-09-26-EHCrash.ll
+++ b/llvm/test/Transforms/LoopUnswitch/2011-09-26-EHCrash.ll
@@ -24,7 +24,7 @@ if.then: ; preds = %for.body
%idxprom = sext i32 %inc1 to i64
%array_ = getelementptr inbounds %class.MyContainer.1.3.19.29, %class.MyContainer.1.3.19.29* %this, i32 0, i32 0
%arrayidx = getelementptr inbounds [6 x %class.MyMemVarClass.0.2.18.28*], [6 x %class.MyMemVarClass.0.2.18.28*]* %array_, i32 0, i64 %idxprom
- %tmp4 = load %class.MyMemVarClass.0.2.18.28** %arrayidx, align 8
+ %tmp4 = load %class.MyMemVarClass.0.2.18.28*, %class.MyMemVarClass.0.2.18.28** %arrayidx, align 8
%isnull = icmp eq %class.MyMemVarClass.0.2.18.28* %tmp4, null
br i1 %isnull, label %for.inc, label %delete.notnull
diff --git a/llvm/test/Transforms/LoopUnswitch/2011-11-18-SimpleSwitch.ll b/llvm/test/Transforms/LoopUnswitch/2011-11-18-SimpleSwitch.ll
index a8608b87720..a35596aff11 100644
--- a/llvm/test/Transforms/LoopUnswitch/2011-11-18-SimpleSwitch.ll
+++ b/llvm/test/Transforms/LoopUnswitch/2011-11-18-SimpleSwitch.ll
@@ -15,7 +15,7 @@
; CHECK-NEXT: br label %loop_begin.us
; CHECK: loop_begin.us: ; preds = %loop_begin.backedge.us, %.split.us
-; CHECK-NEXT: %var_val.us = load i32* %var
+; CHECK-NEXT: %var_val.us = load i32, i32* %var
; CHECK-NEXT: switch i32 1, label %default.us-lcssa.us [
; CHECK-NEXT: i32 1, label %inc.us
@@ -34,7 +34,7 @@
; CHECK-NEXT: br label %loop_begin.us1
; CHECK: loop_begin.us1: ; preds = %loop_begin.backedge.us5, %.split.split.us
-; CHECK-NEXT: %var_val.us2 = load i32* %var
+; CHECK-NEXT: %var_val.us2 = load i32, i32* %var
; CHECK-NEXT: switch i32 2, label %default.us-lcssa.us-lcssa.us [
; CHECK-NEXT: i32 1, label %inc.us4
; CHECK-NEXT: i32 2, label %dec.us3
@@ -48,7 +48,7 @@
; CHECK-NEXT: br label %loop_begin
; CHECK: loop_begin: ; preds = %loop_begin.backedge, %.split.split
-; CHECK-NEXT: %var_val = load i32* %var
+; CHECK-NEXT: %var_val = load i32, i32* %var
; CHECK-NEXT: switch i32 %c, label %default.us-lcssa.us-lcssa [
; CHECK-NEXT: i32 1, label %inc
; CHECK-NEXT: i32 2, label %dec
@@ -63,13 +63,13 @@
define i32 @test(i32* %var) {
%mem = alloca i32
store i32 2, i32* %mem
- %c = load i32* %mem
+ %c = load i32, i32* %mem
br label %loop_begin
loop_begin:
- %var_val = load i32* %var
+ %var_val = load i32, i32* %var
switch i32 %c, label %default [
i32 1, label %inc
diff --git a/llvm/test/Transforms/LoopUnswitch/2011-11-18-TwoSwitches-Threshold.ll b/llvm/test/Transforms/LoopUnswitch/2011-11-18-TwoSwitches-Threshold.ll
index 686cedbbc51..393dd5c313a 100644
--- a/llvm/test/Transforms/LoopUnswitch/2011-11-18-TwoSwitches-Threshold.ll
+++ b/llvm/test/Transforms/LoopUnswitch/2011-11-18-TwoSwitches-Threshold.ll
@@ -52,14 +52,14 @@
define i32 @test(i32* %var) {
%mem = alloca i32
store i32 2, i32* %mem
- %c = load i32* %mem
- %d = load i32* %mem
+ %c = load i32, i32* %mem
+ %d = load i32, i32* %mem
br label %loop_begin
loop_begin:
- %var_val = load i32* %var
+ %var_val = load i32, i32* %var
switch i32 %c, label %second_switch [
i32 1, label %inc
diff --git a/llvm/test/Transforms/LoopUnswitch/2011-11-18-TwoSwitches.ll b/llvm/test/Transforms/LoopUnswitch/2011-11-18-TwoSwitches.ll
index 3ba9fc2f5cf..20f03c987eb 100644
--- a/llvm/test/Transforms/LoopUnswitch/2011-11-18-TwoSwitches.ll
+++ b/llvm/test/Transforms/LoopUnswitch/2011-11-18-TwoSwitches.ll
@@ -22,7 +22,7 @@
; CHECK-NEXT: br label %loop_begin.us.us
; CHECK: loop_begin.us.us: ; preds = %loop_begin.backedge.us.us, %.split.us.split.us
-; CHECK-NEXT: %var_val.us.us = load i32* %var
+; CHECK-NEXT: %var_val.us.us = load i32, i32* %var
; CHECK-NEXT: switch i32 1, label %second_switch.us.us [
; CHECK-NEXT: i32 1, label %inc.us.us
@@ -38,7 +38,7 @@
; CHECK-NEXT: br label %loop_begin.us
; CHECK: loop_begin.us: ; preds = %loop_begin.backedge.us, %.split.us.split
-; CHECK-NEXT: %var_val.us = load i32* %var
+; CHECK-NEXT: %var_val.us = load i32, i32* %var
; CHECK-NEXT: switch i32 1, label %second_switch.us [
; CHECK-NEXT: i32 1, label %inc.us
@@ -65,7 +65,7 @@
; CHECK-NEXT: br label %loop_begin.us1
; CHECK: loop_begin.us1: ; preds = %loop_begin.backedge.us6, %.split.split.us
-; CHECK-NEXT: %var_val.us2 = load i32* %var
+; CHECK-NEXT: %var_val.us2 = load i32, i32* %var
; CHECK-NEXT: switch i32 %c, label %second_switch.us3 [
; CHECK-NEXT: i32 1, label %loop_begin.inc_crit_edge.us
; CHECK-NEXT: ]
@@ -86,7 +86,7 @@
; CHECK-NEXT: br label %loop_begin
; CHECK: loop_begin: ; preds = %loop_begin.backedge, %.split.split
-; CHECK-NEXT: %var_val = load i32* %var
+; CHECK-NEXT: %var_val = load i32, i32* %var
; CHECK-NEXT: switch i32 %c, label %second_switch [
; CHECK-NEXT: i32 1, label %loop_begin.inc_crit_edge
; CHECK-NEXT: ]
@@ -106,14 +106,14 @@
define i32 @test(i32* %var) {
%mem = alloca i32
store i32 2, i32* %mem
- %c = load i32* %mem
- %d = load i32* %mem
+ %c = load i32, i32* %mem
+ %d = load i32, i32* %mem
br label %loop_begin
loop_begin:
- %var_val = load i32* %var
+ %var_val = load i32, i32* %var
switch i32 %c, label %second_switch [
i32 1, label %inc
diff --git a/llvm/test/Transforms/LoopUnswitch/2012-04-30-LoopUnswitch-LPad-Crash.ll b/llvm/test/Transforms/LoopUnswitch/2012-04-30-LoopUnswitch-LPad-Crash.ll
index 80e4d782731..223fbf18bf5 100644
--- a/llvm/test/Transforms/LoopUnswitch/2012-04-30-LoopUnswitch-LPad-Crash.ll
+++ b/llvm/test/Transforms/LoopUnswitch/2012-04-30-LoopUnswitch-LPad-Crash.ll
@@ -46,9 +46,9 @@ define void @_ZN1DptEv(%class.D.22.42.66.102.138.158.178.198.238.242.246.250.262
entry:
%this.addr = alloca %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379*, align 8
store %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379* %this, %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379** %this.addr, align 8
- %this1 = load %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379** %this.addr
+ %this1 = load %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379*, %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379** %this.addr
%px = getelementptr inbounds %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379, %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379* %this1, i32 0, i32 0
- %0 = load %class.C.23.43.67.103.139.159.179.199.239.243.247.251.263.295.303.339.347.376** %px, align 8
+ %0 = load %class.C.23.43.67.103.139.159.179.199.239.243.247.251.263.295.303.339.347.376*, %class.C.23.43.67.103.139.159.179.199.239.243.247.251.263.295.303.339.347.376** %px, align 8
%tobool = icmp ne %class.C.23.43.67.103.139.159.179.199.239.243.247.251.263.295.303.339.347.376* %0, null
br i1 %tobool, label %cond.end, label %cond.false
diff --git a/llvm/test/Transforms/LoopUnswitch/2012-05-20-Phi.ll b/llvm/test/Transforms/LoopUnswitch/2012-05-20-Phi.ll
index 4c63a56bfdc..96bc28c9a35 100644
--- a/llvm/test/Transforms/LoopUnswitch/2012-05-20-Phi.ll
+++ b/llvm/test/Transforms/LoopUnswitch/2012-05-20-Phi.ll
@@ -9,9 +9,9 @@ target triple = "x86_64-unknown-linux-gnu"
define void @func() noreturn nounwind uwtable {
entry:
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
%tobool = icmp eq i32 %0, 0
- %1 = load i32* @b, align 4
+ %1 = load i32, i32* @b, align 4
br label %while.body
while.body: ; preds = %while.body, %entry
diff --git a/llvm/test/Transforms/LoopUnswitch/basictest.ll b/llvm/test/Transforms/LoopUnswitch/basictest.ll
index 2a0f5a5abb1..e990144d5cc 100644
--- a/llvm/test/Transforms/LoopUnswitch/basictest.ll
+++ b/llvm/test/Transforms/LoopUnswitch/basictest.ll
@@ -7,12 +7,12 @@ no_exit: ; preds = %no_exit.backedge, %entry
%i.0.0 = phi i32 [ 0, %entry ], [ %i.0.0.be, %no_exit.backedge ] ; <i32> [#uses=3]
%gep.upgrd.1 = zext i32 %i.0.0 to i64 ; <i64> [#uses=1]
%tmp.7 = getelementptr i32, i32* %A, i64 %gep.upgrd.1 ; <i32*> [#uses=4]
- %tmp.13 = load i32* %tmp.7 ; <i32> [#uses=2]
+ %tmp.13 = load i32, i32* %tmp.7 ; <i32> [#uses=2]
%tmp.14 = add i32 %tmp.13, 1 ; <i32> [#uses=1]
store i32 %tmp.14, i32* %tmp.7
br i1 %C, label %then, label %endif
then: ; preds = %no_exit
- %tmp.29 = load i32* %tmp.7 ; <i32> [#uses=1]
+ %tmp.29 = load i32, i32* %tmp.7 ; <i32> [#uses=1]
%tmp.30 = add i32 %tmp.29, 2 ; <i32> [#uses=1]
store i32 %tmp.30, i32* %tmp.7
%inc9 = add i32 %i.0.0, 1 ; <i32> [#uses=2]
@@ -36,13 +36,13 @@ return: ; preds = %endif, %then
define i32 @test2(i32* %var) {
%mem = alloca i32
store i32 2, i32* %mem
- %c = load i32* %mem
+ %c = load i32, i32* %mem
br label %loop_begin
loop_begin:
- %var_val = load i32* %var
+ %var_val = load i32, i32* %var
switch i32 %c, label %default [
i32 1, label %inc
diff --git a/llvm/test/Transforms/LoopUnswitch/preserve-analyses.ll b/llvm/test/Transforms/LoopUnswitch/preserve-analyses.ll
index 2725745ea60..e3774a1e907 100644
--- a/llvm/test/Transforms/LoopUnswitch/preserve-analyses.ll
+++ b/llvm/test/Transforms/LoopUnswitch/preserve-analyses.ll
@@ -11,8 +11,8 @@ target triple = "armv6-apple-darwin9"
define i32 @ineqn(i8* %s, i8* %p) nounwind readonly {
entry:
- %0 = load i32* @delim1, align 4 ; <i32> [#uses=1]
- %1 = load i32* @delim2, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @delim1, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* @delim2, align 4 ; <i32> [#uses=1]
br label %bb8.outer
bb: ; preds = %bb8
@@ -61,7 +61,7 @@ bb8.outer: ; preds = %bb8.outer.backedge,
bb8: ; preds = %bb8.outer, %bb8.backedge
%p_addr.0 = phi i8* [ %p_addr.0.ph, %bb8.outer ], [ %3, %bb8.backedge ] ; <i8*> [#uses=3]
- %7 = load i8* %p_addr.0, align 1 ; <i8> [#uses=2]
+ %7 = load i8, i8* %p_addr.0, align 1 ; <i8> [#uses=2]
%8 = sext i8 %7 to i32 ; <i32> [#uses=2]
%9 = icmp eq i8 %7, 0 ; <i1> [#uses=1]
br i1 %9, label %bb10, label %bb
diff --git a/llvm/test/Transforms/LoopVectorize/12-12-11-if-conv.ll b/llvm/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
index aa2e6183781..d5e020c36e9 100644
--- a/llvm/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
+++ b/llvm/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
@@ -15,7 +15,7 @@ entry:
for.body: ; preds = %entry, %if.end
%indvars.iv = phi i64 [ %indvars.iv.next, %if.end ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %if.then
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll
index 83e39a1fb7b..a689f44e912 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll
@@ -23,9 +23,9 @@ for.body.preheader: ; preds = %entry
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4
+ %1 = load i32, i32* %arrayidx2, align 4
%add = add nsw i32 %1, %0
%arrayidx4 = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
store i32 %add, i32* %arrayidx4, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll b/llvm/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll
index a7a78c75a99..4cd703f6458 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll
@@ -11,8 +11,8 @@ target triple = "aarch64--linux-gnueabi"
; }
; CHECK-LABEL: @ind_plus2(
-; CHECK: load <4 x i32>*
-; CHECK: load <4 x i32>*
+; CHECK: load <4 x i32>, <4 x i32>*
+; CHECK: load <4 x i32>, <4 x i32>*
; CHECK: mul nsw <4 x i32>
; CHECK: mul nsw <4 x i32>
; CHECK: add nsw <4 x i32>
@@ -21,7 +21,7 @@ target triple = "aarch64--linux-gnueabi"
; CHECK: icmp eq i64 %index.next, 512
; FORCE-VEC-LABEL: @ind_plus2(
-; FORCE-VEC: %wide.load = load <2 x i32>*
+; FORCE-VEC: %wide.load = load <2 x i32>, <2 x i32>*
; FORCE-VEC: mul nsw <2 x i32>
; FORCE-VEC: add nsw <2 x i32>
; FORCE-VEC: %index.next = add i64 %index, 2
@@ -35,7 +35,7 @@ for.body: ; preds = %entry, %for.body
%i = phi i32 [ 0, %entry ], [ %add1, %for.body ]
%sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
%inc.ptr = getelementptr inbounds i32, i32* %A.addr, i64 1
- %0 = load i32* %A.addr, align 4
+ %0 = load i32, i32* %A.addr, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%add1 = add nsw i32 %i, 2
@@ -55,8 +55,8 @@ for.end: ; preds = %for.body
; }
; CHECK-LABEL: @ind_minus2(
-; CHECK: load <4 x i32>*
-; CHECK: load <4 x i32>*
+; CHECK: load <4 x i32>, <4 x i32>*
+; CHECK: load <4 x i32>, <4 x i32>*
; CHECK: mul nsw <4 x i32>
; CHECK: mul nsw <4 x i32>
; CHECK: add nsw <4 x i32>
@@ -65,7 +65,7 @@ for.end: ; preds = %for.body
; CHECK: icmp eq i64 %index.next, 512
; FORCE-VEC-LABEL: @ind_minus2(
-; FORCE-VEC: %wide.load = load <2 x i32>*
+; FORCE-VEC: %wide.load = load <2 x i32>, <2 x i32>*
; FORCE-VEC: mul nsw <2 x i32>
; FORCE-VEC: add nsw <2 x i32>
; FORCE-VEC: %index.next = add i64 %index, 2
@@ -79,7 +79,7 @@ for.body: ; preds = %entry, %for.body
%i = phi i32 [ 1024, %entry ], [ %sub, %for.body ]
%sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
%inc.ptr = getelementptr inbounds i32, i32* %A.addr, i64 1
- %0 = load i32* %A.addr, align 4
+ %0 = load i32, i32* %A.addr, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%sub = add nsw i32 %i, -2
@@ -102,10 +102,10 @@ for.end: ; preds = %for.body
; }
; CHECK-LABEL: @ptr_ind_plus2(
-; CHECK: load i32*
-; CHECK: load i32*
-; CHECK: load i32*
-; CHECK: load i32*
+; CHECK: load i32, i32*
+; CHECK: load i32, i32*
+; CHECK: load i32, i32*
+; CHECK: load i32, i32*
; CHECK: mul nsw i32
; CHECK: mul nsw i32
; CHECK: add nsw i32
@@ -114,13 +114,13 @@ for.end: ; preds = %for.body
; CHECK: %21 = icmp eq i64 %index.next, 1024
; FORCE-VEC-LABEL: @ptr_ind_plus2(
-; FORCE-VEC: load i32*
+; FORCE-VEC: load i32, i32*
; FORCE-VEC: insertelement <2 x i32>
-; FORCE-VEC: load i32*
+; FORCE-VEC: load i32, i32*
; FORCE-VEC: insertelement <2 x i32>
-; FORCE-VEC: load i32*
+; FORCE-VEC: load i32, i32*
; FORCE-VEC: insertelement <2 x i32>
-; FORCE-VEC: load i32*
+; FORCE-VEC: load i32, i32*
; FORCE-VEC: insertelement <2 x i32>
; FORCE-VEC: mul nsw <2 x i32>
; FORCE-VEC: add nsw <2 x i32>
@@ -135,9 +135,9 @@ for.body: ; preds = %for.body, %entry
%sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
%i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%inc.ptr = getelementptr inbounds i32, i32* %A.addr, i64 1
- %0 = load i32* %A.addr, align 4
+ %0 = load i32, i32* %A.addr, align 4
%inc.ptr1 = getelementptr inbounds i32, i32* %A.addr, i64 2
- %1 = load i32* %inc.ptr, align 4
+ %1 = load i32, i32* %inc.ptr, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, %sum
%inc = add nsw i32 %i, 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll
index 159aaf488da..395b468c509 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll
@@ -23,9 +23,9 @@ for.body.preheader: ; preds = %entry
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4
+ %1 = load i32, i32* %arrayidx2, align 4
%add = add nsw i32 %1, %0
%arrayidx4 = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
store i32 %add, i32* %arrayidx4, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/gather-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/gather-cost.ll
index 302ec796734..46b8ef1a09b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/gather-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/gather-cost.ll
@@ -31,23 +31,23 @@ for.body:
%add = add i64 %v.055, %offset
%mul = mul i64 %add, 3
%arrayidx = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %mul
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds [512 x float], [512 x float]* @kernel, i64 0, i64 %v.055
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%mul3 = fmul fast float %0, %1
%arrayidx4 = getelementptr inbounds [512 x float], [512 x float]* @kernel2, i64 0, i64 %v.055
- %2 = load float* %arrayidx4, align 4
+ %2 = load float, float* %arrayidx4, align 4
%mul5 = fmul fast float %mul3, %2
%arrayidx6 = getelementptr inbounds [512 x float], [512 x float]* @kernel3, i64 0, i64 %v.055
- %3 = load float* %arrayidx6, align 4
+ %3 = load float, float* %arrayidx6, align 4
%mul7 = fmul fast float %mul5, %3
%arrayidx8 = getelementptr inbounds [512 x float], [512 x float]* @kernel4, i64 0, i64 %v.055
- %4 = load float* %arrayidx8, align 4
+ %4 = load float, float* %arrayidx8, align 4
%mul9 = fmul fast float %mul7, %4
%add10 = fadd fast float %r.057, %mul9
%arrayidx.sum = add i64 %mul, 1
%arrayidx11 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum
- %5 = load float* %arrayidx11, align 4
+ %5 = load float, float* %arrayidx11, align 4
%mul13 = fmul fast float %1, %5
%mul15 = fmul fast float %2, %mul13
%mul17 = fmul fast float %3, %mul15
@@ -55,7 +55,7 @@ for.body:
%add20 = fadd fast float %g.056, %mul19
%arrayidx.sum52 = add i64 %mul, 2
%arrayidx21 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum52
- %6 = load float* %arrayidx21, align 4
+ %6 = load float, float* %arrayidx21, align 4
%mul23 = fmul fast float %1, %6
%mul25 = fmul fast float %2, %mul23
%mul27 = fmul fast float %3, %mul25
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sdiv-pow2.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sdiv-pow2.ll
index d1ca199b82c..f3c6548c68e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sdiv-pow2.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sdiv-pow2.ll
@@ -7,7 +7,7 @@ target triple = "aarch64--linux-gnu"
@Foo = common global %struct.anon zeroinitializer, align 4
; CHECK-LABEL: @foo(
-; CHECK: load <4 x i32>*
+; CHECK: load <4 x i32>, <4 x i32>*
; CHECK: sdiv <4 x i32>
; CHECK: store <4 x i32>
@@ -18,7 +18,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds %struct.anon, %struct.anon* @Foo, i64 0, i32 2, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%div = sdiv i32 %0, 2
%arrayidx2 = getelementptr inbounds %struct.anon, %struct.anon* @Foo, i64 0, i32 0, i64 %indvars.iv
store i32 %div, i32* %arrayidx2, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/arm-unroll.ll b/llvm/test/Transforms/LoopVectorize/ARM/arm-unroll.ll
index 96e13842106..7b09913636f 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/arm-unroll.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/arm-unroll.ll
@@ -21,7 +21,7 @@ define i32 @foo(i32* nocapture %A, i32 %n) nounwind readonly ssp {
%i.02 = phi i32 [ %5, %.lr.ph ], [ 0, %0 ]
%sum.01 = phi i32 [ %4, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds i32, i32* %A, i32 %i.02
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = add nsw i32 %3, %sum.01
%5 = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %5, %n
@@ -49,7 +49,7 @@ define i32 @register_limit(i32* nocapture %A, i32 %n) {
%sum.05 = phi i32 [ %9, %.lr.ph ], [ 0, %0 ]
%sum.06 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds i32, i32* %A, i32 %i.02
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = add nsw i32 %3, %sum.01
%5 = add nsw i32 %i.02, 1
%6 = add nsw i32 %3, %sum.02
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/gather-cost.ll b/llvm/test/Transforms/LoopVectorize/ARM/gather-cost.ll
index 1b5e45e089f..f14a8cc6ca2 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/gather-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/gather-cost.ll
@@ -34,23 +34,23 @@ for.body:
%add = add i32 %v.055, %offset
%mul = mul i32 %add, 3
%arrayidx = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i32 0, i32 %mul
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds [512 x float], [512 x float]* @kernel, i32 0, i32 %v.055
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%mul3 = fmul fast float %0, %1
%arrayidx4 = getelementptr inbounds [512 x float], [512 x float]* @kernel2, i32 0, i32 %v.055
- %2 = load float* %arrayidx4, align 4
+ %2 = load float, float* %arrayidx4, align 4
%mul5 = fmul fast float %mul3, %2
%arrayidx6 = getelementptr inbounds [512 x float], [512 x float]* @kernel3, i32 0, i32 %v.055
- %3 = load float* %arrayidx6, align 4
+ %3 = load float, float* %arrayidx6, align 4
%mul7 = fmul fast float %mul5, %3
%arrayidx8 = getelementptr inbounds [512 x float], [512 x float]* @kernel4, i32 0, i32 %v.055
- %4 = load float* %arrayidx8, align 4
+ %4 = load float, float* %arrayidx8, align 4
%mul9 = fmul fast float %mul7, %4
%add10 = fadd fast float %r.057, %mul9
%arrayidx.sum = add i32 %mul, 1
%arrayidx11 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i32 0, i32 %arrayidx.sum
- %5 = load float* %arrayidx11, align 4
+ %5 = load float, float* %arrayidx11, align 4
%mul13 = fmul fast float %1, %5
%mul15 = fmul fast float %2, %mul13
%mul17 = fmul fast float %3, %mul15
@@ -58,7 +58,7 @@ for.body:
%add20 = fadd fast float %g.056, %mul19
%arrayidx.sum52 = add i32 %mul, 2
%arrayidx21 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i32 0, i32 %arrayidx.sum52
- %6 = load float* %arrayidx21, align 4
+ %6 = load float, float* %arrayidx21, align 4
%mul23 = fmul fast float %1, %6
%mul25 = fmul fast float %2, %mul23
%mul27 = fmul fast float %3, %mul25
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/gcc-examples.ll b/llvm/test/Transforms/LoopVectorize/ARM/gcc-examples.ll
index ae61da02220..783156d7399 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/gcc-examples.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/gcc-examples.ll
@@ -19,9 +19,9 @@ define void @example1() nounwind uwtable ssp {
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = add nsw i32 %5, %3
%7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %6, i32* %7, align 4
@@ -45,7 +45,7 @@ define void @example10b(i16* noalias nocapture %sa, i16* noalias nocapture %sb,
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds i16, i16* %sb, i64 %indvars.iv
- %3 = load i16* %2, align 2
+ %3 = load i16, i16* %2, align 2
%4 = sext i16 %3 to i32
%5 = getelementptr inbounds i32, i32* %ia, i64 %indvars.iv
store i32 %4, i32* %5, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll b/llvm/test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll
index d2e3de279f7..e88fcca1225 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll
@@ -16,9 +16,9 @@ target triple = "armv7--linux-gnueabihf"
define void @direct(%T432* %loadaddr, %T432* %loadaddr2, %T432* %storeaddr) {
; COST: function 'direct':
- %v0 = load %T432* %loadaddr
+ %v0 = load %T432, %T432* %loadaddr
; ASM: vld1.64
- %v1 = load %T432* %loadaddr2
+ %v1 = load %T432, %T432* %loadaddr2
; ASM: vld1.64
%r3 = mul %T432 %v0, %v1
; COST: cost of 2 for instruction: {{.*}} mul <4 x i32>
@@ -30,9 +30,9 @@ define void @direct(%T432* %loadaddr, %T432* %loadaddr2, %T432* %storeaddr) {
define void @ups1632(%T416* %loadaddr, %T416* %loadaddr2, %T432* %storeaddr) {
; COST: function 'ups1632':
- %v0 = load %T416* %loadaddr
+ %v0 = load %T416, %T416* %loadaddr
; ASM: vldr
- %v1 = load %T416* %loadaddr2
+ %v1 = load %T416, %T416* %loadaddr2
; ASM: vldr
%r1 = sext %T416 %v0 to %T432
%r2 = sext %T416 %v1 to %T432
@@ -47,9 +47,9 @@ define void @ups1632(%T416* %loadaddr, %T416* %loadaddr2, %T432* %storeaddr) {
define void @upu1632(%T416* %loadaddr, %T416* %loadaddr2, %T432* %storeaddr) {
; COST: function 'upu1632':
- %v0 = load %T416* %loadaddr
+ %v0 = load %T416, %T416* %loadaddr
; ASM: vldr
- %v1 = load %T416* %loadaddr2
+ %v1 = load %T416, %T416* %loadaddr2
; ASM: vldr
%r1 = zext %T416 %v0 to %T432
%r2 = zext %T416 %v1 to %T432
@@ -64,9 +64,9 @@ define void @upu1632(%T416* %loadaddr, %T416* %loadaddr2, %T432* %storeaddr) {
define void @ups3264(%T232* %loadaddr, %T232* %loadaddr2, %T264* %storeaddr) {
; COST: function 'ups3264':
- %v0 = load %T232* %loadaddr
+ %v0 = load %T232, %T232* %loadaddr
; ASM: vldr
- %v1 = load %T232* %loadaddr2
+ %v1 = load %T232, %T232* %loadaddr2
; ASM: vldr
%r3 = mul %T232 %v0, %v1
; ASM: vmul.i32
@@ -81,9 +81,9 @@ define void @ups3264(%T232* %loadaddr, %T232* %loadaddr2, %T264* %storeaddr) {
define void @upu3264(%T232* %loadaddr, %T232* %loadaddr2, %T264* %storeaddr) {
; COST: function 'upu3264':
- %v0 = load %T232* %loadaddr
+ %v0 = load %T232, %T232* %loadaddr
; ASM: vldr
- %v1 = load %T232* %loadaddr2
+ %v1 = load %T232, %T232* %loadaddr2
; ASM: vldr
%r3 = mul %T232 %v0, %v1
; ASM: vmul.i32
@@ -98,9 +98,9 @@ define void @upu3264(%T232* %loadaddr, %T232* %loadaddr2, %T264* %storeaddr) {
define void @dn3216(%T432* %loadaddr, %T432* %loadaddr2, %T416* %storeaddr) {
; COST: function 'dn3216':
- %v0 = load %T432* %loadaddr
+ %v0 = load %T432, %T432* %loadaddr
; ASM: vld1.64
- %v1 = load %T432* %loadaddr2
+ %v1 = load %T432, %T432* %loadaddr2
; ASM: vld1.64
%r3 = mul %T432 %v0, %v1
; ASM: vmul.i32
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/width-detect.ll b/llvm/test/Transforms/LoopVectorize/ARM/width-detect.ll
index f970e922810..66d2556dfb8 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/width-detect.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/width-detect.ll
@@ -14,7 +14,7 @@ define float @foo_F32(float* nocapture %A, i32 %n) nounwind uwtable readonly ssp
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%prod.01 = phi float [ %4, %.lr.ph ], [ 0.000000e+00, %0 ]
%2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
- %3 = load float* %2, align 8
+ %3 = load float, float* %2, align 8
%4 = fmul fast float %prod.01, %3
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -37,7 +37,7 @@ define signext i8 @foo_I8(i8* nocapture %A, i32 %n) nounwind uwtable readonly ss
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%red.01 = phi i8 [ %4, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds i8, i8* %A, i64 %indvars.iv
- %3 = load i8* %2, align 1
+ %3 = load i8, i8* %2, align 1
%4 = xor i8 %3, %red.01
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll
index bc043475a64..2898af2986d 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll
@@ -30,7 +30,7 @@ for.body: ; preds = %for.body, %for.body
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%redx.05 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds double, double* %arr, i64 %indvars.iv
- %1 = load double* %arrayidx, align 8
+ %1 = load double, double* %arrayidx, align 8
%add = fadd fast double %1, %redx.05
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv to i32
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
index 27a11028b5b..65b3919585e 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
@@ -9,7 +9,7 @@ target triple = "powerpc64-unknown-linux-gnu"
define signext i32 @s173() #0 {
entry:
- %0 = load i32* @ntimes, align 4
+ %0 = load i32, i32* @ntimes, align 4
%cmp21 = icmp sgt i32 %0, 0
br i1 %cmp21, label %for.cond1.preheader, label %for.end12
@@ -20,9 +20,9 @@ for.cond1.preheader: ; preds = %for.end, %entry
for.body3: ; preds = %for.body3, %for.cond1.preheader
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
%arrayidx = getelementptr inbounds %struct.GlobalData, %struct.GlobalData* @global_data, i64 0, i32 0, i64 %indvars.iv
- %1 = load float* %arrayidx, align 4
+ %1 = load float, float* %arrayidx, align 4
%arrayidx5 = getelementptr inbounds %struct.GlobalData, %struct.GlobalData* @global_data, i64 0, i32 3, i64 %indvars.iv
- %2 = load float* %arrayidx5, align 4
+ %2 = load float, float* %arrayidx5, align 4
%add = fadd float %1, %2
%3 = add nsw i64 %indvars.iv, 16000
%arrayidx8 = getelementptr inbounds %struct.GlobalData, %struct.GlobalData* @global_data, i64 0, i32 0, i64 %3
@@ -33,7 +33,7 @@ for.body3: ; preds = %for.body3, %for.con
for.end: ; preds = %for.body3
%inc11 = add nsw i32 %nl.022, 1
- %4 = load i32* @ntimes, align 4
+ %4 = load i32, i32* @ntimes, align 4
%mul = mul nsw i32 %4, 10
%cmp = icmp slt i32 %inc11, %mul
br i1 %cmp, label %for.cond1.preheader, label %for.end12
@@ -42,7 +42,7 @@ for.end12: ; preds = %for.end, %entry
ret i32 0
; CHECK-LABEL: @s173
-; CHECK: load <4 x float>*
+; CHECK: load <4 x float>, <4 x float>*
; CHECK: add i64 %index, 16000
; CHECK: ret i32 0
}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/already-vectorized.ll b/llvm/test/Transforms/LoopVectorize/X86/already-vectorized.ll
index dcf2c6e70c4..248d6dc90bd 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/already-vectorized.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/already-vectorized.ll
@@ -22,7 +22,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%red.05 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds [255 x i32], [255 x i32]* @a, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %red.05
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 255
diff --git a/llvm/test/Transforms/LoopVectorize/X86/assume.ll b/llvm/test/Transforms/LoopVectorize/X86/assume.ll
index c036bba07ce..4fd378d1a0a 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/assume.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/assume.ll
@@ -23,7 +23,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp1 = fcmp ogt float %0, 1.000000e+02
tail call void @llvm.assume(i1 %cmp1)
%add = fadd float %0, 1.000000e+00
@@ -49,12 +49,12 @@ attributes #1 = { nounwind }
define void @test2(%struct.data* nocapture readonly %d) #0 {
entry:
%b = getelementptr inbounds %struct.data, %struct.data* %d, i64 0, i32 1
- %0 = load float** %b, align 8
+ %0 = load float*, float** %b, align 8
%ptrint = ptrtoint float* %0 to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
%a = getelementptr inbounds %struct.data, %struct.data* %d, i64 0, i32 0
- %1 = load float** %a, align 8
+ %1 = load float*, float** %a, align 8
%ptrint2 = ptrtoint float* %1 to i64
%maskedptr3 = and i64 %ptrint2, 31
%maskcond4 = icmp eq i64 %maskedptr3, 0
@@ -85,7 +85,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
tail call void @llvm.assume(i1 %maskcond)
%arrayidx = getelementptr inbounds float, float* %0, i64 %indvars.iv
- %2 = load float* %arrayidx, align 4
+ %2 = load float, float* %arrayidx, align 4
%add = fadd float %2, 1.000000e+00
tail call void @llvm.assume(i1 %maskcond4)
%arrayidx5 = getelementptr inbounds float, float* %1, i64 %indvars.iv
diff --git a/llvm/test/Transforms/LoopVectorize/X86/avx1.ll b/llvm/test/Transforms/LoopVectorize/X86/avx1.ll
index 95088df5c3f..37977c43ac3 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/avx1.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/avx1.ll
@@ -13,7 +13,7 @@ define i32 @read_mod_write_single_ptr(float* nocapture %a, i32 %n) nounwind uwta
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
- %3 = load float* %2, align 4
+ %3 = load float, float* %2, align 4
%4 = fmul float %3, 3.000000e+00
store float %4, float* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -36,7 +36,7 @@ define i32 @read_mod_i64(i64* nocapture %a, i32 %n) nounwind uwtable ssp {
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds i64, i64* %a, i64 %indvars.iv
- %3 = load i64* %2, align 4
+ %3 = load i64, i64* %2, align 4
%4 = add i64 %3, 3
store i64 %4, i64* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll b/llvm/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll
index 106bd84fc7c..d75b1d940ae 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll
@@ -16,7 +16,7 @@ entry:
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%shl = ashr i32 %0, 3
%arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
store i32 %shl, i32* %arrayidx2, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
index 190e130b609..013657102e6 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
@@ -22,12 +22,12 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%0 = shl nsw i64 %indvars.iv, 1
%arrayidx = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %0
- %1 = load i32* %arrayidx, align 8
+ %1 = load i32, i32* %arrayidx, align 8
%idxprom1 = sext i32 %1 to i64
%arrayidx2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %idxprom1
- %2 = load i32* %arrayidx2, align 4
+ %2 = load i32, i32* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @d, i64 0, i64 %indvars.iv
- %3 = load i32* %arrayidx4, align 4
+ %3 = load i32, i32* %arrayidx4, align 4
%idxprom5 = sext i32 %3 to i64
%arrayidx6 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %idxprom5
store i32 %2, i32* %arrayidx6, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/X86/fp32_to_uint32-cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/fp32_to_uint32-cost-model.ll
index 627ae0076b0..4a56d6b5ebf 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/fp32_to_uint32-cost-model.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/fp32_to_uint32-cost-model.ll
@@ -21,7 +21,7 @@ for.body.preheader: ; preds = %entry
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds [10000 x float], [10000 x float]* @float_array, i64 0, i64 %indvars.iv
- %1 = load float* %arrayidx, align 4
+ %1 = load float, float* %arrayidx, align 4
%conv = fptoui float %1 to i32
%arrayidx2 = getelementptr inbounds [10000 x i32], [10000 x i32]* @unsigned_array, i64 0, i64 %indvars.iv
store i32 %conv, i32* %arrayidx2, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll
index 7c1dfe39236..c066afcfa63 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll
@@ -13,7 +13,7 @@ target triple = "x86_64-apple-macosx"
define void @convert() {
entry:
- %0 = load i32* @n, align 4
+ %0 = load i32, i32* @n, align 4
%cmp4 = icmp eq i32 %0, 0
br i1 %cmp4, label %for.end, label %for.body.preheader
@@ -23,7 +23,7 @@ for.body.preheader: ; preds = %entry
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds [10000 x double], [10000 x double]* @double_array, i64 0, i64 %indvars.iv
- %1 = load double* %arrayidx, align 8
+ %1 = load double, double* %arrayidx, align 8
%conv = fptoui double %1 to i32
%arrayidx2 = getelementptr inbounds [10000 x i32], [10000 x i32]* @unsigned_array, i64 0, i64 %indvars.iv
store i32 %conv, i32* %arrayidx2, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/X86/fp_to_sint8-cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/fp_to_sint8-cost-model.ll
index 106c9d66623..b3a0710545d 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/fp_to_sint8-cost-model.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/fp_to_sint8-cost-model.ll
@@ -12,7 +12,7 @@ entry:
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
- %tmp = load float* %arrayidx, align 4
+ %tmp = load float, float* %arrayidx, align 4
%conv = fptosi float %tmp to i8
%arrayidx2 = getelementptr inbounds i8, i8* %a, i64 %indvars.iv
store i8 %conv, i8* %arrayidx2, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/X86/gather-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/gather-cost.ll
index 8c9cb653da0..f0e6c8f205c 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/gather-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/gather-cost.ll
@@ -32,23 +32,23 @@ for.body:
%add = add i64 %v.055, %offset
%mul = mul i64 %add, 3
%arrayidx = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %mul
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds [512 x float], [512 x float]* @kernel, i64 0, i64 %v.055
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%mul3 = fmul fast float %0, %1
%arrayidx4 = getelementptr inbounds [512 x float], [512 x float]* @kernel2, i64 0, i64 %v.055
- %2 = load float* %arrayidx4, align 4
+ %2 = load float, float* %arrayidx4, align 4
%mul5 = fmul fast float %mul3, %2
%arrayidx6 = getelementptr inbounds [512 x float], [512 x float]* @kernel3, i64 0, i64 %v.055
- %3 = load float* %arrayidx6, align 4
+ %3 = load float, float* %arrayidx6, align 4
%mul7 = fmul fast float %mul5, %3
%arrayidx8 = getelementptr inbounds [512 x float], [512 x float]* @kernel4, i64 0, i64 %v.055
- %4 = load float* %arrayidx8, align 4
+ %4 = load float, float* %arrayidx8, align 4
%mul9 = fmul fast float %mul7, %4
%add10 = fadd fast float %r.057, %mul9
%arrayidx.sum = add i64 %mul, 1
%arrayidx11 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum
- %5 = load float* %arrayidx11, align 4
+ %5 = load float, float* %arrayidx11, align 4
%mul13 = fmul fast float %1, %5
%mul15 = fmul fast float %2, %mul13
%mul17 = fmul fast float %3, %mul15
@@ -56,7 +56,7 @@ for.body:
%add20 = fadd fast float %g.056, %mul19
%arrayidx.sum52 = add i64 %mul, 2
%arrayidx21 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum52
- %6 = load float* %arrayidx21, align 4
+ %6 = load float, float* %arrayidx21, align 4
%mul23 = fmul fast float %1, %6
%mul25 = fmul fast float %2, %mul23
%mul27 = fmul fast float %3, %mul25
diff --git a/llvm/test/Transforms/LoopVectorize/X86/gcc-examples.ll b/llvm/test/Transforms/LoopVectorize/X86/gcc-examples.ll
index eb846622262..c581f4bf2a6 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/gcc-examples.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/gcc-examples.ll
@@ -29,9 +29,9 @@ define void @example1() nounwind uwtable ssp {
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = add nsw i32 %5, %3
%7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %6, i32* %7, align 4
@@ -62,7 +62,7 @@ define void @example10b(i16* noalias nocapture %sa, i16* noalias nocapture %sb,
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds i16, i16* %sb, i64 %indvars.iv
- %3 = load i16* %2, align 2
+ %3 = load i16, i16* %2, align 2
%4 = sext i16 %3 to i32
%5 = getelementptr inbounds i32, i32* %ia, i64 %indvars.iv
store i32 %4, i32* %5, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll b/llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
index f2163b0bf81..cbba5300b9c 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
@@ -15,7 +15,7 @@ entry:
for.end.us: ; preds = %for.body3.us
%arrayidx9.us = getelementptr inbounds i32, i32* %b, i64 %indvars.iv33
- %0 = load i32* %arrayidx9.us, align 4, !llvm.mem.parallel_loop_access !3
+ %0 = load i32, i32* %arrayidx9.us, align 4, !llvm.mem.parallel_loop_access !3
%add10.us = add nsw i32 %0, 3
store i32 %add10.us, i32* %arrayidx9.us, align 4, !llvm.mem.parallel_loop_access !3
%indvars.iv.next34 = add i64 %indvars.iv33, 1
@@ -29,7 +29,7 @@ for.body3.us: ; preds = %for.body3.us, %for.
%add4.us = add i32 %add.us, %1
%idxprom.us = sext i32 %add4.us to i64
%arrayidx.us = getelementptr inbounds i32, i32* %a, i64 %idxprom.us
- %2 = load i32* %arrayidx.us, align 4, !llvm.mem.parallel_loop_access !3
+ %2 = load i32, i32* %arrayidx.us, align 4, !llvm.mem.parallel_loop_access !3
%add5.us = add nsw i32 %2, 1
store i32 %add5.us, i32* %arrayidx7.us, align 4, !llvm.mem.parallel_loop_access !3
%indvars.iv.next30 = add i64 %indvars.iv29, 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll
index c3ee6f81a79..c3175b02036 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/masked_load_store.ll
@@ -46,34 +46,34 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 10000
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%idxprom = sext i32 %1 to i64
- %2 = load i32** %trigger.addr, align 8
+ %2 = load i32*, i32** %trigger.addr, align 8
%arrayidx = getelementptr inbounds i32, i32* %2, i64 %idxprom
- %3 = load i32* %arrayidx, align 4
+ %3 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp slt i32 %3, 100
br i1 %cmp1, label %if.then, label %if.end
if.then: ; preds = %for.body
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%idxprom2 = sext i32 %4 to i64
- %5 = load i32** %B.addr, align 8
+ %5 = load i32*, i32** %B.addr, align 8
%arrayidx3 = getelementptr inbounds i32, i32* %5, i64 %idxprom2
- %6 = load i32* %arrayidx3, align 4
- %7 = load i32* %i, align 4
+ %6 = load i32, i32* %arrayidx3, align 4
+ %7 = load i32, i32* %i, align 4
%idxprom4 = sext i32 %7 to i64
- %8 = load i32** %trigger.addr, align 8
+ %8 = load i32*, i32** %trigger.addr, align 8
%arrayidx5 = getelementptr inbounds i32, i32* %8, i64 %idxprom4
- %9 = load i32* %arrayidx5, align 4
+ %9 = load i32, i32* %arrayidx5, align 4
%add = add nsw i32 %6, %9
- %10 = load i32* %i, align 4
+ %10 = load i32, i32* %i, align 4
%idxprom6 = sext i32 %10 to i64
- %11 = load i32** %A.addr, align 8
+ %11 = load i32*, i32** %A.addr, align 8
%arrayidx7 = getelementptr inbounds i32, i32* %11, i64 %idxprom6
store i32 %add, i32* %arrayidx7, align 4
br label %if.end
@@ -82,7 +82,7 @@ if.end: ; preds = %if.then, %for.body
br label %for.inc
for.inc: ; preds = %if.end
- %12 = load i32* %i, align 4
+ %12 = load i32, i32* %i, align 4
%inc = add nsw i32 %12, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
@@ -130,35 +130,35 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 10000
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%idxprom = sext i32 %1 to i64
- %2 = load i32** %trigger.addr, align 8
+ %2 = load i32*, i32** %trigger.addr, align 8
%arrayidx = getelementptr inbounds i32, i32* %2, i64 %idxprom
- %3 = load i32* %arrayidx, align 4
+ %3 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp slt i32 %3, 100
br i1 %cmp1, label %if.then, label %if.end
if.then: ; preds = %for.body
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%idxprom2 = sext i32 %4 to i64
- %5 = load float** %B.addr, align 8
+ %5 = load float*, float** %B.addr, align 8
%arrayidx3 = getelementptr inbounds float, float* %5, i64 %idxprom2
- %6 = load float* %arrayidx3, align 4
- %7 = load i32* %i, align 4
+ %6 = load float, float* %arrayidx3, align 4
+ %7 = load i32, i32* %i, align 4
%idxprom4 = sext i32 %7 to i64
- %8 = load i32** %trigger.addr, align 8
+ %8 = load i32*, i32** %trigger.addr, align 8
%arrayidx5 = getelementptr inbounds i32, i32* %8, i64 %idxprom4
- %9 = load i32* %arrayidx5, align 4
+ %9 = load i32, i32* %arrayidx5, align 4
%conv = sitofp i32 %9 to float
%add = fadd float %6, %conv
- %10 = load i32* %i, align 4
+ %10 = load i32, i32* %i, align 4
%idxprom6 = sext i32 %10 to i64
- %11 = load float** %A.addr, align 8
+ %11 = load float*, float** %A.addr, align 8
%arrayidx7 = getelementptr inbounds float, float* %11, i64 %idxprom6
store float %add, float* %arrayidx7, align 4
br label %if.end
@@ -167,7 +167,7 @@ if.end: ; preds = %if.then, %for.body
br label %for.inc
for.inc: ; preds = %if.end
- %12 = load i32* %i, align 4
+ %12 = load i32, i32* %i, align 4
%inc = add nsw i32 %12, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
@@ -218,35 +218,35 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 10000
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%idxprom = sext i32 %1 to i64
- %2 = load i32** %trigger.addr, align 8
+ %2 = load i32*, i32** %trigger.addr, align 8
%arrayidx = getelementptr inbounds i32, i32* %2, i64 %idxprom
- %3 = load i32* %arrayidx, align 4
+ %3 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp slt i32 %3, 100
br i1 %cmp1, label %if.then, label %if.end
if.then: ; preds = %for.body
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%idxprom2 = sext i32 %4 to i64
- %5 = load double** %B.addr, align 8
+ %5 = load double*, double** %B.addr, align 8
%arrayidx3 = getelementptr inbounds double, double* %5, i64 %idxprom2
- %6 = load double* %arrayidx3, align 8
- %7 = load i32* %i, align 4
+ %6 = load double, double* %arrayidx3, align 8
+ %7 = load i32, i32* %i, align 4
%idxprom4 = sext i32 %7 to i64
- %8 = load i32** %trigger.addr, align 8
+ %8 = load i32*, i32** %trigger.addr, align 8
%arrayidx5 = getelementptr inbounds i32, i32* %8, i64 %idxprom4
- %9 = load i32* %arrayidx5, align 4
+ %9 = load i32, i32* %arrayidx5, align 4
%conv = sitofp i32 %9 to double
%add = fadd double %6, %conv
- %10 = load i32* %i, align 4
+ %10 = load i32, i32* %i, align 4
%idxprom6 = sext i32 %10 to i64
- %11 = load double** %A.addr, align 8
+ %11 = load double*, double** %A.addr, align 8
%arrayidx7 = getelementptr inbounds double, double* %11, i64 %idxprom6
store double %add, double* %arrayidx7, align 8
br label %if.end
@@ -255,7 +255,7 @@ if.end: ; preds = %if.then, %for.body
br label %for.inc
for.inc: ; preds = %if.end
- %12 = load i32* %i, align 4
+ %12 = load i32, i32* %i, align 4
%inc = add nsw i32 %12, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
@@ -297,36 +297,36 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 10000
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%idxprom = sext i32 %1 to i64
- %2 = load i32** %trigger.addr, align 8
+ %2 = load i32*, i32** %trigger.addr, align 8
%arrayidx = getelementptr inbounds i32, i32* %2, i64 %idxprom
- %3 = load i32* %arrayidx, align 4
+ %3 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp slt i32 %3, 100
br i1 %cmp1, label %if.then, label %if.end
if.then: ; preds = %for.body
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%mul = mul nsw i32 %4, 2
%idxprom2 = sext i32 %mul to i64
- %5 = load double** %B.addr, align 8
+ %5 = load double*, double** %B.addr, align 8
%arrayidx3 = getelementptr inbounds double, double* %5, i64 %idxprom2
- %6 = load double* %arrayidx3, align 8
- %7 = load i32* %i, align 4
+ %6 = load double, double* %arrayidx3, align 8
+ %7 = load i32, i32* %i, align 4
%idxprom4 = sext i32 %7 to i64
- %8 = load i32** %trigger.addr, align 8
+ %8 = load i32*, i32** %trigger.addr, align 8
%arrayidx5 = getelementptr inbounds i32, i32* %8, i64 %idxprom4
- %9 = load i32* %arrayidx5, align 4
+ %9 = load i32, i32* %arrayidx5, align 4
%conv = sitofp i32 %9 to double
%add = fadd double %6, %conv
- %10 = load i32* %i, align 4
+ %10 = load i32, i32* %i, align 4
%idxprom6 = sext i32 %10 to i64
- %11 = load double** %A.addr, align 8
+ %11 = load double*, double** %A.addr, align 8
%arrayidx7 = getelementptr inbounds double, double* %11, i64 %idxprom6
store double %add, double* %arrayidx7, align 8
br label %if.end
@@ -335,7 +335,7 @@ if.end: ; preds = %if.then, %for.body
br label %for.inc
for.inc: ; preds = %if.end
- %12 = load i32* %i, align 4
+ %12 = load i32, i32* %i, align 4
%inc = add nsw i32 %12, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
@@ -373,34 +373,34 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 10000
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%idxprom = sext i32 %1 to i64
- %2 = load i32** %trigger.addr, align 8
+ %2 = load i32*, i32** %trigger.addr, align 8
%arrayidx = getelementptr inbounds i32, i32* %2, i64 %idxprom
- %3 = load i32* %arrayidx, align 4
+ %3 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp slt i32 %3, 100
br i1 %cmp1, label %if.then, label %if.end
if.then: ; preds = %for.body
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%idxprom2 = sext i32 %4 to i64
- %5 = load i32** %B.addr, align 8
+ %5 = load i32*, i32** %B.addr, align 8
%arrayidx3 = getelementptr inbounds i32, i32* %5, i64 %idxprom2
- %6 = load i32* %arrayidx3, align 4
- %7 = load i32* %i, align 4
+ %6 = load i32, i32* %arrayidx3, align 4
+ %7 = load i32, i32* %i, align 4
%idxprom4 = sext i32 %7 to i64
- %8 = load i32** %trigger.addr, align 8
+ %8 = load i32*, i32** %trigger.addr, align 8
%arrayidx5 = getelementptr inbounds i32, i32* %8, i64 %idxprom4
- %9 = load i32* %arrayidx5, align 4
+ %9 = load i32, i32* %arrayidx5, align 4
%add = add nsw i32 %6, %9
- %10 = load i32* %i, align 4
+ %10 = load i32, i32* %i, align 4
%idxprom6 = sext i32 %10 to i64
- %11 = load i32** %A.addr, align 8
+ %11 = load i32*, i32** %A.addr, align 8
%arrayidx7 = getelementptr inbounds i32, i32* %11, i64 %idxprom6
store i32 sdiv (i32 1, i32 zext (i1 icmp eq (i32** getelementptr inbounds ([1 x i32*]* @a, i64 0, i64 1), i32** @c) to i32)), i32* %arrayidx7, align 4
br label %if.end
@@ -409,7 +409,7 @@ if.end: ; preds = %if.then, %for.body
br label %for.inc
for.inc: ; preds = %if.end
- %12 = load i32* %i, align 4
+ %12 = load i32, i32* %i, align 4
%inc = add nsw i32 %12, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
@@ -459,29 +459,29 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp sge i32 %0, 0
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%idxprom = sext i32 %1 to i64
- %2 = load i32** %trigger.addr, align 8
+ %2 = load i32*, i32** %trigger.addr, align 8
%arrayidx = getelementptr inbounds i32, i32* %2, i64 %idxprom
- %3 = load i32* %arrayidx, align 4
+ %3 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %3, 0
br i1 %cmp1, label %if.then, label %if.end
if.then: ; preds = %for.body
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%idxprom2 = sext i32 %4 to i64
- %5 = load double** %in.addr, align 8
+ %5 = load double*, double** %in.addr, align 8
%arrayidx3 = getelementptr inbounds double, double* %5, i64 %idxprom2
- %6 = load double* %arrayidx3, align 8
+ %6 = load double, double* %arrayidx3, align 8
%add = fadd double %6, 5.000000e-01
- %7 = load i32* %i, align 4
+ %7 = load i32, i32* %i, align 4
%idxprom4 = sext i32 %7 to i64
- %8 = load double** %out.addr, align 8
+ %8 = load double*, double** %out.addr, align 8
%arrayidx5 = getelementptr inbounds double, double* %8, i64 %idxprom4
store double %add, double* %arrayidx5, align 8
br label %if.end
@@ -490,7 +490,7 @@ if.end: ; preds = %if.then, %for.body
br label %for.inc
for.inc: ; preds = %if.end
- %9 = load i32* %i, align 4
+ %9 = load i32, i32* %i, align 4
%dec = add nsw i32 %9, -1
store i32 %dec, i32* %i, align 4
br label %for.cond
diff --git a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll
index bdce3ad3d04..ba8e11e5874 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll
@@ -55,7 +55,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %N
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %add, i32* %arrayidx2, align 4
@@ -64,7 +64,7 @@ for.body: ; preds = %for.body, %entry
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
for.end: ; preds = %for.body
- %1 = load i32* %a, align 4
+ %1 = load i32, i32* %a, align 4
ret i32 %1
}
@@ -106,7 +106,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %N
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %add, i32* %arrayidx2, align 4
@@ -115,7 +115,7 @@ for.body: ; preds = %for.body, %entry
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
- %1 = load i32* %a, align 4
+ %1 = load i32, i32* %a, align 4
ret i32 %1
}
@@ -157,7 +157,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %N
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %add, i32* %arrayidx2, align 4
@@ -166,7 +166,7 @@ for.body: ; preds = %for.body, %entry
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !2
for.end: ; preds = %for.body
- %1 = load i32* %a, align 4
+ %1 = load i32, i32* %a, align 4
ret i32 %1
}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll b/llvm/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll
index 3207025f167..bb972c4488a 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll
@@ -11,7 +11,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%add = fadd float %0, 1.000000e+00
store float %add, float* %arrayidx, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/no-vector.ll b/llvm/test/Transforms/LoopVectorize/X86/no-vector.ll
index 1ac18f27f83..4b464b01267 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/no-vector.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/no-vector.ll
@@ -9,7 +9,7 @@ for.body: ; preds = %entry, %for.body
%i.06 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%r.05 = phi i32 [ %xor, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i8, i8* %s, i32 %i.06
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
%conv = sext i8 %0 to i32
%xor = xor i32 %conv, %r.05
%inc = add nsw i32 %i.06, 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll b/llvm/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
index a7f636290ba..631361c9d47 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
@@ -17,28 +17,28 @@ entry:
br label %for.body
for.body: ; preds = %for.body.for.body_crit_edge, %entry
- %indvars.iv.reload = load i64* %indvars.iv.reg2mem
+ %indvars.iv.reload = load i64, i64* %indvars.iv.reg2mem
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.reload
- %0 = load i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+ %0 = load i32, i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.reload
- %1 = load i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+ %1 = load i32, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%idxprom3 = sext i32 %1 to i64
%arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
store i32 %0, i32* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !3
%indvars.iv.next = add i64 %indvars.iv.reload, 1
; A new store without the parallel metadata here:
store i64 %indvars.iv.next, i64* %indvars.iv.next.reg2mem
- %indvars.iv.next.reload1 = load i64* %indvars.iv.next.reg2mem
+ %indvars.iv.next.reload1 = load i64, i64* %indvars.iv.next.reg2mem
%arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next.reload1
- %2 = load i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !3
+ %2 = load i32, i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !3
store i32 %2, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
- %indvars.iv.next.reload = load i64* %indvars.iv.next.reg2mem
+ %indvars.iv.next.reload = load i64, i64* %indvars.iv.next.reg2mem
%lftr.wideiv = trunc i64 %indvars.iv.next.reload to i32
%exitcond = icmp eq i32 %lftr.wideiv, 512
br i1 %exitcond, label %for.end, label %for.body.for.body_crit_edge, !llvm.loop !3
for.body.for.body_crit_edge: ; preds = %for.body
- %indvars.iv.next.reload2 = load i64* %indvars.iv.next.reg2mem
+ %indvars.iv.next.reload2 = load i64, i64* %indvars.iv.next.reg2mem
store i64 %indvars.iv.next.reload2, i64* %indvars.iv.reg2mem
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll b/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll
index 83bed435de4..53061edc35c 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll
@@ -21,15 +21,15 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4
+ %1 = load i32, i32* %arrayidx2, align 4
%idxprom3 = sext i32 %1 to i64
%arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
store i32 %0, i32* %arrayidx4, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next
- %2 = load i32* %arrayidx6, align 4
+ %2 = load i32, i32* %arrayidx6, align 4
store i32 %2, i32* %arrayidx2, align 4
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 512
@@ -51,9 +51,9 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+ %0 = load i32, i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+ %1 = load i32, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%idxprom3 = sext i32 %1 to i64
%arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
; This store might have originated from inlining a function with a parallel
@@ -61,7 +61,7 @@ for.body: ; preds = %for.body, %entry
store i32 %0, i32* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !5
%indvars.iv.next = add i64 %indvars.iv, 1
%arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next
- %2 = load i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !3
+ %2 = load i32, i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !3
store i32 %2, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 512
@@ -84,9 +84,9 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !6
+ %0 = load i32, i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !6
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !6
+ %1 = load i32, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !6
%idxprom3 = sext i32 %1 to i64
%arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
; This refers to the loop marked with !7 which we are not in at the moment.
@@ -94,7 +94,7 @@ for.body: ; preds = %for.body, %entry
store i32 %0, i32* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !7
%indvars.iv.next = add i64 %indvars.iv, 1
%arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next
- %2 = load i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !6
+ %2 = load i32, i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !6
store i32 %2, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !6
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 512
diff --git a/llvm/test/Transforms/LoopVectorize/X86/powof2div.ll b/llvm/test/Transforms/LoopVectorize/X86/powof2div.ll
index af3cfe06ef9..6bc738a7d14 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/powof2div.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/powof2div.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
@Foo = common global %struct.anon zeroinitializer, align 4
;CHECK-LABEL: @foo(
-;CHECK: load <4 x i32>*
+;CHECK: load <4 x i32>, <4 x i32>*
;CHECK: sdiv <4 x i32>
;CHECK: store <4 x i32>
@@ -18,7 +18,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds %struct.anon, %struct.anon* @Foo, i64 0, i32 2, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%div = sdiv i32 %0, 2
%arrayidx2 = getelementptr inbounds %struct.anon, %struct.anon* @Foo, i64 0, i32 0, i64 %indvars.iv
store i32 %div, i32* %arrayidx2, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/X86/reduction-crash.ll b/llvm/test/Transforms/LoopVectorize/X86/reduction-crash.ll
index ce2aa8963e2..3741b95d985 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/reduction-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/reduction-crash.ll
@@ -14,14 +14,14 @@ bb:
br label %bb2
bb2: ; preds = %bb
- %tmp = load double* null, align 8
+ %tmp = load double, double* null, align 8
br i1 undef, label %bb3, label %bb12
bb3: ; preds = %bb3, %bb2
%tmp4 = phi double [ %tmp9, %bb3 ], [ %tmp, %bb2 ]
%tmp5 = phi i32 [ %tmp8, %bb3 ], [ 0, %bb2 ]
%tmp6 = getelementptr inbounds [16 x double], [16 x double]* undef, i32 0, i32 %tmp5
- %tmp7 = load double* %tmp6, align 4
+ %tmp7 = load double, double* %tmp6, align 4
%tmp8 = add nsw i32 %tmp5, 1
%tmp9 = fadd fast double %tmp4, undef
%tmp10 = getelementptr inbounds float, float* %arg, i32 %tmp5
diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
index 38e3c086347..47c262b11b4 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
@@ -31,9 +31,9 @@ define void @example1() optsize {
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = add nsw i32 %5, %3
%7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %6, i32* %7, align 4
@@ -77,9 +77,9 @@ define void @example2(i32 %n, i32 %x) optsize {
%.02 = phi i32 [ %4, %.lr.ph ], [ %n, %.preheader ]
%4 = add nsw i32 %.02, -1
%5 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
- %6 = load i32* %5, align 4
+ %6 = load i32, i32* %5, align 4
%7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
- %8 = load i32* %7, align 4
+ %8 = load i32, i32* %7, align 4
%9 = and i32 %8, %6
%10 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %9, i32* %10, align 4
@@ -105,7 +105,7 @@ define void @example3(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture
%.023 = phi i32* [ %3, %.lr.ph ], [ %q, %0 ]
%2 = add nsw i32 %.05, -1
%3 = getelementptr inbounds i32, i32* %.023, i64 1
- %4 = load i32* %.023, align 16
+ %4 = load i32, i32* %.023, align 16
%5 = getelementptr inbounds i32, i32* %.014, i64 1
store i32 %4, i32* %.014, align 16
%6 = icmp eq i32 %2, 0
@@ -129,7 +129,7 @@ define void @example4(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture
%.023 = phi i32* [ %3, %.lr.ph ], [ %q, %0 ]
%2 = add nsw i32 %.05, -1
%3 = getelementptr inbounds i32, i32* %.023, i64 1
- %4 = load i32* %.023, align 16
+ %4 = load i32, i32* %.023, align 16
%5 = getelementptr inbounds i32, i32* %.014, i64 1
store i32 %4, i32* %.014, align 16
%6 = icmp eq i32 %2, 0
@@ -153,7 +153,7 @@ define void @example23(i16* nocapture %src, i32* nocapture %dst) optsize {
%.013 = phi i32* [ %dst, %0 ], [ %6, %1 ]
%i.02 = phi i32 [ 0, %0 ], [ %7, %1 ]
%2 = getelementptr inbounds i16, i16* %.04, i64 1
- %3 = load i16* %.04, align 2
+ %3 = load i16, i16* %.04, align 2
%4 = zext i16 %3 to i32
%5 = shl nuw nsw i32 %4, 7
%6 = getelementptr inbounds i32, i32* %.013, i64 1
@@ -179,7 +179,7 @@ define void @example23b(i16* noalias nocapture %src, i32* noalias nocapture %dst
%.013 = phi i32* [ %dst, %0 ], [ %6, %1 ]
%i.02 = phi i32 [ 0, %0 ], [ %7, %1 ]
%2 = getelementptr inbounds i16, i16* %.04, i64 1
- %3 = load i16* %.04, align 2
+ %3 = load i16, i16* %.04, align 2
%4 = zext i16 %3 to i32
%5 = shl nuw nsw i32 %4, 7
%6 = getelementptr inbounds i32, i32* %.013, i64 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/tripcount.ll b/llvm/test/Transforms/LoopVectorize/X86/tripcount.ll
index 9984e2d04e8..c0bbb92c2c5 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/tripcount.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/tripcount.ll
@@ -23,7 +23,7 @@ for.body.preheader:
for.body:
%i.07 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @big, i32 0, i32 %i.07
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%neg = xor i32 %0, -1
store i32 %neg, i32* %arrayidx, align 4
%inc = add nsw i32 %i.07, 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/uint64_to_fp64-cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/uint64_to_fp64-cost-model.ll
index 40421d4cff0..38af11c443d 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/uint64_to_fp64-cost-model.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/uint64_to_fp64-cost-model.ll
@@ -13,7 +13,7 @@ entry:
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i64, i64* %a, i64 %indvars.iv
- %tmp = load i64* %arrayidx, align 4
+ %tmp = load i64, i64* %arrayidx, align 4
%conv = uitofp i64 %tmp to double
%arrayidx2 = getelementptr inbounds double, double* %b, i64 %indvars.iv
store double %conv, double* %arrayidx2, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/X86/unroll-pm.ll b/llvm/test/Transforms/LoopVectorize/X86/unroll-pm.ll
index 50fdf187499..52914b6a7c6 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/unroll-pm.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/unroll-pm.ll
@@ -18,7 +18,7 @@ define i32 @bar(i32* nocapture %A, i32 %n) nounwind uwtable ssp {
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = add nsw i32 %3, 6
store i32 %4, i32* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll b/llvm/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll
index eff6ca49edc..4411da3f0a9 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll
@@ -16,8 +16,8 @@ target triple = "x86_64-apple-macosx10.8.0"
; CHECK-VECTOR: ret
;
; CHECK-SCALAR-LABEL: @foo(
-; CHECK-SCALAR: load i32*
-; CHECK-SCALAR-NOT: load i32*
+; CHECK-SCALAR: load i32, i32*
+; CHECK-SCALAR-NOT: load i32, i32*
; CHECK-SCALAR: store i32
; CHECK-SCALAR-NOT: store i32
; CHECK-SCALAR: ret
@@ -27,7 +27,7 @@ define i32 @foo(i32* nocapture %A) nounwind uwtable ssp {
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = add nsw i32 %3, 6
store i32 %4, i32* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -58,7 +58,7 @@ define i32 @bar(i32* nocapture %A, i32 %n) nounwind uwtable ssp {
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = add nsw i32 %3, 6
store i32 %4, i32* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -87,7 +87,7 @@ entry:
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%mul = fmul float %0, %N
%arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
store float %mul, float* %arrayidx2, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/X86/unroll_selection.ll b/llvm/test/Transforms/LoopVectorize/X86/unroll_selection.ll
index 360d66d3571..71b829071e2 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/unroll_selection.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/unroll_selection.ll
@@ -17,7 +17,7 @@ define void @reg_pressure(double* nocapture %A, i32 %n) nounwind uwtable ssp {
; <label>:2 ; preds = %2, %0
%indvars.iv = phi i64 [ %indvars.iv.next, %2 ], [ %1, %0 ]
%3 = getelementptr inbounds double, double* %A, i64 %indvars.iv
- %4 = load double* %3, align 8
+ %4 = load double, double* %3, align 8
%5 = fadd double %4, 3.000000e+00
%6 = fmul double %4, 2.000000e+00
%7 = fadd double %5, %6
@@ -59,7 +59,7 @@ define void @small_loop(i16* nocapture %A, i64 %n) nounwind uwtable ssp {
.lr.ph: ; preds = %0, %.lr.ph
%i.01 = phi i64 [ %5, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds i16, i16* %A, i64 %i.01
- %3 = load i16* %2, align 2
+ %3 = load i16, i16* %2, align 2
%4 = xor i16 %3, 3
store i16 %4, i16* %2, align 2
%5 = add i64 %i.01, 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.ll b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
index e6a0d475161..10307568b85 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
@@ -36,7 +36,7 @@ for.body.preheader:
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
+ %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
%call = tail call float @llvm.sin.f32(float %0)
%arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
@@ -70,7 +70,7 @@ for.body.preheader:
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+ %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
%call = tail call float @llvm.sin.f32(float %0)
%arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
index f4bc1600c3c..8d139ac7e5a 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
@@ -30,9 +30,9 @@ entry:
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
+ %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
%arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
+ %1 = load float, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
%add = fadd fast float %0, %1
store float %add, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -56,9 +56,9 @@ entry:
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
+ %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
%arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
+ %1 = load float, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%add = fadd fast float %0, %1
store float %add, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vector-scalar-select-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/vector-scalar-select-cost.ll
index 86cebf342a6..5efabe16d32 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vector-scalar-select-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vector-scalar-select-cost.ll
@@ -16,9 +16,9 @@ define void @scalarselect(i1 %cond) {
; <label>:1
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = add nsw i32 %5, %3
%7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
@@ -43,9 +43,9 @@ define void @vectorselect(i1 %cond) {
; <label>:1
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = add nsw i32 %5, %3
%7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
%8 = icmp ult i64 %indvars.iv, 8
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll b/llvm/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll
index 048c2603bbd..6cd3c9c3bc0 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll
@@ -19,7 +19,7 @@ target triple = "x86_64-apple-macosx10.8.0"
; CHECK: test_consecutive_store
; CHECK: The Widest type: 64 bits
define void @test_consecutive_store(%0**, %0**, %0** nocapture) nounwind ssp uwtable align 2 {
- %4 = load %0** %2, align 8
+ %4 = load %0*, %0** %2, align 8
%5 = icmp eq %0** %0, %1
br i1 %5, label %12, label %6
@@ -62,7 +62,7 @@ define void @test_nonconsecutive_store() nounwind ssp uwtable {
; <label>:3 ; preds = %3, %1
%4 = phi i64 [ 0, %1 ], [ %11, %3 ]
%5 = getelementptr inbounds [2048 x i16], [2048 x i16]* @q, i64 0, i64 %4
- %6 = load i16* %5, align 2
+ %6 = load i16, i16* %5, align 2
%7 = sext i16 %6 to i64
%8 = add i64 %7, 1
%9 = inttoptr i64 %8 to i32*
@@ -101,7 +101,7 @@ define i8 @test_consecutive_ptr_load() nounwind readonly ssp uwtable {
%2 = phi i64 [ 0, %0 ], [ %10, %1 ]
%3 = phi i8 [ 0, %0 ], [ %9, %1 ]
%4 = getelementptr inbounds [1024 x i32*], [1024 x i32*]* @ia, i32 0, i64 %2
- %5 = load i32** %4, align 4
+ %5 = load i32*, i32** %4, align 4
%6 = ptrtoint i32* %5 to i64
%7 = trunc i64 %6 to i8
%8 = add i8 %3, 1
@@ -129,7 +129,7 @@ define void @test_nonconsecutive_ptr_load() nounwind ssp uwtable {
%4 = phi i64 [ 0, %1 ], [ %10, %3 ]
%5 = getelementptr inbounds [2048 x [8 x i32*]], [2048 x [8 x i32*]]* @p2, i64 0, i64 %4, i64 %2
%6 = getelementptr inbounds [2048 x i16], [2048 x i16]* @q2, i64 0, i64 %4
- %7 = load i32** %5, align 2
+ %7 = load i32*, i32** %5, align 2
%8 = ptrtoint i32* %7 to i64
%9 = trunc i64 %8 to i16
store i16 %9, i16* %6, align 8
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll
index 92eba528c0d..2ab0ee3ea30 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll
@@ -98,10 +98,10 @@ for.body.preheader: ; preds = %entry
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv, !dbg !35
- %0 = load i32* %arrayidx, align 4, !dbg !35, !tbaa !18
+ %0 = load i32, i32* %arrayidx, align 4, !dbg !35, !tbaa !18
%idxprom1 = sext i32 %0 to i64, !dbg !35
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1, !dbg !35
- %1 = load i32* %arrayidx2, align 4, !dbg !35, !tbaa !18
+ %1 = load i32, i32* %arrayidx2, align 4, !dbg !35, !tbaa !18
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !35
store i32 %1, i32* %arrayidx4, align 4, !dbg !35, !tbaa !18
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !32
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll
index cf64283b248..10e27c10cc0 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll
@@ -27,10 +27,10 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%add8 = phi i32 [ 0, %entry ], [ %add, %for.body ], !dbg !19
%arrayidx = getelementptr inbounds [16 x i8], [16 x i8]* %cb, i64 0, i64 %indvars.iv, !dbg !19
- %0 = load i8* %arrayidx, align 1, !dbg !19, !tbaa !21
+ %0 = load i8, i8* %arrayidx, align 1, !dbg !19, !tbaa !21
%conv = sext i8 %0 to i32, !dbg !19
%arrayidx2 = getelementptr inbounds [16 x i8], [16 x i8]* %cc, i64 0, i64 %indvars.iv, !dbg !19
- %1 = load i8* %arrayidx2, align 1, !dbg !19, !tbaa !21
+ %1 = load i8, i8* %arrayidx2, align 1, !dbg !19, !tbaa !21
%conv3 = sext i8 %1 to i32, !dbg !19
%sub = sub i32 %conv, %conv3, !dbg !19
%add = add nsw i32 %sub, %add8, !dbg !19
diff --git a/llvm/test/Transforms/LoopVectorize/align.ll b/llvm/test/Transforms/LoopVectorize/align.ll
index f12dbde416d..7ee401deef4 100644
--- a/llvm/test/Transforms/LoopVectorize/align.ll
+++ b/llvm/test/Transforms/LoopVectorize/align.ll
@@ -6,8 +6,8 @@ target triple = "x86_64-apple-macosx10.8.0"
; Make sure we output the abi alignment if no alignment is specified.
;CHECK-LABEL: @align
-;CHECK: load <4 x i32>* {{.*}} align 4
-;CHECK: load <4 x i32>* {{.*}} align 4
+;CHECK: load <4 x i32>, <4 x i32>* {{.*}} align 4
+;CHECK: load <4 x i32>, <4 x i32>* {{.*}} align 4
;CHECK: store <4 x i32> {{.*}} align 4
define void @align(i32* %a, i32* %b, i32* %c) nounwind uwtable ssp {
@@ -16,9 +16,9 @@ define void @align(i32* %a, i32* %b, i32* %c) nounwind uwtable ssp {
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %3 = load i32* %2
+ %3 = load i32, i32* %2
%4 = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
- %5 = load i32* %4
+ %5 = load i32, i32* %4
%6 = add nsw i32 %5, %3
%7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %6, i32* %7
diff --git a/llvm/test/Transforms/LoopVectorize/bzip_reverse_loops.ll b/llvm/test/Transforms/LoopVectorize/bzip_reverse_loops.ll
index 707b5b9fd32..f1efb256391 100644
--- a/llvm/test/Transforms/LoopVectorize/bzip_reverse_loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/bzip_reverse_loops.ll
@@ -17,7 +17,7 @@ do.body: ; preds = %cond.end, %entry
%n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %cond.end ]
%p.addr.0 = phi i16* [ %p, %entry ], [ %incdec.ptr, %cond.end ]
%incdec.ptr = getelementptr inbounds i16, i16* %p.addr.0, i64 -1
- %0 = load i16* %incdec.ptr, align 2
+ %0 = load i16, i16* %incdec.ptr, align 2
%conv = zext i16 %0 to i32
%cmp = icmp ult i32 %conv, %size
br i1 %cmp, label %cond.end, label %cond.true
@@ -52,7 +52,7 @@ do.body: ; preds = %do.body, %entry
%n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.body ]
%p.0 = phi i32* [ %a, %entry ], [ %incdec.ptr, %do.body ]
%incdec.ptr = getelementptr inbounds i32, i32* %p.0, i64 -1
- %0 = load i32* %incdec.ptr, align 4
+ %0 = load i32, i32* %incdec.ptr, align 4
%cmp = icmp slt i32 %0, %wsize
%sub = sub nsw i32 %0, %wsize
%cond = select i1 %cmp, i32 0, i32 %sub
diff --git a/llvm/test/Transforms/LoopVectorize/calloc.ll b/llvm/test/Transforms/LoopVectorize/calloc.ll
index 3ac3b7b6106..a41e51787f3 100644
--- a/llvm/test/Transforms/LoopVectorize/calloc.ll
+++ b/llvm/test/Transforms/LoopVectorize/calloc.ll
@@ -23,7 +23,7 @@ for.body: ; preds = %for.body, %for.body
%i.030 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%shr = lshr i64 %i.030, 1
%arrayidx = getelementptr inbounds i8, i8* %bytes, i64 %shr
- %1 = load i8* %arrayidx, align 1
+ %1 = load i8, i8* %arrayidx, align 1
%conv = zext i8 %1 to i32
%and = shl i64 %i.030, 2
%neg = and i64 %and, 4
diff --git a/llvm/test/Transforms/LoopVectorize/conditional-assignment.ll b/llvm/test/Transforms/LoopVectorize/conditional-assignment.ll
index 7c523201b61..15750aad6b8 100644
--- a/llvm/test/Transforms/LoopVectorize/conditional-assignment.ll
+++ b/llvm/test/Transforms/LoopVectorize/conditional-assignment.ll
@@ -13,7 +13,7 @@ entry:
for.body: ; preds = %for.inc, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%arrayidx = getelementptr inbounds i32, i32* %indices, i64 %indvars.iv, !dbg !12
- %0 = load i32* %arrayidx, align 4, !dbg !12, !tbaa !14
+ %0 = load i32, i32* %arrayidx, align 4, !dbg !12, !tbaa !14
%cmp1 = icmp eq i32 %0, 1024, !dbg !12
br i1 %cmp1, label %if.then, label %for.inc, !dbg !12
diff --git a/llvm/test/Transforms/LoopVectorize/control-flow.ll b/llvm/test/Transforms/LoopVectorize/control-flow.ll
index ee73110cf6d..c95d55cef18 100644
--- a/llvm/test/Transforms/LoopVectorize/control-flow.ll
+++ b/llvm/test/Transforms/LoopVectorize/control-flow.ll
@@ -31,7 +31,7 @@ for.body.preheader: ; preds = %entry
for.body: ; preds = %for.body.preheader, %if.else
%indvars.iv = phi i64 [ %indvars.iv.next, %if.else ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !12
- %0 = load i32* %arrayidx, align 4, !dbg !12, !tbaa !15
+ %0 = load i32, i32* %arrayidx, align 4, !dbg !12, !tbaa !15
%cmp1 = icmp sgt i32 %0, 10, !dbg !12
br i1 %cmp1, label %end.loopexit, label %if.else, !dbg !12
diff --git a/llvm/test/Transforms/LoopVectorize/cpp-new-array.ll b/llvm/test/Transforms/LoopVectorize/cpp-new-array.ll
index cc41e5c47d8..22896d33428 100644
--- a/llvm/test/Transforms/LoopVectorize/cpp-new-array.ll
+++ b/llvm/test/Transforms/LoopVectorize/cpp-new-array.ll
@@ -25,10 +25,10 @@ for.body: ; preds = %entry, %for.body
%i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%idxprom = sext i32 %i.01 to i64
%arrayidx = getelementptr inbounds float, float* %0, i64 %idxprom
- %3 = load float* %arrayidx, align 4
+ %3 = load float, float* %arrayidx, align 4
%idxprom5 = sext i32 %i.01 to i64
%arrayidx6 = getelementptr inbounds float, float* %1, i64 %idxprom5
- %4 = load float* %arrayidx6, align 4
+ %4 = load float, float* %arrayidx6, align 4
%add = fadd float %3, %4
%idxprom7 = sext i32 %i.01 to i64
%arrayidx8 = getelementptr inbounds float, float* %2, i64 %idxprom7
@@ -38,7 +38,7 @@ for.body: ; preds = %entry, %for.body
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
- %5 = load float* %2, align 4
+ %5 = load float, float* %2, align 4
%conv10 = fptosi float %5 to i32
ret i32 %conv10
}
diff --git a/llvm/test/Transforms/LoopVectorize/dbg.value.ll b/llvm/test/Transforms/LoopVectorize/dbg.value.ll
index f3e75d84393..bf9a7967ecc 100644
--- a/llvm/test/Transforms/LoopVectorize/dbg.value.ll
+++ b/llvm/test/Transforms/LoopVectorize/dbg.value.ll
@@ -18,9 +18,9 @@ for.body:
;CHECK: load <4 x i32>
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv, !dbg !19
- %0 = load i32* %arrayidx, align 4, !dbg !19
+ %0 = load i32, i32* %arrayidx, align 4, !dbg !19
%arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @C, i64 0, i64 %indvars.iv, !dbg !19
- %1 = load i32* %arrayidx2, align 4, !dbg !19
+ %1 = load i32, i32* %arrayidx2, align 4, !dbg !19
%add = add nsw i32 %1, %0, !dbg !19
%arrayidx4 = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv, !dbg !19
store i32 %add, i32* %arrayidx4, align 4, !dbg !19
diff --git a/llvm/test/Transforms/LoopVectorize/debugloc.ll b/llvm/test/Transforms/LoopVectorize/debugloc.ll
index a30ca62752b..c443ae482f0 100644
--- a/llvm/test/Transforms/LoopVectorize/debugloc.ll
+++ b/llvm/test/Transforms/LoopVectorize/debugloc.ll
@@ -9,7 +9,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
; CHECK: vector.body
; CHECK: index {{.*}}, !dbg ![[LOC]]
; CHECK: getelementptr inbounds i32, i32* %a, {{.*}}, !dbg ![[LOC2:[0-9]+]]
-; CHECK: load <2 x i32>* {{.*}}, !dbg ![[LOC2]]
+; CHECK: load <2 x i32>, <2 x i32>* {{.*}}, !dbg ![[LOC2]]
; CHECK: add <2 x i32> {{.*}}, !dbg ![[LOC2]]
; CHECK: add i64 %index, 2, !dbg ![[LOC]]
; CHECK: icmp eq i64 %index.next, %end.idx.rnd.down, !dbg ![[LOC]]
@@ -33,7 +33,7 @@ for.body: ; preds = %for.body.lr.ph, %fo
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%sum.05 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv, !dbg !22
- %0 = load i32* %arrayidx, align 4, !dbg !22
+ %0 = load i32, i32* %arrayidx, align 4, !dbg !22
%add = add i32 %0, %sum.05, !dbg !22
tail call void @llvm.dbg.value(metadata i32 %add.lcssa, i64 0, metadata !15, metadata !{}), !dbg !22
%indvars.iv.next = add i64 %indvars.iv, 1, !dbg !21
diff --git a/llvm/test/Transforms/LoopVectorize/duplicated-metadata.ll b/llvm/test/Transforms/LoopVectorize/duplicated-metadata.ll
index e82d2804207..9f7cdef51fe 100644
--- a/llvm/test/Transforms/LoopVectorize/duplicated-metadata.ll
+++ b/llvm/test/Transforms/LoopVectorize/duplicated-metadata.ll
@@ -13,7 +13,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
- %p = load float* %arrayidx, align 4
+ %p = load float, float* %arrayidx, align 4
%mul = fmul float %p, 2.000000e+00
store float %mul, float* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/ee-crash.ll b/llvm/test/Transforms/LoopVectorize/ee-crash.ll
index 7ed1c669b74..0c387346f86 100644
--- a/llvm/test/Transforms/LoopVectorize/ee-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/ee-crash.ll
@@ -18,7 +18,7 @@ entry:
for.body.i: ; preds = %entry, %for.body.i
%__init.addr.05.i = phi i32 [ %add.i, %for.body.i ], [ 0, %entry ]
%__first.addr.04.i = phi i32* [ %incdec.ptr.i, %for.body.i ], [ %A, %entry ]
- %0 = load i32* %__first.addr.04.i, align 4
+ %0 = load i32, i32* %__first.addr.04.i, align 4
%q1 = extractelement <2 x i32> %q, i32 %n
%q2 = add nsw i32 %0, %q1
%add.i = add nsw i32 %q2, %__init.addr.05.i
diff --git a/llvm/test/Transforms/LoopVectorize/exact.ll b/llvm/test/Transforms/LoopVectorize/exact.ll
index 90bad3a99c2..0a4e0dc17df 100644
--- a/llvm/test/Transforms/LoopVectorize/exact.ll
+++ b/llvm/test/Transforms/LoopVectorize/exact.ll
@@ -12,7 +12,7 @@ entry:
for.body:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %x, i64 %iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%conv1 = lshr exact i32 %0, 1
store i32 %conv1, i32* %arrayidx, align 4
%iv.next = add nuw nsw i64 %iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/flags.ll b/llvm/test/Transforms/LoopVectorize/flags.ll
index a4e392f01a4..fcbc8741741 100644
--- a/llvm/test/Transforms/LoopVectorize/flags.ll
+++ b/llvm/test/Transforms/LoopVectorize/flags.ll
@@ -15,7 +15,7 @@ define i32 @flags1(i32 %n, i32* nocapture %A) nounwind uwtable ssp {
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 9, %0 ]
%2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = mul nsw i32 %3, 3
store i32 %4, i32* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -40,7 +40,7 @@ define i32 @flags2(i32 %n, i32* nocapture %A) nounwind uwtable ssp {
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 9, %0 ]
%2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = mul i32 %3, 3
store i32 %4, i32* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -67,7 +67,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%q.04 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds float, float* %s, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%add = fadd fast float %q.04, %0
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 256
diff --git a/llvm/test/Transforms/LoopVectorize/float-reduction.ll b/llvm/test/Transforms/LoopVectorize/float-reduction.ll
index 1401bd9d7c7..1310b279f61 100644
--- a/llvm/test/Transforms/LoopVectorize/float-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/float-reduction.ll
@@ -13,7 +13,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.04 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%add = fadd fast float %sum.04, %0
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -35,7 +35,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.04 = phi float [ 0.000000e+00, %entry ], [ %sub, %for.body ]
%arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%sub = fsub fast float %sum.04, %0
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopVectorize/funcall.ll b/llvm/test/Transforms/LoopVectorize/funcall.ll
index 81158eeb483..35c2dfca4b2 100644
--- a/llvm/test/Transforms/LoopVectorize/funcall.ll
+++ b/llvm/test/Transforms/LoopVectorize/funcall.ll
@@ -17,7 +17,7 @@ entry:
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds double, double* %d, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%1 = tail call double @llvm.pow.f64(double %0, double %t)
store double %1, double* %arrayidx, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/gcc-examples.ll b/llvm/test/Transforms/LoopVectorize/gcc-examples.ll
index 9bc5cb78e03..18809018615 100644
--- a/llvm/test/Transforms/LoopVectorize/gcc-examples.ll
+++ b/llvm/test/Transforms/LoopVectorize/gcc-examples.ll
@@ -45,9 +45,9 @@ define void @example1() nounwind uwtable ssp {
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = add nsw i32 %5, %3
%7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %6, i32* %7, align 4
@@ -96,9 +96,9 @@ define void @example2(i32 %n, i32 %x) nounwind uwtable ssp {
%.02 = phi i32 [ %4, %.lr.ph ], [ %n, %.preheader ]
%4 = add nsw i32 %.02, -1
%5 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
- %6 = load i32* %5, align 4
+ %6 = load i32, i32* %5, align 4
%7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
- %8 = load i32* %7, align 4
+ %8 = load i32, i32* %7, align 4
%9 = and i32 %8, %6
%10 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %9, i32* %10, align 4
@@ -129,7 +129,7 @@ define void @example3(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture
%.023 = phi i32* [ %3, %.lr.ph ], [ %q, %0 ]
%2 = add nsw i32 %.05, -1
%3 = getelementptr inbounds i32, i32* %.023, i64 1
- %4 = load i32* %.023, align 16
+ %4 = load i32, i32* %.023, align 16
%5 = getelementptr inbounds i32, i32* %.014, i64 1
store i32 %4, i32* %.014, align 16
%6 = icmp eq i32 %2, 0
@@ -162,7 +162,7 @@ define void @example4(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture
%.018 = phi i32* [ %8, %.lr.ph10 ], [ %p, %0 ]
%.027 = phi i32* [ %5, %.lr.ph10 ], [ %q, %0 ]
%5 = getelementptr inbounds i32, i32* %.027, i64 1
- %6 = load i32* %.027, align 16
+ %6 = load i32, i32* %.027, align 16
%7 = add nsw i32 %6, 5
%8 = getelementptr inbounds i32, i32* %.018, i64 1
store i32 %7, i32* %.018, align 16
@@ -177,10 +177,10 @@ define void @example4(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture
%indvars.iv11 = phi i64 [ %indvars.iv.next12, %.lr.ph6 ], [ 0, %.preheader4 ]
%indvars.iv.next12 = add i64 %indvars.iv11, 1
%11 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv.next12
- %12 = load i32* %11, align 4
+ %12 = load i32, i32* %11, align 4
%13 = add nsw i64 %indvars.iv11, 3
%14 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %13
- %15 = load i32* %14, align 4
+ %15 = load i32, i32* %14, align 4
%16 = add nsw i32 %15, %12
%17 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv11
store i32 %16, i32* %17, align 4
@@ -191,7 +191,7 @@ define void @example4(i32 %n, i32* noalias nocapture %p, i32* noalias nocapture
.lr.ph: ; preds = %.preheader, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %.preheader ]
%18 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
- %19 = load i32* %18, align 4
+ %19 = load i32, i32* %18, align 4
%20 = icmp sgt i32 %19, 4
%21 = select i1 %20, i32 4, i32 0
%22 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
@@ -250,9 +250,9 @@ define i32 @example9() nounwind uwtable readonly ssp {
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%diff.01 = phi i32 [ 0, %0 ], [ %7, %1 ]
%2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = add i32 %3, %diff.01
%7 = sub i32 %6, %5
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -277,16 +277,16 @@ define void @example10a(i16* noalias nocapture %sa, i16* noalias nocapture %sb,
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds i32, i32* %ib, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds i32, i32* %ic, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = add nsw i32 %5, %3
%7 = getelementptr inbounds i32, i32* %ia, i64 %indvars.iv
store i32 %6, i32* %7, align 4
%8 = getelementptr inbounds i16, i16* %sb, i64 %indvars.iv
- %9 = load i16* %8, align 2
+ %9 = load i16, i16* %8, align 2
%10 = getelementptr inbounds i16, i16* %sc, i64 %indvars.iv
- %11 = load i16* %10, align 2
+ %11 = load i16, i16* %10, align 2
%12 = add i16 %11, %9
%13 = getelementptr inbounds i16, i16* %sa, i64 %indvars.iv
store i16 %12, i16* %13, align 2
@@ -310,7 +310,7 @@ define void @example10b(i16* noalias nocapture %sa, i16* noalias nocapture %sb,
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds i16, i16* %sb, i64 %indvars.iv
- %3 = load i16* %2, align 2
+ %3 = load i16, i16* %2, align 2
%4 = sext i16 %3 to i32
%5 = getelementptr inbounds i32, i32* %ia, i64 %indvars.iv
store i32 %4, i32* %5, align 4
@@ -341,14 +341,14 @@ define void @example11() nounwind uwtable ssp {
%2 = shl nsw i64 %indvars.iv, 1
%3 = or i64 %2, 1
%4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %3
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %3
- %7 = load i32* %6, align 4
+ %7 = load i32, i32* %6, align 4
%8 = mul nsw i32 %7, %5
%9 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %2
- %10 = load i32* %9, align 8
+ %10 = load i32, i32* %9, align 8
%11 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %2
- %12 = load i32* %11, align 8
+ %12 = load i32, i32* %11, align 8
%13 = mul nsw i32 %12, %10
%14 = sub nsw i32 %8, %13
%15 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
@@ -397,18 +397,18 @@ define void @example13(i32** nocapture %A, i32** nocapture %B, i32* nocapture %o
.preheader: ; preds = %14, %0
%indvars.iv4 = phi i64 [ 0, %0 ], [ %indvars.iv.next5, %14 ]
%1 = getelementptr inbounds i32*, i32** %A, i64 %indvars.iv4
- %2 = load i32** %1, align 8
+ %2 = load i32*, i32** %1, align 8
%3 = getelementptr inbounds i32*, i32** %B, i64 %indvars.iv4
- %4 = load i32** %3, align 8
+ %4 = load i32*, i32** %3, align 8
br label %5
; <label>:5 ; preds = %.preheader, %5
%indvars.iv = phi i64 [ 0, %.preheader ], [ %indvars.iv.next, %5 ]
%diff.02 = phi i32 [ 0, %.preheader ], [ %11, %5 ]
%6 = getelementptr inbounds i32, i32* %2, i64 %indvars.iv
- %7 = load i32* %6, align 4
+ %7 = load i32, i32* %6, align 4
%8 = getelementptr inbounds i32, i32* %4, i64 %indvars.iv
- %9 = load i32* %8, align 4
+ %9 = load i32, i32* %8, align 4
%10 = add i32 %7, %diff.02
%11 = sub i32 %10, %9
%indvars.iv.next = add i64 %indvars.iv, 8
@@ -445,13 +445,13 @@ define void @example14(i32** nocapture %in, i32** nocapture %coeff, i32* nocaptu
%indvars.iv = phi i64 [ 0, %.preheader ], [ %indvars.iv.next, %0 ]
%sum.12 = phi i32 [ %sum.05, %.preheader ], [ %10, %0 ]
%1 = getelementptr inbounds i32*, i32** %in, i64 %indvars.iv
- %2 = load i32** %1, align 8
+ %2 = load i32*, i32** %1, align 8
%3 = getelementptr inbounds i32, i32* %2, i64 %indvars.iv7
- %4 = load i32* %3, align 4
+ %4 = load i32, i32* %3, align 4
%5 = getelementptr inbounds i32*, i32** %coeff, i64 %indvars.iv
- %6 = load i32** %5, align 8
+ %6 = load i32*, i32** %5, align 8
%7 = getelementptr inbounds i32, i32* %6, i64 %indvars.iv7
- %8 = load i32* %7, align 4
+ %8 = load i32, i32* %7, align 4
%9 = mul nsw i32 %8, %4
%10 = add nsw i32 %9, %sum.12
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -479,13 +479,13 @@ define void @example14(i32** nocapture %in, i32** nocapture %coeff, i32* nocaptu
%sum.12.1 = phi i32 [ %sum.05.1, %.preheader.1 ], [ %23, %12 ]
%13 = add nsw i64 %indvars.iv.1, 1
%14 = getelementptr inbounds i32*, i32** %in, i64 %13
- %15 = load i32** %14, align 8
+ %15 = load i32*, i32** %14, align 8
%16 = getelementptr inbounds i32, i32* %15, i64 %indvars.iv7.1
- %17 = load i32* %16, align 4
+ %17 = load i32, i32* %16, align 4
%18 = getelementptr inbounds i32*, i32** %coeff, i64 %indvars.iv.1
- %19 = load i32** %18, align 8
+ %19 = load i32*, i32** %18, align 8
%20 = getelementptr inbounds i32, i32* %19, i64 %indvars.iv7.1
- %21 = load i32* %20, align 4
+ %21 = load i32, i32* %20, align 4
%22 = mul nsw i32 %21, %17
%23 = add nsw i32 %22, %sum.12.1
%lftr.wideiv.1 = trunc i64 %13 to i32
@@ -513,13 +513,13 @@ define void @example14(i32** nocapture %in, i32** nocapture %coeff, i32* nocaptu
%sum.12.2 = phi i32 [ %sum.05.2, %.preheader.2 ], [ %37, %26 ]
%27 = add nsw i64 %indvars.iv.2, 2
%28 = getelementptr inbounds i32*, i32** %in, i64 %27
- %29 = load i32** %28, align 8
+ %29 = load i32*, i32** %28, align 8
%30 = getelementptr inbounds i32, i32* %29, i64 %indvars.iv7.2
- %31 = load i32* %30, align 4
+ %31 = load i32, i32* %30, align 4
%32 = getelementptr inbounds i32*, i32** %coeff, i64 %indvars.iv.2
- %33 = load i32** %32, align 8
+ %33 = load i32*, i32** %32, align 8
%34 = getelementptr inbounds i32, i32* %33, i64 %indvars.iv7.2
- %35 = load i32* %34, align 4
+ %35 = load i32, i32* %34, align 4
%36 = mul nsw i32 %35, %31
%37 = add nsw i32 %36, %sum.12.2
%indvars.iv.next.2 = add i64 %indvars.iv.2, 1
@@ -548,13 +548,13 @@ define void @example14(i32** nocapture %in, i32** nocapture %coeff, i32* nocaptu
%sum.12.3 = phi i32 [ %sum.05.3, %.preheader.3 ], [ %51, %40 ]
%41 = add nsw i64 %indvars.iv.3, 3
%42 = getelementptr inbounds i32*, i32** %in, i64 %41
- %43 = load i32** %42, align 8
+ %43 = load i32*, i32** %42, align 8
%44 = getelementptr inbounds i32, i32* %43, i64 %indvars.iv7.3
- %45 = load i32* %44, align 4
+ %45 = load i32, i32* %44, align 4
%46 = getelementptr inbounds i32*, i32** %coeff, i64 %indvars.iv.3
- %47 = load i32** %46, align 8
+ %47 = load i32*, i32** %46, align 8
%48 = getelementptr inbounds i32, i32* %47, i64 %indvars.iv7.3
- %49 = load i32* %48, align 4
+ %49 = load i32, i32* %48, align 4
%50 = mul nsw i32 %49, %45
%51 = add nsw i32 %50, %sum.12.3
%indvars.iv.next.3 = add i64 %indvars.iv.3, 1
@@ -591,7 +591,7 @@ define i32 @example21(i32* nocapture %b, i32 %n) nounwind uwtable readonly ssp {
%a.02 = phi i32 [ 0, %.lr.ph ], [ %6, %3 ]
%indvars.iv.next = add i64 %indvars.iv, -1
%4 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = add nsw i32 %5, %a.02
%7 = trunc i64 %indvars.iv.next to i32
%8 = icmp sgt i32 %7, 0
@@ -613,7 +613,7 @@ define void @example23(i16* nocapture %src, i32* nocapture %dst) nounwind uwtabl
%.013 = phi i32* [ %dst, %0 ], [ %6, %1 ]
%i.02 = phi i32 [ 0, %0 ], [ %7, %1 ]
%2 = getelementptr inbounds i16, i16* %.04, i64 1
- %3 = load i16* %.04, align 2
+ %3 = load i16, i16* %.04, align 2
%4 = zext i16 %3 to i32
%5 = shl nuw nsw i32 %4, 7
%6 = getelementptr inbounds i32, i32* %.013, i64 1
@@ -635,9 +635,9 @@ define void @example24(i16 signext %x, i16 signext %y) nounwind uwtable ssp {
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds [1024 x float], [1024 x float]* @fa, i64 0, i64 %indvars.iv
- %3 = load float* %2, align 4
+ %3 = load float, float* %2, align 4
%4 = getelementptr inbounds [1024 x float], [1024 x float]* @fb, i64 0, i64 %indvars.iv
- %5 = load float* %4, align 4
+ %5 = load float, float* %4, align 4
%6 = fcmp olt float %3, %5
%x.y = select i1 %6, i16 %x, i16 %y
%7 = sext i16 %x.y to i32
@@ -662,14 +662,14 @@ define void @example25() nounwind uwtable ssp {
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds [1024 x float], [1024 x float]* @da, i64 0, i64 %indvars.iv
- %3 = load float* %2, align 4
+ %3 = load float, float* %2, align 4
%4 = getelementptr inbounds [1024 x float], [1024 x float]* @db, i64 0, i64 %indvars.iv
- %5 = load float* %4, align 4
+ %5 = load float, float* %4, align 4
%6 = fcmp olt float %3, %5
%7 = getelementptr inbounds [1024 x float], [1024 x float]* @dc, i64 0, i64 %indvars.iv
- %8 = load float* %7, align 4
+ %8 = load float, float* %7, align 4
%9 = getelementptr inbounds [1024 x float], [1024 x float]* @dd, i64 0, i64 %indvars.iv
- %10 = load float* %9, align 4
+ %10 = load float, float* %9, align 4
%11 = fcmp olt float %8, %10
%12 = and i1 %6, %11
%13 = zext i1 %12 to i32
diff --git a/llvm/test/Transforms/LoopVectorize/global_alias.ll b/llvm/test/Transforms/LoopVectorize/global_alias.ll
index ef232669f8f..b2ac4ca04ec 100644
--- a/llvm/test/Transforms/LoopVectorize/global_alias.ll
+++ b/llvm/test/Transforms/LoopVectorize/global_alias.ll
@@ -35,31 +35,31 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
- %2 = load i32* %arrayidx, align 4
- %3 = load i32* %a.addr, align 4
+ %2 = load i32, i32* %arrayidx, align 4
+ %3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add, i32* %arrayidx1, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %6 = load i32* %a.addr, align 4
+ %6 = load i32, i32* %a.addr, align 4
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
- %7 = load i32* %arrayidx2, align 4
+ %7 = load i32, i32* %arrayidx2, align 4
ret i32 %7
}
@@ -83,32 +83,32 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 90
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%add = add nsw i32 %1, 10
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %add
- %2 = load i32* %arrayidx, align 4
- %3 = load i32* %a.addr, align 4
+ %2 = load i32, i32* %arrayidx, align 4
+ %3 = load i32, i32* %a.addr, align 4
%add1 = add nsw i32 %2, %3
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add1, i32* %arrayidx2, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %6 = load i32* %a.addr, align 4
+ %6 = load i32, i32* %a.addr, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
- %7 = load i32* %arrayidx3, align 4
+ %7 = load i32, i32* %arrayidx3, align 4
ret i32 %7
}
@@ -132,32 +132,32 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
- %2 = load i32* %arrayidx, align 4
- %3 = load i32* %a.addr, align 4
+ %2 = load i32, i32* %arrayidx, align 4
+ %3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%add1 = add nsw i32 %4, 10
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add1
store i32 %add, i32* %arrayidx2, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %6 = load i32* %a.addr, align 4
+ %6 = load i32, i32* %a.addr, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
- %7 = load i32* %arrayidx3, align 4
+ %7 = load i32, i32* %arrayidx3, align 4
ret i32 %7
}
@@ -184,34 +184,34 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32** @PB, align 4
- %2 = load i32* %i, align 4
+ %1 = load i32*, i32** @PB, align 4
+ %2 = load i32, i32* %i, align 4
%add.ptr = getelementptr inbounds i32, i32* %1, i32 %2
- %3 = load i32* %add.ptr, align 4
- %4 = load i32* %a.addr, align 4
+ %3 = load i32, i32* %add.ptr, align 4
+ %4 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %3, %4
- %5 = load i32** @PA, align 4
- %6 = load i32* %i, align 4
+ %5 = load i32*, i32** @PA, align 4
+ %6 = load i32, i32* %i, align 4
%add.ptr1 = getelementptr inbounds i32, i32* %5, i32 %6
store i32 %add, i32* %add.ptr1, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %7 = load i32* %i, align 4
+ %7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %8 = load i32** @PA, align 4
- %9 = load i32* %a.addr, align 4
+ %8 = load i32*, i32** @PA, align 4
+ %9 = load i32, i32* %a.addr, align 4
%add.ptr2 = getelementptr inbounds i32, i32* %8, i32 %9
- %10 = load i32* %add.ptr2, align 4
+ %10 = load i32, i32* %add.ptr2, align 4
ret i32 %10
}
@@ -237,37 +237,37 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
- %2 = load i32* %N, align 4
+ %1 = load i32, i32* %i, align 4
+ %2 = load i32, i32* %N, align 4
%arrayidx = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 2), i32 0, i32 %2
%arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx, i32 0, i32 %1
- %3 = load i32* %arrayidx1, align 4
- %4 = load i32* %a.addr, align 4
+ %3 = load i32, i32* %arrayidx1, align 4
+ %4 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %3, %4
- %5 = load i32* %i, align 4
- %6 = load i32* %N, align 4
+ %5 = load i32, i32* %i, align 4
+ %6 = load i32, i32* %N, align 4
%arrayidx2 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx2, i32 0, i32 %5
store i32 %add, i32* %arrayidx3, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %7 = load i32* %i, align 4
+ %7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %8 = load i32* %a.addr, align 4
- %9 = load i32* %N, align 4
+ %8 = load i32, i32* %a.addr, align 4
+ %9 = load i32, i32* %N, align 4
%arrayidx4 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx4, i32 0, i32 %8
- %10 = load i32* %arrayidx5, align 4
+ %10 = load i32, i32* %arrayidx5, align 4
ret i32 %10
}
@@ -293,38 +293,38 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
- %2 = load i32* %N, align 4
+ %1 = load i32, i32* %i, align 4
+ %2 = load i32, i32* %N, align 4
%add = add nsw i32 %2, 1
%arrayidx = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %add
%arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx, i32 0, i32 %1
- %3 = load i32* %arrayidx1, align 4
- %4 = load i32* %a.addr, align 4
+ %3 = load i32, i32* %arrayidx1, align 4
+ %4 = load i32, i32* %a.addr, align 4
%add2 = add nsw i32 %3, %4
- %5 = load i32* %i, align 4
- %6 = load i32* %N, align 4
+ %5 = load i32, i32* %i, align 4
+ %6 = load i32, i32* %N, align 4
%arrayidx3 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx3, i32 0, i32 %5
store i32 %add2, i32* %arrayidx4, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %7 = load i32* %i, align 4
+ %7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %8 = load i32* %a.addr, align 4
- %9 = load i32* %N, align 4
+ %8 = load i32, i32* %a.addr, align 4
+ %9 = load i32, i32* %N, align 4
%arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
%arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx5, i32 0, i32 %8
- %10 = load i32* %arrayidx6, align 4
+ %10 = load i32, i32* %arrayidx6, align 4
ret i32 %10
}
@@ -347,19 +347,19 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
- %2 = load i32* %arrayidx, align 4
- %3 = load i32* %a.addr, align 4
+ %2 = load i32, i32* %arrayidx, align 4
+ %3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%sub2 = sub nsw i32 100, %4
%sub3 = sub nsw i32 %sub2, 1
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
@@ -367,15 +367,15 @@ for.body: ; preds = %for.cond
br label %for.inc
for.inc: ; preds = %for.body
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %6 = load i32* %a.addr, align 4
+ %6 = load i32, i32* %a.addr, align 4
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
- %7 = load i32* %arrayidx5, align 4
+ %7 = load i32, i32* %arrayidx5, align 4
ret i32 %7
}
@@ -399,19 +399,19 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 90
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 10
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
- %2 = load i32* %arrayidx, align 4
- %3 = load i32* %a.addr, align 4
+ %2 = load i32, i32* %arrayidx, align 4
+ %3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%sub2 = sub nsw i32 100, %4
%sub3 = sub nsw i32 %sub2, 1
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
@@ -419,15 +419,15 @@ for.body: ; preds = %for.cond
br label %for.inc
for.inc: ; preds = %for.body
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %6 = load i32* %a.addr, align 4
+ %6 = load i32, i32* %a.addr, align 4
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
- %7 = load i32* %arrayidx5, align 4
+ %7 = load i32, i32* %arrayidx5, align 4
ret i32 %7
}
@@ -451,19 +451,19 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
- %2 = load i32* %arrayidx, align 4
- %3 = load i32* %a.addr, align 4
+ %2 = load i32, i32* %arrayidx, align 4
+ %3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%sub2 = sub nsw i32 100, %4
%sub3 = sub nsw i32 %sub2, 10
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
@@ -471,15 +471,15 @@ for.body: ; preds = %for.cond
br label %for.inc
for.inc: ; preds = %for.body
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %6 = load i32* %a.addr, align 4
+ %6 = load i32, i32* %a.addr, align 4
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
- %7 = load i32* %arrayidx5, align 4
+ %7 = load i32, i32* %arrayidx5, align 4
ret i32 %7
}
@@ -506,23 +506,23 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32** @PB, align 4
+ %1 = load i32*, i32** @PB, align 4
%add.ptr = getelementptr inbounds i32, i32* %1, i32 100
- %2 = load i32* %i, align 4
+ %2 = load i32, i32* %i, align 4
%idx.neg = sub i32 0, %2
%add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 %idx.neg
%add.ptr2 = getelementptr inbounds i32, i32* %add.ptr1, i32 -1
- %3 = load i32* %add.ptr2, align 4
- %4 = load i32* %a.addr, align 4
+ %3 = load i32, i32* %add.ptr2, align 4
+ %4 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %3, %4
- %5 = load i32** @PA, align 4
+ %5 = load i32*, i32** @PA, align 4
%add.ptr3 = getelementptr inbounds i32, i32* %5, i32 100
- %6 = load i32* %i, align 4
+ %6 = load i32, i32* %i, align 4
%idx.neg4 = sub i32 0, %6
%add.ptr5 = getelementptr inbounds i32, i32* %add.ptr3, i32 %idx.neg4
%add.ptr6 = getelementptr inbounds i32, i32* %add.ptr5, i32 -1
@@ -530,16 +530,16 @@ for.body: ; preds = %for.cond
br label %for.inc
for.inc: ; preds = %for.body
- %7 = load i32* %i, align 4
+ %7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %8 = load i32** @PA, align 4
- %9 = load i32* %a.addr, align 4
+ %8 = load i32*, i32** @PA, align 4
+ %9 = load i32, i32* %a.addr, align 4
%add.ptr7 = getelementptr inbounds i32, i32* %8, i32 %9
- %10 = load i32* %add.ptr7, align 4
+ %10 = load i32, i32* %add.ptr7, align 4
ret i32 %10
}
@@ -565,41 +565,41 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
- %2 = load i32* %N, align 4
+ %2 = load i32, i32* %N, align 4
%arrayidx = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 2), i32 0, i32 %2
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx, i32 0, i32 %sub1
- %3 = load i32* %arrayidx2, align 4
- %4 = load i32* %a.addr, align 4
+ %3 = load i32, i32* %arrayidx2, align 4
+ %4 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %3, %4
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%sub3 = sub nsw i32 100, %5
%sub4 = sub nsw i32 %sub3, 1
- %6 = load i32* %N, align 4
+ %6 = load i32, i32* %N, align 4
%arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
%arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx5, i32 0, i32 %sub4
store i32 %add, i32* %arrayidx6, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %7 = load i32* %i, align 4
+ %7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %8 = load i32* %a.addr, align 4
- %9 = load i32* %N, align 4
+ %8 = load i32, i32* %a.addr, align 4
+ %9 = load i32, i32* %N, align 4
%arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
%arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx7, i32 0, i32 %8
- %10 = load i32* %arrayidx8, align 4
+ %10 = load i32, i32* %arrayidx8, align 4
ret i32 %10
}
@@ -625,42 +625,42 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
- %2 = load i32* %N, align 4
+ %2 = load i32, i32* %N, align 4
%add = add nsw i32 %2, 1
%arrayidx = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %add
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx, i32 0, i32 %sub1
- %3 = load i32* %arrayidx2, align 4
- %4 = load i32* %a.addr, align 4
+ %3 = load i32, i32* %arrayidx2, align 4
+ %4 = load i32, i32* %a.addr, align 4
%add3 = add nsw i32 %3, %4
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%sub4 = sub nsw i32 100, %5
%sub5 = sub nsw i32 %sub4, 1
- %6 = load i32* %N, align 4
+ %6 = load i32, i32* %N, align 4
%arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
%arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx6, i32 0, i32 %sub5
store i32 %add3, i32* %arrayidx7, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %7 = load i32* %i, align 4
+ %7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %8 = load i32* %a.addr, align 4
- %9 = load i32* %N, align 4
+ %8 = load i32, i32* %a.addr, align 4
+ %9 = load i32, i32* %N, align 4
%arrayidx8 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
%arrayidx9 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx8, i32 0, i32 %8
- %10 = load i32* %arrayidx9, align 4
+ %10 = load i32, i32* %arrayidx9, align 4
ret i32 %10
}
@@ -684,32 +684,32 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%add = add nsw i32 %1, 4
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add
- %2 = load i32* %arrayidx, align 4
- %3 = load i32* %a.addr, align 4
+ %2 = load i32, i32* %arrayidx, align 4
+ %3 = load i32, i32* %a.addr, align 4
%add1 = add nsw i32 %2, %3
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add1, i32* %arrayidx2, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %6 = load i32* %a.addr, align 4
+ %6 = load i32, i32* %a.addr, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
- %7 = load i32* %arrayidx3, align 4
+ %7 = load i32, i32* %arrayidx3, align 4
ret i32 %7
}
@@ -733,19 +733,19 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 5
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub1
- %2 = load i32* %arrayidx, align 4
- %3 = load i32* %a.addr, align 4
+ %2 = load i32, i32* %arrayidx, align 4
+ %3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%sub2 = sub nsw i32 100, %4
%sub3 = sub nsw i32 %sub2, 1
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
@@ -753,15 +753,15 @@ for.body: ; preds = %for.cond
br label %for.inc
for.inc: ; preds = %for.body
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %6 = load i32* %a.addr, align 4
+ %6 = load i32, i32* %a.addr, align 4
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
- %7 = load i32* %arrayidx5, align 4
+ %7 = load i32, i32* %arrayidx5, align 4
ret i32 %7
}
@@ -789,33 +789,33 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
- %2 = load i32* %arrayidx, align 4
- %3 = load i32* %a.addr, align 4
+ %2 = load i32, i32* %arrayidx, align 4
+ %3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add, i32* %arrayidx2, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %6 = load i32* %a.addr, align 4
+ %6 = load i32, i32* %a.addr, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
- %7 = load i32* %arrayidx3, align 4
+ %7 = load i32, i32* %arrayidx3, align 4
ret i32 %7
}
@@ -839,17 +839,17 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
- %2 = load i32* %arrayidx, align 4
- %3 = load i32* %a.addr, align 4
+ %2 = load i32, i32* %arrayidx, align 4
+ %3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %4
%sub1 = sub nsw i32 %sub, 1
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub1
@@ -857,15 +857,15 @@ for.body: ; preds = %for.cond
br label %for.inc
for.inc: ; preds = %for.body
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %6 = load i32* %a.addr, align 4
+ %6 = load i32, i32* %a.addr, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
- %7 = load i32* %arrayidx3, align 4
+ %7 = load i32, i32* %arrayidx3, align 4
ret i32 %7
}
@@ -889,37 +889,37 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32** @PB, align 4
+ %1 = load i32*, i32** @PB, align 4
%add.ptr = getelementptr inbounds i32, i32* %1, i32 100
- %2 = load i32* %i, align 4
+ %2 = load i32, i32* %i, align 4
%idx.neg = sub i32 0, %2
%add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 %idx.neg
%add.ptr2 = getelementptr inbounds i32, i32* %add.ptr1, i32 -1
- %3 = load i32* %add.ptr2, align 4
- %4 = load i32* %a.addr, align 4
+ %3 = load i32, i32* %add.ptr2, align 4
+ %4 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %3, %4
- %5 = load i32** @PA, align 4
- %6 = load i32* %i, align 4
+ %5 = load i32*, i32** @PA, align 4
+ %6 = load i32, i32* %i, align 4
%add.ptr3 = getelementptr inbounds i32, i32* %5, i32 %6
store i32 %add, i32* %add.ptr3, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %7 = load i32* %i, align 4
+ %7 = load i32, i32* %i, align 4
%inc = add nsw i32 %7, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %8 = load i32** @PA, align 4
- %9 = load i32* %a.addr, align 4
+ %8 = load i32*, i32** @PA, align 4
+ %9 = load i32, i32* %a.addr, align 4
%add.ptr4 = getelementptr inbounds i32, i32* %8, i32 %9
- %10 = load i32* %add.ptr4, align 4
+ %10 = load i32, i32* %add.ptr4, align 4
ret i32 %10
}
@@ -946,34 +946,34 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
- %2 = load i32* %arrayidx, align 4
- %3 = load i32* %a.addr, align 4
+ %2 = load i32, i32* %arrayidx, align 4
+ %3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%add2 = add nsw i32 %4, 10
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add2
store i32 %add, i32* %arrayidx3, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %6 = load i32* %a.addr, align 4
+ %6 = load i32, i32* %a.addr, align 4
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
- %7 = load i32* %arrayidx4, align 4
+ %7 = load i32, i32* %arrayidx4, align 4
ret i32 %7
}
@@ -996,33 +996,33 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 10
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
- %2 = load i32* %arrayidx, align 4
- %3 = load i32* %a.addr, align 4
+ %2 = load i32, i32* %arrayidx, align 4
+ %3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add, i32* %arrayidx2, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %6 = load i32* %a.addr, align 4
+ %6 = load i32, i32* %a.addr, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
- %7 = load i32* %arrayidx3, align 4
+ %7 = load i32, i32* %arrayidx3, align 4
ret i32 %7
}
@@ -1045,33 +1045,33 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 100
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 10
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
- %2 = load i32* %arrayidx, align 4
- %3 = load i32* %a.addr, align 4
+ %2 = load i32, i32* %arrayidx, align 4
+ %3 = load i32, i32* %a.addr, align 4
%add = add nsw i32 %2, %3
- %4 = load i32* %i, align 4
+ %4 = load i32, i32* %i, align 4
%add2 = add nsw i32 %4, 10
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add2
store i32 %add, i32* %arrayidx3, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %5 = load i32* %i, align 4
+ %5 = load i32, i32* %i, align 4
%inc = add nsw i32 %5, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %6 = load i32* %a.addr, align 4
+ %6 = load i32, i32* %a.addr, align 4
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
- %7 = load i32* %arrayidx4, align 4
+ %7 = load i32, i32* %arrayidx4, align 4
ret i32 %7
}
diff --git a/llvm/test/Transforms/LoopVectorize/hoist-loads.ll b/llvm/test/Transforms/LoopVectorize/hoist-loads.ll
index ae7f5dc3b79..a20b0f6cdf3 100644
--- a/llvm/test/Transforms/LoopVectorize/hoist-loads.ll
+++ b/llvm/test/Transforms/LoopVectorize/hoist-loads.ll
@@ -16,12 +16,12 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %if.end9 ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @A, i64 0, i64 %indvars.iv
%arrayidx2 = getelementptr inbounds [1024 x float], [1024 x float]* @B, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx2, align 4
+ %0 = load float, float* %arrayidx2, align 4
%cmp3 = fcmp oeq float %0, 0.000000e+00
br i1 %cmp3, label %if.end9, label %if.else
if.else:
- %1 = load float* %arrayidx, align 4
+ %1 = load float, float* %arrayidx, align 4
br label %if.end9
if.end9:
@@ -48,12 +48,12 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %if.end9 ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @A, i64 0, i64 %indvars.iv
%arrayidx2 = getelementptr inbounds [1024 x float], [1024 x float]* @B, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx2, align 4
+ %0 = load float, float* %arrayidx2, align 4
%cmp3 = fcmp oeq float %0, 0.000000e+00
br i1 %cmp3, label %if.end9, label %if.else
if.else:
- %1 = load float* %arrayidx, align 4
+ %1 = load float, float* %arrayidx, align 4
br label %if.end9
if.end9:
diff --git a/llvm/test/Transforms/LoopVectorize/i8-induction.ll b/llvm/test/Transforms/LoopVectorize/i8-induction.ll
index 90e3ec00cde..d9e8a430be1 100644
--- a/llvm/test/Transforms/LoopVectorize/i8-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/i8-induction.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @f() nounwind uwtable ssp {
scalar.ph:
store i8 0, i8* inttoptr (i64 1 to i8*), align 1
- %0 = load i8* @a, align 1
+ %0 = load i8, i8* @a, align 1
br label %for.body
for.body:
diff --git a/llvm/test/Transforms/LoopVectorize/if-conversion-edgemasks.ll b/llvm/test/Transforms/LoopVectorize/if-conversion-edgemasks.ll
index f2d1f2596c2..8d435f51353 100644
--- a/llvm/test/Transforms/LoopVectorize/if-conversion-edgemasks.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-conversion-edgemasks.ll
@@ -20,15 +20,15 @@ entry:
br i1 %cmp88, label %for.body.lr.ph, label %for.end
for.body.lr.ph:
- %0 = load i32** @b, align 8
- %1 = load i32** @a, align 8
- %2 = load i32** @c, align 8
+ %0 = load i32*, i32** @b, align 8
+ %1 = load i32*, i32** @a, align 8
+ %2 = load i32*, i32** @c, align 8
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %_ZL3fn3ii.exit58 ]
%arrayidx = getelementptr inbounds i32, i32* %0, i64 %indvars.iv
- %3 = load i32* %arrayidx, align 4 %4 = trunc i64 %indvars.iv to i32
+ %3 = load i32, i32* %arrayidx, align 4 %4 = trunc i64 %indvars.iv to i32
%and.i = and i32 %4, 1
%tobool.i.i = icmp eq i32 %and.i, 0
br i1 %tobool.i.i, label %if.end.i, label %if.then.i
@@ -136,7 +136,7 @@ _ZL3fn3ii.exit:
%p1.addr.0.i16.i = phi i32 [ %or.i14.i, %if.then.i15.i ], [ %p1.addr.3.i.i, %_Z3fn2iii.exit.i ]
%arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
store i32 %p1.addr.0.i16.i, i32* %arrayidx2, align 4 %arrayidx4 = getelementptr inbounds i32, i32* %0, i64 %indvars.iv
- %10 = load i32* %arrayidx4, align 4 br i1 %tobool.i.i, label %_Z3fn1ii.exit.i26, label %if.then.i.i21
+ %10 = load i32, i32* %arrayidx4, align 4 br i1 %tobool.i.i, label %_Z3fn1ii.exit.i26, label %if.then.i.i21
if.then.i.i21:
%and.i.i18 = lshr i32 %10, 2
diff --git a/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll b/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll
index 4a327e8fe4f..3a581ebf847 100644
--- a/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll
@@ -20,9 +20,9 @@ entry:
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %if.end14 ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4
+ %1 = load i32, i32* %arrayidx2, align 4
%cmp3 = icmp sgt i32 %0, %1
br i1 %cmp3, label %if.then, label %if.end14
diff --git a/llvm/test/Transforms/LoopVectorize/if-conversion-reduction.ll b/llvm/test/Transforms/LoopVectorize/if-conversion-reduction.ll
index 711ca7633a5..20333b94345 100644
--- a/llvm/test/Transforms/LoopVectorize/if-conversion-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-conversion-reduction.ll
@@ -15,7 +15,7 @@ for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%sum.011 = phi i32 [ %sum.1, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 30
br i1 %cmp1, label %if.then, label %for.inc
diff --git a/llvm/test/Transforms/LoopVectorize/if-conversion.ll b/llvm/test/Transforms/LoopVectorize/if-conversion.ll
index 7a3d825353e..3a6ac8b1747 100644
--- a/llvm/test/Transforms/LoopVectorize/if-conversion.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-conversion.ll
@@ -36,9 +36,9 @@ for.body.lr.ph:
for.body:
%indvars.iv = phi i64 [ %0, %for.body.lr.ph ], [ %indvars.iv.next, %if.end ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %1 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
- %2 = load i32* %arrayidx4, align 4
+ %2 = load i32, i32* %arrayidx4, align 4
%cmp5 = icmp sgt i32 %1, %2
br i1 %cmp5, label %if.then, label %if.end
@@ -85,7 +85,7 @@ for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%sum.011 = phi i32 [ %sum.1, %for.inc ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 30
br i1 %cmp1, label %if.then, label %for.inc
diff --git a/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll b/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll
index ee5f8bcce88..991d027ada5 100644
--- a/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll
@@ -41,8 +41,8 @@ entry:
; UNROLL: %[[IND1:[a-zA-Z0-9]+]] = add i64 %{{.*}}, 1
; UNROLL: %[[v0:[a-zA-Z0-9]+]] = getelementptr inbounds i32, i32* %f, i64 %[[IND]]
; UNROLL: %[[v1:[a-zA-Z0-9]+]] = getelementptr inbounds i32, i32* %f, i64 %[[IND1]]
-; UNROLL: %[[v2:[a-zA-Z0-9]+]] = load i32* %[[v0]], align 4
-; UNROLL: %[[v3:[a-zA-Z0-9]+]] = load i32* %[[v1]], align 4
+; UNROLL: %[[v2:[a-zA-Z0-9]+]] = load i32, i32* %[[v0]], align 4
+; UNROLL: %[[v3:[a-zA-Z0-9]+]] = load i32, i32* %[[v1]], align 4
; UNROLL: %[[v4:[a-zA-Z0-9]+]] = icmp sgt i32 %[[v2]], 100
; UNROLL: %[[v5:[a-zA-Z0-9]+]] = icmp sgt i32 %[[v3]], 100
; UNROLL: %[[v6:[a-zA-Z0-9]+]] = add nsw i32 %[[v2]], 20
@@ -67,7 +67,7 @@ entry:
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%arrayidx = getelementptr inbounds i32, i32* %f, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 100
br i1 %cmp1, label %if.then, label %for.inc
@@ -105,7 +105,7 @@ for.body14:
%indvars.iv3 = phi i64 [ %indvars.iv.next4, %for.inc23 ], [ undef, %for.body9 ]
%iNewChunks.120 = phi i32 [ %iNewChunks.2, %for.inc23 ], [ undef, %for.body9 ]
%arrayidx16 = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 %indvars.iv3
- %tmp = load i32* %arrayidx16, align 4
+ %tmp = load i32, i32* %arrayidx16, align 4
br i1 undef, label %if.then18, label %for.inc23
if.then18:
diff --git a/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll b/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll
index 12fc13a5265..798793a641b 100644
--- a/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll
+++ b/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll
@@ -59,7 +59,7 @@ thread-pre-split.loopexit: ; preds = %11, %.thread-pre-sp
; <label>:11 ; preds = %.lr.ph21
%12 = getelementptr inbounds [0 x i8], [0 x i8]* @PL_utf8skip, i64 0, i64 undef
- %13 = load i8* %12, align 1
+ %13 = load i8, i8* %12, align 1
%14 = zext i8 %13 to i64
%15 = icmp ugt i64 %14, %10
%. = select i1 %15, i64 %10, i64 %14
@@ -91,7 +91,7 @@ thread-pre-split.loopexit: ; preds = %11, %.thread-pre-sp
br label %26
; <label>:26 ; preds = %25, %24, %23, %22
- %27 = load i64* %len, align 8
+ %27 = load i64, i64* %len, align 8
%28 = add i64 %27, -1
br i1 undef, label %thread-pre-split._crit_edge, label %.lr.ph21
diff --git a/llvm/test/Transforms/LoopVectorize/increment.ll b/llvm/test/Transforms/LoopVectorize/increment.ll
index 369bd38c78f..d0b25096e37 100644
--- a/llvm/test/Transforms/LoopVectorize/increment.ll
+++ b/llvm/test/Transforms/LoopVectorize/increment.ll
@@ -21,7 +21,7 @@ define void @inc(i32 %n) nounwind uwtable noinline ssp {
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = trunc i64 %indvars.iv to i32
%5 = add nsw i32 %3, %4
store i32 %5, i32* %2, align 4
@@ -50,10 +50,10 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%idxprom1 = sext i32 %0 to i64
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1
- %1 = load i32* %arrayidx2, align 4
+ %1 = load i32, i32* %arrayidx2, align 4
%inc = add nsw i32 %1, 1
store i32 %inc, i32* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll
index 811c4927245..2fbb2de797a 100644
--- a/llvm/test/Transforms/LoopVectorize/induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction.ll
@@ -52,10 +52,10 @@ for.body:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
%ind.sum = add i64 %iv, %offset
%arr.idx = getelementptr inbounds float, float* %a, i64 %ind.sum
- %l1 = load float* %arr.idx, align 4
+ %l1 = load float, float* %arr.idx, align 4
%ind.sum2 = add i64 %iv, %offset2
%arr.idx2 = getelementptr inbounds float, float* %a, i64 %ind.sum2
- %l2 = load float* %arr.idx2, align 4
+ %l2 = load float, float* %arr.idx2, align 4
%m = fmul fast float %b, %l2
%ad = fadd fast float %l1, %m
store float %ad, float* %arr.idx, align 4
@@ -153,9 +153,9 @@ define i32 @max_i32_backedgetaken() nounwind readnone ssp uwtable {
@c = common global i32 0, align 4
define i32 @testoverflowcheck() {
entry:
- %.pr.i = load i8* @e, align 1
- %0 = load i32* @d, align 4
- %c.promoted.i = load i32* @c, align 4
+ %.pr.i = load i8, i8* @e, align 1
+ %0 = load i32, i32* @d, align 4
+ %c.promoted.i = load i32, i32* @c, align 4
br label %cond.end.i
cond.end.i:
diff --git a/llvm/test/Transforms/LoopVectorize/intrinsic.ll b/llvm/test/Transforms/LoopVectorize/intrinsic.ll
index 029d8b61a4a..fae6a8cb714 100644
--- a/llvm/test/Transforms/LoopVectorize/intrinsic.ll
+++ b/llvm/test/Transforms/LoopVectorize/intrinsic.ll
@@ -14,7 +14,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.sqrt.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -40,7 +40,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.sqrt.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -66,7 +66,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.sin.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -92,7 +92,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.sin.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -118,7 +118,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.cos.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -144,7 +144,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.cos.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -170,7 +170,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.exp.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -196,7 +196,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.exp.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -222,7 +222,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.exp2.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -248,7 +248,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.exp2.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -274,7 +274,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.log.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -300,7 +300,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.log.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -326,7 +326,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.log10.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -352,7 +352,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.log10.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -378,7 +378,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.log2.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -404,7 +404,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.log2.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -430,7 +430,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.fabs.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -453,7 +453,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.fabs(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -479,9 +479,9 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds float, float* %z, i64 %indvars.iv
- %1 = load float* %arrayidx1, align 4
+ %1 = load float, float* %arrayidx1, align 4
%call = tail call float @llvm.copysign.f32(float %0, float %1) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -504,9 +504,9 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%arrayidx1 = getelementptr inbounds double, double* %z, i64 %indvars.iv
- %1 = load double* %arrayidx, align 8
+ %1 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.copysign(double %0, double %1) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -532,7 +532,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.floor.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -558,7 +558,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.floor.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -584,7 +584,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.ceil.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -610,7 +610,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.ceil.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -636,7 +636,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.trunc.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -662,7 +662,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.trunc.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -688,7 +688,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.rint.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -714,7 +714,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.rint.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -740,7 +740,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.nearbyint.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -766,7 +766,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.nearbyint.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -792,7 +792,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @llvm.round.f32(float %0) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
@@ -818,7 +818,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.round.f64(double %0) nounwind readnone
%arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
@@ -844,11 +844,11 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float, float* %w, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds float, float* %z, i64 %indvars.iv
- %2 = load float* %arrayidx4, align 4
+ %2 = load float, float* %arrayidx4, align 4
%3 = tail call float @llvm.fma.f32(float %0, float %2, float %1)
%arrayidx6 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %3, float* %arrayidx6, align 4
@@ -874,11 +874,11 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%arrayidx2 = getelementptr inbounds double, double* %w, i64 %indvars.iv
- %1 = load double* %arrayidx2, align 8
+ %1 = load double, double* %arrayidx2, align 8
%arrayidx4 = getelementptr inbounds double, double* %z, i64 %indvars.iv
- %2 = load double* %arrayidx4, align 8
+ %2 = load double, double* %arrayidx4, align 8
%3 = tail call double @llvm.fma.f64(double %0, double %2, double %1)
%arrayidx6 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %3, double* %arrayidx6, align 8
@@ -904,11 +904,11 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float, float* %w, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds float, float* %z, i64 %indvars.iv
- %2 = load float* %arrayidx4, align 4
+ %2 = load float, float* %arrayidx4, align 4
%3 = tail call float @llvm.fmuladd.f32(float %0, float %2, float %1)
%arrayidx6 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %3, float* %arrayidx6, align 4
@@ -934,11 +934,11 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%arrayidx2 = getelementptr inbounds double, double* %w, i64 %indvars.iv
- %1 = load double* %arrayidx2, align 8
+ %1 = load double, double* %arrayidx2, align 8
%arrayidx4 = getelementptr inbounds double, double* %z, i64 %indvars.iv
- %2 = load double* %arrayidx4, align 8
+ %2 = load double, double* %arrayidx4, align 8
%3 = tail call double @llvm.fmuladd.f64(double %0, double %2, double %1)
%arrayidx6 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %3, double* %arrayidx6, align 8
@@ -964,9 +964,9 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float, float* %z, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%call = tail call float @llvm.pow.f32(float %0, float %1) nounwind readnone
%arrayidx4 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx4, align 4
@@ -992,9 +992,9 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%arrayidx2 = getelementptr inbounds double, double* %z, i64 %indvars.iv
- %1 = load double* %arrayidx2, align 8
+ %1 = load double, double* %arrayidx2, align 8
%call = tail call double @llvm.pow.f64(double %0, double %1) nounwind readnone
%arrayidx4 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx4, align 8
@@ -1017,7 +1017,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float, float* %x, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @fabsf(float %0) nounwind readnone
store float %call, float* %arrayidx, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -1051,7 +1051,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float, float* %x, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%call = tail call float @roundf(float %0) nounwind readnone
store float %call, float* %arrayidx, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -1078,7 +1078,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds double, double* %x, i64 %indvars.iv
- %0 = load double* %arrayidx, align 4
+ %0 = load double, double* %arrayidx, align 4
store double %0, double* %arrayidx, align 4
tail call void @round(double %0) nounwind readnone
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -1103,7 +1103,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%call = tail call double @llvm.powi.f64(double %0, i32 %P) nounwind readnone
%arrayidx4 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx4, align 8
@@ -1127,7 +1127,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%1 = trunc i64 %indvars.iv to i32
%call = tail call double @llvm.powi.f64(double %0, i32 %1) nounwind readnone
%arrayidx4 = getelementptr inbounds double, double* %x, i64 %indvars.iv
@@ -1154,7 +1154,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i64, i64* %y, i64 %indvars.iv
- %0 = load i64* %arrayidx, align 8
+ %0 = load i64, i64* %arrayidx, align 8
%call = tail call i64 @llvm.cttz.i64(i64 %0, i1 true) nounwind readnone
%arrayidx4 = getelementptr inbounds i64, i64* %x, i64 %indvars.iv
store i64 %call, i64* %arrayidx4, align 8
@@ -1180,7 +1180,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i64, i64* %y, i64 %indvars.iv
- %0 = load i64* %arrayidx, align 8
+ %0 = load i64, i64* %arrayidx, align 8
%call = tail call i64 @llvm.ctlz.i64(i64 %0, i1 true) nounwind readnone
%arrayidx4 = getelementptr inbounds i64, i64* %x, i64 %indvars.iv
store i64 %call, i64* %arrayidx4, align 8
@@ -1206,9 +1206,9 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float, float* %z, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%call = tail call float @llvm.minnum.f32(float %0, float %1) nounwind readnone
%arrayidx4 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx4, align 4
@@ -1234,9 +1234,9 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float, float* %z, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%call = tail call float @llvm.maxnum.f32(float %0, float %1) nounwind readnone
%arrayidx4 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx4, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/lifetime.ll b/llvm/test/Transforms/LoopVectorize/lifetime.ll
index 63bde5a4570..6e525ca1d82 100644
--- a/llvm/test/Transforms/LoopVectorize/lifetime.ll
+++ b/llvm/test/Transforms/LoopVectorize/lifetime.ll
@@ -20,7 +20,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
call void @llvm.lifetime.end(i64 4096, i8* %0) #1
%arrayidx = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
- %1 = load i32* %arrayidx, align 8
+ %1 = load i32, i32* %arrayidx, align 8
store i32 100, i32* %arrayidx, align 8
call void @llvm.lifetime.start(i64 4096, i8* %0) #1
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -50,7 +50,7 @@ for.body:
%1 = bitcast [1024 x i32]* %arr to i8*
call void @llvm.lifetime.end(i64 4096, i8* %1) #1
%arrayidx = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
- %2 = load i32* %arrayidx, align 8
+ %2 = load i32, i32* %arrayidx, align 8
store i32 100, i32* %arrayidx, align 8
call void @llvm.lifetime.start(i64 4096, i8* %1) #1
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -79,7 +79,7 @@ for.body:
%1 = bitcast [1024 x i32]* %arr to i8*
call void @llvm.lifetime.end(i64 4096, i8* %1) #1
%arrayidx = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
- %2 = load i32* %arrayidx, align 8
+ %2 = load i32, i32* %arrayidx, align 8
store i32 100, i32* %arrayidx, align 8
call void @llvm.lifetime.start(i64 4096, i8* %1) #1
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/loop-vect-memdep.ll b/llvm/test/Transforms/LoopVectorize/loop-vect-memdep.ll
index 326c4d42f75..d9efaa5c085 100644
--- a/llvm/test/Transforms/LoopVectorize/loop-vect-memdep.ll
+++ b/llvm/test/Transforms/LoopVectorize/loop-vect-memdep.ll
@@ -6,7 +6,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define void @test_loop_novect(double** %arr, i64 %n) {
for.body.lr.ph:
- %t = load double** %arr, align 8
+ %t = load double*, double** %arr, align 8
br label %for.body
for.body: ; preds = %for.body, %for.body.lr.ph
@@ -14,8 +14,8 @@ for.body: ; preds = %for.body, %for.body.lr
%a = getelementptr inbounds double, double* %t, i64 %i
%i.next = add nuw nsw i64 %i, 1
%a.next = getelementptr inbounds double, double* %t, i64 %i.next
- %t1 = load double* %a, align 8
- %t2 = load double* %a.next, align 8
+ %t1 = load double, double* %a, align 8
+ %t2 = load double, double* %a.next, align 8
store double %t1, double* %a.next, align 8
store double %t2, double* %a, align 8
%c = icmp eq i64 %i, %n
diff --git a/llvm/test/Transforms/LoopVectorize/memdep.ll b/llvm/test/Transforms/LoopVectorize/memdep.ll
index 43fccb730dd..fb608836dfe 100644
--- a/llvm/test/Transforms/LoopVectorize/memdep.ll
+++ b/llvm/test/Transforms/LoopVectorize/memdep.ll
@@ -20,7 +20,7 @@ for.body:
%indvars.iv = phi i32 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add i32 %indvars.iv, 1
%arrayidx = getelementptr inbounds i32, i32* %A, i32 %indvars.iv.next
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add1 = add nsw i32 %0, 1
%arrayidx3 = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
store i32 %add1, i32* %arrayidx3, align 4
@@ -45,7 +45,7 @@ entry:
for.body:
%indvars.iv = phi i32 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, 1
%indvars.iv.next = add i32 %indvars.iv, 1
%arrayidx3 = getelementptr inbounds i32, i32* %A, i32 %indvars.iv.next
@@ -75,7 +75,7 @@ for.body:
%i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%idxprom = sext i32 %i.01 to i64
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, 1
%add1 = add nsw i32 %i.01, 2
%idxprom2 = sext i32 %add1 to i64
@@ -106,12 +106,12 @@ entry:
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
store i32 %0, i32* %arrayidx2, align 4
%indvars.iv.next = add nsw i64 %indvars.iv, 1
%arrayidx4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv.next
- %1 = load i32* %arrayidx4, align 4
+ %1 = load i32, i32* %arrayidx4, align 4
store i32 %1, i32* %arrayidx, align 4
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 1024
@@ -141,7 +141,7 @@ for.body:
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
store i32 %tmp.addr.08, i32* %arrayidx, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx3, align 4
+ %0 = load i32, i32* %arrayidx3, align 4
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 1024
br i1 %exitcond, label %for.body, label %for.end
@@ -170,10 +170,10 @@ for.body:
%indvars.iv = phi i64 [ 16, %entry ], [ %indvars.iv.next, %for.body ]
%0 = add nsw i64 %indvars.iv, -3
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %0
- %1 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx, align 4
%2 = add nsw i64 %indvars.iv, 4
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %2
- %3 = load i32* %arrayidx2, align 4
+ %3 = load i32, i32* %arrayidx2, align 4
%add3 = add nsw i32 %3, %1
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
store i32 %add3, i32* %arrayidx5, align 4
@@ -204,12 +204,12 @@ entry:
for.body:
%indvars.iv = phi i64 [ 16, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
store i32 %0, i32* %arrayidx2, align 4
%1 = add nsw i64 %indvars.iv, -3
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %1
- %2 = load i32* %arrayidx4, align 4
+ %2 = load i32, i32* %arrayidx4, align 4
%arrayidx6 = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
store i32 %2, i32* %arrayidx6, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/metadata-unroll.ll b/llvm/test/Transforms/LoopVectorize/metadata-unroll.ll
index 38df838de2a..3c80ae0cb54 100644
--- a/llvm/test/Transforms/LoopVectorize/metadata-unroll.ll
+++ b/llvm/test/Transforms/LoopVectorize/metadata-unroll.ll
@@ -24,7 +24,7 @@ define void @inc(i32 %n) nounwind uwtable noinline ssp {
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = trunc i64 %indvars.iv to i32
%5 = add nsw i32 %3, %4
store i32 %5, i32* %2, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/metadata.ll b/llvm/test/Transforms/LoopVectorize/metadata.ll
index 009463cdc11..9a791aedbbe 100644
--- a/llvm/test/Transforms/LoopVectorize/metadata.ll
+++ b/llvm/test/Transforms/LoopVectorize/metadata.ll
@@ -10,7 +10,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float, float* %arrayidx, align 4, !tbaa !0
%conv = fptosi float %0 to i32
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %conv, i32* %arrayidx2, align 4, !tbaa !4
@@ -23,7 +23,7 @@ for.end: ; preds = %for.body
}
; CHECK-LABEL: @test1
-; CHECK: load <4 x float>* %{{.*}}, align 4, !tbaa ![[TFLT:[0-9]+]]
+; CHECK: load <4 x float>, <4 x float>* %{{.*}}, align 4, !tbaa ![[TFLT:[0-9]+]]
; CHECK: store <4 x i32> %{{.*}}, <4 x i32>* %{{.*}}, align 4, !tbaa ![[TINT:[0-9]+]]
; CHECK: ret i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/minmax_reduction.ll b/llvm/test/Transforms/LoopVectorize/minmax_reduction.ll
index 858536fbed9..5a0356fe11a 100644
--- a/llvm/test/Transforms/LoopVectorize/minmax_reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/minmax_reduction.ll
@@ -27,7 +27,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp3 = icmp sgt i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -56,7 +56,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp3 = icmp slt i32 %max.red.08, %0
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -84,7 +84,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp3 = icmp slt i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -113,7 +113,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp3 = icmp sgt i32 %max.red.08, %0
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -143,7 +143,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp3 = icmp ugt i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -172,7 +172,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp3 = icmp ult i32 %max.red.08, %0
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -200,7 +200,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp3 = icmp ult i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -229,7 +229,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp3 = icmp ugt i32 %max.red.08, %0
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -258,7 +258,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp3 = icmp sge i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %max.red.08, i32 %0
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -287,7 +287,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp3 = icmp sle i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %max.red.08, i32 %0
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -316,7 +316,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp3 = icmp uge i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %max.red.08, i32 %0
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -345,7 +345,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp3 = icmp ule i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %max.red.08, i32 %0
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -369,8 +369,8 @@ for.body:
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%arrayidx1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 1, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
- %1 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx1, align 4
%cmp3 = icmp sgt i32 %0, %1
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -393,8 +393,8 @@ for.body:
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%arrayidx1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 1, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
- %1 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx1, align 4
%cmp3 = icmp sgt i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %0, i32 %1
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -426,7 +426,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp ogt float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %0, float %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -452,7 +452,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp oge float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %0, float %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -478,7 +478,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp olt float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %max.red.08, float %0
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -504,7 +504,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp ole float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %max.red.08, float %0
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -530,7 +530,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp ugt float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %0, float %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -556,7 +556,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp uge float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %0, float %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -582,7 +582,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp ult float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %max.red.08, float %0
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -608,7 +608,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp ule float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %max.red.08, float %0
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -637,7 +637,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp olt float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %0, float %min.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -663,7 +663,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp ole float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %0, float %min.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -689,7 +689,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp ogt float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %min.red.08, float %0
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -715,7 +715,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp oge float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %min.red.08, float %0
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -741,7 +741,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp ult float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %0, float %min.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -767,7 +767,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp ule float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %0, float %min.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -793,7 +793,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp ugt float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %min.red.08, float %0
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -819,7 +819,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp uge float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %min.red.08, float %0
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -846,7 +846,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi double [ %min, %entry ], [ %min.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x double], [1024 x double]* @dA, i64 0, i64 %indvars.iv
- %0 = load double* %arrayidx, align 4
+ %0 = load double, double* %arrayidx, align 4
%cmp3 = fcmp olt double %0, %min.red.08
%min.red.0 = select i1 %cmp3, double %0, double %min.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -870,7 +870,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
%arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%cmp3 = fcmp ogt float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %0, float %max.red.08
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/multi-use-reduction-bug.ll b/llvm/test/Transforms/LoopVectorize/multi-use-reduction-bug.ll
index cd022adc939..9f7fb39dc7c 100644
--- a/llvm/test/Transforms/LoopVectorize/multi-use-reduction-bug.ll
+++ b/llvm/test/Transforms/LoopVectorize/multi-use-reduction-bug.ll
@@ -16,10 +16,10 @@ entry:
%n = alloca i32, align 4
%k7 = alloca i32, align 4
%nf = alloca i32, align 4
- %0 = load i32* %k7, align 4
+ %0 = load i32, i32* %k7, align 4
%.neg1 = sub i32 0, %0
- %n.promoted = load i32* %n, align 4
- %nf.promoted = load i32* %nf, align 4
+ %n.promoted = load i32, i32* %n, align 4
+ %nf.promoted = load i32, i32* %nf, align 4
br label %for.body
for.body:
diff --git a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
index 1659af684e9..e79c93146b1 100644
--- a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
+++ b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
@@ -28,7 +28,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds [40000 x i8], [40000 x i8] addrspace(1)* @Y, i64 0, i64 %indvars.iv
- %0 = load i8 addrspace(1)* %arrayidx, align 1
+ %0 = load i8, i8 addrspace(1)* %arrayidx, align 1
%add = add i8 %0, 1
%arrayidx3 = getelementptr inbounds [40000 x i8], [40000 x i8]* @X, i64 0, i64 %indvars.iv
store i8 %add, i8* %arrayidx3, align 1
diff --git a/llvm/test/Transforms/LoopVectorize/no_array_bounds.ll b/llvm/test/Transforms/LoopVectorize/no_array_bounds.ll
index 2835c6604ec..865c8da4c8d 100644
--- a/llvm/test/Transforms/LoopVectorize/no_array_bounds.ll
+++ b/llvm/test/Transforms/LoopVectorize/no_array_bounds.ll
@@ -34,10 +34,10 @@ for.body7.preheader: ; preds = %for.cond5.preheader
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv27 = phi i64 [ %indvars.iv.next28, %for.body ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv27, !dbg !14
- %0 = load i32* %arrayidx, align 4, !dbg !14, !tbaa !22
+ %0 = load i32, i32* %arrayidx, align 4, !dbg !14, !tbaa !22
%idxprom1 = sext i32 %0 to i64, !dbg !14
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1, !dbg !14
- %1 = load i32* %arrayidx2, align 4, !dbg !14, !tbaa !22
+ %1 = load i32, i32* %arrayidx2, align 4, !dbg !14, !tbaa !22
%inc = add nsw i32 %1, 1, !dbg !14
store i32 %inc, i32* %arrayidx2, align 4, !dbg !14, !tbaa !22
%indvars.iv.next28 = add nuw nsw i64 %indvars.iv27, 1, !dbg !10
@@ -48,10 +48,10 @@ for.body: ; preds = %for.body.preheader,
for.body7: ; preds = %for.body7.preheader, %for.body7
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body7 ], [ 0, %for.body7.preheader ]
%arrayidx9 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !20
- %2 = load i32* %arrayidx9, align 4, !dbg !20, !tbaa !22
+ %2 = load i32, i32* %arrayidx9, align 4, !dbg !20, !tbaa !22
%idxprom10 = sext i32 %2 to i64, !dbg !20
%arrayidx11 = getelementptr inbounds i32, i32* %B, i64 %idxprom10, !dbg !20
- %3 = load i32* %arrayidx11, align 4, !dbg !20, !tbaa !22
+ %3 = load i32, i32* %arrayidx11, align 4, !dbg !20, !tbaa !22
%inc12 = add nsw i32 %3, 1, !dbg !20
store i32 %inc12, i32* %arrayidx11, align 4, !dbg !20, !tbaa !22
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !16
diff --git a/llvm/test/Transforms/LoopVectorize/no_idiv_reduction.ll b/llvm/test/Transforms/LoopVectorize/no_idiv_reduction.ll
index 326ffc8828d..bfa48a2529b 100644
--- a/llvm/test/Transforms/LoopVectorize/no_idiv_reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/no_idiv_reduction.ll
@@ -12,7 +12,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.05 = phi i32 [ 80, %entry ], [ %div, %for.body ]
%arrayidx = getelementptr inbounds [128 x i32], [128 x i32]* @a, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%div = sdiv i32 %r.05, %0
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopVectorize/no_int_induction.ll b/llvm/test/Transforms/LoopVectorize/no_int_induction.ll
index 02848a067a2..7e6b26cd900 100644
--- a/llvm/test/Transforms/LoopVectorize/no_int_induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/no_int_induction.ll
@@ -21,7 +21,7 @@ define i32 @sum_array(i32* %A, i32 %n) nounwind uwtable readonly noinline ssp {
.lr.ph.i: ; preds = %0, %.lr.ph.i
%.03.i = phi i32* [ %6, %.lr.ph.i ], [ %A, %0 ]
%.012.i = phi i32 [ %5, %.lr.ph.i ], [ 0, %0 ]
- %4 = load i32* %.03.i, align 4
+ %4 = load i32, i32* %.03.i, align 4
%5 = add nsw i32 %4, %.012.i
%6 = getelementptr inbounds i32, i32* %.03.i, i64 1
%7 = icmp eq i32* %6, %2
@@ -48,7 +48,7 @@ define i32 @sum_array_as1(i32 addrspace(1)* %A, i32 %n) nounwind uwtable readonl
.lr.ph.i: ; preds = %0, %.lr.ph.i
%.03.i = phi i32 addrspace(1)* [ %6, %.lr.ph.i ], [ %A, %0 ]
%.012.i = phi i32 [ %5, %.lr.ph.i ], [ 0, %0 ]
- %4 = load i32 addrspace(1)* %.03.i, align 4
+ %4 = load i32, i32 addrspace(1)* %.03.i, align 4
%5 = add nsw i32 %4, %.012.i
%6 = getelementptr inbounds i32, i32 addrspace(1)* %.03.i, i64 1
%7 = icmp eq i32 addrspace(1)* %6, %2
diff --git a/llvm/test/Transforms/LoopVectorize/no_outside_user.ll b/llvm/test/Transforms/LoopVectorize/no_outside_user.ll
index bcd29c1a439..7030b6b4df2 100644
--- a/llvm/test/Transforms/LoopVectorize/no_outside_user.ll
+++ b/llvm/test/Transforms/LoopVectorize/no_outside_user.ll
@@ -20,7 +20,7 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
define i32 @main() {
bb:
- %b.promoted = load i32* @b, align 4
+ %b.promoted = load i32, i32* @b, align 4
br label %.lr.ph.i
.lr.ph.i:
@@ -56,7 +56,7 @@ f1.exit.loopexit:
define i32 @test2() {
entry:
store i32 0, i32* @x1, align 4
- %0 = load i32* @x0, align 4
+ %0 = load i32, i32* @x0, align 4
br label %for.cond1.preheader
for.cond1.preheader:
diff --git a/llvm/test/Transforms/LoopVectorize/no_switch.ll b/llvm/test/Transforms/LoopVectorize/no_switch.ll
index e24e91f4ec1..76c1c0c16eb 100644
--- a/llvm/test/Transforms/LoopVectorize/no_switch.ll
+++ b/llvm/test/Transforms/LoopVectorize/no_switch.ll
@@ -22,7 +22,7 @@ for.body.preheader: ; preds = %entry
for.body: ; preds = %for.body.preheader, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %for.body.preheader ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !14
- %0 = load i32* %arrayidx, align 4, !dbg !14, !tbaa !16
+ %0 = load i32, i32* %arrayidx, align 4, !dbg !14, !tbaa !16
switch i32 %0, label %for.inc [
i32 0, label %sw.bb
i32 1, label %sw.bb3
diff --git a/llvm/test/Transforms/LoopVectorize/non-const-n.ll b/llvm/test/Transforms/LoopVectorize/non-const-n.ll
index 4b957758d2f..9007b1b00fd 100644
--- a/llvm/test/Transforms/LoopVectorize/non-const-n.ll
+++ b/llvm/test/Transforms/LoopVectorize/non-const-n.ll
@@ -21,9 +21,9 @@ define void @example1(i32 %n) nounwind uwtable ssp {
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = add nsw i32 %5, %3
%7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %6, i32* %7, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/opt.ll b/llvm/test/Transforms/LoopVectorize/opt.ll
index 90d8b098caf..71b7e1f906b 100644
--- a/llvm/test/Transforms/LoopVectorize/opt.ll
+++ b/llvm/test/Transforms/LoopVectorize/opt.ll
@@ -17,7 +17,7 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%red.05 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %red.05
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 255
diff --git a/llvm/test/Transforms/LoopVectorize/ptr_loops.ll b/llvm/test/Transforms/LoopVectorize/ptr_loops.ll
index 4d7e315e2f0..62fc1d9cc69 100644
--- a/llvm/test/Transforms/LoopVectorize/ptr_loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/ptr_loops.ll
@@ -17,7 +17,7 @@ define i32 @_Z5test1v() nounwind uwtable ssp {
; <label>:1 ; preds = %0, %1
%p.02 = phi i32* [ getelementptr inbounds ([36 x i32]* @A, i64 0, i64 18), %0 ], [ %4, %1 ]
%b.01 = phi i32* [ getelementptr inbounds ([36 x i32]* @B, i64 0, i64 0), %0 ], [ %5, %1 ]
- %2 = load i32* %b.01, align 4
+ %2 = load i32, i32* %b.01, align 4
%3 = shl nsw i32 %2, 1
store i32 %3, i32* %p.02, align 4
%4 = getelementptr inbounds i32, i32* %p.02, i64 -1
@@ -40,7 +40,7 @@ define i32 @_Z5test2v() nounwind uwtable ssp {
; <label>:1 ; preds = %0, %1
%p.02 = phi i32* [ getelementptr inbounds ([36 x i32]* @A, i64 0, i64 25), %0 ], [ %3, %1 ]
%b.01 = phi i32* [ getelementptr inbounds ([36 x i32]* @B, i64 0, i64 2), %0 ], [ %4, %1 ]
- %2 = load i32* %b.01, align 4
+ %2 = load i32, i32* %b.01, align 4
store i32 %2, i32* %p.02, align 4
%3 = getelementptr inbounds i32, i32* %p.02, i64 -1
%4 = getelementptr inbounds i32, i32* %b.01, i64 1
@@ -62,7 +62,7 @@ define i32 @_Z5test3v() nounwind uwtable ssp {
; <label>:1 ; preds = %0, %1
%p.02 = phi i32* [ getelementptr inbounds ([36 x i32]* @A, i64 0, i64 29), %0 ], [ %3, %1 ]
%b.01 = phi i32* [ getelementptr inbounds ([36 x i32]* @B, i64 0, i64 5), %0 ], [ %4, %1 ]
- %2 = load i32* %b.01, align 4
+ %2 = load i32, i32* %b.01, align 4
store i32 %2, i32* %p.02, align 4
%3 = getelementptr inbounds i32, i32* %p.02, i64 -1
%4 = getelementptr inbounds i32, i32* %b.01, i64 1
diff --git a/llvm/test/Transforms/LoopVectorize/read-only.ll b/llvm/test/Transforms/LoopVectorize/read-only.ll
index ef12edb059a..f81afd6ad34 100644
--- a/llvm/test/Transforms/LoopVectorize/read-only.ll
+++ b/llvm/test/Transforms/LoopVectorize/read-only.ll
@@ -14,10 +14,10 @@ define i32 @read_only_func(i32* nocapture %A, i32* nocapture %B, i32 %n) nounwin
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%sum.02 = phi i32 [ %9, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = add nsw i64 %indvars.iv, 13
%5 = getelementptr inbounds i32, i32* %B, i64 %4
- %6 = load i32* %5, align 4
+ %6 = load i32, i32* %5, align 4
%7 = shl i32 %6, 1
%8 = add i32 %3, %sum.02
%9 = add i32 %8, %7
diff --git a/llvm/test/Transforms/LoopVectorize/reduction.ll b/llvm/test/Transforms/LoopVectorize/reduction.ll
index 70c63feea40..647e58a7e41 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction.ll
@@ -21,9 +21,9 @@ define i32 @reduction_sum(i32 %n, i32* noalias nocapture %A, i32* noalias nocapt
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%sum.02 = phi i32 [ %9, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = trunc i64 %indvars.iv to i32
%7 = add i32 %sum.02, %6
%8 = add i32 %7, %3
@@ -56,9 +56,9 @@ define i32 @reduction_prod(i32 %n, i32* noalias nocapture %A, i32* noalias nocap
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%prod.02 = phi i32 [ %9, %.lr.ph ], [ 1, %0 ]
%2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = trunc i64 %indvars.iv to i32
%7 = mul i32 %prod.02, %6
%8 = mul i32 %7, %3
@@ -91,9 +91,9 @@ define i32 @reduction_mix(i32 %n, i32* noalias nocapture %A, i32* noalias nocapt
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%sum.02 = phi i32 [ %9, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = mul nsw i32 %5, %3
%7 = trunc i64 %indvars.iv to i32
%8 = add i32 %sum.02, %7
@@ -124,9 +124,9 @@ define i32 @reduction_mul(i32 %n, i32* noalias nocapture %A, i32* noalias nocapt
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%sum.02 = phi i32 [ %9, %.lr.ph ], [ 19, %0 ]
%2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = trunc i64 %indvars.iv to i32
%7 = add i32 %3, %6
%8 = add i32 %7, %5
@@ -159,9 +159,9 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.09 = phi i32 [ %add, %for.body ], [ 120, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %in, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %coeff, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4
+ %1 = load i32, i32* %arrayidx2, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, %sum.09
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -192,9 +192,9 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%result.08 = phi i32 [ %and, %for.body ], [ -1, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4
+ %1 = load i32, i32* %arrayidx2, align 4
%add = add nsw i32 %1, %0
%and = and i32 %add, %result.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -224,9 +224,9 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%result.08 = phi i32 [ %or, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4
+ %1 = load i32, i32* %arrayidx2, align 4
%add = add nsw i32 %1, %0
%or = or i32 %add, %result.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -256,9 +256,9 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%result.08 = phi i32 [ %xor, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4
+ %1 = load i32, i32* %arrayidx2, align 4
%add = add nsw i32 %1, %0
%xor = xor i32 %add, %result.08
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -285,7 +285,7 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%x.05 = phi i32 [ %sub, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%sub = sub nsw i32 %0, %x.05
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -312,7 +312,7 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%x.05 = phi i32 [ %sub, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%sub = sub nsw i32 %x.05, %0
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -336,9 +336,9 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%sum.033 = phi float [ %S, %entry ], [ %sum.1, %for.inc ]
%arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float, float* %B, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%cmp3 = fcmp ogt float %0, %1
br i1 %cmp3, label %if.then, label %for.inc
@@ -381,9 +381,9 @@ for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%sum.033 = phi float [ %S, %entry ], [ %sum.1, %for.inc ]
%arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float, float* %B, i64 %indvars.iv
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%cmp3 = fcmp ogt float %0, %1
br i1 %cmp3, label %if.then, label %for.inc
@@ -428,7 +428,7 @@ for.body:
%sum2.09 = phi float [ 0.000000e+00, %entry ], [ %add1, %for.body ]
%sum.08 = phi float [ %S, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%add = fadd fast float %sum.08, %0
%add1 = fadd fast float %sum2.09, %add
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/reverse_induction.ll b/llvm/test/Transforms/LoopVectorize/reverse_induction.ll
index 943fb9e59a3..6b63a0d8db6 100644
--- a/llvm/test/Transforms/LoopVectorize/reverse_induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/reverse_induction.ll
@@ -19,7 +19,7 @@ for.body:
%redux5 = phi i32 [ 0, %entry ], [ %inc.redux, %for.body ]
%add.i = add i64 %add.i7, -1
%kind_.i = getelementptr inbounds i32, i32* %ptr, i64 %add.i
- %tmp.i1 = load i32* %kind_.i, align 4
+ %tmp.i1 = load i32, i32* %kind_.i, align 4
%inc.redux = add i32 %tmp.i1, %redux5
%inc4 = add i32 %i.06, 1
%exitcond = icmp ne i32 %inc4, 1024
@@ -42,7 +42,7 @@ for.body:
%redux5 = phi i32 [ 0, %entry ], [ %inc.redux, %for.body ]
%add.i = add i128 %add.i7, -1
%kind_.i = getelementptr inbounds i32, i32* %ptr, i128 %add.i
- %tmp.i1 = load i32* %kind_.i, align 4
+ %tmp.i1 = load i32, i32* %kind_.i, align 4
%inc.redux = add i32 %tmp.i1, %redux5
%inc4 = add i32 %i.06, 1
%exitcond = icmp ne i32 %inc4, 1024
@@ -66,7 +66,7 @@ for.body:
%redux5 = phi i32 [ 0, %entry ], [ %inc.redux, %for.body ]
%add.i = add i16 %add.i7, -1
%kind_.i = getelementptr inbounds i32, i32* %ptr, i16 %add.i
- %tmp.i1 = load i32* %kind_.i, align 4
+ %tmp.i1 = load i32, i32* %kind_.i, align 4
%inc.redux = add i32 %tmp.i1, %redux5
%inc4 = add i32 %i.06, 1
%exitcond = icmp ne i32 %inc4, 1024
diff --git a/llvm/test/Transforms/LoopVectorize/reverse_iter.ll b/llvm/test/Transforms/LoopVectorize/reverse_iter.ll
index b0ecb2e0716..5bbc769a20a 100644
--- a/llvm/test/Transforms/LoopVectorize/reverse_iter.ll
+++ b/llvm/test/Transforms/LoopVectorize/reverse_iter.ll
@@ -31,7 +31,7 @@ define i32 @foo(i32 %n, i32* nocapture %A) {
%5 = shl nsw i32 %4, 1
%6 = sext i32 %5 to i64
%7 = getelementptr inbounds i32, i32* %A, i64 %6
- %8 = load i32* %7, align 4
+ %8 = load i32, i32* %7, align 4
%9 = add nsw i32 %8, %sum.01
%indvars.iv.next = add i64 %indvars.iv, -1
%10 = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll b/llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll
index 1d336477172..8e7ac1f865a 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll
@@ -38,7 +38,7 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
%arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %idxprom
- %0 = load i32 addrspace(1)* %arrayidx, align 4
+ %0 = load i32, i32 addrspace(1)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
%arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom1
@@ -65,7 +65,7 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
%arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %idxprom
- %0 = load i32 addrspace(1)* %arrayidx, align 4
+ %0 = load i32, i32 addrspace(1)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %idxprom1
@@ -92,7 +92,7 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
%arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom1
@@ -120,7 +120,7 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
%arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %idxprom
- %0 = load i32 addrspace(1)* %arrayidx, align 4
+ %0 = load i32, i32 addrspace(1)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %idxprom1
@@ -148,7 +148,7 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
%arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom1
@@ -177,7 +177,7 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom
- %0 = load i32 addrspace(1)* %arrayidx, align 4
+ %0 = load i32, i32 addrspace(1)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
%arrayidx2 = getelementptr inbounds i32, i32* %b, i64 %idxprom1
@@ -205,7 +205,7 @@ for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32] addrspace(2)* @q_as2, i64 0, i64 %idxprom
- %0 = load i32 addrspace(2)* %arrayidx, align 4
+ %0 = load i32, i32 addrspace(2)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
%arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom1
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll b/llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll
index bc3345bf145..6ee983d22c6 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll
@@ -15,9 +15,9 @@ entry:
for.body: ; preds = %entry, %for.body
%i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %i.01
- %0 = load i32 addrspace(1)* %arrayidx, align 4
+ %0 = load i32, i32 addrspace(1)* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %c, i64 %i.01
- %1 = load i32 addrspace(1)* %arrayidx1, align 4
+ %1 = load i32, i32 addrspace(1)* %arrayidx1, align 4
%add = add nsw i32 %0, %1
%arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %i.01
store i32 %add, i32 addrspace(1)* %arrayidx2, align 4
@@ -40,9 +40,9 @@ entry:
for.body: ; preds = %entry, %for.body
%i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %i.01
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %c, i64 %i.01
- %1 = load i32* %arrayidx1, align 4
+ %1 = load i32, i32* %arrayidx1, align 4
%add = add nsw i32 %0, %1
%arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %i.01
store i32 %add, i32 addrspace(1)* %arrayidx2, align 4
@@ -65,9 +65,9 @@ entry:
for.body: ; preds = %entry, %for.body
%i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %i.01
- %0 = load i32 addrspace(1)* %arrayidx, align 4
+ %0 = load i32, i32 addrspace(1)* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %c, i64 %i.01
- %1 = load i32* %arrayidx1, align 4
+ %1 = load i32, i32* %arrayidx1, align 4
%add = add nsw i32 %0, %1
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %i.01
store i32 %add, i32* %arrayidx2, align 4
@@ -90,9 +90,9 @@ entry:
for.body: ; preds = %entry, %for.body
%i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %i.01
- %0 = load i32 addrspace(1)* %arrayidx, align 4
+ %0 = load i32, i32 addrspace(1)* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %c, i64 %i.01
- %1 = load i32 addrspace(1)* %arrayidx1, align 4
+ %1 = load i32, i32 addrspace(1)* %arrayidx1, align 4
%add = add nsw i32 %0, %1
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %i.01
store i32 %add, i32* %arrayidx2, align 4
@@ -115,9 +115,9 @@ entry:
for.body: ; preds = %entry, %for.body
%i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %i.01
- %0 = load i32 addrspace(1)* %arrayidx, align 4
+ %0 = load i32, i32 addrspace(1)* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32 addrspace(2)* %c, i64 %i.01
- %1 = load i32 addrspace(2)* %arrayidx1, align 4
+ %1 = load i32, i32 addrspace(2)* %arrayidx1, align 4
%add = add nsw i32 %0, %1
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %i.01
store i32 %add, i32* %arrayidx2, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check-readonly.ll b/llvm/test/Transforms/LoopVectorize/runtime-check-readonly.ll
index 75a0d56dc6f..a3b5a598d22 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check-readonly.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check-readonly.ll
@@ -23,9 +23,9 @@ entry:
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4
+ %1 = load i32, i32* %arrayidx2, align 4
%add = add nsw i32 %1, %0
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
store i32 %add, i32* %arrayidx4, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check.ll b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
index 483d35a31dc..b1c5d402c45 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
@@ -22,7 +22,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%mul = fmul float %0, 3.000000e+00
%arrayidx2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
store float %mul, float* %arrayidx2, align 4
@@ -48,10 +48,10 @@ for.body:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
%ind.sum = add i64 %iv, %offset
%arr.idx = getelementptr inbounds float, float* %a, i64 %ind.sum
- %l1 = load float* %arr.idx, align 4
+ %l1 = load float, float* %arr.idx, align 4
%ind.sum2 = add i64 %iv, %offset2
%arr.idx2 = getelementptr inbounds float, float* %a, i64 %ind.sum2
- %l2 = load float* %arr.idx2, align 4
+ %l2 = load float, float* %arr.idx2, align 4
%m = fmul fast float %b, %l2
%ad = fadd fast float %l1, %m
store float %ad, float* %arr.idx, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-limit.ll b/llvm/test/Transforms/LoopVectorize/runtime-limit.ll
index 04b44cd9f86..6bc71e160cc 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-limit.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-limit.ll
@@ -14,18 +14,18 @@ entry:
for.body: ; preds = %for.body, %entry
%i.016 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.016
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %B, i64 %i.016
- %1 = load i32* %arrayidx1, align 4
+ %1 = load i32, i32* %arrayidx1, align 4
%add = add nsw i32 %1, %0
%arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %i.016
- %2 = load i32* %arrayidx2, align 4
+ %2 = load i32, i32* %arrayidx2, align 4
%add3 = add nsw i32 %add, %2
%arrayidx4 = getelementptr inbounds i32, i32* %E, i64 %i.016
- %3 = load i32* %arrayidx4, align 4
+ %3 = load i32, i32* %arrayidx4, align 4
%add5 = add nsw i32 %add3, %3
%arrayidx6 = getelementptr inbounds i32, i32* %F, i64 %i.016
- %4 = load i32* %arrayidx6, align 4
+ %4 = load i32, i32* %arrayidx6, align 4
%add7 = add nsw i32 %add5, %4
%arrayidx8 = getelementptr inbounds i32, i32* %out, i64 %i.016
store i32 %add7, i32* %arrayidx8, align 4
@@ -48,29 +48,29 @@ entry:
for.body: ; preds = %for.body, %entry
%i.037 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.037
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %B, i64 %i.037
- %1 = load i32* %arrayidx1, align 4
+ %1 = load i32, i32* %arrayidx1, align 4
%add = add nsw i32 %1, %0
%arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %i.037
- %2 = load i32* %arrayidx2, align 4
+ %2 = load i32, i32* %arrayidx2, align 4
%add3 = add nsw i32 %add, %2
%arrayidx4 = getelementptr inbounds i32, i32* %E, i64 %i.037
- %3 = load i32* %arrayidx4, align 4
+ %3 = load i32, i32* %arrayidx4, align 4
%add5 = add nsw i32 %add3, %3
%arrayidx6 = getelementptr inbounds i32, i32* %F, i64 %i.037
- %4 = load i32* %arrayidx6, align 4
+ %4 = load i32, i32* %arrayidx6, align 4
%add7 = add nsw i32 %add5, %4
%arrayidx8 = getelementptr inbounds i32, i32* %out, i64 %i.037
store i32 %add7, i32* %arrayidx8, align 4
- %5 = load i32* %arrayidx, align 4
- %6 = load i32* %arrayidx1, align 4
+ %5 = load i32, i32* %arrayidx, align 4
+ %6 = load i32, i32* %arrayidx1, align 4
%add11 = add nsw i32 %6, %5
- %7 = load i32* %arrayidx2, align 4
+ %7 = load i32, i32* %arrayidx2, align 4
%add13 = add nsw i32 %add11, %7
- %8 = load i32* %arrayidx4, align 4
+ %8 = load i32, i32* %arrayidx4, align 4
%add15 = add nsw i32 %add13, %8
- %9 = load i32* %arrayidx6, align 4
+ %9 = load i32, i32* %arrayidx6, align 4
%add17 = add nsw i32 %add15, %9
%arrayidx18 = getelementptr inbounds i32, i32* %out2, i64 %i.037
store i32 %add17, i32* %arrayidx18, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/safegep.ll b/llvm/test/Transforms/LoopVectorize/safegep.ll
index 1003759541e..ecef8138f0b 100644
--- a/llvm/test/Transforms/LoopVectorize/safegep.ll
+++ b/llvm/test/Transforms/LoopVectorize/safegep.ll
@@ -18,9 +18,9 @@ entry:
"<bb 3>":
%i_15 = phi i32 [ 0, %entry ], [ %i_19, %"<bb 3>" ]
%pp3 = getelementptr float, float* %A, i32 %i_15
- %D.1396_10 = load float* %pp3, align 4
+ %D.1396_10 = load float, float* %pp3, align 4
%pp24 = getelementptr float, float* %B, i32 %i_15
- %D.1398_15 = load float* %pp24, align 4
+ %D.1398_15 = load float, float* %pp24, align 4
%D.1399_17 = fadd float %D.1398_15, %K
%D.1400_18 = fmul float %D.1396_10, %D.1399_17
store float %D.1400_18, float* %pp3, align 4
@@ -44,9 +44,9 @@ entry:
"<bb 3>":
%i_15 = phi i32 [ 0, %entry ], [ %i_19, %"<bb 3>" ]
%pp3 = getelementptr float, float addrspace(5) * %A, i32 %i_15
- %D.1396_10 = load float addrspace(5) * %pp3, align 4
+ %D.1396_10 = load float, float addrspace(5) * %pp3, align 4
%pp24 = getelementptr float, float* %B, i32 %i_15
- %D.1398_15 = load float* %pp24, align 4
+ %D.1398_15 = load float, float* %pp24, align 4
%D.1399_17 = fadd float %D.1398_15, %K
%D.1400_18 = fmul float %D.1396_10, %D.1399_17
store float %D.1400_18, float addrspace(5) * %pp3, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/same-base-access.ll b/llvm/test/Transforms/LoopVectorize/same-base-access.ll
index fe94dd14b68..31cff0ee653 100644
--- a/llvm/test/Transforms/LoopVectorize/same-base-access.ll
+++ b/llvm/test/Transforms/LoopVectorize/same-base-access.ll
@@ -26,33 +26,33 @@ define i32 @kernel11(double* %x, double* %y, i32 %n) nounwind uwtable ssp {
br label %4
; <label>:4 ; preds = %25, %0
- %5 = load i32* %k, align 4
- %6 = load i32* %3, align 4
+ %5 = load i32, i32* %k, align 4
+ %6 = load i32, i32* %3, align 4
%7 = icmp slt i32 %5, %6
br i1 %7, label %8, label %28
; <label>:8 ; preds = %4
- %9 = load i32* %k, align 4
+ %9 = load i32, i32* %k, align 4
%10 = sub nsw i32 %9, 1
%11 = sext i32 %10 to i64
- %12 = load double** %1, align 8
+ %12 = load double*, double** %1, align 8
%13 = getelementptr inbounds double, double* %12, i64 %11
- %14 = load double* %13, align 8
- %15 = load i32* %k, align 4
+ %14 = load double, double* %13, align 8
+ %15 = load i32, i32* %k, align 4
%16 = sext i32 %15 to i64
- %17 = load double** %2, align 8
+ %17 = load double*, double** %2, align 8
%18 = getelementptr inbounds double, double* %17, i64 %16
- %19 = load double* %18, align 8
+ %19 = load double, double* %18, align 8
%20 = fadd double %14, %19
- %21 = load i32* %k, align 4
+ %21 = load i32, i32* %k, align 4
%22 = sext i32 %21 to i64
- %23 = load double** %1, align 8
+ %23 = load double*, double** %1, align 8
%24 = getelementptr inbounds double, double* %23, i64 %22
store double %20, double* %24, align 8
br label %25
; <label>:25 ; preds = %8
- %26 = load i32* %k, align 4
+ %26 = load i32, i32* %k, align 4
%27 = add nsw i32 %26, 1
store i32 %27, i32* %k, align 4
br label %4
@@ -87,7 +87,7 @@ define i32 @func2(i32* nocapture %a) nounwind uwtable ssp {
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %7 ]
%2 = mul nsw i64 %indvars.iv, 7
%3 = getelementptr inbounds i32, i32* %a, i64 %2
- %4 = load i32* %3, align 4
+ %4 = load i32, i32* %3, align 4
%5 = icmp sgt i32 %4, 3
br i1 %5, label %6, label %7
diff --git a/llvm/test/Transforms/LoopVectorize/scalar-select.ll b/llvm/test/Transforms/LoopVectorize/scalar-select.ll
index b10e80e21ce..b17b2028821 100644
--- a/llvm/test/Transforms/LoopVectorize/scalar-select.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalar-select.ll
@@ -19,9 +19,9 @@ define void @example1(i1 %cond) nounwind uwtable ssp {
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = add nsw i32 %5, %3
%7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
%sel = select i1 %cond, i32 %6, i32 zeroinitializer
diff --git a/llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll b/llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll
index 58abcb0d350..cfc1d6d40a7 100644
--- a/llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll
@@ -29,14 +29,14 @@ for.cond4.preheader: ; preds = %for.cond
br i1 %cmp514, label %for.cond7.preheader.lr.ph, label %for.end26
for.cond7.preheader.lr.ph: ; preds = %for.cond4.preheader
- %0 = load i32** @e, align 8, !tbaa !4
+ %0 = load i32*, i32** @e, align 8, !tbaa !4
br label %for.cond7.preheader
for.cond7.preheader: ; preds = %for.cond7.preheader.lr.ph, %for.inc23
%y.017 = phi i32 [ 0, %for.cond7.preheader.lr.ph ], [ %inc24, %for.inc23 ]
%i.116 = phi i32 [ 0, %for.cond7.preheader.lr.ph ], [ %i.2.lcssa, %for.inc23 ]
%n.015 = phi i32 [ undef, %for.cond7.preheader.lr.ph ], [ %inc25, %for.inc23 ]
- %1 = load i32* @b, align 4, !tbaa !5
+ %1 = load i32, i32* @b, align 4, !tbaa !5
%tobool11 = icmp eq i32 %1, 0
br i1 %tobool11, label %for.inc23, label %for.body8.lr.ph
@@ -63,7 +63,7 @@ for.body13: ; preds = %for.body13.lr.ph, %
%indvars.iv = phi i64 [ %3, %for.body13.lr.ph ], [ %indvars.iv.next, %for.body13 ]
%add.ptr.sum = add i64 %idx.ext, %indvars.iv
%arrayidx = getelementptr inbounds i32, i32* @a, i64 %add.ptr.sum
- %4 = load i32* %arrayidx, align 4, !tbaa !5
+ %4 = load i32, i32* %arrayidx, align 4, !tbaa !5
%arrayidx15 = getelementptr inbounds i32, i32* %0, i64 %indvars.iv
store i32 %4, i32* %arrayidx15, align 4, !tbaa !5
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -75,11 +75,11 @@ for.cond11.for.inc19_crit_edge: ; preds = %for.body13
br label %for.inc19
for.inc19: ; preds = %for.cond11.for.inc19_crit_edge, %for.body8
- %6 = load i32* @c, align 4, !tbaa !5
+ %6 = load i32, i32* @c, align 4, !tbaa !5
%inc20 = add nsw i32 %6, 1
store i32 %inc20, i32* @c, align 4, !tbaa !5
%indvars.iv.next20 = add i64 %indvars.iv19, 1
- %7 = load i32* @b, align 4, !tbaa !5
+ %7 = load i32, i32* @b, align 4, !tbaa !5
%tobool = icmp eq i32 %7, 0
br i1 %tobool, label %for.cond7.for.inc23_crit_edge, label %for.body8
diff --git a/llvm/test/Transforms/LoopVectorize/simple-unroll.ll b/llvm/test/Transforms/LoopVectorize/simple-unroll.ll
index c925a053c13..fc996ed3818 100644
--- a/llvm/test/Transforms/LoopVectorize/simple-unroll.ll
+++ b/llvm/test/Transforms/LoopVectorize/simple-unroll.ll
@@ -24,7 +24,7 @@ define void @inc(i32 %n) nounwind uwtable noinline ssp {
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = trunc i64 %indvars.iv to i32
%5 = add nsw i32 %3, %4
store i32 %5, i32* %2, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/small-loop.ll b/llvm/test/Transforms/LoopVectorize/small-loop.ll
index bc6e21f8118..ce606d1b360 100644
--- a/llvm/test/Transforms/LoopVectorize/small-loop.ll
+++ b/llvm/test/Transforms/LoopVectorize/small-loop.ll
@@ -16,9 +16,9 @@ define void @example1() nounwind uwtable ssp {
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = add nsw i32 %5, %3
%7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %6, i32* %7, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/start-non-zero.ll b/llvm/test/Transforms/LoopVectorize/start-non-zero.ll
index 12c15a9b3d8..b444a692319 100644
--- a/llvm/test/Transforms/LoopVectorize/start-non-zero.ll
+++ b/llvm/test/Transforms/LoopVectorize/start-non-zero.ll
@@ -18,7 +18,7 @@ for.body.lr.ph: ; preds = %entry
for.body: ; preds = %for.body.lr.ph, %for.body
%indvars.iv = phi i64 [ %0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %1 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx, align 4
%mul = mul nuw i32 %1, 333
store i32 %mul, i32* %arrayidx, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/store-shuffle-bug.ll b/llvm/test/Transforms/LoopVectorize/store-shuffle-bug.ll
index 2512d4f2f41..4d62df15717 100644
--- a/llvm/test/Transforms/LoopVectorize/store-shuffle-bug.ll
+++ b/llvm/test/Transforms/LoopVectorize/store-shuffle-bug.ll
@@ -32,12 +32,12 @@ for.body:
%0 = add i64 %indvars.iv, 1
%arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* @uf, i64 0, i64 %0
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* @xi, i64 0, i64 %0
- %1 = load i32* %arrayidx3, align 4
- %2 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx, align 4
%add4 = add nsw i32 %2, %1
store i32 %add4, i32* %arrayidx, align 4
%arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* @q, i64 0, i64 %0
- %3 = load i32* %arrayidx7, align 4
+ %3 = load i32, i32* %arrayidx7, align 4
%add8 = add nsw i32 %add4, %3
store i32 %add8, i32* %arrayidx, align 4
%indvars.iv.next = add i64 %indvars.iv, -1
diff --git a/llvm/test/Transforms/LoopVectorize/struct_access.ll b/llvm/test/Transforms/LoopVectorize/struct_access.ll
index 0d68e2d59a1..1e4019a7810 100644
--- a/llvm/test/Transforms/LoopVectorize/struct_access.ll
+++ b/llvm/test/Transforms/LoopVectorize/struct_access.ll
@@ -33,7 +33,7 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%x = getelementptr inbounds %struct.coordinate, %struct.coordinate* %A, i64 %indvars.iv, i32 0
- %0 = load i32* %x, align 4
+ %0 = load i32, i32* %x, align 4
%add = add nsw i32 %0, %sum.05
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -75,7 +75,7 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%x = getelementptr inbounds %struct.lit, %struct.lit* %A, i64 %indvars.iv, i32 0
- %0 = load i32* %x, align 4
+ %0 = load i32, i32* %x, align 4
%add = add nsw i32 %0, %sum.05
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopVectorize/tbaa-nodep.ll b/llvm/test/Transforms/LoopVectorize/tbaa-nodep.ll
index 8469a9d95c6..06d00023002 100644
--- a/llvm/test/Transforms/LoopVectorize/tbaa-nodep.ll
+++ b/llvm/test/Transforms/LoopVectorize/tbaa-nodep.ll
@@ -11,7 +11,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float, float* %arrayidx, align 4, !tbaa !0
%conv = fptosi float %0 to i32
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %conv, i32* %arrayidx2, align 4, !tbaa !4
@@ -30,7 +30,7 @@ for.end: ; preds = %for.body
; CHECK-NEXT: br label %vector.body
; CHECK: vector.body:
-; CHECK: load <4 x float>* %{{.*}}, align 4, !tbaa
+; CHECK: load <4 x float>, <4 x float>* %{{.*}}, align 4, !tbaa
; CHECK: store <4 x i32> %{{.*}}, <4 x i32>* %{{.*}}, align 4, !tbaa
; CHECK: ret i32 0
@@ -38,7 +38,7 @@ for.end: ; preds = %for.body
; CHECK-NOTBAA-LABEL: @test1
; CHECK-NOTBAA: icmp uge i32*
-; CHECK-NOTBAA: load <4 x float>* %{{.*}}, align 4, !tbaa
+; CHECK-NOTBAA: load <4 x float>, <4 x float>* %{{.*}}, align 4, !tbaa
; CHECK-NOTBAA: store <4 x i32> %{{.*}}, <4 x i32>* %{{.*}}, align 4, !tbaa
; CHECK-NOTBAA: ret i32 0
@@ -52,9 +52,9 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4, !tbaa !0
+ %0 = load float, float* %arrayidx, align 4, !tbaa !0
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %1 = load i32* %arrayidx2, align 4, !tbaa !4
+ %1 = load i32, i32* %arrayidx2, align 4, !tbaa !4
%conv = sitofp i32 %1 to float
%mul = fmul float %0, %conv
%arrayidx4 = getelementptr inbounds float, float* %c, i64 %indvars.iv
@@ -74,7 +74,7 @@ for.end: ; preds = %for.body
; CHECK: icmp uge float*
; CHECK-NOT: icmp uge i32*
-; CHECK: load <4 x float>* %{{.*}}, align 4, !tbaa
+; CHECK: load <4 x float>, <4 x float>* %{{.*}}, align 4, !tbaa
; CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 4, !tbaa
; CHECK: ret i32 0
@@ -85,7 +85,7 @@ for.end: ; preds = %for.body
; CHECK-NOTBAA-DAG: icmp uge float*
; CHECK-NOTBAA-DAG: icmp uge i32*
-; CHECK-NOTBAA: load <4 x float>* %{{.*}}, align 4, !tbaa
+; CHECK-NOTBAA: load <4 x float>, <4 x float>* %{{.*}}, align 4, !tbaa
; CHECK-NOTBAA: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 4, !tbaa
; CHECK-NOTBAA: ret i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/unroll_novec.ll b/llvm/test/Transforms/LoopVectorize/unroll_novec.ll
index fc23d3d98ef..c23ad77a31f 100644
--- a/llvm/test/Transforms/LoopVectorize/unroll_novec.ll
+++ b/llvm/test/Transforms/LoopVectorize/unroll_novec.ll
@@ -10,11 +10,11 @@ target triple = "x86_64-apple-macosx10.8.0"
; a[i] += i;
; }
;CHECK-LABEL: @inc(
-;CHECK: load i32*
-;CHECK: load i32*
-;CHECK: load i32*
-;CHECK: load i32*
-;CHECK-NOT: load i32*
+;CHECK: load i32, i32*
+;CHECK: load i32, i32*
+;CHECK: load i32, i32*
+;CHECK: load i32, i32*
+;CHECK-NOT: load i32, i32*
;CHECK: add nsw i32
;CHECK: add nsw i32
;CHECK: add nsw i32
@@ -34,7 +34,7 @@ define void @inc(i32 %n) nounwind uwtable noinline ssp {
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = trunc i64 %indvars.iv to i32
%5 = add nsw i32 %3, %4
store i32 %5, i32* %2, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/value-ptr-bug.ll b/llvm/test/Transforms/LoopVectorize/value-ptr-bug.ll
index 4fb0318b3d5..ce4601f7b92 100644
--- a/llvm/test/Transforms/LoopVectorize/value-ptr-bug.ll
+++ b/llvm/test/Transforms/LoopVectorize/value-ptr-bug.ll
@@ -38,7 +38,7 @@ do.body272:
%sp.4 = phi i8* [ %tmp30, %block1 ], [ %incdec.ptr273, %do.body272 ]
%dp.addr.4 = phi i8* [ %tmp29, %block1 ], [ %incdec.ptr274, %do.body272 ]
%incdec.ptr273 = getelementptr inbounds i8, i8* %sp.4, i64 1
- %tmp31 = load i8* %sp.4, align 1
+ %tmp31 = load i8, i8* %sp.4, align 1
%incdec.ptr274 = getelementptr inbounds i8, i8* %dp.addr.4, i64 1
store i8 %tmp31, i8* %dp.addr.4, align 1
%dec = add i32 %row_width.5, -1
diff --git a/llvm/test/Transforms/LoopVectorize/vect.omp.persistence.ll b/llvm/test/Transforms/LoopVectorize/vect.omp.persistence.ll
index 4ad34ada207..0d7f8b9398f 100644
--- a/llvm/test/Transforms/LoopVectorize/vect.omp.persistence.ll
+++ b/llvm/test/Transforms/LoopVectorize/vect.omp.persistence.ll
@@ -46,10 +46,10 @@ for.body:
%0 = add nsw i64 %indvars.iv, -5
%arrayidx = getelementptr inbounds float, float* %a, i64 %0
- %1 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
+ %1 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
%2 = add nsw i64 %indvars.iv, 2
%arrayidx2 = getelementptr inbounds float, float* %a, i64 %2
- %3 = load float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
+ %3 = load float, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
%mul = fmul float %1, %3
%arrayidx4 = getelementptr inbounds float, float* %a, i64 %indvars.iv
store float %mul, float* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !1
diff --git a/llvm/test/Transforms/LoopVectorize/vect.stats.ll b/llvm/test/Transforms/LoopVectorize/vect.stats.ll
index 4a748f28b7a..7bf5a608484 100644
--- a/llvm/test/Transforms/LoopVectorize/vect.stats.ll
+++ b/llvm/test/Transforms/LoopVectorize/vect.stats.ll
@@ -21,7 +21,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv2 = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv2
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%mul = fmul float %0, %0
store float %mul, float* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv2, 1
@@ -43,10 +43,10 @@ for.body: ; preds = %entry, %for.body
%indvars.iv2 = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%0 = add nsw i64 %indvars.iv2, -5
%arrayidx = getelementptr inbounds float, float* %a, i64 %0
- %1 = load float* %arrayidx, align 4
+ %1 = load float, float* %arrayidx, align 4
%2 = add nsw i64 %indvars.iv2, 2
%arrayidx2 = getelementptr inbounds float, float* %a, i64 %2
- %3 = load float* %arrayidx2, align 4
+ %3 = load float, float* %arrayidx2, align 4
%mul = fmul float %1, %3
%arrayidx4 = getelementptr inbounds float, float* %a, i64 %indvars.iv2
store float %mul, float* %arrayidx4, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-once.ll b/llvm/test/Transforms/LoopVectorize/vectorize-once.ll
index f5f39cc1f5b..5d0e96b2c5a 100644
--- a/llvm/test/Transforms/LoopVectorize/vectorize-once.ll
+++ b/llvm/test/Transforms/LoopVectorize/vectorize-once.ll
@@ -29,7 +29,7 @@ entry:
for.body.i: ; preds = %entry, %for.body.i
%__init.addr.05.i = phi i32 [ %add.i, %for.body.i ], [ 0, %entry ]
%__first.addr.04.i = phi i32* [ %incdec.ptr.i, %for.body.i ], [ %A, %entry ]
- %0 = load i32* %__first.addr.04.i, align 4
+ %0 = load i32, i32* %__first.addr.04.i, align 4
%add.i = add nsw i32 %0, %__init.addr.05.i
%incdec.ptr.i = getelementptr inbounds i32, i32* %__first.addr.04.i, i64 1
%cmp.i = icmp eq i32* %incdec.ptr.i, %add.ptr
@@ -55,7 +55,7 @@ entry:
for.body.i: ; preds = %entry, %for.body.i
%__init.addr.05.i = phi i32 [ %add.i, %for.body.i ], [ 0, %entry ]
%__first.addr.04.i = phi i32* [ %incdec.ptr.i, %for.body.i ], [ %A, %entry ]
- %0 = load i32* %__first.addr.04.i, align 4
+ %0 = load i32, i32* %__first.addr.04.i, align 4
%add.i = add nsw i32 %0, %__init.addr.05.i
%incdec.ptr.i = getelementptr inbounds i32, i32* %__first.addr.04.i, i64 1
%cmp.i = icmp eq i32* %incdec.ptr.i, %add.ptr
diff --git a/llvm/test/Transforms/LoopVectorize/version-mem-access.ll b/llvm/test/Transforms/LoopVectorize/version-mem-access.ll
index 1a0b81ce9ea..a9d319e5a2d 100644
--- a/llvm/test/Transforms/LoopVectorize/version-mem-access.ll
+++ b/llvm/test/Transforms/LoopVectorize/version-mem-access.ll
@@ -35,10 +35,10 @@ for.body:
%mul = mul i32 %iv.trunc, %BStride
%mul64 = zext i32 %mul to i64
%arrayidx = getelementptr inbounds i32, i32* %B, i64 %mul64
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%mul2 = mul nsw i64 %indvars.iv, %CStride
%arrayidx3 = getelementptr inbounds i32, i32* %C, i64 %mul2
- %1 = load i32* %arrayidx3, align 4
+ %1 = load i32, i32* %arrayidx3, align 4
%mul4 = mul nsw i32 %1, %0
%mul3 = mul nsw i64 %indvars.iv, %AStride
%arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %mul3
@@ -77,7 +77,7 @@ for.body:
%mul = mul nsw i32 %0, %conv
%idxprom = sext i32 %mul to i64
%arrayidx = getelementptr inbounds double, double* %x, i64 %idxprom
- %1 = load double* %arrayidx, align 8
+ %1 = load double, double* %arrayidx, align 8
%arrayidx3 = getelementptr inbounds double, double* %c, i64 %indvars.iv
store double %1, double* %arrayidx3, align 8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopVectorize/write-only.ll b/llvm/test/Transforms/LoopVectorize/write-only.ll
index cd1b33017af..b2bc0458a23 100644
--- a/llvm/test/Transforms/LoopVectorize/write-only.ll
+++ b/llvm/test/Transforms/LoopVectorize/write-only.ll
@@ -13,7 +13,7 @@ define i32 @read_mod_write_single_ptr(float* nocapture %a, i32 %n) nounwind uwta
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
- %3 = load float* %2, align 4
+ %3 = load float, float* %2, align 4
%4 = fmul float %3, 3.000000e+00
store float %4, float* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LowerAtomic/atomic-swap.ll b/llvm/test/Transforms/LowerAtomic/atomic-swap.ll
index cb1124136f8..77000527a11 100644
--- a/llvm/test/Transforms/LowerAtomic/atomic-swap.ll
+++ b/llvm/test/Transforms/LowerAtomic/atomic-swap.ll
@@ -5,7 +5,7 @@ define i8 @cmpswap() {
%i = alloca i8
%pair = cmpxchg i8* %i, i8 0, i8 42 monotonic monotonic
%j = extractvalue { i8, i1 } %pair, 0
-; CHECK: [[OLDVAL:%[a-z0-9]+]] = load i8* [[ADDR:%[a-z0-9]+]]
+; CHECK: [[OLDVAL:%[a-z0-9]+]] = load i8, i8* [[ADDR:%[a-z0-9]+]]
; CHECK-NEXT: [[SAME:%[a-z0-9]+]] = icmp eq i8 [[OLDVAL]], 0
; CHECK-NEXT: [[TO_STORE:%[a-z0-9]+]] = select i1 [[SAME]], i8 42, i8 [[OLDVAL]]
; CHECK-NEXT: store i8 [[TO_STORE]], i8* [[ADDR]]
diff --git a/llvm/test/Transforms/LowerBitSets/simple.ll b/llvm/test/Transforms/LowerBitSets/simple.ll
index 79e01b9f7f9..704ab720cbf 100644
--- a/llvm/test/Transforms/LowerBitSets/simple.ll
+++ b/llvm/test/Transforms/LowerBitSets/simple.ll
@@ -61,7 +61,7 @@ define i1 @foo(i32* %p) {
; CHECK: [[R8:%[^ ]*]] = lshr i32 [[R5]], 5
; CHECK: [[R9:%[^ ]*]] = getelementptr i32, i32* bitcast ([9 x i8]* @bitset1.bits to i32*), i32 [[R8]]
- ; CHECK: [[R10:%[^ ]*]] = load i32* [[R9]]
+ ; CHECK: [[R10:%[^ ]*]] = load i32, i32* [[R9]]
; CHECK: [[R11:%[^ ]*]] = and i32 [[R5]], 31
; CHECK: [[R12:%[^ ]*]] = shl i32 1, [[R11]]
; CHECK: [[R13:%[^ ]*]] = and i32 [[R10]], [[R12]]
@@ -107,7 +107,7 @@ define i1 @baz(i32* %p) {
; CHECK: [[T8:%[^ ]*]] = lshr i32 [[T5]], 5
; CHECK: [[T9:%[^ ]*]] = getelementptr i32, i32* bitcast ([9 x i8]* @bitset3.bits to i32*), i32 [[T8]]
- ; CHECK: [[T10:%[^ ]*]] = load i32* [[T9]]
+ ; CHECK: [[T10:%[^ ]*]] = load i32, i32* [[T9]]
; CHECK: [[T11:%[^ ]*]] = and i32 [[T5]], 31
; CHECK: [[T12:%[^ ]*]] = shl i32 1, [[T11]]
; CHECK: [[T13:%[^ ]*]] = and i32 [[T10]], [[T12]]
diff --git a/llvm/test/Transforms/LowerExpectIntrinsic/basic.ll b/llvm/test/Transforms/LowerExpectIntrinsic/basic.ll
index f4326c8393c..5d723710be0 100644
--- a/llvm/test/Transforms/LowerExpectIntrinsic/basic.ll
+++ b/llvm/test/Transforms/LowerExpectIntrinsic/basic.ll
@@ -7,7 +7,7 @@ entry:
%retval = alloca i32, align 4
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
- %tmp = load i32* %x.addr, align 4
+ %tmp = load i32, i32* %x.addr, align 4
%cmp = icmp sgt i32 %tmp, 1
%conv = zext i1 %cmp to i32
%conv1 = sext i32 %conv to i64
@@ -27,7 +27,7 @@ if.end: ; preds = %entry
br label %return
return: ; preds = %if.end, %if.then
- %0 = load i32* %retval
+ %0 = load i32, i32* %retval
ret i32 %0
}
@@ -41,7 +41,7 @@ entry:
%retval = alloca i32, align 4
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
- %tmp = load i32* %x.addr, align 4
+ %tmp = load i32, i32* %x.addr, align 4
%conv = sext i32 %tmp to i64
%expval = call i64 @llvm.expect.i64(i64 %conv, i64 1)
%tobool = icmp ne i64 %expval, 0
@@ -59,7 +59,7 @@ if.end: ; preds = %entry
br label %return
return: ; preds = %if.end, %if.then
- %0 = load i32* %retval
+ %0 = load i32, i32* %retval
ret i32 %0
}
@@ -69,7 +69,7 @@ entry:
%retval = alloca i32, align 4
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
- %tmp = load i32* %x.addr, align 4
+ %tmp = load i32, i32* %x.addr, align 4
%tobool = icmp ne i32 %tmp, 0
%lnot = xor i1 %tobool, true
%lnot.ext = zext i1 %lnot to i32
@@ -90,7 +90,7 @@ if.end: ; preds = %entry
br label %return
return: ; preds = %if.end, %if.then
- %0 = load i32* %retval
+ %0 = load i32, i32* %retval
ret i32 %0
}
@@ -100,7 +100,7 @@ entry:
%retval = alloca i32, align 4
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
- %tmp = load i32* %x.addr, align 4
+ %tmp = load i32, i32* %x.addr, align 4
%tobool = icmp ne i32 %tmp, 0
%lnot = xor i1 %tobool, true
%lnot1 = xor i1 %lnot, true
@@ -122,7 +122,7 @@ if.end: ; preds = %entry
br label %return
return: ; preds = %if.end, %if.then
- %0 = load i32* %retval
+ %0 = load i32, i32* %retval
ret i32 %0
}
@@ -132,7 +132,7 @@ entry:
%retval = alloca i32, align 4
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
- %tmp = load i32* %x.addr, align 4
+ %tmp = load i32, i32* %x.addr, align 4
%cmp = icmp slt i32 %tmp, 0
%conv = zext i1 %cmp to i32
%conv1 = sext i32 %conv to i64
@@ -152,7 +152,7 @@ if.end: ; preds = %entry
br label %return
return: ; preds = %if.end, %if.then
- %0 = load i32* %retval
+ %0 = load i32, i32* %retval
ret i32 %0
}
@@ -162,7 +162,7 @@ entry:
%retval = alloca i32, align 4
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
- %tmp = load i32* %x.addr, align 4
+ %tmp = load i32, i32* %x.addr, align 4
%conv = sext i32 %tmp to i64
%expval = call i64 @llvm.expect.i64(i64 %conv, i64 1)
; CHECK: !prof !2
@@ -181,7 +181,7 @@ sw.epilog: ; preds = %entry
br label %return
return: ; preds = %sw.epilog, %sw.bb
- %0 = load i32* %retval
+ %0 = load i32, i32* %retval
ret i32 %0
}
@@ -191,7 +191,7 @@ entry:
%retval = alloca i32, align 4
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
- %tmp = load i32* %x.addr, align 4
+ %tmp = load i32, i32* %x.addr, align 4
%conv = sext i32 %tmp to i64
%expval = call i64 @llvm.expect.i64(i64 %conv, i64 1)
; CHECK: !prof !3
@@ -202,7 +202,7 @@ entry:
]
sw.bb: ; preds = %entry, %entry
- %tmp1 = load i32* %x.addr, align 4
+ %tmp1 = load i32, i32* %x.addr, align 4
store i32 %tmp1, i32* %retval
br label %return
@@ -211,7 +211,7 @@ sw.epilog: ; preds = %entry
br label %return
return: ; preds = %sw.epilog, %sw.bb
- %0 = load i32* %retval
+ %0 = load i32, i32* %retval
ret i32 %0
}
@@ -221,7 +221,7 @@ entry:
%retval = alloca i32, align 4
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
- %tmp = load i32* %x.addr, align 4
+ %tmp = load i32, i32* %x.addr, align 4
%cmp = icmp sgt i32 %tmp, 1
%conv = zext i1 %cmp to i32
%expval = call i32 @llvm.expect.i32(i32 %conv, i32 1)
@@ -240,7 +240,7 @@ if.end: ; preds = %entry
br label %return
return: ; preds = %if.end, %if.then
- %0 = load i32* %retval
+ %0 = load i32, i32* %retval
ret i32 %0
}
@@ -252,7 +252,7 @@ entry:
%retval = alloca i32, align 4
%x.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
- %tmp = load i32* %x.addr, align 4
+ %tmp = load i32, i32* %x.addr, align 4
%cmp = icmp sgt i32 %tmp, 1
%expval = call i1 @llvm.expect.i1(i1 %cmp, i1 1)
; CHECK: !prof !0
@@ -269,7 +269,7 @@ if.end: ; preds = %entry
br label %return
return: ; preds = %if.end, %if.then
- %0 = load i32* %retval
+ %0 = load i32, i32* %retval
ret i32 %0
}
diff --git a/llvm/test/Transforms/LowerSwitch/2014-06-10-SwitchContiguousOpt.ll b/llvm/test/Transforms/LowerSwitch/2014-06-10-SwitchContiguousOpt.ll
index 3673c04a8b8..22173b492c4 100644
--- a/llvm/test/Transforms/LowerSwitch/2014-06-10-SwitchContiguousOpt.ll
+++ b/llvm/test/Transforms/LowerSwitch/2014-06-10-SwitchContiguousOpt.ll
@@ -6,7 +6,7 @@ entry:
%retval = alloca i32, align 4
%a.addr = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
- %0 = load i32* %a.addr, align 4
+ %0 = load i32, i32* %a.addr, align 4
switch i32 %0, label %sw.default [
i32 0, label %sw.bb
i32 1, label %sw.bb1
diff --git a/llvm/test/Transforms/LowerSwitch/2014-06-11-SwitchDefaultUnreachableOpt.ll b/llvm/test/Transforms/LowerSwitch/2014-06-11-SwitchDefaultUnreachableOpt.ll
index ecdd767bdea..2652a6cc5e0 100644
--- a/llvm/test/Transforms/LowerSwitch/2014-06-11-SwitchDefaultUnreachableOpt.ll
+++ b/llvm/test/Transforms/LowerSwitch/2014-06-11-SwitchDefaultUnreachableOpt.ll
@@ -21,7 +21,7 @@ define i32 @foo(i32 %a) {
%1 = alloca i32, align 4
%2 = alloca i32, align 4
store i32 %a, i32* %2, align 4
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
switch i32 %3, label %6 [
i32 0, label %4
i32 1, label %5
@@ -39,6 +39,6 @@ define i32 @foo(i32 %a) {
unreachable
; <label>:7
- %8 = load i32* %1
+ %8 = load i32, i32* %1
ret i32 %8
}
diff --git a/llvm/test/Transforms/Mem2Reg/2002-03-28-UninitializedVal.ll b/llvm/test/Transforms/Mem2Reg/2002-03-28-UninitializedVal.ll
index 777f3757bb8..49b560551d4 100644
--- a/llvm/test/Transforms/Mem2Reg/2002-03-28-UninitializedVal.ll
+++ b/llvm/test/Transforms/Mem2Reg/2002-03-28-UninitializedVal.ll
@@ -6,6 +6,6 @@
define i32 @test() {
; To be promoted
%X = alloca i32 ; <i32*> [#uses=1]
- %Y = load i32* %X ; <i32> [#uses=1]
+ %Y = load i32, i32* %X ; <i32> [#uses=1]
ret i32 %Y
}
diff --git a/llvm/test/Transforms/Mem2Reg/2003-04-24-MultipleIdenticalSuccessors.ll b/llvm/test/Transforms/Mem2Reg/2003-04-24-MultipleIdenticalSuccessors.ll
index f5f1ee34365..a013ff401f6 100644
--- a/llvm/test/Transforms/Mem2Reg/2003-04-24-MultipleIdenticalSuccessors.ll
+++ b/llvm/test/Transforms/Mem2Reg/2003-04-24-MultipleIdenticalSuccessors.ll
@@ -10,7 +10,7 @@ B2: ; preds = %0
store i32 2, i32* %X
br i1 %c2, label %Exit, label %Exit
Exit: ; preds = %B2, %B2, %0
- %Y = load i32* %X ; <i32> [#uses=1]
+ %Y = load i32, i32* %X ; <i32> [#uses=1]
ret i32 %Y
}
diff --git a/llvm/test/Transforms/Mem2Reg/2003-06-26-IterativePromote.ll b/llvm/test/Transforms/Mem2Reg/2003-06-26-IterativePromote.ll
index e82caa9fe0f..de7280e4d8e 100644
--- a/llvm/test/Transforms/Mem2Reg/2003-06-26-IterativePromote.ll
+++ b/llvm/test/Transforms/Mem2Reg/2003-06-26-IterativePromote.ll
@@ -7,10 +7,10 @@ define i32 @test2() {
%p = alloca i32* ; <i32**> [#uses=2]
store i32 0, i32* %a
store i32* %a, i32** %p
- %tmp.0 = load i32** %p ; <i32*> [#uses=1]
- %tmp.1 = load i32* %tmp.0 ; <i32> [#uses=1]
+ %tmp.0 = load i32*, i32** %p ; <i32*> [#uses=1]
+ %tmp.1 = load i32, i32* %tmp.0 ; <i32> [#uses=1]
store i32 %tmp.1, i32* %result
- %tmp.2 = load i32* %result ; <i32> [#uses=1]
+ %tmp.2 = load i32, i32* %result ; <i32> [#uses=1]
ret i32 %tmp.2
}
diff --git a/llvm/test/Transforms/Mem2Reg/2003-10-05-DeadPHIInsertion.ll b/llvm/test/Transforms/Mem2Reg/2003-10-05-DeadPHIInsertion.ll
index 1d38efc7457..8d55a1d0fa1 100644
--- a/llvm/test/Transforms/Mem2Reg/2003-10-05-DeadPHIInsertion.ll
+++ b/llvm/test/Transforms/Mem2Reg/2003-10-05-DeadPHIInsertion.ll
@@ -9,11 +9,11 @@ define void @test(i32 %B, i1 %C) {
br i1 %C, label %L1, label %L2
L1: ; preds = %0
store i32 %B, i32* %A
- %D = load i32* %A ; <i32> [#uses=1]
+ %D = load i32, i32* %A ; <i32> [#uses=1]
call void @test( i32 %D, i1 false )
br label %L3
L2: ; preds = %0
- %E = load i32* %A ; <i32> [#uses=1]
+ %E = load i32, i32* %A ; <i32> [#uses=1]
call void @test( i32 %E, i1 true )
br label %L3
L3: ; preds = %L2, %L1
diff --git a/llvm/test/Transforms/Mem2Reg/2005-06-30-ReadBeforeWrite.ll b/llvm/test/Transforms/Mem2Reg/2005-06-30-ReadBeforeWrite.ll
index b064b13ac8b..f0f1fdc7ee2 100644
--- a/llvm/test/Transforms/Mem2Reg/2005-06-30-ReadBeforeWrite.ll
+++ b/llvm/test/Transforms/Mem2Reg/2005-06-30-ReadBeforeWrite.ll
@@ -14,31 +14,31 @@ entry:
store i32 0, i32* %i
br label %loopentry
loopentry: ; preds = %endif, %entry
- %tmp.0 = load i32* %n_addr ; <i32> [#uses=1]
+ %tmp.0 = load i32, i32* %n_addr ; <i32> [#uses=1]
%tmp.1 = add i32 %tmp.0, 1 ; <i32> [#uses=1]
- %tmp.2 = load i32* %i ; <i32> [#uses=1]
+ %tmp.2 = load i32, i32* %i ; <i32> [#uses=1]
%tmp.3 = icmp sgt i32 %tmp.1, %tmp.2 ; <i1> [#uses=2]
%tmp.4 = zext i1 %tmp.3 to i32 ; <i32> [#uses=0]
br i1 %tmp.3, label %no_exit, label %return
no_exit: ; preds = %loopentry
- %tmp.5 = load i32* %undef ; <i32> [#uses=1]
+ %tmp.5 = load i32, i32* %undef ; <i32> [#uses=1]
store i32 %tmp.5, i32* %out
store i32 0, i32* %undef
- %tmp.6 = load i32* %i ; <i32> [#uses=1]
+ %tmp.6 = load i32, i32* %i ; <i32> [#uses=1]
%tmp.7 = icmp sgt i32 %tmp.6, 0 ; <i1> [#uses=2]
%tmp.8 = zext i1 %tmp.7 to i32 ; <i32> [#uses=0]
br i1 %tmp.7, label %then, label %endif
then: ; preds = %no_exit
- %tmp.9 = load i8** %p_addr ; <i8*> [#uses=1]
- %tmp.10 = load i32* %i ; <i32> [#uses=1]
+ %tmp.9 = load i8*, i8** %p_addr ; <i8*> [#uses=1]
+ %tmp.10 = load i32, i32* %i ; <i32> [#uses=1]
%tmp.11 = sub i32 %tmp.10, 1 ; <i32> [#uses=1]
%tmp.12 = getelementptr i8, i8* %tmp.9, i32 %tmp.11 ; <i8*> [#uses=1]
- %tmp.13 = load i32* %out ; <i32> [#uses=1]
+ %tmp.13 = load i32, i32* %out ; <i32> [#uses=1]
%tmp.14 = trunc i32 %tmp.13 to i8 ; <i8> [#uses=1]
store i8 %tmp.14, i8* %tmp.12
br label %endif
endif: ; preds = %then, %no_exit
- %tmp.15 = load i32* %i ; <i32> [#uses=1]
+ %tmp.15 = load i32, i32* %i ; <i32> [#uses=1]
%inc = add i32 %tmp.15, 1 ; <i32> [#uses=1]
store i32 %inc, i32* %i
br label %loopentry
diff --git a/llvm/test/Transforms/Mem2Reg/2005-11-28-Crash.ll b/llvm/test/Transforms/Mem2Reg/2005-11-28-Crash.ll
index 8fd3351ba42..4b1d7f6651f 100644
--- a/llvm/test/Transforms/Mem2Reg/2005-11-28-Crash.ll
+++ b/llvm/test/Transforms/Mem2Reg/2005-11-28-Crash.ll
@@ -41,7 +41,7 @@ endif.3: ; preds = %then.3, %endif.1
loopexit: ; preds = %loopentry
br label %endif.4
then.4: ; No predecessors!
- %tmp.61 = load i32* %flags ; <i32> [#uses=0]
+ %tmp.61 = load i32, i32* %flags ; <i32> [#uses=0]
br label %out
dead_block_after_goto: ; No predecessors!
br label %endif.4
diff --git a/llvm/test/Transforms/Mem2Reg/2007-08-27-VolatileLoadsStores.ll b/llvm/test/Transforms/Mem2Reg/2007-08-27-VolatileLoadsStores.ll
index ea581d197e5..812b8b6b5e5 100644
--- a/llvm/test/Transforms/Mem2Reg/2007-08-27-VolatileLoadsStores.ll
+++ b/llvm/test/Transforms/Mem2Reg/2007-08-27-VolatileLoadsStores.ll
@@ -22,7 +22,7 @@ entry:
br i1 %toBool, label %bb, label %bb5
bb: ; preds = %entry
- %tmp4 = load volatile i32* %v, align 4 ; <i32> [#uses=1]
+ %tmp4 = load volatile i32, i32* %v, align 4 ; <i32> [#uses=1]
store i32 %tmp4, i32* %tmp, align 4
br label %bb6
@@ -33,12 +33,12 @@ bb5: ; preds = %entry
br label %bb6
bb6: ; preds = %bb5, %bb
- %tmp7 = load i32* %tmp, align 4 ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
store i32 %tmp7, i32* %retval, align 4
br label %return
return: ; preds = %bb6
- %retval8 = load i32* %retval ; <i32> [#uses=1]
+ %retval8 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval8
}
diff --git a/llvm/test/Transforms/Mem2Reg/ConvertDebugInfo.ll b/llvm/test/Transforms/Mem2Reg/ConvertDebugInfo.ll
index a7369c0902b..16067f573dd 100644
--- a/llvm/test/Transforms/Mem2Reg/ConvertDebugInfo.ll
+++ b/llvm/test/Transforms/Mem2Reg/ConvertDebugInfo.ll
@@ -15,18 +15,18 @@ entry:
store i32 %i, i32* %i_addr
call void @llvm.dbg.declare(metadata double* %j_addr, metadata !9, metadata !{}), !dbg !8
store double %j, double* %j_addr
- %1 = load i32* %i_addr, align 4, !dbg !10 ; <i32> [#uses=1]
+ %1 = load i32, i32* %i_addr, align 4, !dbg !10 ; <i32> [#uses=1]
%2 = add nsw i32 %1, 1, !dbg !10 ; <i32> [#uses=1]
%3 = sitofp i32 %2 to double, !dbg !10 ; <double> [#uses=1]
- %4 = load double* %j_addr, align 8, !dbg !10 ; <double> [#uses=1]
+ %4 = load double, double* %j_addr, align 8, !dbg !10 ; <double> [#uses=1]
%5 = fadd double %3, %4, !dbg !10 ; <double> [#uses=1]
store double %5, double* %0, align 8, !dbg !10
- %6 = load double* %0, align 8, !dbg !10 ; <double> [#uses=1]
+ %6 = load double, double* %0, align 8, !dbg !10 ; <double> [#uses=1]
store double %6, double* %retval, align 8, !dbg !10
br label %return, !dbg !10
return: ; preds = %entry
- %retval1 = load double* %retval, !dbg !10 ; <double> [#uses=1]
+ %retval1 = load double, double* %retval, !dbg !10 ; <double> [#uses=1]
ret double %retval1, !dbg !10
}
diff --git a/llvm/test/Transforms/Mem2Reg/ConvertDebugInfo2.ll b/llvm/test/Transforms/Mem2Reg/ConvertDebugInfo2.ll
index 76d2a1aa7d2..b8543bc693c 100644
--- a/llvm/test/Transforms/Mem2Reg/ConvertDebugInfo2.ll
+++ b/llvm/test/Transforms/Mem2Reg/ConvertDebugInfo2.ll
@@ -13,16 +13,16 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
call void @llvm.dbg.declare(metadata i32* %a_addr, metadata !0, metadata !{}), !dbg !7
store i32 %a, i32* %a_addr
- %0 = load i32* %a_addr, align 4, !dbg !8 ; <i32> [#uses=1]
+ %0 = load i32, i32* %a_addr, align 4, !dbg !8 ; <i32> [#uses=1]
call void @llvm.dbg.declare(metadata i32* %x_addr.i, metadata !9, metadata !{}) nounwind, !dbg !15
store i32 %0, i32* %x_addr.i
call void @llvm.dbg.declare(metadata i64* %y_addr.i, metadata !16, metadata !{}) nounwind, !dbg !15
store i64 55, i64* %y_addr.i
call void @llvm.dbg.declare(metadata i8** %z_addr.i, metadata !17, metadata !{}) nounwind, !dbg !15
store i8* bitcast (void (i32)* @baz to i8*), i8** %z_addr.i
- %1 = load i32* %x_addr.i, align 4, !dbg !18 ; <i32> [#uses=1]
- %2 = load i64* %y_addr.i, align 8, !dbg !18 ; <i64> [#uses=1]
- %3 = load i8** %z_addr.i, align 8, !dbg !18 ; <i8*> [#uses=1]
+ %1 = load i32, i32* %x_addr.i, align 4, !dbg !18 ; <i32> [#uses=1]
+ %2 = load i64, i64* %y_addr.i, align 8, !dbg !18 ; <i64> [#uses=1]
+ %3 = load i8*, i8** %z_addr.i, align 8, !dbg !18 ; <i8*> [#uses=1]
call void @foo(i32 %1, i64 %2, i8* %3) nounwind, !dbg !18
br label %return, !dbg !19
diff --git a/llvm/test/Transforms/Mem2Reg/PromoteMemToRegister.ll b/llvm/test/Transforms/Mem2Reg/PromoteMemToRegister.ll
index 1be6b03beec..b7f39947afb 100644
--- a/llvm/test/Transforms/Mem2Reg/PromoteMemToRegister.ll
+++ b/llvm/test/Transforms/Mem2Reg/PromoteMemToRegister.ll
@@ -6,12 +6,12 @@ define double @testfunc(i32 %i, double %j) {
%J = alloca double ; <double*> [#uses=2]
store i32 %i, i32* %I
store double %j, double* %J
- %t1 = load i32* %I ; <i32> [#uses=1]
+ %t1 = load i32, i32* %I ; <i32> [#uses=1]
%t2 = add i32 %t1, 1 ; <i32> [#uses=1]
store i32 %t2, i32* %I
- %t3 = load i32* %I ; <i32> [#uses=1]
+ %t3 = load i32, i32* %I ; <i32> [#uses=1]
%t4 = sitofp i32 %t3 to double ; <double> [#uses=1]
- %t5 = load double* %J ; <double> [#uses=1]
+ %t5 = load double, double* %J ; <double> [#uses=1]
%t6 = fmul double %t4, %t5 ; <double> [#uses=1]
ret double %t6
}
diff --git a/llvm/test/Transforms/Mem2Reg/UndefValuesMerge.ll b/llvm/test/Transforms/Mem2Reg/UndefValuesMerge.ll
index 5013229b77f..eeeb72f8e5a 100644
--- a/llvm/test/Transforms/Mem2Reg/UndefValuesMerge.ll
+++ b/llvm/test/Transforms/Mem2Reg/UndefValuesMerge.ll
@@ -7,7 +7,7 @@ T: ; preds = %0
store i32 %i, i32* %I
br label %Cont
Cont: ; preds = %T, %0
- %Y = load i32* %I ; <i32> [#uses=1]
+ %Y = load i32, i32* %I ; <i32> [#uses=1]
ret i32 %Y
}
diff --git a/llvm/test/Transforms/Mem2Reg/atomic.ll b/llvm/test/Transforms/Mem2Reg/atomic.ll
index 5bc9e9281b2..f20043d9716 100644
--- a/llvm/test/Transforms/Mem2Reg/atomic.ll
+++ b/llvm/test/Transforms/Mem2Reg/atomic.ll
@@ -7,6 +7,6 @@ define i32 @test1(i32 %x) {
; CHECK: ret i32 %x
%a = alloca i32
store atomic i32 %x, i32* %a seq_cst, align 4
- %r = load atomic i32* %a seq_cst, align 4
+ %r = load atomic i32, i32* %a seq_cst, align 4
ret i32 %r
}
diff --git a/llvm/test/Transforms/Mem2Reg/crash.ll b/llvm/test/Transforms/Mem2Reg/crash.ll
index 59e2c0b3108..a4a31b112ee 100644
--- a/llvm/test/Transforms/Mem2Reg/crash.ll
+++ b/llvm/test/Transforms/Mem2Reg/crash.ll
@@ -14,7 +14,7 @@ invcont2:
br label %bb15
bb15:
- %B = load i32* %whichFlag
+ %B = load i32, i32* %whichFlag
ret i32 %B
lpad86:
@@ -33,11 +33,11 @@ entry:
br label %bb15
bb15:
- %B = load i32* %whichFlag
+ %B = load i32, i32* %whichFlag
ret i32 %B
invcont2:
- %C = load i32* %whichFlag
+ %C = load i32, i32* %whichFlag
store i32 %C, i32* %whichFlag
br label %bb15
}
diff --git a/llvm/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll b/llvm/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll
index e28b24aa94e..ffbb2992557 100644
--- a/llvm/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll
+++ b/llvm/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll
@@ -15,7 +15,7 @@ entry:
%b_i8 = bitcast %b* %b_var to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b_i8, i8* %a_i8, i32 4, i32 1, i1 false)
%tmp1 = getelementptr %b, %b* %b_var, i32 0, i32 0
- %tmp2 = load float* %tmp1
+ %tmp2 = load float, float* %tmp1
ret float %tmp2
}
diff --git a/llvm/test/Transforms/MemCpyOpt/2011-06-02-CallSlotOverwritten.ll b/llvm/test/Transforms/MemCpyOpt/2011-06-02-CallSlotOverwritten.ll
index bf04814b7e6..8e4a0230d7f 100644
--- a/llvm/test/Transforms/MemCpyOpt/2011-06-02-CallSlotOverwritten.ll
+++ b/llvm/test/Transforms/MemCpyOpt/2011-06-02-CallSlotOverwritten.ll
@@ -24,13 +24,13 @@ define i32 @foo() nounwind {
%bit1 = bitcast %struct1* %x to i64*
%bit2 = bitcast %struct2* %y to i64*
- %load = load i64* %bit1, align 8
+ %load = load i64, i64* %bit1, align 8
store i64 %load, i64* %bit2, align 8
-; CHECK: %load = load i64* %bit1, align 8
+; CHECK: %load = load i64, i64* %bit1, align 8
; CHECK: store i64 %load, i64* %bit2, align 8
%gep1 = getelementptr %struct2, %struct2* %y, i32 0, i32 0, i32 0
- %ret = load i32* %gep1
+ %ret = load i32, i32* %gep1
ret i32 %ret
}
diff --git a/llvm/test/Transforms/MemCpyOpt/atomic.ll b/llvm/test/Transforms/MemCpyOpt/atomic.ll
index ee7e5488ba3..5be6b1555b5 100644
--- a/llvm/test/Transforms/MemCpyOpt/atomic.ll
+++ b/llvm/test/Transforms/MemCpyOpt/atomic.ll
@@ -33,7 +33,7 @@ define void @test2() nounwind uwtable ssp {
%new = alloca i32
call void @otherf(i32* nocapture %old)
store atomic i32 0, i32* @x unordered, align 4
- %v = load i32* %old
+ %v = load i32, i32* %old
store i32 %v, i32* %new
call void @otherf(i32* nocapture %new)
ret void
diff --git a/llvm/test/Transforms/MemCpyOpt/loadstore-sret.ll b/llvm/test/Transforms/MemCpyOpt/loadstore-sret.ll
index 888701da5f4..55cbe59651f 100644
--- a/llvm/test/Transforms/MemCpyOpt/loadstore-sret.ll
+++ b/llvm/test/Transforms/MemCpyOpt/loadstore-sret.ll
@@ -14,7 +14,7 @@ _ZNSt8auto_ptrIiED1Ev.exit:
call void @_Z3barv(%"class.std::auto_ptr"* sret %temp.lvalue)
%tmp.i.i = getelementptr inbounds %"class.std::auto_ptr", %"class.std::auto_ptr"* %temp.lvalue, i64 0, i32 0
; CHECK-NOT: load
- %tmp2.i.i = load i32** %tmp.i.i, align 8
+ %tmp2.i.i = load i32*, i32** %tmp.i.i, align 8
%tmp.i.i4 = getelementptr inbounds %"class.std::auto_ptr", %"class.std::auto_ptr"* %agg.result, i64 0, i32 0
; CHECK-NOT: store
store i32* %tmp2.i.i, i32** %tmp.i.i4, align 8
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy.ll b/llvm/test/Transforms/MemCpyOpt/memcpy.ll
index f8c33f04bb2..72445cf8a7f 100644
--- a/llvm/test/Transforms/MemCpyOpt/memcpy.ll
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy.ll
@@ -196,7 +196,7 @@ define void @test10(%opaque* noalias nocapture sret %x, i32 %y) {
%a = alloca i32, align 4
store i32 %y, i32* %a
call void @foo(i32* noalias nocapture %a)
- %c = load i32* %a
+ %c = load i32, i32* %a
%d = bitcast %opaque* %x to i32*
store i32 %c, i32* %d
ret void
diff --git a/llvm/test/Transforms/MemCpyOpt/sret.ll b/llvm/test/Transforms/MemCpyOpt/sret.ll
index 3f3c13dc15e..34ba4c4d8bb 100644
--- a/llvm/test/Transforms/MemCpyOpt/sret.ll
+++ b/llvm/test/Transforms/MemCpyOpt/sret.ll
@@ -10,12 +10,12 @@ entry:
%iz = alloca %0
%memtmp = alloca %0, align 16
%tmp1 = getelementptr %0, %0* %z, i32 0, i32 1
- %tmp2 = load x86_fp80* %tmp1, align 16
+ %tmp2 = load x86_fp80, x86_fp80* %tmp1, align 16
%tmp3 = fsub x86_fp80 0xK80000000000000000000, %tmp2
%tmp4 = getelementptr %0, %0* %iz, i32 0, i32 1
%real = getelementptr %0, %0* %iz, i32 0, i32 0
%tmp7 = getelementptr %0, %0* %z, i32 0, i32 0
- %tmp8 = load x86_fp80* %tmp7, align 16
+ %tmp8 = load x86_fp80, x86_fp80* %tmp7, align 16
store x86_fp80 %tmp3, x86_fp80* %real, align 16
store x86_fp80 %tmp8, x86_fp80* %tmp4, align 16
call void @ccoshl(%0* noalias sret %memtmp, %0* byval align 8 %iz) nounwind
diff --git a/llvm/test/Transforms/MergeFunc/2011-02-08-RemoveEqual.ll b/llvm/test/Transforms/MergeFunc/2011-02-08-RemoveEqual.ll
index 689dc22bf00..e684da8c89c 100644
--- a/llvm/test/Transforms/MergeFunc/2011-02-08-RemoveEqual.ll
+++ b/llvm/test/Transforms/MergeFunc/2011-02-08-RemoveEqual.ll
@@ -36,15 +36,15 @@ entry:
store %"struct.kc::impl_fileline_FileLine"* %this, %"struct.kc::impl_fileline_FileLine"** %this_addr
store %"struct.kc::impl_casestring__Str"* %_file, %"struct.kc::impl_casestring__Str"** %_file_addr
store i32 %_line, i32* %_line_addr
- %0 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
+ %0 = load %"struct.kc::impl_fileline_FileLine"*, %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
%1 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %0, i32 0, i32 0
call void @_ZN2kc13impl_filelineC2Ev() nounwind
- %2 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
+ %2 = load %"struct.kc::impl_fileline_FileLine"*, %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
%3 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %2, i32 0, i32 0
%4 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %3, i32 0, i32 0
%5 = getelementptr inbounds %"struct.kc::impl_abstract_phylum", %"struct.kc::impl_abstract_phylum"* %4, i32 0, i32 0
store i32 (...)** getelementptr inbounds ([13 x i32 (...)*]* @_ZTVN2kc22impl_fileline_FileLineE, i32 0, i32 2), i32 (...)*** %5, align 4
- %6 = load %"struct.kc::impl_casestring__Str"** %_file_addr, align 4
+ %6 = load %"struct.kc::impl_casestring__Str"*, %"struct.kc::impl_casestring__Str"** %_file_addr, align 4
%7 = icmp eq %"struct.kc::impl_casestring__Str"* %6, null
br i1 %7, label %bb, label %bb1
@@ -57,20 +57,20 @@ invcont: ; preds = %bb
br label %bb2
bb1: ; preds = %entry
- %9 = load %"struct.kc::impl_casestring__Str"** %_file_addr, align 4
+ %9 = load %"struct.kc::impl_casestring__Str"*, %"struct.kc::impl_casestring__Str"** %_file_addr, align 4
store %"struct.kc::impl_casestring__Str"* %9, %"struct.kc::impl_casestring__Str"** %iftmp.99, align 4
br label %bb2
bb2: ; preds = %bb1, %invcont
- %10 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
+ %10 = load %"struct.kc::impl_fileline_FileLine"*, %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
%11 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %10, i32 0, i32 0
%12 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %11, i32 0, i32 1
- %13 = load %"struct.kc::impl_casestring__Str"** %iftmp.99, align 4
+ %13 = load %"struct.kc::impl_casestring__Str"*, %"struct.kc::impl_casestring__Str"** %iftmp.99, align 4
store %"struct.kc::impl_casestring__Str"* %13, %"struct.kc::impl_casestring__Str"** %12, align 4
- %14 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
+ %14 = load %"struct.kc::impl_fileline_FileLine"*, %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
%15 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %14, i32 0, i32 0
%16 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %15, i32 0, i32 2
- %17 = load i32* %_line_addr, align 4
+ %17 = load i32, i32* %_line_addr, align 4
store i32 %17, i32* %16, align 4
ret void
@@ -79,21 +79,21 @@ lpad: ; preds = %bb
cleanup
%exn = extractvalue { i8*, i32 } %eh_ptr, 0
store i8* %exn, i8** %eh_exception
- %eh_ptr4 = load i8** %eh_exception
+ %eh_ptr4 = load i8*, i8** %eh_exception
%eh_select5 = extractvalue { i8*, i32 } %eh_ptr, 1
store i32 %eh_select5, i32* %eh_selector
- %eh_select = load i32* %eh_selector
+ %eh_select = load i32, i32* %eh_selector
store i32 %eh_select, i32* %save_filt.150, align 4
- %eh_value = load i8** %eh_exception
+ %eh_value = load i8*, i8** %eh_exception
store i8* %eh_value, i8** %save_eptr.149, align 4
- %18 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
+ %18 = load %"struct.kc::impl_fileline_FileLine"*, %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
%19 = bitcast %"struct.kc::impl_fileline_FileLine"* %18 to %"struct.kc::impl_fileline"*
call void @_ZN2kc13impl_filelineD2Ev(%"struct.kc::impl_fileline"* %19) nounwind
- %20 = load i8** %save_eptr.149, align 4
+ %20 = load i8*, i8** %save_eptr.149, align 4
store i8* %20, i8** %eh_exception, align 4
- %21 = load i32* %save_filt.150, align 4
+ %21 = load i32, i32* %save_filt.150, align 4
store i32 %21, i32* %eh_selector, align 4
- %eh_ptr6 = load i8** %eh_exception
+ %eh_ptr6 = load i8*, i8** %eh_exception
call void @_Unwind_Resume_or_Rethrow()
unreachable
}
@@ -105,7 +105,7 @@ entry:
%this_addr = alloca %"struct.kc::impl_fileline"*, align 4
%"alloca point" = bitcast i32 0 to i32
store %"struct.kc::impl_fileline"* %this, %"struct.kc::impl_fileline"** %this_addr
- %0 = load %"struct.kc::impl_fileline"** %this_addr, align 4
+ %0 = load %"struct.kc::impl_fileline"*, %"struct.kc::impl_fileline"** %this_addr, align 4
%1 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %0, i32 0, i32 0
%2 = getelementptr inbounds %"struct.kc::impl_abstract_phylum", %"struct.kc::impl_abstract_phylum"* %1, i32 0, i32 0
store i32 (...)** getelementptr inbounds ([13 x i32 (...)*]* @_ZTVN2kc13impl_filelineE, i32 0, i32 2), i32 (...)*** %2, align 4
@@ -114,7 +114,7 @@ entry:
br i1 %toBool, label %bb1, label %return
bb1: ; preds = %entry
- %4 = load %"struct.kc::impl_fileline"** %this_addr, align 4
+ %4 = load %"struct.kc::impl_fileline"*, %"struct.kc::impl_fileline"** %this_addr, align 4
%5 = bitcast %"struct.kc::impl_fileline"* %4 to i8*
call void @_ZdlPv() nounwind
br label %return
@@ -130,7 +130,7 @@ entry:
%this_addr = alloca %"struct.kc::impl_fileline"*, align 4
%"alloca point" = bitcast i32 0 to i32
store %"struct.kc::impl_fileline"* %this, %"struct.kc::impl_fileline"** %this_addr
- %0 = load %"struct.kc::impl_fileline"** %this_addr, align 4
+ %0 = load %"struct.kc::impl_fileline"*, %"struct.kc::impl_fileline"** %this_addr, align 4
%1 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %0, i32 0, i32 0
%2 = getelementptr inbounds %"struct.kc::impl_abstract_phylum", %"struct.kc::impl_abstract_phylum"* %1, i32 0, i32 0
store i32 (...)** getelementptr inbounds ([13 x i32 (...)*]* @_ZTVN2kc13impl_filelineE, i32 0, i32 2), i32 (...)*** %2, align 4
@@ -139,7 +139,7 @@ entry:
br i1 %toBool, label %bb1, label %return
bb1: ; preds = %entry
- %4 = load %"struct.kc::impl_fileline"** %this_addr, align 4
+ %4 = load %"struct.kc::impl_fileline"*, %"struct.kc::impl_fileline"** %this_addr, align 4
%5 = bitcast %"struct.kc::impl_fileline"* %4 to i8*
call void @_ZdlPv() nounwind
br label %return
@@ -162,15 +162,15 @@ entry:
store %"struct.kc::impl_fileline_FileLine"* %this, %"struct.kc::impl_fileline_FileLine"** %this_addr
store %"struct.kc::impl_casestring__Str"* %_file, %"struct.kc::impl_casestring__Str"** %_file_addr
store i32 %_line, i32* %_line_addr
- %0 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
+ %0 = load %"struct.kc::impl_fileline_FileLine"*, %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
%1 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %0, i32 0, i32 0
call void @_ZN2kc13impl_filelineC2Ev() nounwind
- %2 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
+ %2 = load %"struct.kc::impl_fileline_FileLine"*, %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
%3 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %2, i32 0, i32 0
%4 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %3, i32 0, i32 0
%5 = getelementptr inbounds %"struct.kc::impl_abstract_phylum", %"struct.kc::impl_abstract_phylum"* %4, i32 0, i32 0
store i32 (...)** getelementptr inbounds ([13 x i32 (...)*]* @_ZTVN2kc22impl_fileline_FileLineE, i32 0, i32 2), i32 (...)*** %5, align 4
- %6 = load %"struct.kc::impl_casestring__Str"** %_file_addr, align 4
+ %6 = load %"struct.kc::impl_casestring__Str"*, %"struct.kc::impl_casestring__Str"** %_file_addr, align 4
%7 = icmp eq %"struct.kc::impl_casestring__Str"* %6, null
br i1 %7, label %bb, label %bb1
@@ -183,20 +183,20 @@ invcont: ; preds = %bb
br label %bb2
bb1: ; preds = %entry
- %9 = load %"struct.kc::impl_casestring__Str"** %_file_addr, align 4
+ %9 = load %"struct.kc::impl_casestring__Str"*, %"struct.kc::impl_casestring__Str"** %_file_addr, align 4
store %"struct.kc::impl_casestring__Str"* %9, %"struct.kc::impl_casestring__Str"** %iftmp.99, align 4
br label %bb2
bb2: ; preds = %bb1, %invcont
- %10 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
+ %10 = load %"struct.kc::impl_fileline_FileLine"*, %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
%11 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %10, i32 0, i32 0
%12 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %11, i32 0, i32 1
- %13 = load %"struct.kc::impl_casestring__Str"** %iftmp.99, align 4
+ %13 = load %"struct.kc::impl_casestring__Str"*, %"struct.kc::impl_casestring__Str"** %iftmp.99, align 4
store %"struct.kc::impl_casestring__Str"* %13, %"struct.kc::impl_casestring__Str"** %12, align 4
- %14 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
+ %14 = load %"struct.kc::impl_fileline_FileLine"*, %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
%15 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %14, i32 0, i32 0
%16 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %15, i32 0, i32 2
- %17 = load i32* %_line_addr, align 4
+ %17 = load i32, i32* %_line_addr, align 4
store i32 %17, i32* %16, align 4
ret void
@@ -205,21 +205,21 @@ lpad: ; preds = %bb
cleanup
%exn = extractvalue { i8*, i32 } %eh_ptr, 0
store i8* %exn, i8** %eh_exception
- %eh_ptr4 = load i8** %eh_exception
+ %eh_ptr4 = load i8*, i8** %eh_exception
%eh_select5 = extractvalue { i8*, i32 } %eh_ptr, 1
store i32 %eh_select5, i32* %eh_selector
- %eh_select = load i32* %eh_selector
+ %eh_select = load i32, i32* %eh_selector
store i32 %eh_select, i32* %save_filt.148, align 4
- %eh_value = load i8** %eh_exception
+ %eh_value = load i8*, i8** %eh_exception
store i8* %eh_value, i8** %save_eptr.147, align 4
- %18 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
+ %18 = load %"struct.kc::impl_fileline_FileLine"*, %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
%19 = bitcast %"struct.kc::impl_fileline_FileLine"* %18 to %"struct.kc::impl_fileline"*
call void @_ZN2kc13impl_filelineD2Ev(%"struct.kc::impl_fileline"* %19) nounwind
- %20 = load i8** %save_eptr.147, align 4
+ %20 = load i8*, i8** %save_eptr.147, align 4
store i8* %20, i8** %eh_exception, align 4
- %21 = load i32* %save_filt.148, align 4
+ %21 = load i32, i32* %save_filt.148, align 4
store i32 %21, i32* %eh_selector, align 4
- %eh_ptr6 = load i8** %eh_exception
+ %eh_ptr6 = load i8*, i8** %eh_exception
call void @_Unwind_Resume_or_Rethrow()
unreachable
}
diff --git a/llvm/test/Transforms/MergeFunc/address-spaces.ll b/llvm/test/Transforms/MergeFunc/address-spaces.ll
index c8b6f6eb4a1..1cfecae979d 100644
--- a/llvm/test/Transforms/MergeFunc/address-spaces.ll
+++ b/llvm/test/Transforms/MergeFunc/address-spaces.ll
@@ -10,7 +10,7 @@ define i32 @store_as0(i32* %x) {
; CHECK-LABEL: @store_as0(
; CHECK: call void @foo(
%gep = getelementptr i32, i32* %x, i32 4
- %y = load i32* %gep
+ %y = load i32, i32* %gep
call void @foo(i32 %y) nounwind
ret i32 %y
}
@@ -19,7 +19,7 @@ define i32 @store_as1(i32 addrspace(1)* %x) {
; CHECK-LABEL: @store_as1(
; CHECK: call void @foo(
%gep = getelementptr i32, i32 addrspace(1)* %x, i32 4
- %y = load i32 addrspace(1)* %gep
+ %y = load i32, i32 addrspace(1)* %gep
call void @foo(i32 %y) nounwind
ret i32 %y
}
@@ -28,7 +28,7 @@ define i32 @store_as2(i32 addrspace(2)* %x) {
; CHECK-LABEL: @store_as2(
; CHECK: call void @foo(
%gep = getelementptr i32, i32 addrspace(2)* %x, i32 4
- %y = load i32 addrspace(2)* %gep
+ %y = load i32, i32 addrspace(2)* %gep
call void @foo(i32 %y) nounwind
ret i32 %y
}
diff --git a/llvm/test/Transforms/MergeFunc/crash.ll b/llvm/test/Transforms/MergeFunc/crash.ll
index bc273803fd9..3319c8b0ba4 100644
--- a/llvm/test/Transforms/MergeFunc/crash.ll
+++ b/llvm/test/Transforms/MergeFunc/crash.ll
@@ -22,7 +22,7 @@ define internal i32 @func1(i32* %ptr, { i32, i32 }* nocapture %method) align 2 {
define internal i32 @func10(%.qux.2496* nocapture %this) align 2 {
%1 = getelementptr inbounds %.qux.2496, %.qux.2496* %this, i32 0, i32 1, i32 1
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
ret i32 %2
}
@@ -41,6 +41,6 @@ define internal i32* @func34(%.qux.2585* nocapture %this) align 2 {
define internal i8* @func35(%.qux.2585* nocapture %this) align 2 {
%1 = getelementptr inbounds %.qux.2585, %.qux.2585* %this, i32 0, i32 2
- %2 = load i8** %1, align 4
+ %2 = load i8*, i8** %1, align 4
ret i8* %2
}
diff --git a/llvm/test/Transforms/MergeFunc/inttoptr-address-space.ll b/llvm/test/Transforms/MergeFunc/inttoptr-address-space.ll
index f69dfeb667b..5f672debd91 100644
--- a/llvm/test/Transforms/MergeFunc/inttoptr-address-space.ll
+++ b/llvm/test/Transforms/MergeFunc/inttoptr-address-space.ll
@@ -12,7 +12,7 @@ target datalayout = "e-p:32:32:32-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-
define internal i32 @func10(%.qux.2496 addrspace(1)* nocapture %this) align 2 {
bb:
%tmp = getelementptr inbounds %.qux.2496, %.qux.2496 addrspace(1)* %this, i32 0, i32 1, i32 1
- %tmp1 = load i32 addrspace(1)* %tmp, align 4
+ %tmp1 = load i32, i32 addrspace(1)* %tmp, align 4
ret i32 %tmp1
}
@@ -24,6 +24,6 @@ bb:
; CHECK: %[[V3:.+]] = tail call i32 @func10(%.qux.2496 addrspace(1)* %[[V2]])
; CHECK: %{{.*}} = inttoptr i32 %[[V3]] to i8*
%tmp = getelementptr inbounds %.qux.2585, %.qux.2585 addrspace(1)* %this, i32 0, i32 2
- %tmp1 = load i8* addrspace(1)* %tmp, align 4
+ %tmp1 = load i8*, i8* addrspace(1)* %tmp, align 4
ret i8* %tmp1
}
diff --git a/llvm/test/Transforms/MergeFunc/inttoptr.ll b/llvm/test/Transforms/MergeFunc/inttoptr.ll
index be8367f4f2a..5e952757396 100644
--- a/llvm/test/Transforms/MergeFunc/inttoptr.ll
+++ b/llvm/test/Transforms/MergeFunc/inttoptr.ll
@@ -24,7 +24,7 @@ bb2: ; preds = %bb1
define internal i32 @func10(%.qux.2496* nocapture %this) align 2 {
bb:
%tmp = getelementptr inbounds %.qux.2496, %.qux.2496* %this, i32 0, i32 1, i32 1
- %tmp1 = load i32* %tmp, align 4
+ %tmp1 = load i32, i32* %tmp, align 4
ret i32 %tmp1
}
@@ -51,6 +51,6 @@ bb:
; CHECK: %[[V3:.+]] = tail call i32 @func10(%.qux.2496* %[[V2]])
; CHECK: %{{.*}} = inttoptr i32 %[[V3]] to i8*
%tmp = getelementptr inbounds %.qux.2585, %.qux.2585* %this, i32 0, i32 2
- %tmp1 = load i8** %tmp, align 4
+ %tmp1 = load i8*, i8** %tmp, align 4
ret i8* %tmp1
}
diff --git a/llvm/test/Transforms/MergeFunc/mergefunc-struct-return.ll b/llvm/test/Transforms/MergeFunc/mergefunc-struct-return.ll
index 5af4421bba9..14db39995e7 100644
--- a/llvm/test/Transforms/MergeFunc/mergefunc-struct-return.ll
+++ b/llvm/test/Transforms/MergeFunc/mergefunc-struct-return.ll
@@ -18,7 +18,7 @@ define %kv1 @fn1() {
%v2 = getelementptr %kv1, %kv1* %tmp, i32 0, i32 0
store i32* null, i32** %v2
call void @noop()
- %v3 = load %kv1* %tmp
+ %v3 = load %kv1, %kv1* %tmp
ret %kv1 %v3
}
@@ -35,6 +35,6 @@ define %kv2 @fn2() {
store i8* null, i8** %v2
call void @noop()
- %v3 = load %kv2* %tmp
+ %v3 = load %kv2, %kv2* %tmp
ret %kv2 %v3
}
diff --git a/llvm/test/Transforms/MergeFunc/ranges.ll b/llvm/test/Transforms/MergeFunc/ranges.ll
index d3e4d942a9d..46a0c76cc7d 100644
--- a/llvm/test/Transforms/MergeFunc/ranges.ll
+++ b/llvm/test/Transforms/MergeFunc/ranges.ll
@@ -1,31 +1,31 @@
; RUN: opt -mergefunc -S < %s | FileCheck %s
define i1 @cmp_with_range(i8*, i8*) {
- %v1 = load i8* %0, !range !0
- %v2 = load i8* %1, !range !0
+ %v1 = load i8, i8* %0, !range !0
+ %v2 = load i8, i8* %1, !range !0
%out = icmp eq i8 %v1, %v2
ret i1 %out
}
define i1 @cmp_no_range(i8*, i8*) {
; CHECK-LABEL: @cmp_no_range
-; CHECK-NEXT %v1 = load i8* %0
-; CHECK-NEXT %v2 = load i8* %1
+; CHECK-NEXT %v1 = load i8, i8* %0
+; CHECK-NEXT %v2 = load i8, i8* %1
; CHECK-NEXT %out = icmp eq i8 %v1, %v2
; CHECK-NEXT ret i1 %out
- %v1 = load i8* %0
- %v2 = load i8* %1
+ %v1 = load i8, i8* %0
+ %v2 = load i8, i8* %1
%out = icmp eq i8 %v1, %v2
ret i1 %out
}
define i1 @cmp_different_range(i8*, i8*) {
; CHECK-LABEL: @cmp_different_range
-; CHECK-NEXT: %v1 = load i8* %0, !range !1
-; CHECK-NEXT: %v2 = load i8* %1, !range !1
+; CHECK-NEXT: %v1 = load i8, i8* %0, !range !1
+; CHECK-NEXT: %v2 = load i8, i8* %1, !range !1
; CHECK-NEXT: %out = icmp eq i8 %v1, %v2
; CHECK-NEXT: ret i1 %out
- %v1 = load i8* %0, !range !1
- %v2 = load i8* %1, !range !1
+ %v1 = load i8, i8* %0, !range !1
+ %v2 = load i8, i8* %1, !range !1
%out = icmp eq i8 %v1, %v2
ret i1 %out
}
@@ -33,8 +33,8 @@ define i1 @cmp_different_range(i8*, i8*) {
define i1 @cmp_with_same_range(i8*, i8*) {
; CHECK-LABEL: @cmp_with_same_range
; CHECK: tail call i1 @cmp_with_range
- %v1 = load i8* %0, !range !0
- %v2 = load i8* %1, !range !0
+ %v1 = load i8, i8* %0, !range !0
+ %v2 = load i8, i8* %1, !range !0
%out = icmp eq i8 %v1, %v2
ret i1 %out
}
diff --git a/llvm/test/Transforms/MergeFunc/vector.ll b/llvm/test/Transforms/MergeFunc/vector.ll
index 56f74e65c60..ef1375311e6 100644
--- a/llvm/test/Transforms/MergeFunc/vector.ll
+++ b/llvm/test/Transforms/MergeFunc/vector.ll
@@ -22,7 +22,7 @@ target triple = "x86_64-unknown-linux-gnu"
define linkonce_odr void @_ZNSt6vectorIlSaIlEED1Ev(%"class.std::vector"* nocapture %this) unnamed_addr align 2 {
entry:
%tmp2.i.i = bitcast %"class.std::vector"* %this to i64**
- %tmp3.i.i = load i64** %tmp2.i.i, align 8
+ %tmp3.i.i = load i64*, i64** %tmp2.i.i, align 8
%tobool.i.i.i = icmp eq i64* %tmp3.i.i, null
br i1 %tobool.i.i.i, label %_ZNSt6vectorIlSaIlEED2Ev.exit, label %if.then.i.i.i
@@ -40,7 +40,7 @@ declare i32 @__cxa_atexit(void (i8*)*, i8*, i8*)
define linkonce_odr void @_ZNSt6vectorIPvSaIS0_EED1Ev(%"class.std::vector"* nocapture %this) unnamed_addr align 2 {
entry:
%tmp2.i.i = bitcast %"class.std::vector"* %this to i8***
- %tmp3.i.i = load i8*** %tmp2.i.i, align 8
+ %tmp3.i.i = load i8**, i8*** %tmp2.i.i, align 8
%tobool.i.i.i = icmp eq i8** %tmp3.i.i, null
br i1 %tobool.i.i.i, label %_ZNSt6vectorIPvSaIS0_EED2Ev.exit, label %if.then.i.i.i
diff --git a/llvm/test/Transforms/MetaRenamer/metarenamer.ll b/llvm/test/Transforms/MetaRenamer/metarenamer.ll
index d639ee5dcc2..e126bed9b23 100644
--- a/llvm/test/Transforms/MetaRenamer/metarenamer.ll
+++ b/llvm/test/Transforms/MetaRenamer/metarenamer.ll
@@ -59,7 +59,7 @@ define i32 @func_5_xxx(i32 %arg_1_xxx, i32 %arg_2_xxx, i32 %arg_3_xxx, i32 %arg_
br label %5
; <label>:5 ; preds = %9, %0
- %6 = load i32* %i, align 4
+ %6 = load i32, i32* %i, align 4
%7 = icmp slt i32 %6, 10
br i1 %7, label %8, label %12
@@ -67,24 +67,24 @@ define i32 @func_5_xxx(i32 %arg_1_xxx, i32 %arg_2_xxx, i32 %arg_3_xxx, i32 %arg_
br label %9
; <label>:9 ; preds = %8
- %10 = load i32* %i, align 4
+ %10 = load i32, i32* %i, align 4
%11 = add nsw i32 %10, 1
store i32 %11, i32* %i, align 4
br label %5
; <label>:12 ; preds = %5
- %13 = load i32* %local_1_xxx, align 4
- %14 = load i32* %1, align 4
+ %13 = load i32, i32* %local_1_xxx, align 4
+ %14 = load i32, i32* %1, align 4
%15 = add nsw i32 %13, %14
- %16 = load i32* %local_2_xxx, align 4
+ %16 = load i32, i32* %local_2_xxx, align 4
%17 = add nsw i32 %15, %16
- %18 = load i32* %2, align 4
+ %18 = load i32, i32* %2, align 4
%19 = add nsw i32 %17, %18
- %20 = load i32* @func_5_xxx.static_local_3_xxx, align 4
+ %20 = load i32, i32* @func_5_xxx.static_local_3_xxx, align 4
%21 = add nsw i32 %19, %20
- %22 = load i32* %3, align 4
+ %22 = load i32, i32* %3, align 4
%23 = add nsw i32 %21, %22
- %24 = load i32* %4, align 4
+ %24 = load i32, i32* %4, align 4
%25 = add nsw i32 %23, %24
ret i32 %25
}
diff --git a/llvm/test/Transforms/ObjCARC/allocas.ll b/llvm/test/Transforms/ObjCARC/allocas.ll
index 1fbb01c47d2..ac26a097d49 100644
--- a/llvm/test/Transforms/ObjCARC/allocas.ll
+++ b/llvm/test/Transforms/ObjCARC/allocas.ll
@@ -56,7 +56,7 @@ entry:
tail call i8* @objc_retain(i8* %x)
tail call i8* @objc_retain(i8* %x)
store i8* %x, i8** %A, align 8
- %y = load i8** %A
+ %y = load i8*, i8** %A
call void @use_alloca(i8** %A)
call void @objc_release(i8* %y), !clang.imprecise_release !0
call void @use_pointer(i8* %x)
@@ -78,7 +78,7 @@ entry:
tail call i8* @objc_retain(i8* %x)
tail call i8* @objc_retain(i8* %x)
store i8* %x, i8** %gep, align 8
- %y = load i8** %A
+ %y = load i8*, i8** %A
call void @use_alloca(i8** %A)
call void @objc_release(i8* %y), !clang.imprecise_release !0
call void @use_pointer(i8* %x)
@@ -101,7 +101,7 @@ entry:
tail call i8* @objc_retain(i8* %x)
tail call i8* @objc_retain(i8* %x)
store i8* %x, i8** %gep, align 8
- %y = load i8** %gep
+ %y = load i8*, i8** %gep
call void @use_alloca(i8** %A)
call void @objc_release(i8* %y), !clang.imprecise_release !0
call void @use_pointer(i8* %x)
@@ -135,7 +135,7 @@ exit:
tail call i8* @objc_retain(i8* %x)
tail call i8* @objc_retain(i8* %x)
store i8* %x, i8** %gep, align 8
- %y = load i8** %gep
+ %y = load i8*, i8** %gep
call void @use_alloca(i8** %A)
call void @objc_release(i8* %y), !clang.imprecise_release !0
call void @use_pointer(i8* %x)
@@ -168,7 +168,7 @@ exit:
tail call i8* @objc_retain(i8* %x)
tail call i8* @objc_retain(i8* %x)
store i8* %x, i8** %gep, align 8
- %y = load i8** %gep
+ %y = load i8*, i8** %gep
call void @use_alloca(i8** %A)
call void @objc_release(i8* %y), !clang.imprecise_release !0
call void @use_pointer(i8* %x)
@@ -191,7 +191,7 @@ entry:
tail call i8* @objc_retain(i8* %x)
tail call i8* @objc_retain(i8* %x)
store i8* %x, i8** %A, align 8
- %y = load i8** %A
+ %y = load i8*, i8** %A
call void @use_alloca(i8** %A)
call void @objc_release(i8* %y), !clang.imprecise_release !0
call void @use_pointer(i8* %x)
@@ -214,7 +214,7 @@ define void @test2a(i8* %x) {
entry:
%A = alloca i8*
store i8* %x, i8** %A, align 8
- %y = load i8** %A
+ %y = load i8*, i8** %A
br label %bb1
bb1:
@@ -246,7 +246,7 @@ entry:
%gep1 = getelementptr i8*, i8** %A, i32 0
store i8* %x, i8** %gep1, align 8
%gep2 = getelementptr i8*, i8** %A, i32 0
- %y = load i8** %gep2
+ %y = load i8*, i8** %gep2
br label %bb1
bb1:
@@ -278,7 +278,7 @@ entry:
%gep1 = getelementptr i8*, i8** %A, i32 2
store i8* %x, i8** %gep1, align 8
%gep2 = getelementptr i8*, i8** %A, i32 2
- %y = load i8** %gep2
+ %y = load i8*, i8** %gep2
tail call i8* @objc_retain(i8* %x)
br label %bb1
@@ -314,7 +314,7 @@ bb1:
%gepbb11 = getelementptr i8*, i8** %Abb1, i32 2
store i8* %x, i8** %gepbb11, align 8
%gepbb12 = getelementptr i8*, i8** %Abb1, i32 2
- %ybb1 = load i8** %gepbb12
+ %ybb1 = load i8*, i8** %gepbb12
br label %bb3
bb2:
@@ -322,7 +322,7 @@ bb2:
%gepbb21 = getelementptr i8*, i8** %Abb2, i32 2
store i8* %x, i8** %gepbb21, align 8
%gepbb22 = getelementptr i8*, i8** %Abb2, i32 2
- %ybb2 = load i8** %gepbb22
+ %ybb2 = load i8*, i8** %gepbb22
br label %bb3
bb3:
@@ -391,7 +391,7 @@ entry:
arraydestroy.body:
%arraydestroy.elementPast = phi i8** [ %gep, %entry ], [ %arraydestroy.element, %arraydestroy.body ]
%arraydestroy.element = getelementptr inbounds i8*, i8** %arraydestroy.elementPast, i64 -1
- %destroy_tmp = load i8** %arraydestroy.element, align 8
+ %destroy_tmp = load i8*, i8** %arraydestroy.element, align 8
call void @objc_release(i8* %destroy_tmp), !clang.imprecise_release !0
%objs_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
%arraydestroy.cmp = icmp eq i8** %arraydestroy.element, %objs_ptr
@@ -404,7 +404,7 @@ arraydestroy.done:
arraydestroy.body1:
%arraydestroy.elementPast1 = phi i8** [ %gep1, %arraydestroy.done ], [ %arraydestroy.element1, %arraydestroy.body1 ]
%arraydestroy.element1 = getelementptr inbounds i8*, i8** %arraydestroy.elementPast1, i64 -1
- %destroy_tmp1 = load i8** %arraydestroy.element1, align 8
+ %destroy_tmp1 = load i8*, i8** %arraydestroy.element1, align 8
call void @objc_release(i8* %destroy_tmp1), !clang.imprecise_release !0
%keys_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
%arraydestroy.cmp1 = icmp eq i8** %arraydestroy.element1, %keys_ptr
@@ -470,7 +470,7 @@ entry:
arraydestroy.body:
%arraydestroy.elementPast = phi i8** [ %gep, %entry ], [ %arraydestroy.element, %arraydestroy.body ]
%arraydestroy.element = getelementptr inbounds i8*, i8** %arraydestroy.elementPast, i64 -1
- %destroy_tmp = load i8** %arraydestroy.element, align 8
+ %destroy_tmp = load i8*, i8** %arraydestroy.element, align 8
call void @objc_release(i8* %destroy_tmp), !clang.imprecise_release !0
%objs_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
%arraydestroy.cmp = icmp eq i8** %arraydestroy.element, %objs_ptr
@@ -483,7 +483,7 @@ arraydestroy.done:
arraydestroy.body1:
%arraydestroy.elementPast1 = phi i8** [ %gep1, %arraydestroy.done ], [ %arraydestroy.element1, %arraydestroy.body1 ]
%arraydestroy.element1 = getelementptr inbounds i8*, i8** %arraydestroy.elementPast1, i64 -1
- %destroy_tmp1 = load i8** %arraydestroy.element1, align 8
+ %destroy_tmp1 = load i8*, i8** %arraydestroy.element1, align 8
call void @objc_release(i8* %destroy_tmp1), !clang.imprecise_release !0
%keys_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
%arraydestroy.cmp1 = icmp eq i8** %arraydestroy.element1, %keys_ptr
diff --git a/llvm/test/Transforms/ObjCARC/basic.ll b/llvm/test/Transforms/ObjCARC/basic.ll
index 9c91a05a6e7..be754366576 100644
--- a/llvm/test/Transforms/ObjCARC/basic.ll
+++ b/llvm/test/Transforms/ObjCARC/basic.ll
@@ -286,7 +286,7 @@ entry:
loop:
%c = bitcast i32* %x to i8*
call void @objc_release(i8* %c) nounwind
- %j = load volatile i1* %q
+ %j = load volatile i1, i1* %q
br i1 %j, label %loop, label %return
return:
@@ -306,7 +306,7 @@ entry:
loop:
%c = bitcast i32* %x to i8*
call void @objc_release(i8* %c) nounwind, !clang.imprecise_release !0
- %j = load volatile i1* %q
+ %j = load volatile i1, i1* %q
br i1 %j, label %loop, label %return
return:
@@ -330,7 +330,7 @@ entry:
loop:
%a = bitcast i32* %x to i8*
%0 = call i8* @objc_retain(i8* %a) nounwind
- %j = load volatile i1* %q
+ %j = load volatile i1, i1* %q
br i1 %j, label %loop, label %return
return:
@@ -350,7 +350,7 @@ entry:
loop:
%a = bitcast i32* %x to i8*
%0 = call i8* @objc_retain(i8* %a) nounwind
- %j = load volatile i1* %q
+ %j = load volatile i1, i1* %q
br i1 %j, label %loop, label %return
return:
@@ -1366,7 +1366,7 @@ C:
define void @test24(i8* %r, i8* %a) {
call i8* @objc_retain(i8* %a)
call void @use_pointer(i8* %r)
- %q = load i8* %a
+ %q = load i8, i8* %a
call void @objc_release(i8* %a)
ret void
}
@@ -2005,7 +2005,7 @@ entry:
; CHECK-NOT: objc_
; CHECK: }
define void @test44(i8** %pp) {
- %p = load i8** %pp
+ %p = load i8*, i8** %pp
%q = call i8* @objc_retain(i8* %p)
call void @objc_release(i8* %q)
ret void
@@ -2021,8 +2021,8 @@ define void @test44(i8** %pp) {
; CHECK: call void @objc_release(i8* %p)
; CHECK: }
define void @test45(i8** %pp, i8** %qq) {
- %p = load i8** %pp
- %q = load i8** %qq
+ %p = load i8*, i8** %pp
+ %q = load i8*, i8** %qq
call i8* @objc_retain(i8* %p)
call void @objc_release(i8* %q)
call void @use_pointer(i8* %p)
@@ -2154,10 +2154,10 @@ define void @test51b(i8* %p) {
; CHECK: ret void
; CHECK: }
define void @test52a(i8** %zz, i8** %pp) {
- %p = load i8** %pp
+ %p = load i8*, i8** %pp
%1 = call i8* @objc_retain(i8* %p)
call void @callee()
- %z = load i8** %zz
+ %z = load i8*, i8** %zz
call void @use_pointer(i8* %z)
call void @objc_release(i8* %p)
ret void
@@ -2171,10 +2171,10 @@ define void @test52a(i8** %zz, i8** %pp) {
; CHECK: ret void
; CHECK: }
define void @test52b(i8** %zz, i8** %pp) {
- %p = load i8** %pp
+ %p = load i8*, i8** %pp
%1 = call i8* @objc_retain(i8* %p)
call void @callee()
- %z = load i8** %zz
+ %z = load i8*, i8** %zz
call void @use_pointer(i8* %z)
call void @objc_release(i8* %p), !clang.imprecise_release !0
ret void
@@ -2189,10 +2189,10 @@ define void @test52b(i8** %zz, i8** %pp) {
; CHECK: @objc_
; CHECK: }
define void @test53(void ()** %zz, i8** %pp) {
- %p = load i8** %pp
+ %p = load i8*, i8** %pp
%1 = call i8* @objc_retain(i8* %p)
call void @callee()
- %z = load void ()** %zz
+ %z = load void ()*, void ()** %zz
call void @callee_fnptr(void ()* %z)
call void @objc_release(i8* %p)
ret void
@@ -2341,8 +2341,8 @@ entry:
; CHECK: call void @objc_release
; CHECK: }
define void @test60a() {
- %t = load i8** @constptr
- %s = load i8** @something
+ %t = load i8*, i8** @constptr
+ %s = load i8*, i8** @something
call i8* @objc_retain(i8* %s)
call void @callee()
call void @use_pointer(i8* %t)
@@ -2356,8 +2356,8 @@ define void @test60a() {
; CHECK-NOT: call i8* @objc_rrelease
; CHECK: }
define void @test60b() {
- %t = load i8** @constptr
- %s = load i8** @something
+ %t = load i8*, i8** @constptr
+ %s = load i8*, i8** @something
call i8* @objc_retain(i8* %s)
call i8* @objc_retain(i8* %s)
call void @callee()
@@ -2370,8 +2370,8 @@ define void @test60b() {
; CHECK-NOT: @objc_
; CHECK: }
define void @test60c() {
- %t = load i8** @constptr
- %s = load i8** @something
+ %t = load i8*, i8** @constptr
+ %s = load i8*, i8** @something
call i8* @objc_retain(i8* %s)
call void @callee()
call void @use_pointer(i8* %t)
@@ -2383,8 +2383,8 @@ define void @test60c() {
; CHECK-NOT: @objc_
; CHECK: }
define void @test60d() {
- %t = load i8** @constptr
- %s = load i8** @something
+ %t = load i8*, i8** @constptr
+ %s = load i8*, i8** @something
call i8* @objc_retain(i8* %t)
call void @callee()
call void @use_pointer(i8* %s)
@@ -2396,8 +2396,8 @@ define void @test60d() {
; CHECK-NOT: @objc_
; CHECK: }
define void @test60e() {
- %t = load i8** @constptr
- %s = load i8** @something
+ %t = load i8*, i8** @constptr
+ %s = load i8*, i8** @something
call i8* @objc_retain(i8* %t)
call void @callee()
call void @use_pointer(i8* %s)
@@ -2412,7 +2412,7 @@ define void @test60e() {
; CHECK-NOT: @objc_
; CHECK: }
define void @test61() {
- %t = load i8** @constptr
+ %t = load i8*, i8** @constptr
call i8* @objc_retain(i8* %t)
call void @callee()
call void @use_pointer(i8* %t)
@@ -2432,7 +2432,7 @@ entry:
loop:
call i8* @objc_retain(i8* %x)
- %q = load i1* %p
+ %q = load i1, i1* %p
br i1 %q, label %loop.more, label %exit
loop.more:
@@ -2459,7 +2459,7 @@ entry:
loop:
call i8* @objc_retain(i8* %x)
- %q = load i1* %p
+ %q = load i1, i1* %p
br i1 %q, label %loop.more, label %exit
loop.more:
@@ -2485,7 +2485,7 @@ entry:
loop:
call i8* @objc_retain(i8* %x)
- %q = load i1* %p
+ %q = load i1, i1* %p
br i1 %q, label %loop.more, label %exit
loop.more:
@@ -2681,31 +2681,31 @@ invoke.cont:
%1 = tail call i8* @objc_retain(i8* %0) nounwind
tail call void @llvm.dbg.value(metadata {}* %self, i64 0, metadata !0, metadata !{})
tail call void @llvm.dbg.value(metadata {}* %self, i64 0, metadata !0, metadata !{})
- %ivar = load i64* @"OBJC_IVAR_$_A.myZ", align 8
+ %ivar = load i64, i64* @"OBJC_IVAR_$_A.myZ", align 8
%add.ptr = getelementptr i8, i8* %0, i64 %ivar
%tmp1 = bitcast i8* %add.ptr to float*
- %tmp2 = load float* %tmp1, align 4
+ %tmp2 = load float, float* %tmp1, align 4
%conv = fpext float %tmp2 to double
%add.ptr.sum = add i64 %ivar, 4
%tmp6 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum
%2 = bitcast i8* %tmp6 to float*
- %tmp7 = load float* %2, align 4
+ %tmp7 = load float, float* %2, align 4
%conv8 = fpext float %tmp7 to double
%add.ptr.sum36 = add i64 %ivar, 8
%tmp12 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum36
%arrayidx = bitcast i8* %tmp12 to float*
- %tmp13 = load float* %arrayidx, align 4
+ %tmp13 = load float, float* %arrayidx, align 4
%conv14 = fpext float %tmp13 to double
%tmp12.sum = add i64 %ivar, 12
%arrayidx19 = getelementptr inbounds i8, i8* %0, i64 %tmp12.sum
%3 = bitcast i8* %arrayidx19 to float*
- %tmp20 = load float* %3, align 4
+ %tmp20 = load float, float* %3, align 4
%conv21 = fpext float %tmp20 to double
%call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([33 x i8]* @.str4, i64 0, i64 0), double %conv, double %conv8, double %conv14, double %conv21)
- %ivar23 = load i64* @"OBJC_IVAR_$_A.myZ", align 8
+ %ivar23 = load i64, i64* @"OBJC_IVAR_$_A.myZ", align 8
%add.ptr24 = getelementptr i8, i8* %0, i64 %ivar23
%4 = bitcast i8* %add.ptr24 to i128*
- %srcval = load i128* %4, align 4
+ %srcval = load i128, i128* %4, align 4
tail call void @objc_release(i8* %0) nounwind
%tmp29 = trunc i128 %srcval to i64
%tmp30 = bitcast i64 %tmp29 to <2 x float>
@@ -2752,7 +2752,7 @@ entry:
for.body: ; preds = %entry, %for.body
%i.010 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%1 = tail call i8* @objc_retain(i8* %x) nounwind
- %tmp5 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp5 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call = tail call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %1, i8* %tmp5)
tail call void @objc_release(i8* %1) nounwind, !clang.imprecise_release !0
%inc = add nsw i64 %i.010, 1
@@ -2828,12 +2828,12 @@ entry:
%tmp7 = bitcast %2* %self to i8*
%tmp8 = call i8* @objc_retain(i8* %tmp7) nounwind
store %4* null, %4** %err, align 8
- %tmp1 = load %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_17", align 8
- %tmp2 = load %struct.__CFString** @kUTTypePlainText, align 8
- %tmp3 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_19", align 8
+ %tmp1 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_17", align 8
+ %tmp2 = load %struct.__CFString*, %struct.__CFString** @kUTTypePlainText, align 8
+ %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_19", align 8
%tmp4 = bitcast %struct._class_t* %tmp1 to i8*
%call5 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %tmp4, i8* %tmp3, %struct.__CFString* %tmp2)
- %tmp5 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_21", align 8
+ %tmp5 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_21", align 8
%tmp6 = bitcast %3* %pboard to i8*
%call76 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %tmp6, i8* %tmp5, i8* %call5)
%tmp9 = call i8* @objc_retain(i8* %call76) nounwind
@@ -2841,7 +2841,7 @@ entry:
br i1 %tobool, label %end, label %land.lhs.true
land.lhs.true: ; preds = %entry
- %tmp11 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_23", align 8
+ %tmp11 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_23", align 8
%call137 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %tmp6, i8* %tmp11, i8* %tmp9)
%tmp = bitcast i8* %call137 to %1*
%tmp10 = call i8* @objc_retain(i8* %call137) nounwind
@@ -2852,14 +2852,14 @@ land.lhs.true: ; preds = %entry
br i1 %tobool16, label %end, label %if.then
if.then: ; preds = %land.lhs.true
- %tmp19 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_25", align 8
+ %tmp19 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_25", align 8
%call21 = call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*)*)(i8* %call137, i8* %tmp19)
%tobool22 = icmp eq i8 %call21, 0
br i1 %tobool22, label %if.then44, label %land.lhs.true23
land.lhs.true23: ; preds = %if.then
- %tmp24 = load %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_26", align 8
- %tmp26 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_28", align 8
+ %tmp24 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_26", align 8
+ %tmp26 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_28", align 8
%tmp27 = bitcast %struct._class_t* %tmp24 to i8*
%call2822 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %tmp27, i8* %tmp26, i8* %call137)
%tmp13 = bitcast i8* %call2822 to %5*
@@ -2869,38 +2869,38 @@ land.lhs.true23: ; preds = %if.then
br i1 %tobool30, label %if.then44, label %if.end
if.end: ; preds = %land.lhs.true23
- %tmp32 = load %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_29", align 8
- %tmp33 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_31", align 8
+ %tmp32 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_29", align 8
+ %tmp33 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_31", align 8
%tmp34 = bitcast %struct._class_t* %tmp32 to i8*
%call35 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %tmp34, i8* %tmp33)
- %tmp37 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_33", align 8
+ %tmp37 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_33", align 8
%call3923 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %call35, i8* %tmp37, i8* %call2822, i32 signext 1, %4** %err)
%cmp = icmp eq i8* %call3923, null
br i1 %cmp, label %if.then44, label %end
if.then44: ; preds = %if.end, %land.lhs.true23, %if.then
%url.025 = phi %5* [ %tmp13, %if.end ], [ %tmp13, %land.lhs.true23 ], [ null, %if.then ]
- %tmp49 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_35", align 8
+ %tmp49 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_35", align 8
%call51 = call %struct._NSRange bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %struct._NSRange (i8*, i8*, i64, i64)*)(i8* %call137, i8* %tmp49, i64 0, i64 0)
%call513 = extractvalue %struct._NSRange %call51, 0
%call514 = extractvalue %struct._NSRange %call51, 1
- %tmp52 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_37", align 8
+ %tmp52 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_37", align 8
%call548 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %call137, i8* %tmp52, i64 %call513, i64 %call514)
- %tmp55 = load %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_38", align 8
- %tmp56 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_40", align 8
+ %tmp55 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_38", align 8
+ %tmp56 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_40", align 8
%tmp57 = bitcast %struct._class_t* %tmp55 to i8*
%call58 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %tmp57, i8* %tmp56)
- %tmp59 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_42", align 8
+ %tmp59 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_42", align 8
%call6110 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %call548, i8* %tmp59, i8* %call58)
%tmp15 = call i8* @objc_retain(i8* %call6110) nounwind
call void @objc_release(i8* %call137) nounwind
- %tmp64 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_46", align 8
+ %tmp64 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_46", align 8
%call66 = call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*, %1*)*)(i8* %call6110, i8* %tmp64, %1* bitcast (%struct.NSConstantString* @_unnamed_cfstring_44 to %1*))
%tobool67 = icmp eq i8 %call66, 0
br i1 %tobool67, label %if.end74, label %if.then68
if.then68: ; preds = %if.then44
- %tmp70 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_48", align 8
+ %tmp70 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_48", align 8
%call7220 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %call6110, i8* %tmp70)
%tmp16 = call i8* @objc_retain(i8* %call7220) nounwind
call void @objc_release(i8* %call6110) nounwind
@@ -2909,52 +2909,52 @@ if.then68: ; preds = %if.then44
if.end74: ; preds = %if.then68, %if.then44
%filename.0.in = phi i8* [ %call7220, %if.then68 ], [ %call6110, %if.then44 ]
%filename.0 = bitcast i8* %filename.0.in to %1*
- %tmp17 = load i8** bitcast (%0* @"\01l_objc_msgSend_fixup_isEqual_" to i8**), align 16
+ %tmp17 = load i8*, i8** bitcast (%0* @"\01l_objc_msgSend_fixup_isEqual_" to i8**), align 16
%tmp18 = bitcast i8* %tmp17 to i8 (i8*, %struct._message_ref_t*, i8*, ...)*
%call78 = call signext i8 (i8*, %struct._message_ref_t*, i8*, ...)* %tmp18(i8* %call137, %struct._message_ref_t* bitcast (%0* @"\01l_objc_msgSend_fixup_isEqual_" to %struct._message_ref_t*), i8* %filename.0.in)
%tobool79 = icmp eq i8 %call78, 0
br i1 %tobool79, label %land.lhs.true80, label %if.then109
land.lhs.true80: ; preds = %if.end74
- %tmp82 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_25", align 8
+ %tmp82 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_25", align 8
%call84 = call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*)*)(i8* %filename.0.in, i8* %tmp82)
%tobool86 = icmp eq i8 %call84, 0
br i1 %tobool86, label %if.then109, label %if.end106
if.end106: ; preds = %land.lhs.true80
- %tmp88 = load %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_26", align 8
- %tmp90 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_28", align 8
+ %tmp88 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_26", align 8
+ %tmp90 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_28", align 8
%tmp91 = bitcast %struct._class_t* %tmp88 to i8*
%call9218 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %tmp91, i8* %tmp90, i8* %filename.0.in)
%tmp20 = bitcast i8* %call9218 to %5*
%tmp21 = call i8* @objc_retain(i8* %call9218) nounwind
%tmp22 = bitcast %5* %url.025 to i8*
call void @objc_release(i8* %tmp22) nounwind
- %tmp94 = load %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_29", align 8
- %tmp95 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_31", align 8
+ %tmp94 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_29", align 8
+ %tmp95 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_31", align 8
%tmp96 = bitcast %struct._class_t* %tmp94 to i8*
%call97 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %tmp96, i8* %tmp95)
- %tmp99 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_33", align 8
+ %tmp99 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_33", align 8
%call10119 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %call97, i8* %tmp99, i8* %call9218, i32 signext 1, %4** %err)
%phitmp = icmp eq i8* %call10119, null
br i1 %phitmp, label %if.then109, label %end
if.then109: ; preds = %if.end106, %land.lhs.true80, %if.end74
%url.129 = phi %5* [ %tmp20, %if.end106 ], [ %url.025, %if.end74 ], [ %url.025, %land.lhs.true80 ]
- %tmp110 = load %4** %err, align 8
+ %tmp110 = load %4*, %4** %err, align 8
%tobool111 = icmp eq %4* %tmp110, null
br i1 %tobool111, label %if.then112, label %if.end125
if.then112: ; preds = %if.then109
- %tmp113 = load %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_50", align 8
- %tmp114 = load %1** @NSCocoaErrorDomain, align 8
- %tmp115 = load %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_51", align 8
+ %tmp113 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_50", align 8
+ %tmp114 = load %1*, %1** @NSCocoaErrorDomain, align 8
+ %tmp115 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_51", align 8
%call117 = call %1* @truncatedString(%1* %filename.0, i64 1034)
- %tmp118 = load %1** @NSFilePathErrorKey, align 8
- %tmp119 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_53", align 8
+ %tmp118 = load %1*, %1** @NSFilePathErrorKey, align 8
+ %tmp119 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_53", align 8
%tmp120 = bitcast %struct._class_t* %tmp115 to i8*
%call12113 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %tmp120, i8* %tmp119, %1* %call117, %1* %tmp118, i8* null)
- %tmp122 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_55", align 8
+ %tmp122 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_55", align 8
%tmp123 = bitcast %struct._class_t* %tmp113 to i8*
%call12414 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %tmp123, i8* %tmp122, %1* %tmp114, i64 258, i8* %call12113)
%tmp23 = call i8* @objc_retain(i8* %call12414) nounwind
@@ -2965,11 +2965,11 @@ if.then112: ; preds = %if.then109
if.end125: ; preds = %if.then112, %if.then109
%tmp127 = phi %4* [ %tmp110, %if.then109 ], [ %tmp28, %if.then112 ]
- %tmp126 = load %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_56", align 8
- %tmp128 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_58", align 8
+ %tmp126 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_56", align 8
+ %tmp128 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_58", align 8
%tmp129 = bitcast %struct._class_t* %tmp126 to i8*
%call13015 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %tmp129, i8* %tmp128, %4* %tmp127)
- %tmp131 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_60", align 8
+ %tmp131 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_60", align 8
%call13317 = call i8* (i8*, i8*, ...)* @objc_msgSend(i8* %call13015, i8* %tmp131)
br label %end
diff --git a/llvm/test/Transforms/ObjCARC/cfg-hazards.ll b/llvm/test/Transforms/ObjCARC/cfg-hazards.ll
index 746d56df8fa..8407e446b4f 100644
--- a/llvm/test/Transforms/ObjCARC/cfg-hazards.ll
+++ b/llvm/test/Transforms/ObjCARC/cfg-hazards.ll
@@ -421,7 +421,7 @@ loop:
store i8* %a, i8** %block, align 8
%casted_block = bitcast i8** %block to void ()*
call void @block_callee(void ()* %casted_block)
- %reloaded_a = load i8** %block, align 8
+ %reloaded_a = load i8*, i8** %block, align 8
call void @objc_release(i8* %reloaded_a) nounwind, !clang.imprecise_release !0
br i1 undef, label %loop, label %exit
diff --git a/llvm/test/Transforms/ObjCARC/contract-storestrong-ivar.ll b/llvm/test/Transforms/ObjCARC/contract-storestrong-ivar.ll
index 078e45f5c6c..8b1a02f3feb 100644
--- a/llvm/test/Transforms/ObjCARC/contract-storestrong-ivar.ll
+++ b/llvm/test/Transforms/ObjCARC/contract-storestrong-ivar.ll
@@ -16,11 +16,11 @@ declare void @objc_release(i8*)
define hidden void @y(%0* nocapture %self, %1* %preferencesController) nounwind {
entry:
- %ivar = load i64* @"OBJC_IVAR_$_Controller.preferencesController", align 8
+ %ivar = load i64, i64* @"OBJC_IVAR_$_Controller.preferencesController", align 8
%tmp = bitcast %0* %self to i8*
%add.ptr = getelementptr inbounds i8, i8* %tmp, i64 %ivar
%tmp1 = bitcast i8* %add.ptr to %1**
- %tmp2 = load %1** %tmp1, align 8
+ %tmp2 = load %1*, %1** %tmp1, align 8
%tmp3 = bitcast %1* %preferencesController to i8*
%tmp4 = tail call i8* @objc_retain(i8* %tmp3) nounwind
%tmp5 = bitcast %1* %tmp2 to i8*
diff --git a/llvm/test/Transforms/ObjCARC/contract-storestrong.ll b/llvm/test/Transforms/ObjCARC/contract-storestrong.ll
index c218e330a68..aadc3a26539 100644
--- a/llvm/test/Transforms/ObjCARC/contract-storestrong.ll
+++ b/llvm/test/Transforms/ObjCARC/contract-storestrong.ll
@@ -16,7 +16,7 @@ declare void @use_pointer(i8*)
define void @test0(i8* %p) {
entry:
%0 = tail call i8* @objc_retain(i8* %p) nounwind
- %tmp = load i8** @x, align 8
+ %tmp = load i8*, i8** @x, align 8
store i8* %0, i8** @x, align 8
tail call void @objc_release(i8* %tmp) nounwind
ret void
@@ -27,7 +27,7 @@ entry:
; CHECK-LABEL: define void @test1(i8* %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %0 = tail call i8* @objc_retain(i8* %p) [[NUW]]
-; CHECK-NEXT: %tmp = load volatile i8** @x, align 8
+; CHECK-NEXT: %tmp = load volatile i8*, i8** @x, align 8
; CHECK-NEXT: store i8* %0, i8** @x, align 8
; CHECK-NEXT: tail call void @objc_release(i8* %tmp) [[NUW]]
; CHECK-NEXT: ret void
@@ -35,7 +35,7 @@ entry:
define void @test1(i8* %p) {
entry:
%0 = tail call i8* @objc_retain(i8* %p) nounwind
- %tmp = load volatile i8** @x, align 8
+ %tmp = load volatile i8*, i8** @x, align 8
store i8* %0, i8** @x, align 8
tail call void @objc_release(i8* %tmp) nounwind
ret void
@@ -46,7 +46,7 @@ entry:
; CHECK-LABEL: define void @test2(i8* %p) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %0 = tail call i8* @objc_retain(i8* %p) [[NUW]]
-; CHECK-NEXT: %tmp = load i8** @x, align 8
+; CHECK-NEXT: %tmp = load i8*, i8** @x, align 8
; CHECK-NEXT: store volatile i8* %0, i8** @x, align 8
; CHECK-NEXT: tail call void @objc_release(i8* %tmp) [[NUW]]
; CHECK-NEXT: ret void
@@ -54,7 +54,7 @@ entry:
define void @test2(i8* %p) {
entry:
%0 = tail call i8* @objc_retain(i8* %p) nounwind
- %tmp = load i8** @x, align 8
+ %tmp = load i8*, i8** @x, align 8
store volatile i8* %0, i8** @x, align 8
tail call void @objc_release(i8* %tmp) nounwind
ret void
@@ -66,7 +66,7 @@ entry:
; CHECK-LABEL: define void @test3(i8* %newValue) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %x0 = tail call i8* @objc_retain(i8* %newValue) [[NUW]]
-; CHECK-NEXT: %x1 = load i8** @x, align 8
+; CHECK-NEXT: %x1 = load i8*, i8** @x, align 8
; CHECK-NEXT: store i8* %x0, i8** @x, align 8
; CHECK-NEXT: tail call void @use_pointer(i8* %x1), !clang.arc.no_objc_arc_exceptions !0
; CHECK-NEXT: tail call void @objc_release(i8* %x1) [[NUW]], !clang.imprecise_release !0
@@ -75,7 +75,7 @@ entry:
define void @test3(i8* %newValue) {
entry:
%x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
- %x1 = load i8** @x, align 8
+ %x1 = load i8*, i8** @x, align 8
store i8* %newValue, i8** @x, align 8
tail call void @use_pointer(i8* %x1), !clang.arc.no_objc_arc_exceptions !0
tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
@@ -87,7 +87,7 @@ entry:
; CHECK-LABEL: define i1 @test4(i8* %newValue, i8* %foo) {
; CHECK-NEXT: entry:
; CHECK-NEXT: %x0 = tail call i8* @objc_retain(i8* %newValue) [[NUW]]
-; CHECK-NEXT: %x1 = load i8** @x, align 8
+; CHECK-NEXT: %x1 = load i8*, i8** @x, align 8
; CHECK-NEXT: store i8* %x0, i8** @x, align 8
; CHECK-NEXT: %t = icmp eq i8* %x1, %foo
; CHECK-NEXT: tail call void @objc_release(i8* %x1) [[NUW]], !clang.imprecise_release !0
@@ -96,7 +96,7 @@ entry:
define i1 @test4(i8* %newValue, i8* %foo) {
entry:
%x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
- %x1 = load i8** @x, align 8
+ %x1 = load i8*, i8** @x, align 8
store i8* %newValue, i8** @x, align 8
%t = icmp eq i8* %x1, %foo
tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
@@ -112,7 +112,7 @@ entry:
define i1 @test5(i8* %newValue, i8* %foo) {
entry:
%x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
- %x1 = load i8** @x, align 8
+ %x1 = load i8*, i8** @x, align 8
%t = icmp eq i8* %x1, %foo
store i8* %newValue, i8** @x, align 8
tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
@@ -128,7 +128,7 @@ entry:
define i1 @test6(i8* %newValue, i8* %foo) {
entry:
%x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
- %x1 = load i8** @x, align 8
+ %x1 = load i8*, i8** @x, align 8
tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
%t = icmp eq i8* %x1, %foo
store i8* %newValue, i8** @x, align 8
@@ -140,14 +140,14 @@ entry:
; CHECK-LABEL: define void @test7(
; CHECK-NEXT: entry:
; CHECK-NEXT: %0 = tail call i8* @objc_retain(i8* %p) [[NUW]]
-; CHECK-NEXT: %tmp = load i8** @x, align 8
+; CHECK-NEXT: %tmp = load i8*, i8** @x, align 8
; CHECK-NEXT: tail call void @objc_release(i8* %tmp) [[NUW]]
; CHECK-NEXT: ret void
; CHECK-NEXT: }
define void @test7(i8* %p) {
entry:
%0 = tail call i8* @objc_retain(i8* %p) nounwind
- %tmp = load i8** @x, align 8
+ %tmp = load i8*, i8** @x, align 8
tail call void @objc_release(i8* %tmp) nounwind
ret void
}
@@ -156,14 +156,14 @@ entry:
; CHECK-LABEL: define void @test8(
; CHECK-NEXT: entry:
-; CHECK-NEXT: %tmp = load i8** @x, align 8
+; CHECK-NEXT: %tmp = load i8*, i8** @x, align 8
; CHECK-NEXT: store i8* %p, i8** @x, align 8
; CHECK-NEXT: tail call void @objc_release(i8* %tmp) [[NUW]]
; CHECK-NEXT: ret void
; CHECK-NEXT: }
define void @test8(i8* %p) {
entry:
- %tmp = load i8** @x, align 8
+ %tmp = load i8*, i8** @x, align 8
store i8* %p, i8** @x, align 8
tail call void @objc_release(i8* %tmp) nounwind
ret void
@@ -181,7 +181,7 @@ define i1 @test9(i8* %newValue, i8* %foo, i8* %unrelated_ptr) {
entry:
%x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
tail call void @objc_release(i8* %unrelated_ptr) nounwind, !clang.imprecise_release !0
- %x1 = load i8** @x, align 8
+ %x1 = load i8*, i8** @x, align 8
tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
%t = icmp eq i8* %x1, %foo
store i8* %newValue, i8** @x, align 8
@@ -196,7 +196,7 @@ define i1 @test10(i8* %newValue, i8* %foo, i8* %unrelated_ptr) {
entry:
%x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
call void @use_pointer(i8* %unrelated_ptr)
- %x1 = load i8** @x, align 8
+ %x1 = load i8*, i8** @x, align 8
tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
%t = icmp eq i8* %x1, %foo
store i8* %newValue, i8** @x, align 8
@@ -211,7 +211,7 @@ define i1 @test11(i8* %newValue, i8* %foo, i8* %unrelated_ptr) {
entry:
%x0 = tail call i8* @objc_retain(i8* %newValue) nounwind
%t = icmp eq i8* %newValue, %foo
- %x1 = load i8** @x, align 8
+ %x1 = load i8*, i8** @x, align 8
tail call void @objc_release(i8* %x1) nounwind, !clang.imprecise_release !0
store i8* %newValue, i8** @x, align 8
ret i1 %t
diff --git a/llvm/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll b/llvm/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll
index c72566cd206..ff9a5ef6fb1 100644
--- a/llvm/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll
+++ b/llvm/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll
@@ -36,8 +36,8 @@ target triple = "x86_64-apple-macosx10.9.0"
define i32 @main() uwtable ssp {
entry:
- %tmp = load %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_", align 8, !dbg !37
- %tmp1 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8, !dbg !37, !invariant.load !38
+ %tmp = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_", align 8, !dbg !37
+ %tmp1 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8, !dbg !37, !invariant.load !38
%tmp2 = bitcast %struct._class_t* %tmp to i8*, !dbg !37
; CHECK: call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp2, i8* %tmp1)
%call = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp2, i8* %tmp1), !dbg !37, !clang.arc.no_objc_arc_exceptions !38
@@ -88,8 +88,8 @@ define internal fastcc void @ThrowFunc(i8* %obj) uwtable noinline ssp {
entry:
%tmp = call i8* @objc_retain(i8* %obj) nounwind
call void @llvm.dbg.value(metadata i8* %obj, i64 0, metadata !32, metadata !{}), !dbg !55
- %tmp1 = load %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_1", align 8, !dbg !56
- %tmp2 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_5", align 8, !dbg !56, !invariant.load !38
+ %tmp1 = load %struct._class_t*, %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_1", align 8, !dbg !56
+ %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_5", align 8, !dbg !56, !invariant.load !38
%tmp3 = bitcast %struct._class_t* %tmp1 to i8*, !dbg !56
call void (i8*, i8*, %0*, %0*, ...)* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, %0*, %0*, ...)*)(i8* %tmp3, i8* %tmp2, %0* bitcast (%struct.NSConstantString* @_unnamed_cfstring_3 to %0*), %0* bitcast (%struct.NSConstantString* @_unnamed_cfstring_3 to %0*)), !dbg !56, !clang.arc.no_objc_arc_exceptions !38
call void @objc_release(i8* %obj) nounwind, !dbg !58, !clang.imprecise_release !38
diff --git a/llvm/test/Transforms/ObjCARC/escape.ll b/llvm/test/Transforms/ObjCARC/escape.ll
index fe6009973b5..c7a1b03c16f 100644
--- a/llvm/test/Transforms/ObjCARC/escape.ll
+++ b/llvm/test/Transforms/ObjCARC/escape.ll
@@ -47,13 +47,13 @@ entry:
store i8* %tmp5, i8** %block.captured, align 8
%tmp6 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block to i8*
%tmp7 = call i8* @objc_retainBlock(i8* %tmp6) nounwind, !clang.arc.copy_on_escape !0
- %tmp8 = load %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
+ %tmp8 = load %struct.__block_byref_weakLogNTimes*, %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
%weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
%tmp9 = bitcast void (...)** %weakLogNTimes3 to i8**
%tmp10 = call i8* @objc_storeWeak(i8** %tmp9, i8* %tmp7) nounwind
%tmp11 = getelementptr inbounds i8, i8* %tmp7, i64 16
%tmp12 = bitcast i8* %tmp11 to i8**
- %tmp13 = load i8** %tmp12, align 8
+ %tmp13 = load i8*, i8** %tmp12, align 8
%tmp14 = bitcast i8* %tmp13 to void (i8*, i32)*
call void %tmp14(i8* %tmp7, i32 10) nounwind, !clang.arc.no_objc_arc_exceptions !0
call void @objc_release(i8* %tmp7) nounwind, !clang.imprecise_release !0
@@ -102,13 +102,13 @@ entry:
store i8* %tmp5, i8** %block.captured, align 8
%tmp6 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block to i8*
%tmp7 = call i8* @objc_retainBlock(i8* %tmp6) nounwind, !clang.arc.copy_on_escape !0
- %tmp8 = load %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
+ %tmp8 = load %struct.__block_byref_weakLogNTimes*, %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
%weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
%tmp9 = bitcast void (...)** %weakLogNTimes3 to i8**
%tmp10 = call i8* @not_really_objc_storeWeak(i8** %tmp9, i8* %tmp7) nounwind
%tmp11 = getelementptr inbounds i8, i8* %tmp7, i64 16
%tmp12 = bitcast i8* %tmp11 to i8**
- %tmp13 = load i8** %tmp12, align 8
+ %tmp13 = load i8*, i8** %tmp12, align 8
%tmp14 = bitcast i8* %tmp13 to void (i8*, i32)*
call void %tmp14(i8* %tmp7, i32 10) nounwind, !clang.arc.no_objc_arc_exceptions !0
call void @objc_release(i8* %tmp7) nounwind, !clang.imprecise_release !0
diff --git a/llvm/test/Transforms/ObjCARC/gvn.ll b/llvm/test/Transforms/ObjCARC/gvn.ll
index 2d120e7345a..6f828545bc6 100644
--- a/llvm/test/Transforms/ObjCARC/gvn.ll
+++ b/llvm/test/Transforms/ObjCARC/gvn.ll
@@ -11,15 +11,15 @@ declare i32 @objc_sync_exit(i8*)
; CHECK: define i8* @test0(i32 %n)
; CHECK-NEXT: entry:
-; CHECK-NEXT: %s = load i8** @x
+; CHECK-NEXT: %s = load i8*, i8** @x
; CHECK-NOT: load
; CHECK: ret i8* %s
; CHECK-NEXT: }
define i8* @test0(i32 %n) nounwind {
entry:
- %s = load i8** @x
+ %s = load i8*, i8** @x
%0 = tail call i8* @objc_retain(i8* %s) nounwind
- %t = load i8** @x
+ %t = load i8*, i8** @x
ret i8* %t
}
@@ -33,9 +33,9 @@ entry:
; CHECK: }
define i8* @test1(i32 %n) nounwind {
entry:
- %s = load i8** @x
+ %s = load i8*, i8** @x
%0 = call i32 @objc_sync_enter(i8* %s)
- %t = load i8** @x
+ %t = load i8*, i8** @x
%1 = call i32 @objc_sync_exit(i8* %s)
ret i8* %t
}
diff --git a/llvm/test/Transforms/ObjCARC/intrinsic-use.ll b/llvm/test/Transforms/ObjCARC/intrinsic-use.ll
index b1e56c8e9de..d85cb3ebc59 100644
--- a/llvm/test/Transforms/ObjCARC/intrinsic-use.ll
+++ b/llvm/test/Transforms/ObjCARC/intrinsic-use.ll
@@ -22,13 +22,13 @@ declare void @test0_helper(i8*, i8**)
; CHECK-NEXT: store i8* %y, i8** %temp0
; CHECK-NEXT: @objc_retain(i8* %y)
; CHECK-NEXT: call void @test0_helper
-; CHECK-NEXT: [[VAL1:%.*]] = load i8** %temp0
+; CHECK-NEXT: [[VAL1:%.*]] = load i8*, i8** %temp0
; CHECK-NEXT: call void (...)* @clang.arc.use(i8* %y)
; CHECK-NEXT: @objc_retain(i8* [[VAL1]])
; CHECK-NEXT: @objc_release(i8* %y)
; CHECK-NEXT: store i8* [[VAL1]], i8** %temp1
; CHECK-NEXT: call void @test0_helper
-; CHECK-NEXT: [[VAL2:%.*]] = load i8** %temp1
+; CHECK-NEXT: [[VAL2:%.*]] = load i8*, i8** %temp1
; CHECK-NEXT: call void (...)* @clang.arc.use(i8* [[VAL1]])
; CHECK-NEXT: @objc_retain(i8* [[VAL2]])
; CHECK-NEXT: @objc_release(i8* [[VAL1]])
@@ -47,13 +47,13 @@ entry:
%1 = call i8* @objc_retain(i8* %y) nounwind
store i8* %y, i8** %temp0
call void @test0_helper(i8* %x, i8** %temp0)
- %val1 = load i8** %temp0
+ %val1 = load i8*, i8** %temp0
%2 = call i8* @objc_retain(i8* %val1) nounwind
call void (...)* @clang.arc.use(i8* %y) nounwind
call void @objc_release(i8* %y) nounwind
store i8* %val1, i8** %temp1
call void @test0_helper(i8* %x, i8** %temp1)
- %val2 = load i8** %temp1
+ %val2 = load i8*, i8** %temp1
%3 = call i8* @objc_retain(i8* %val2) nounwind
call void (...)* @clang.arc.use(i8* %val1) nounwind
call void @objc_release(i8* %val1) nounwind
@@ -70,13 +70,13 @@ entry:
; CHECK-NEXT: store i8* %y, i8** %temp0
; CHECK-NEXT: @objc_retain(i8* %y)
; CHECK-NEXT: call void @test0_helper
-; CHECK-NEXT: [[VAL1:%.*]] = load i8** %temp0
+; CHECK-NEXT: [[VAL1:%.*]] = load i8*, i8** %temp0
; CHECK-NEXT: call void (...)* @clang.arc.use(i8* %y)
; CHECK-NEXT: @objc_retain(i8* [[VAL1]])
; CHECK-NEXT: @objc_release(i8* %y)
; CHECK-NEXT: store i8* [[VAL1]], i8** %temp1
; CHECK-NEXT: call void @test0_helper
-; CHECK-NEXT: [[VAL2:%.*]] = load i8** %temp1
+; CHECK-NEXT: [[VAL2:%.*]] = load i8*, i8** %temp1
; CHECK-NEXT: call void (...)* @clang.arc.use(i8* [[VAL1]])
; CHECK-NEXT: @objc_retain(i8* [[VAL2]])
; CHECK-NEXT: @objc_release(i8* [[VAL1]])
@@ -93,13 +93,13 @@ entry:
%1 = call i8* @objc_retain(i8* %y) nounwind
store i8* %y, i8** %temp0
call void @test0_helper(i8* %x, i8** %temp0)
- %val1 = load i8** %temp0
+ %val1 = load i8*, i8** %temp0
%2 = call i8* @objc_retain(i8* %val1) nounwind
call void (...)* @clang.arc.use(i8* %y) nounwind
call void @objc_release(i8* %y) nounwind, !clang.imprecise_release !0
store i8* %val1, i8** %temp1
call void @test0_helper(i8* %x, i8** %temp1)
- %val2 = load i8** %temp1
+ %val2 = load i8*, i8** %temp1
%3 = call i8* @objc_retain(i8* %val2) nounwind
call void (...)* @clang.arc.use(i8* %val1) nounwind
call void @objc_release(i8* %val1) nounwind, !clang.imprecise_release !0
diff --git a/llvm/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll b/llvm/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll
index 85fd06b557f..9894eb4f534 100644
--- a/llvm/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll
+++ b/llvm/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll
@@ -80,20 +80,20 @@ declare i8* @objc_explicit_autorelease(i8*)
define hidden %14* @foo(%15* %arg, %16* %arg2) {
bb:
- %tmp = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_3725", align 8
+ %tmp = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_3725", align 8
%tmp4 = bitcast %15* %arg to i8*
%tmp5 = tail call %18* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %18* (i8*, i8*)*)(i8* %tmp4, i8* %tmp)
%tmp6 = bitcast %18* %tmp5 to i8*
%tmp7 = tail call i8* @objc_retain(i8* %tmp6) nounwind
- %tmp8 = load %2** @"\01L_OBJC_CLASSLIST_REFERENCES_$_40", align 8
- %tmp9 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_4227", align 8
+ %tmp8 = load %2*, %2** @"\01L_OBJC_CLASSLIST_REFERENCES_$_40", align 8
+ %tmp9 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_4227", align 8
%tmp10 = bitcast %2* %tmp8 to i8*
%tmp11 = tail call %19* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %19* (i8*, i8*)*)(i8* %tmp10, i8* %tmp9)
- %tmp12 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_4631", align 8
+ %tmp12 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_4631", align 8
%tmp13 = bitcast %19* %tmp11 to i8*
%tmp14 = tail call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*, %13*)*)(i8* %tmp13, i8* %tmp12, %13* bitcast (%12* @_unnamed_cfstring_386 to %13*))
%tmp15 = bitcast %16* %arg2 to i8*
- %tmp16 = load i8** bitcast (%0* @"\01l_objc_msgSend_fixup_count" to i8**), align 16
+ %tmp16 = load i8*, i8** bitcast (%0* @"\01l_objc_msgSend_fixup_count" to i8**), align 16
%tmp17 = bitcast i8* %tmp16 to i64 (i8*, %1*)*
%tmp18 = tail call i64 %tmp17(i8* %tmp15, %1* bitcast (%0* @"\01l_objc_msgSend_fixup_count" to %1*))
%tmp19 = icmp eq i64 %tmp18, 0
@@ -110,104 +110,104 @@ bb22: ; preds = %bb
bb25: ; preds = %bb22, %bb20
%tmp26 = phi i1 [ %tmp21, %bb20 ], [ false, %bb22 ]
- %tmp27 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_188", align 8
+ %tmp27 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_188", align 8
%tmp28 = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp7, i8* %tmp27)
%tmp29 = tail call i8* @objc_explicit_autorelease(i8* %tmp28) nounwind
%tmp30 = bitcast i8* %tmp29 to %18*
tail call void @objc_release(i8* %tmp7) nounwind
- %tmp31 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_389", align 8
+ %tmp31 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_389", align 8
%tmp32 = tail call %20* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %20* (i8*, i8*)*)(i8* %tmp29, i8* %tmp31)
- %tmp33 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_391", align 8
+ %tmp33 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_391", align 8
%tmp34 = bitcast %20* %tmp32 to i8*
tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, %16*)*)(i8* %tmp34, i8* %tmp33, %16* %arg2)
br i1 %tmp26, label %bb46, label %bb35
bb35: ; preds = %bb25
- %tmp36 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_389", align 8
+ %tmp36 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_389", align 8
%tmp37 = tail call %20* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %20* (i8*, i8*)*)(i8* %tmp29, i8* %tmp36)
- %tmp38 = load %2** @"\01L_OBJC_CLASSLIST_REFERENCES_$_70", align 8
- %tmp39 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_393", align 8
+ %tmp38 = load %2*, %2** @"\01L_OBJC_CLASSLIST_REFERENCES_$_70", align 8
+ %tmp39 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_393", align 8
%tmp40 = bitcast %2* %tmp38 to i8*
%tmp41 = tail call %21* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %21* (i8*, i8*, i8)*)(i8* %tmp40, i8* %tmp39, i8 signext 1)
%tmp42 = bitcast %21* %tmp41 to i8*
- %tmp43 = load %13** @NSPrintHeaderAndFooter, align 8
- %tmp44 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_159", align 8
+ %tmp43 = load %13*, %13** @NSPrintHeaderAndFooter, align 8
+ %tmp44 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_159", align 8
%tmp45 = bitcast %20* %tmp37 to i8*
tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8*, %13*)*)(i8* %tmp45, i8* %tmp44, i8* %tmp42, %13* %tmp43)
br label %bb46
bb46: ; preds = %bb35, %bb25, %bb22
%tmp47 = phi %18* [ %tmp30, %bb35 ], [ %tmp30, %bb25 ], [ %tmp23, %bb22 ]
- %tmp48 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_328", align 8
+ %tmp48 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_328", align 8
%tmp49 = tail call %22* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %22* (i8*, i8*)*)(i8* %tmp4, i8* %tmp48)
%tmp50 = bitcast %22* %tmp49 to i8*
- %tmp51 = load i8** bitcast (%0* @"\01l_objc_msgSend_fixup_count" to i8**), align 16
+ %tmp51 = load i8*, i8** bitcast (%0* @"\01l_objc_msgSend_fixup_count" to i8**), align 16
%tmp52 = bitcast i8* %tmp51 to i64 (i8*, %1*)*
%tmp53 = tail call i64 %tmp52(i8* %tmp50, %1* bitcast (%0* @"\01l_objc_msgSend_fixup_count" to %1*))
%tmp54 = icmp eq i64 %tmp53, 0
br i1 %tmp54, label %bb55, label %bb57
bb55: ; preds = %bb46
- %tmp56 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_395", align 8
+ %tmp56 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_395", align 8
tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*)*)(i8* %tmp4, i8* %tmp56)
br label %bb57
bb57: ; preds = %bb55, %bb46
- %tmp58 = load %2** @"\01L_OBJC_CLASSLIST_REFERENCES_$_396", align 8
- %tmp59 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_328", align 8
+ %tmp58 = load %2*, %2** @"\01L_OBJC_CLASSLIST_REFERENCES_$_396", align 8
+ %tmp59 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_328", align 8
%tmp60 = tail call %22* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %22* (i8*, i8*)*)(i8* %tmp4, i8* %tmp59)
%tmp61 = bitcast %22* %tmp60 to i8*
- %tmp62 = load i8** bitcast (%0* @"\01l_objc_msgSend_fixup_objectAtIndex_" to i8**), align 16
+ %tmp62 = load i8*, i8** bitcast (%0* @"\01l_objc_msgSend_fixup_objectAtIndex_" to i8**), align 16
%tmp63 = bitcast i8* %tmp62 to i8* (i8*, %1*, i64)*
%tmp64 = tail call i8* %tmp63(i8* %tmp61, %1* bitcast (%0* @"\01l_objc_msgSend_fixup_objectAtIndex_" to %1*), i64 0)
- %tmp65 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_398", align 8
+ %tmp65 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_398", align 8
%tmp66 = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp64, i8* %tmp65)
%tmp67 = bitcast i8* %tmp66 to %23*
- %tmp68 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_400", align 8
+ %tmp68 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_400", align 8
%tmp69 = bitcast %2* %tmp58 to i8*
%tmp70 = tail call %14* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %14* (i8*, i8*, %23*, %18*)*)(i8* %tmp69, i8* %tmp68, %23* %tmp67, %18* %tmp47)
%tmp71 = bitcast %14* %tmp70 to i8*
; hack to prevent the optimize from using objc_retainAutoreleasedReturnValue.
%tmp71x = getelementptr i8, i8* %tmp71, i64 1
%tmp72 = tail call i8* @objc_retain(i8* %tmp71x) nounwind
- %tmp73 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_402", align 8
+ %tmp73 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_402", align 8
tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8)*)(i8* %tmp72, i8* %tmp73, i8 signext 1)
- %tmp74 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_404", align 8
+ %tmp74 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_404", align 8
tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8)*)(i8* %tmp72, i8* %tmp74, i8 signext 1)
- %tmp75 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_328", align 8
+ %tmp75 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_328", align 8
%tmp76 = tail call %22* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %22* (i8*, i8*)*)(i8* %tmp4, i8* %tmp75)
%tmp77 = bitcast %22* %tmp76 to i8*
- %tmp78 = load i8** bitcast (%0* @"\01l_objc_msgSend_fixup_objectAtIndex_" to i8**), align 16
+ %tmp78 = load i8*, i8** bitcast (%0* @"\01l_objc_msgSend_fixup_objectAtIndex_" to i8**), align 16
%tmp79 = bitcast i8* %tmp78 to i8* (i8*, %1*, i64)*
%tmp80 = tail call i8* %tmp79(i8* %tmp77, %1* bitcast (%0* @"\01l_objc_msgSend_fixup_objectAtIndex_" to %1*), i64 0)
- %tmp81 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_406", align 8
+ %tmp81 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_406", align 8
tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i64)*)(i8* %tmp80, i8* %tmp81, i64 9223372036854775807)
- %tmp82 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_408", align 8
+ %tmp82 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_408", align 8
%tmp83 = tail call %24* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %24* (i8*, i8*)*)(i8* %tmp72, i8* %tmp82)
%tmp84 = bitcast %24* %tmp83 to i8*
%tmp85 = tail call i8* @objc_retain(i8* %tmp84) nounwind
- %tmp86 = load %2** @"\01L_OBJC_CLASSLIST_REFERENCES_$_409", align 8
+ %tmp86 = load %2*, %2** @"\01L_OBJC_CLASSLIST_REFERENCES_$_409", align 8
%tmp87 = bitcast %2* %tmp86 to i8*
- %tmp88 = load i8** bitcast (%0* @"\01l_objc_msgSend_fixup_alloc" to i8**), align 16
+ %tmp88 = load i8*, i8** bitcast (%0* @"\01l_objc_msgSend_fixup_alloc" to i8**), align 16
%tmp89 = bitcast i8* %tmp88 to i8* (i8*, %1*)*
%tmp90 = tail call i8* %tmp89(i8* %tmp87, %1* bitcast (%0* @"\01l_objc_msgSend_fixup_alloc" to %1*))
- %tmp91 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_8", align 8
+ %tmp91 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_8", align 8
%tmp92 = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp90, i8* %tmp91)
%tmp93 = tail call i8* @objc_explicit_autorelease(i8* %tmp92) nounwind
%tmp94 = bitcast i8* %tmp93 to %25*
- %tmp95 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_411", align 8
+ %tmp95 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_411", align 8
tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, %25*)*)(i8* %tmp85, i8* %tmp95, %25* %tmp94)
tail call void @objc_release(i8* %tmp93) nounwind
- %tmp96 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_148", align 8
+ %tmp96 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_148", align 8
%tmp97 = tail call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*)*)(i8* %tmp4, i8* %tmp96)
%tmp98 = icmp eq i8 %tmp97, 0
br i1 %tmp98, label %bb99, label %bb104
bb99: ; preds = %bb57
- %tmp100 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_413", align 8
+ %tmp100 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_413", align 8
%tmp101 = tail call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*)*)(i8* %tmp85, i8* %tmp100)
%tmp102 = or i64 %tmp101, 12
- %tmp103 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_415", align 8
+ %tmp103 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_415", align 8
tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i64)*)(i8* %tmp85, i8* %tmp103, i64 %tmp102)
br label %bb104
diff --git a/llvm/test/Transforms/ObjCARC/move-and-merge-autorelease.ll b/llvm/test/Transforms/ObjCARC/move-and-merge-autorelease.ll
index e5d2f07e45a..5d19f355be3 100644
--- a/llvm/test/Transforms/ObjCARC/move-and-merge-autorelease.ll
+++ b/llvm/test/Transforms/ObjCARC/move-and-merge-autorelease.ll
@@ -35,10 +35,10 @@ declare i8* @objc_autorelease(i8*)
define hidden %0* @foo(%1* %arg, %3* %arg3) {
bb:
- %tmp16 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_620", align 8
+ %tmp16 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_620", align 8
%tmp17 = bitcast %3* %arg3 to i8*
%tmp18 = call %4* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %4* (i8*, i8*)*)(i8* %tmp17, i8* %tmp16)
- %tmp19 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_622", align 8
+ %tmp19 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_622", align 8
%tmp20 = bitcast %4* %tmp18 to i8*
%tmp21 = call %5* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %5* (i8*, i8*)*)(i8* %tmp20, i8* %tmp19)
%tmp22 = bitcast %5* %tmp21 to i8*
@@ -48,11 +48,11 @@ bb:
br i1 %tmp26, label %bb81, label %bb27
bb27: ; preds = %bb
- %tmp29 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_11", align 8
+ %tmp29 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_11", align 8
%tmp30 = bitcast %1* %arg to i8*
%tmp31 = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %tmp30, i8* %tmp29)
%tmp34 = call i8* @objc_retain(i8* %tmp31) nounwind
- %tmp37 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_421455", align 8
+ %tmp37 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_421455", align 8
%tmp39 = call %0* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %0* (i8*, i8*)*)(i8* %tmp34, i8* %tmp37)
%tmp40 = bitcast %0* %tmp39 to i8*
%tmp41 = call i8* @objc_retain(i8* %tmp40) nounwind
@@ -61,7 +61,7 @@ bb27: ; preds = %bb
br i1 %tmp44, label %bb45, label %bb55
bb45: ; preds = %bb27
- %tmp47 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_624", align 8
+ %tmp47 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_624", align 8
%tmp49 = call %0* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %0* (i8*, i8*)*)(i8* %tmp34, i8* %tmp47)
%tmp51 = bitcast %0* %tmp49 to i8*
%tmp52 = call i8* @objc_retain(i8* %tmp51) nounwind
@@ -74,14 +74,14 @@ bb55: ; preds = %bb27, %bb45
br i1 %tmp57, label %bb76, label %bb58
bb58: ; preds = %bb55
- %tmp60 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_598", align 8
+ %tmp60 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_598", align 8
%tmp61 = bitcast %0* %tmp13.0 to i8*
%tmp62 = call signext i8 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8 (i8*, i8*)*)(i8* %tmp61, i8* %tmp60)
%tmp64 = icmp eq i8 %tmp62, 0
br i1 %tmp64, label %bb76, label %bb65
bb65: ; preds = %bb58
- %tmp68 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_626", align 8
+ %tmp68 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_626", align 8
%tmp69 = bitcast %0* %tmp13.0 to i8*
%tmp70 = call %0* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %0* (i8*, i8*, %5*)*)(i8* %tmp69, i8* %tmp68, %5* %tmp24)
%tmp72 = bitcast %0* %tmp70 to i8*
diff --git a/llvm/test/Transforms/ObjCARC/nested.ll b/llvm/test/Transforms/ObjCARC/nested.ll
index ac0e7c783f0..fdd67f73b3a 100644
--- a/llvm/test/Transforms/ObjCARC/nested.ll
+++ b/llvm/test/Transforms/ObjCARC/nested.ll
@@ -37,15 +37,15 @@ entry:
%tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 8, i1 false)
%1 = call i8* @objc_retain(i8* %0) nounwind
- %tmp2 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%iszero = icmp eq i64 %call, 0
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
%mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
- %mutationsptr = load i64** %mutationsptr.ptr, align 8
- %forcoll.initial-mutations = load i64* %mutationsptr, align 8
+ %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
+ %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
%stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
@@ -57,8 +57,8 @@ forcoll.loopbody.outer:
forcoll.loopbody:
%forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.notmutated ]
- %mutationsptr3 = load i64** %mutationsptr.ptr, align 8
- %statemutations = load i64* %mutationsptr3, align 8
+ %mutationsptr3 = load i64*, i64** %mutationsptr.ptr, align 8
+ %statemutations = load i64, i64* %mutationsptr3, align 8
%2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
@@ -67,16 +67,16 @@ forcoll.mutated:
br label %forcoll.notmutated
forcoll.notmutated:
- %stateitems = load i8*** %stateitems.ptr, align 8
+ %stateitems = load i8**, i8*** %stateitems.ptr, align 8
%currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
- %3 = load i8** %currentitem.ptr, align 8
+ %3 = load i8*, i8** %currentitem.ptr, align 8
call void @use(i8* %3)
%4 = add i64 %forcoll.index, 1
%exitcond = icmp eq i64 %4, %umax
br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody
forcoll.refetch:
- %tmp5 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp5 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call6 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp5, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%5 = icmp eq i64 %call6, 0
br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
@@ -102,15 +102,15 @@ entry:
%tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 8, i1 false)
%1 = call i8* @objc_retain(i8* %0) nounwind
- %tmp2 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%iszero = icmp eq i64 %call3, 0
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
%mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
- %mutationsptr = load i64** %mutationsptr.ptr, align 8
- %forcoll.initial-mutations = load i64* %mutationsptr, align 8
+ %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
+ %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
%stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
@@ -122,8 +122,8 @@ forcoll.loopbody.outer:
forcoll.loopbody:
%forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.notmutated ]
- %mutationsptr4 = load i64** %mutationsptr.ptr, align 8
- %statemutations = load i64* %mutationsptr4, align 8
+ %mutationsptr4 = load i64*, i64** %mutationsptr.ptr, align 8
+ %statemutations = load i64, i64* %mutationsptr4, align 8
%2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
@@ -132,16 +132,16 @@ forcoll.mutated:
br label %forcoll.notmutated
forcoll.notmutated:
- %stateitems = load i8*** %stateitems.ptr, align 8
+ %stateitems = load i8**, i8*** %stateitems.ptr, align 8
%currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
- %3 = load i8** %currentitem.ptr, align 8
+ %3 = load i8*, i8** %currentitem.ptr, align 8
call void @use(i8* %3)
%4 = add i64 %forcoll.index, 1
%exitcond = icmp eq i64 %4, %umax
br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody
forcoll.refetch:
- %tmp6 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%5 = icmp eq i64 %call7, 0
br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
@@ -162,20 +162,20 @@ define void @test4() nounwind {
entry:
%state.ptr = alloca %struct.__objcFastEnumerationState, align 8
%items.ptr = alloca [16 x i8*], align 8
- %tmp = load i8** @g, align 8
+ %tmp = load i8*, i8** @g, align 8
%0 = call i8* @objc_retain(i8* %tmp) nounwind
%tmp2 = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp2, i8 0, i64 64, i32 8, i1 false)
%1 = call i8* @objc_retain(i8* %0) nounwind
- %tmp4 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp4 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp4, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%iszero = icmp eq i64 %call, 0
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
%mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
- %mutationsptr = load i64** %mutationsptr.ptr, align 8
- %forcoll.initial-mutations = load i64* %mutationsptr, align 8
+ %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
+ %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
%stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
@@ -187,8 +187,8 @@ forcoll.loopbody.outer:
forcoll.loopbody:
%forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.notmutated ]
- %mutationsptr5 = load i64** %mutationsptr.ptr, align 8
- %statemutations = load i64* %mutationsptr5, align 8
+ %mutationsptr5 = load i64*, i64** %mutationsptr.ptr, align 8
+ %statemutations = load i64, i64* %mutationsptr5, align 8
%2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
@@ -197,16 +197,16 @@ forcoll.mutated:
br label %forcoll.notmutated
forcoll.notmutated:
- %stateitems = load i8*** %stateitems.ptr, align 8
+ %stateitems = load i8**, i8*** %stateitems.ptr, align 8
%currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
- %3 = load i8** %currentitem.ptr, align 8
+ %3 = load i8*, i8** %currentitem.ptr, align 8
call void @use(i8* %3)
%4 = add i64 %forcoll.index, 1
%exitcond = icmp eq i64 %4, %umax
br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody
forcoll.refetch:
- %tmp7 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp7 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call8 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp7, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%5 = icmp eq i64 %call8, 0
br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
@@ -233,15 +233,15 @@ entry:
%tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 8, i1 false)
%1 = call i8* @objc_retain(i8* %0) nounwind
- %tmp2 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%iszero = icmp eq i64 %call3, 0
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
%mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
- %mutationsptr = load i64** %mutationsptr.ptr, align 8
- %forcoll.initial-mutations = load i64* %mutationsptr, align 8
+ %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
+ %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
%stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
@@ -253,8 +253,8 @@ forcoll.loopbody.outer:
forcoll.loopbody:
%forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.notmutated ]
- %mutationsptr4 = load i64** %mutationsptr.ptr, align 8
- %statemutations = load i64* %mutationsptr4, align 8
+ %mutationsptr4 = load i64*, i64** %mutationsptr.ptr, align 8
+ %statemutations = load i64, i64* %mutationsptr4, align 8
%2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
@@ -263,16 +263,16 @@ forcoll.mutated:
br label %forcoll.notmutated
forcoll.notmutated:
- %stateitems = load i8*** %stateitems.ptr, align 8
+ %stateitems = load i8**, i8*** %stateitems.ptr, align 8
%currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
- %3 = load i8** %currentitem.ptr, align 8
+ %3 = load i8*, i8** %currentitem.ptr, align 8
call void @use(i8* %3)
%4 = add i64 %forcoll.index, 1
%exitcond = icmp eq i64 %4, %umax
br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody
forcoll.refetch:
- %tmp6 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%5 = icmp eq i64 %call7, 0
br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
@@ -300,15 +300,15 @@ entry:
%tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 8, i1 false)
%1 = call i8* @objc_retain(i8* %0) nounwind
- %tmp2 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%iszero = icmp eq i64 %call3, 0
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
%mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
- %mutationsptr = load i64** %mutationsptr.ptr, align 8
- %forcoll.initial-mutations = load i64* %mutationsptr, align 8
+ %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
+ %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
%stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
@@ -320,8 +320,8 @@ forcoll.loopbody.outer:
forcoll.loopbody:
%forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.notmutated ]
- %mutationsptr4 = load i64** %mutationsptr.ptr, align 8
- %statemutations = load i64* %mutationsptr4, align 8
+ %mutationsptr4 = load i64*, i64** %mutationsptr.ptr, align 8
+ %statemutations = load i64, i64* %mutationsptr4, align 8
%2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
@@ -330,16 +330,16 @@ forcoll.mutated:
br label %forcoll.notmutated
forcoll.notmutated:
- %stateitems = load i8*** %stateitems.ptr, align 8
+ %stateitems = load i8**, i8*** %stateitems.ptr, align 8
%currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
- %3 = load i8** %currentitem.ptr, align 8
+ %3 = load i8*, i8** %currentitem.ptr, align 8
call void @use(i8* %3)
%4 = add i64 %forcoll.index, 1
%exitcond = icmp eq i64 %4, %umax
br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody
forcoll.refetch:
- %tmp6 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%5 = icmp eq i64 %call7, 0
br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
@@ -369,15 +369,15 @@ entry:
%tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 8, i1 false)
%1 = call i8* @objc_retain(i8* %0) nounwind
- %tmp2 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%iszero = icmp eq i64 %call3, 0
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
%mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
- %mutationsptr = load i64** %mutationsptr.ptr, align 8
- %forcoll.initial-mutations = load i64* %mutationsptr, align 8
+ %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
+ %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
%stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
@@ -389,8 +389,8 @@ forcoll.loopbody.outer:
forcoll.loopbody:
%forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.notmutated ]
- %mutationsptr4 = load i64** %mutationsptr.ptr, align 8
- %statemutations = load i64* %mutationsptr4, align 8
+ %mutationsptr4 = load i64*, i64** %mutationsptr.ptr, align 8
+ %statemutations = load i64, i64* %mutationsptr4, align 8
%2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
@@ -399,16 +399,16 @@ forcoll.mutated:
br label %forcoll.notmutated
forcoll.notmutated:
- %stateitems = load i8*** %stateitems.ptr, align 8
+ %stateitems = load i8**, i8*** %stateitems.ptr, align 8
%currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
- %3 = load i8** %currentitem.ptr, align 8
+ %3 = load i8*, i8** %currentitem.ptr, align 8
call void @use(i8* %3)
%4 = add i64 %forcoll.index, 1
%exitcond = icmp eq i64 %4, %umax
br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody
forcoll.refetch:
- %tmp6 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%5 = icmp eq i64 %call7, 0
br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
@@ -435,15 +435,15 @@ entry:
%tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 8, i1 false)
%1 = call i8* @objc_retain(i8* %0) nounwind
- %tmp2 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp2 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call3 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp2, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%iszero = icmp eq i64 %call3, 0
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
%mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
- %mutationsptr = load i64** %mutationsptr.ptr, align 8
- %forcoll.initial-mutations = load i64* %mutationsptr, align 8
+ %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
+ %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
%stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
@@ -455,8 +455,8 @@ forcoll.loopbody.outer:
forcoll.loopbody:
%forcoll.index = phi i64 [ 0, %forcoll.loopbody.outer ], [ %4, %forcoll.next ]
- %mutationsptr4 = load i64** %mutationsptr.ptr, align 8
- %statemutations = load i64* %mutationsptr4, align 8
+ %mutationsptr4 = load i64*, i64** %mutationsptr.ptr, align 8
+ %statemutations = load i64, i64* %mutationsptr4, align 8
%2 = icmp eq i64 %statemutations, %forcoll.initial-mutations
br i1 %2, label %forcoll.notmutated, label %forcoll.mutated
@@ -465,9 +465,9 @@ forcoll.mutated:
br label %forcoll.notmutated
forcoll.notmutated:
- %stateitems = load i8*** %stateitems.ptr, align 8
+ %stateitems = load i8**, i8*** %stateitems.ptr, align 8
%currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
- %3 = load i8** %currentitem.ptr, align 8
+ %3 = load i8*, i8** %currentitem.ptr, align 8
%tobool = icmp eq i8* %3, null
br i1 %tobool, label %forcoll.next, label %if.then
@@ -481,7 +481,7 @@ forcoll.next:
br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody
forcoll.refetch:
- %tmp6 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %1, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%5 = icmp eq i64 %call7, 0
br i1 %5, label %forcoll.empty, label %forcoll.loopbody.outer
@@ -512,15 +512,15 @@ entry:
%tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 8, i1 false)
%2 = call i8* @objc_retain(i8* %0) nounwind
- %tmp3 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%iszero = icmp eq i64 %call4, 0
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
%mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
- %mutationsptr = load i64** %mutationsptr.ptr, align 8
- %forcoll.initial-mutations = load i64* %mutationsptr, align 8
+ %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
+ %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
br label %forcoll.loopbody.outer
forcoll.loopbody.outer:
@@ -531,8 +531,8 @@ forcoll.loopbody.outer:
forcoll.loopbody:
%forcoll.index = phi i64 [ %phitmp, %forcoll.notmutated.forcoll.loopbody_crit_edge ], [ 1, %forcoll.loopbody.outer ]
- %mutationsptr5 = load i64** %mutationsptr.ptr, align 8
- %statemutations = load i64* %mutationsptr5, align 8
+ %mutationsptr5 = load i64*, i64** %mutationsptr.ptr, align 8
+ %statemutations = load i64, i64* %mutationsptr5, align 8
%3 = icmp eq i64 %statemutations, %forcoll.initial-mutations
br i1 %3, label %forcoll.notmutated, label %forcoll.mutated
@@ -549,7 +549,7 @@ forcoll.notmutated.forcoll.loopbody_crit_edge:
br label %forcoll.loopbody
forcoll.refetch:
- %tmp6 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%4 = icmp eq i64 %call7, 0
br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer
@@ -579,15 +579,15 @@ entry:
%tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 8, i1 false)
%2 = call i8* @objc_retain(i8* %0) nounwind
- %tmp3 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%iszero = icmp eq i64 %call4, 0
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
%mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
- %mutationsptr = load i64** %mutationsptr.ptr, align 8
- %forcoll.initial-mutations = load i64* %mutationsptr, align 8
+ %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
+ %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
br label %forcoll.loopbody.outer
forcoll.loopbody.outer:
@@ -598,8 +598,8 @@ forcoll.loopbody.outer:
forcoll.loopbody:
%forcoll.index = phi i64 [ %phitmp, %forcoll.notmutated ], [ 0, %forcoll.loopbody.outer ]
- %mutationsptr5 = load i64** %mutationsptr.ptr, align 8
- %statemutations = load i64* %mutationsptr5, align 8
+ %mutationsptr5 = load i64*, i64** %mutationsptr.ptr, align 8
+ %statemutations = load i64, i64* %mutationsptr5, align 8
%3 = icmp eq i64 %statemutations, %forcoll.initial-mutations
br i1 %3, label %forcoll.notmutated, label %forcoll.mutated
@@ -613,7 +613,7 @@ forcoll.notmutated:
br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody
forcoll.refetch:
- %tmp6 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%4 = icmp eq i64 %call7, 0
br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer
@@ -646,15 +646,15 @@ entry:
%tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 8, i1 false)
%2 = call i8* @objc_retain(i8* %0) nounwind
- %tmp3 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%iszero = icmp eq i64 %call4, 0
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
%mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
- %mutationsptr = load i64** %mutationsptr.ptr, align 8
- %forcoll.initial-mutations = load i64* %mutationsptr, align 8
+ %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
+ %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
br label %forcoll.loopbody.outer
forcoll.loopbody.outer:
@@ -665,8 +665,8 @@ forcoll.loopbody.outer:
forcoll.loopbody:
%forcoll.index = phi i64 [ %phitmp, %forcoll.notmutated.forcoll.loopbody_crit_edge ], [ 1, %forcoll.loopbody.outer ]
- %mutationsptr5 = load i64** %mutationsptr.ptr, align 8
- %statemutations = load i64* %mutationsptr5, align 8
+ %mutationsptr5 = load i64*, i64** %mutationsptr.ptr, align 8
+ %statemutations = load i64, i64* %mutationsptr5, align 8
%3 = icmp eq i64 %statemutations, %forcoll.initial-mutations
br i1 %3, label %forcoll.notmutated, label %forcoll.mutated
@@ -683,7 +683,7 @@ forcoll.notmutated.forcoll.loopbody_crit_edge:
br label %forcoll.loopbody
forcoll.refetch:
- %tmp6 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%4 = icmp eq i64 %call7, 0
br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer
@@ -714,15 +714,15 @@ entry:
%tmp = bitcast %struct.__objcFastEnumerationState* %state.ptr to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 8, i1 false)
%2 = call i8* @objc_retain(i8* %0) nounwind
- %tmp3 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp3 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call4 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp3, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%iszero = icmp eq i64 %call4, 0
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
%mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
- %mutationsptr = load i64** %mutationsptr.ptr, align 8
- %forcoll.initial-mutations = load i64* %mutationsptr, align 8
+ %mutationsptr = load i64*, i64** %mutationsptr.ptr, align 8
+ %forcoll.initial-mutations = load i64, i64* %mutationsptr, align 8
br label %forcoll.loopbody.outer
forcoll.loopbody.outer:
@@ -733,8 +733,8 @@ forcoll.loopbody.outer:
forcoll.loopbody:
%forcoll.index = phi i64 [ %phitmp, %forcoll.notmutated ], [ 0, %forcoll.loopbody.outer ]
- %mutationsptr5 = load i64** %mutationsptr.ptr, align 8
- %statemutations = load i64* %mutationsptr5, align 8
+ %mutationsptr5 = load i64*, i64** %mutationsptr.ptr, align 8
+ %statemutations = load i64, i64* %mutationsptr5, align 8
%3 = icmp eq i64 %statemutations, %forcoll.initial-mutations
br i1 %3, label %forcoll.notmutated, label %forcoll.mutated
@@ -748,7 +748,7 @@ forcoll.notmutated:
br i1 %exitcond, label %forcoll.refetch, label %forcoll.loopbody
forcoll.refetch:
- %tmp6 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
+ %tmp6 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8
%call7 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*, %struct.__objcFastEnumerationState*, [16 x i8*]*, i64)*)(i8* %2, i8* %tmp6, %struct.__objcFastEnumerationState* %state.ptr, [16 x i8*]* %items.ptr, i64 16)
%4 = icmp eq i64 %call7, 0
br i1 %4, label %forcoll.empty, label %forcoll.loopbody.outer
@@ -796,7 +796,7 @@ entry:
%foo5 = call i8* @objc_retainBlock(i8* %foo4) nounwind
call void @use(i8* %foo5), !clang.arc.no_objc_arc_exceptions !0
call void @objc_release(i8* %foo5) nounwind
- %strongdestroy = load i8** %foo, align 8
+ %strongdestroy = load i8*, i8** %foo, align 8
call void @objc_release(i8* %strongdestroy) nounwind, !clang.imprecise_release !0
%foo10 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 5
%block.isa11 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 0
@@ -815,7 +815,7 @@ entry:
%foo21 = call i8* @objc_retainBlock(i8* %foo20) nounwind
call void @use(i8* %foo21), !clang.arc.no_objc_arc_exceptions !0
call void @objc_release(i8* %foo21) nounwind
- %strongdestroy25 = load i8** %foo10, align 8
+ %strongdestroy25 = load i8*, i8** %foo10, align 8
call void @objc_release(i8* %strongdestroy25) nounwind, !clang.imprecise_release !0
call void @objc_release(i8* %call) nounwind, !clang.imprecise_release !0
ret void
diff --git a/llvm/test/Transforms/ObjCARC/provenance.ll b/llvm/test/Transforms/ObjCARC/provenance.ll
index 937c6892435..aa5a932a86e 100644
--- a/llvm/test/Transforms/ObjCARC/provenance.ll
+++ b/llvm/test/Transforms/ObjCARC/provenance.ll
@@ -10,28 +10,28 @@
declare void @g(i8)
define void @f(i8* %a, i8** %b, i8** %c) {
- %y1 = load i8* %a
+ %y1 = load i8, i8* %a
call void @g(i8 %y1)
- %y2 = load i8** %b
- %y3 = load i8** %c
+ %y2 = load i8*, i8** %b
+ %y3 = load i8*, i8** %c
- %x0 = load i8* @"\01l_objc_msgSend_fixup_"
+ %x0 = load i8, i8* @"\01l_objc_msgSend_fixup_"
call void @g(i8 %x0)
- %x1 = load i8* @g1
+ %x1 = load i8, i8* @g1
call void @g(i8 %x1)
- %x2 = load i8* @g2
+ %x2 = load i8, i8* @g2
call void @g(i8 %x2)
- %x3 = load i8* @g3
+ %x3 = load i8, i8* @g3
call void @g(i8 %x3)
- %x4 = load i8* @g4
+ %x4 = load i8, i8* @g4
call void @g(i8 %x4)
- %x5 = load i8* @g5
+ %x5 = load i8, i8* @g5
call void @g(i8 %x5)
ret void
}
diff --git a/llvm/test/Transforms/ObjCARC/retain-block-side-effects.ll b/llvm/test/Transforms/ObjCARC/retain-block-side-effects.ll
index b4f4089115e..5f5def9ff03 100644
--- a/llvm/test/Transforms/ObjCARC/retain-block-side-effects.ll
+++ b/llvm/test/Transforms/ObjCARC/retain-block-side-effects.ll
@@ -6,7 +6,7 @@
; CHECK: %tmp16 = call i8* @objc_retainBlock(i8* %tmp15) [[NUW:#[0-9]+]]
; CHECK: %tmp17 = bitcast i8* %tmp16 to void ()*
-; CHECK: %tmp18 = load %struct.__block_byref_repeater** %byref.forwarding, align 8
+; CHECK: %tmp18 = load %struct.__block_byref_repeater*, %struct.__block_byref_repeater** %byref.forwarding, align 8
; CHECK: %repeater12 = getelementptr inbounds %struct.__block_byref_repeater, %struct.__block_byref_repeater* %tmp18, i64 0, i32 6
; CHECK: store void ()* %tmp17, void ()** %repeater12, align 8
@@ -29,9 +29,9 @@ entry:
%tmp15 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0*, i8* }>* %block to i8*
%tmp16 = call i8* @objc_retainBlock(i8* %tmp15) nounwind
%tmp17 = bitcast i8* %tmp16 to void ()*
- %tmp18 = load %struct.__block_byref_repeater** %byref.forwarding, align 8
+ %tmp18 = load %struct.__block_byref_repeater*, %struct.__block_byref_repeater** %byref.forwarding, align 8
%repeater12 = getelementptr inbounds %struct.__block_byref_repeater, %struct.__block_byref_repeater* %tmp18, i64 0, i32 6
- %tmp13 = load void ()** %repeater12, align 8
+ %tmp13 = load void ()*, void ()** %repeater12, align 8
store void ()* %tmp17, void ()** %repeater12, align 8
ret void
}
diff --git a/llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll b/llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll
index 888895f0235..14a22f52dfa 100644
--- a/llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll
+++ b/llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll
@@ -29,11 +29,11 @@ entry:
store %struct.empty_base_t* %4, %struct.empty_base_t** %2, align 8
call void @_ZN7empty_tC1Ev(%struct.empty_base_t* %1) nounwind
%5 = call i32* @_ZN5boost15compressed_pairI7empty_tiE6secondEv(%"struct.boost::compressed_pair<empty_t,int>"* %x) ssp ; <i32*> [#uses=1]
- %6 = load i32* %5, align 4 ; <i32> [#uses=1]
+ %6 = load i32, i32* %5, align 4 ; <i32> [#uses=1]
%7 = icmp ne i32 %6, -3 ; <i1> [#uses=1]
%8 = zext i1 %7 to i8 ; <i8> [#uses=1]
store i8 %8, i8* %retval.1, align 1
- %9 = load i8* %retval.1, align 1 ; <i8> [#uses=1]
+ %9 = load i8, i8* %retval.1, align 1 ; <i8> [#uses=1]
%toBool = icmp ne i8 %9, 0 ; <i1> [#uses=1]
br i1 %toBool, label %bb, label %bb1
@@ -44,14 +44,14 @@ bb: ; preds = %entry
bb1: ; preds = %entry
store i32 0, i32* %0, align 4
- %11 = load i32* %0, align 4 ; <i32> [#uses=1]
+ %11 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
store i32 %11, i32* %retval, align 4
br label %return
; CHECK-NOT: x.second() was clobbered
; CHECK: ret i32
return: ; preds = %bb1
- %retval2 = load i32* %retval ; <i32> [#uses=1]
+ %retval2 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval2
}
@@ -71,7 +71,7 @@ entry:
%this_addr = alloca %struct.empty_base_t*, align 8 ; <%struct.empty_base_t**> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store %struct.empty_base_t* %this, %struct.empty_base_t** %this_addr
- %0 = load %struct.empty_base_t** %this_addr, align 8 ; <%struct.empty_base_t*> [#uses=1]
+ %0 = load %struct.empty_base_t*, %struct.empty_base_t** %this_addr, align 8 ; <%struct.empty_base_t*> [#uses=1]
call void @_ZN12empty_base_tC2Ev(%struct.empty_base_t* %0) nounwind
br label %return
@@ -86,15 +86,15 @@ entry:
%0 = alloca i32* ; <i32**> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %this, %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr
- %1 = load %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr, align 8 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
+ %1 = load %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*, %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr, align 8 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
%2 = getelementptr inbounds %"struct.boost::details::compressed_pair_imp<empty_t,int,1>", %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %1, i32 0, i32 0 ; <i32*> [#uses=1]
store i32* %2, i32** %0, align 8
- %3 = load i32** %0, align 8 ; <i32*> [#uses=1]
+ %3 = load i32*, i32** %0, align 8 ; <i32*> [#uses=1]
store i32* %3, i32** %retval, align 8
br label %return
return: ; preds = %entry
- %retval1 = load i32** %retval ; <i32*> [#uses=1]
+ %retval1 = load i32*, i32** %retval ; <i32*> [#uses=1]
ret i32* %retval1
}
@@ -105,16 +105,16 @@ entry:
%0 = alloca i32* ; <i32**> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store %"struct.boost::compressed_pair<empty_t,int>"* %this, %"struct.boost::compressed_pair<empty_t,int>"** %this_addr
- %1 = load %"struct.boost::compressed_pair<empty_t,int>"** %this_addr, align 8 ; <%"struct.boost::compressed_pair<empty_t,int>"*> [#uses=1]
+ %1 = load %"struct.boost::compressed_pair<empty_t,int>"*, %"struct.boost::compressed_pair<empty_t,int>"** %this_addr, align 8 ; <%"struct.boost::compressed_pair<empty_t,int>"*> [#uses=1]
%2 = getelementptr inbounds %"struct.boost::compressed_pair<empty_t,int>", %"struct.boost::compressed_pair<empty_t,int>"* %1, i32 0, i32 0 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
%3 = call i32* @_ZN5boost7details19compressed_pair_impI7empty_tiLi1EE6secondEv(%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %2) nounwind ; <i32*> [#uses=1]
store i32* %3, i32** %0, align 8
- %4 = load i32** %0, align 8 ; <i32*> [#uses=1]
+ %4 = load i32*, i32** %0, align 8 ; <i32*> [#uses=1]
store i32* %4, i32** %retval, align 8
br label %return
return: ; preds = %entry
- %retval1 = load i32** %retval ; <i32*> [#uses=1]
+ %retval1 = load i32*, i32** %retval ; <i32*> [#uses=1]
ret i32* %retval1
}
@@ -125,15 +125,15 @@ entry:
%0 = alloca %struct.empty_base_t* ; <%struct.empty_base_t**> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %this, %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr
- %1 = load %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr, align 8 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
+ %1 = load %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*, %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr, align 8 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
%2 = bitcast %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %1 to %struct.empty_base_t* ; <%struct.empty_base_t*> [#uses=1]
store %struct.empty_base_t* %2, %struct.empty_base_t** %0, align 8
- %3 = load %struct.empty_base_t** %0, align 8 ; <%struct.empty_base_t*> [#uses=1]
+ %3 = load %struct.empty_base_t*, %struct.empty_base_t** %0, align 8 ; <%struct.empty_base_t*> [#uses=1]
store %struct.empty_base_t* %3, %struct.empty_base_t** %retval, align 8
br label %return
return: ; preds = %entry
- %retval1 = load %struct.empty_base_t** %retval ; <%struct.empty_base_t*> [#uses=1]
+ %retval1 = load %struct.empty_base_t*, %struct.empty_base_t** %retval ; <%struct.empty_base_t*> [#uses=1]
ret %struct.empty_base_t* %retval1
}
@@ -144,16 +144,16 @@ entry:
%0 = alloca %struct.empty_base_t* ; <%struct.empty_base_t**> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store %"struct.boost::compressed_pair<empty_t,int>"* %this, %"struct.boost::compressed_pair<empty_t,int>"** %this_addr
- %1 = load %"struct.boost::compressed_pair<empty_t,int>"** %this_addr, align 8 ; <%"struct.boost::compressed_pair<empty_t,int>"*> [#uses=1]
+ %1 = load %"struct.boost::compressed_pair<empty_t,int>"*, %"struct.boost::compressed_pair<empty_t,int>"** %this_addr, align 8 ; <%"struct.boost::compressed_pair<empty_t,int>"*> [#uses=1]
%2 = getelementptr inbounds %"struct.boost::compressed_pair<empty_t,int>", %"struct.boost::compressed_pair<empty_t,int>"* %1, i32 0, i32 0 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
%3 = call %struct.empty_base_t* @_ZN5boost7details19compressed_pair_impI7empty_tiLi1EE5firstEv(%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %2) nounwind ; <%struct.empty_base_t*> [#uses=1]
store %struct.empty_base_t* %3, %struct.empty_base_t** %0, align 8
- %4 = load %struct.empty_base_t** %0, align 8 ; <%struct.empty_base_t*> [#uses=1]
+ %4 = load %struct.empty_base_t*, %struct.empty_base_t** %0, align 8 ; <%struct.empty_base_t*> [#uses=1]
store %struct.empty_base_t* %4, %struct.empty_base_t** %retval, align 8
br label %return
return: ; preds = %entry
- %retval1 = load %struct.empty_base_t** %retval ; <%struct.empty_base_t*> [#uses=1]
+ %retval1 = load %struct.empty_base_t*, %struct.empty_base_t** %retval ; <%struct.empty_base_t*> [#uses=1]
ret %struct.empty_base_t* %retval1
}
diff --git a/llvm/test/Transforms/PhaseOrdering/PR6627.ll b/llvm/test/Transforms/PhaseOrdering/PR6627.ll
index 0f8107971ac..f21495543b9 100644
--- a/llvm/test/Transforms/PhaseOrdering/PR6627.ll
+++ b/llvm/test/Transforms/PhaseOrdering/PR6627.ll
@@ -8,7 +8,7 @@ declare i32 @doo(...)
define void @test2(i8* %arrayidx) nounwind ssp {
entry:
%xx = bitcast i8* %arrayidx to i32*
- %x1 = load i32* %xx, align 4
+ %x1 = load i32, i32* %xx, align 4
%tmp = trunc i32 %x1 to i8
%conv = zext i8 %tmp to i32
%cmp = icmp eq i32 %conv, 127
@@ -16,21 +16,21 @@ entry:
land.lhs.true: ; preds = %entry
%arrayidx4 = getelementptr inbounds i8, i8* %arrayidx, i64 1
- %tmp5 = load i8* %arrayidx4, align 1
+ %tmp5 = load i8, i8* %arrayidx4, align 1
%conv6 = zext i8 %tmp5 to i32
%cmp7 = icmp eq i32 %conv6, 69
br i1 %cmp7, label %land.lhs.true9, label %if.end
land.lhs.true9: ; preds = %land.lhs.true
%arrayidx12 = getelementptr inbounds i8, i8* %arrayidx, i64 2
- %tmp13 = load i8* %arrayidx12, align 1
+ %tmp13 = load i8, i8* %arrayidx12, align 1
%conv14 = zext i8 %tmp13 to i32
%cmp15 = icmp eq i32 %conv14, 76
br i1 %cmp15, label %land.lhs.true17, label %if.end
land.lhs.true17: ; preds = %land.lhs.true9
%arrayidx20 = getelementptr inbounds i8, i8* %arrayidx, i64 3
- %tmp21 = load i8* %arrayidx20, align 1
+ %tmp21 = load i8, i8* %arrayidx20, align 1
%conv22 = zext i8 %tmp21 to i32
%cmp23 = icmp eq i32 %conv22, 70
br i1 %cmp23, label %if.then, label %if.end
@@ -43,7 +43,7 @@ if.end:
ret void
; CHECK-LABEL: @test2(
-; CHECK: %x1 = load i32* %xx, align 4
+; CHECK: %x1 = load i32, i32* %xx, align 4
; CHECK-NEXT: icmp eq i32 %x1, 1179403647
; CHECK-NEXT: br i1 {{.*}}, label %if.then, label %if.end
}
@@ -53,28 +53,28 @@ if.end:
; requiring widening.
define void @test2a(i8* %arrayidx) nounwind ssp {
entry:
- %x1 = load i8* %arrayidx, align 4
+ %x1 = load i8, i8* %arrayidx, align 4
%conv = zext i8 %x1 to i32
%cmp = icmp eq i32 %conv, 127
br i1 %cmp, label %land.lhs.true, label %if.end
land.lhs.true: ; preds = %entry
%arrayidx4 = getelementptr inbounds i8, i8* %arrayidx, i64 1
- %tmp5 = load i8* %arrayidx4, align 1
+ %tmp5 = load i8, i8* %arrayidx4, align 1
%conv6 = zext i8 %tmp5 to i32
%cmp7 = icmp eq i32 %conv6, 69
br i1 %cmp7, label %land.lhs.true9, label %if.end
land.lhs.true9: ; preds = %land.lhs.true
%arrayidx12 = getelementptr inbounds i8, i8* %arrayidx, i64 2
- %tmp13 = load i8* %arrayidx12, align 1
+ %tmp13 = load i8, i8* %arrayidx12, align 1
%conv14 = zext i8 %tmp13 to i32
%cmp15 = icmp eq i32 %conv14, 76
br i1 %cmp15, label %land.lhs.true17, label %if.end
land.lhs.true17: ; preds = %land.lhs.true9
%arrayidx20 = getelementptr inbounds i8, i8* %arrayidx, i64 3
- %tmp21 = load i8* %arrayidx20, align 1
+ %tmp21 = load i8, i8* %arrayidx20, align 1
%conv22 = zext i8 %tmp21 to i32
%cmp23 = icmp eq i32 %conv22, 70
br i1 %cmp23, label %if.then, label %if.end
@@ -87,7 +87,7 @@ if.end:
ret void
; CHECK-LABEL: @test2a(
-; CHECK: %x1 = load i32* {{.*}}, align 4
+; CHECK: %x1 = load i32, i32* {{.*}}, align 4
; CHECK-NEXT: icmp eq i32 %x1, 1179403647
; CHECK-NEXT: br i1 {{.*}}, label %if.then, label %if.end
}
diff --git a/llvm/test/Transforms/PhaseOrdering/basic.ll b/llvm/test/Transforms/PhaseOrdering/basic.ll
index a8d4bb8db76..ef57e55e15e 100644
--- a/llvm/test/Transforms/PhaseOrdering/basic.ll
+++ b/llvm/test/Transforms/PhaseOrdering/basic.ll
@@ -13,9 +13,9 @@ define void @test1() nounwind ssp {
%i = alloca i8*, align 8
%call = call i8* @malloc(i64 1)
store i8* %call, i8** %i, align 8
- %tmp = load i8** %i, align 8
+ %tmp = load i8*, i8** %i, align 8
store i8 1, i8* %tmp
- %tmp1 = load i8** %i, align 8
+ %tmp1 = load i8*, i8** %i, align 8
call void @free(i8* %tmp1)
ret void
@@ -37,9 +37,9 @@ entry:
%arrayidx1 = getelementptr inbounds i32, i32* %p, i64 1
store i32 %add, i32* %arrayidx1, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %p, i64 1
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %p, i64 0
- %1 = load i32* %arrayidx3, align 4
+ %1 = load i32, i32* %arrayidx3, align 4
%mul = mul i32 2, %1
%sub = sub i32 %0, %mul
ret i32 %sub
diff --git a/llvm/test/Transforms/PhaseOrdering/gdce.ll b/llvm/test/Transforms/PhaseOrdering/gdce.ll
index 95f06757a78..56d5cbc2f34 100644
--- a/llvm/test/Transforms/PhaseOrdering/gdce.ll
+++ b/llvm/test/Transforms/PhaseOrdering/gdce.ll
@@ -27,7 +27,7 @@ entry:
store i32 0, i32* %retval
store i32 1, i32* %cleanup.dest.slot
call void @_ZN4BaseD1Ev(%class.Base* %b)
- %0 = load i32* %retval
+ %0 = load i32, i32* %retval
ret i32 %0
}
@@ -35,7 +35,7 @@ define linkonce_odr void @_ZN4BaseC1Ev(%class.Base* %this) unnamed_addr uwtable
entry:
%this.addr = alloca %class.Base*, align 8
store %class.Base* %this, %class.Base** %this.addr, align 8
- %this1 = load %class.Base** %this.addr
+ %this1 = load %class.Base*, %class.Base** %this.addr
call void @_ZN4BaseC2Ev(%class.Base* %this1)
ret void
}
@@ -44,7 +44,7 @@ define linkonce_odr void @_ZN4BaseD1Ev(%class.Base* %this) unnamed_addr uwtable
entry:
%this.addr = alloca %class.Base*, align 8
store %class.Base* %this, %class.Base** %this.addr, align 8
- %this1 = load %class.Base** %this.addr
+ %this1 = load %class.Base*, %class.Base** %this.addr
call void @_ZN4BaseD2Ev(%class.Base* %this1)
ret void
}
@@ -53,7 +53,7 @@ define linkonce_odr void @_ZN4BaseD2Ev(%class.Base* %this) unnamed_addr nounwind
entry:
%this.addr = alloca %class.Base*, align 8
store %class.Base* %this, %class.Base** %this.addr, align 8
- %this1 = load %class.Base** %this.addr
+ %this1 = load %class.Base*, %class.Base** %this.addr
ret void
}
@@ -61,7 +61,7 @@ define linkonce_odr void @_ZN4BaseC2Ev(%class.Base* %this) unnamed_addr nounwind
entry:
%this.addr = alloca %class.Base*, align 8
store %class.Base* %this, %class.Base** %this.addr, align 8
- %this1 = load %class.Base** %this.addr
+ %this1 = load %class.Base*, %class.Base** %this.addr
%0 = bitcast %class.Base* %this1 to i8***
store i8** getelementptr inbounds ([4 x i8*]* @_ZTV4Base, i64 0, i64 2), i8*** %0
ret void
@@ -73,7 +73,7 @@ entry:
%exn.slot = alloca i8*
%ehselector.slot = alloca i32
store %class.Base* %this, %class.Base** %this.addr, align 8
- %this1 = load %class.Base** %this.addr
+ %this1 = load %class.Base*, %class.Base** %this.addr
invoke void @_ZN4BaseD1Ev(%class.Base* %this1)
to label %invoke.cont unwind label %lpad
@@ -94,8 +94,8 @@ lpad: ; preds = %entry
br label %eh.resume
eh.resume: ; preds = %lpad
- %exn = load i8** %exn.slot
- %sel = load i32* %ehselector.slot
+ %exn = load i8*, i8** %exn.slot
+ %sel = load i32, i32* %ehselector.slot
%lpad.val = insertvalue { i8*, i32 } undef, i8* %exn, 0
%lpad.val2 = insertvalue { i8*, i32 } %lpad.val, i32 %sel, 1
resume { i8*, i32 } %lpad.val2
diff --git a/llvm/test/Transforms/Reassociate/2011-01-26-UseAfterFree.ll b/llvm/test/Transforms/Reassociate/2011-01-26-UseAfterFree.ll
index c4185d2fb37..1c8f0d2ea21 100644
--- a/llvm/test/Transforms/Reassociate/2011-01-26-UseAfterFree.ll
+++ b/llvm/test/Transforms/Reassociate/2011-01-26-UseAfterFree.ll
@@ -5,7 +5,7 @@ target triple = "i386-gnu-linux"
define void @exp_averages_intraday__deviation() {
entry:
- %0 = load i32* undef, align 4
+ %0 = load i32, i32* undef, align 4
%1 = shl i32 %0, 2
%2 = add nsw i32 undef, %1
%3 = add nsw i32 %2, undef
diff --git a/llvm/test/Transforms/Reassociate/basictest.ll b/llvm/test/Transforms/Reassociate/basictest.ll
index 0194ce2e302..015d3b0bee9 100644
--- a/llvm/test/Transforms/Reassociate/basictest.ll
+++ b/llvm/test/Transforms/Reassociate/basictest.ll
@@ -28,9 +28,9 @@ define i32 @test2(i32 %reg109, i32 %reg1111) {
@f = external global i32
define void @test3() {
- %A = load i32* @a
- %B = load i32* @b
- %C = load i32* @c
+ %A = load i32, i32* @a
+ %B = load i32, i32* @b
+ %C = load i32, i32* @c
%t1 = add i32 %A, %B
%t2 = add i32 %t1, %C
%t3 = add i32 %C, %A
@@ -49,9 +49,9 @@ define void @test3() {
}
define void @test4() {
- %A = load i32* @a
- %B = load i32* @b
- %C = load i32* @c
+ %A = load i32, i32* @a
+ %B = load i32, i32* @b
+ %C = load i32, i32* @c
%t1 = add i32 %A, %B
%t2 = add i32 %t1, %C
%t3 = add i32 %C, %A
@@ -70,9 +70,9 @@ define void @test4() {
}
define void @test5() {
- %A = load i32* @a
- %B = load i32* @b
- %C = load i32* @c
+ %A = load i32, i32* @a
+ %B = load i32, i32* @b
+ %C = load i32, i32* @c
%t1 = add i32 %B, %A
%t2 = add i32 %t1, %C
%t3 = add i32 %C, %A
@@ -91,11 +91,11 @@ define void @test5() {
}
define i32 @test6() {
- %tmp.0 = load i32* @a
- %tmp.1 = load i32* @b
+ %tmp.0 = load i32, i32* @a
+ %tmp.1 = load i32, i32* @b
; (a+b)
%tmp.2 = add i32 %tmp.0, %tmp.1
- %tmp.4 = load i32* @c
+ %tmp.4 = load i32, i32* @c
; (a+b)+c
%tmp.5 = add i32 %tmp.2, %tmp.4
; (a+c)
diff --git a/llvm/test/Transforms/Reassociate/crash.ll b/llvm/test/Transforms/Reassociate/crash.ll
index 770f97371d7..f8774ea509a 100644
--- a/llvm/test/Transforms/Reassociate/crash.ll
+++ b/llvm/test/Transforms/Reassociate/crash.ll
@@ -17,7 +17,7 @@ for.cond: ; preds = %for.body, %entry
; PR5981
define i32 @test2() nounwind ssp {
entry:
- %0 = load i32* undef, align 4
+ %0 = load i32, i32* undef, align 4
%1 = mul nsw i32 undef, %0
%2 = mul nsw i32 undef, %0
%3 = add nsw i32 undef, %1
@@ -50,7 +50,7 @@ _:
br label %_33
_33: ; preds = %_33, %_
- %tmp348 = load i8* %arg, align 1
+ %tmp348 = load i8, i8* %arg, align 1
%tmp349 = lshr i8 %tmp348, 7
%tmp350 = or i8 %tmp349, 42
%tmp351 = add i8 %tmp350, -42
@@ -123,7 +123,7 @@ for.cond: ; preds = %for.cond, %entry
; PR12963
@a = external global i8
define i8 @f0(i8 %x) {
- %t0 = load i8* @a
+ %t0 = load i8, i8* @a
%t1 = mul i8 %x, %x
%t2 = mul i8 %t1, %t1
%t3 = mul i8 %t2, %t2
diff --git a/llvm/test/Transforms/Reassociate/fast-basictest.ll b/llvm/test/Transforms/Reassociate/fast-basictest.ll
index 67b07f4d269..64b74e3e8c1 100644
--- a/llvm/test/Transforms/Reassociate/fast-basictest.ll
+++ b/llvm/test/Transforms/Reassociate/fast-basictest.ll
@@ -48,9 +48,9 @@ define void @test4() {
; CHECK-NOT: fadd fast float
; CHECK: ret void
- %A = load float* @fa
- %B = load float* @fb
- %C = load float* @fc
+ %A = load float, float* @fa
+ %B = load float, float* @fb
+ %C = load float, float* @fc
%t1 = fadd fast float %A, %B
%t2 = fadd fast float %t1, %C
%t3 = fadd fast float %C, %A
@@ -69,9 +69,9 @@ define void @test5() {
; CHECK-NOT: fadd
; CHECK: ret void
- %A = load float* @fa
- %B = load float* @fb
- %C = load float* @fc
+ %A = load float, float* @fa
+ %B = load float, float* @fb
+ %C = load float, float* @fc
%t1 = fadd fast float %A, %B
%t2 = fadd fast float %t1, %C
%t3 = fadd fast float %C, %A
@@ -90,9 +90,9 @@ define void @test6() {
; CHECK-NOT: fadd
; CHECK: ret void
- %A = load float* @fa
- %B = load float* @fb
- %C = load float* @fc
+ %A = load float, float* @fa
+ %B = load float, float* @fb
+ %C = load float, float* @fc
%t1 = fadd fast float %B, %A
%t2 = fadd fast float %t1, %C
%t3 = fadd fast float %C, %A
diff --git a/llvm/test/Transforms/Reassociate/pr12245.ll b/llvm/test/Transforms/Reassociate/pr12245.ll
index e9b5355cceb..0e7152e2a17 100644
--- a/llvm/test/Transforms/Reassociate/pr12245.ll
+++ b/llvm/test/Transforms/Reassociate/pr12245.ll
@@ -6,34 +6,34 @@
define i32 @fn2() nounwind uwtable ssp {
entry:
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
%dec = add nsw i32 %0, -1
store i32 %dec, i32* @a, align 4
- %1 = load i32* @d, align 4
+ %1 = load i32, i32* @d, align 4
%sub = sub nsw i32 %dec, %1
store i32 %sub, i32* @d, align 4
- %2 = load i32* @a, align 4
+ %2 = load i32, i32* @a, align 4
%dec1 = add nsw i32 %2, -1
store i32 %dec1, i32* @a, align 4
- %3 = load i32* @d, align 4
+ %3 = load i32, i32* @d, align 4
%sub2 = sub nsw i32 %dec1, %3
store i32 %sub2, i32* @d, align 4
- %4 = load i32* @a, align 4
+ %4 = load i32, i32* @a, align 4
%dec3 = add nsw i32 %4, -1
store i32 %dec3, i32* @a, align 4
- %5 = load i32* @d, align 4
+ %5 = load i32, i32* @d, align 4
%sub4 = sub nsw i32 %dec3, %5
store i32 %sub4, i32* @d, align 4
- %6 = load i32* @a, align 4
+ %6 = load i32, i32* @a, align 4
%dec5 = add nsw i32 %6, -1
store i32 %dec5, i32* @a, align 4
- %7 = load i32* @d, align 4
+ %7 = load i32, i32* @d, align 4
%sub6 = sub nsw i32 %dec5, %7
store i32 %sub6, i32* @d, align 4
- %8 = load i32* @a, align 4
+ %8 = load i32, i32* @a, align 4
%dec7 = add nsw i32 %8, -1
store i32 %dec7, i32* @a, align 4
- %9 = load i32* @d, align 4
+ %9 = load i32, i32* @d, align 4
%sub8 = sub nsw i32 %dec7, %9
store i32 %sub8, i32* @d, align 4
ret i32 0
diff --git a/llvm/test/Transforms/Reassociate/pr21205.ll b/llvm/test/Transforms/Reassociate/pr21205.ll
index fcc7150478e..0c6fd3ab569 100644
--- a/llvm/test/Transforms/Reassociate/pr21205.ll
+++ b/llvm/test/Transforms/Reassociate/pr21205.ll
@@ -11,7 +11,7 @@
define i32 @test1() {
entry:
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
%conv = sitofp i32 %0 to float
%sub = fsub fast float %conv, undef
%sub1 = fadd fast float %sub, -1.000000e+00
diff --git a/llvm/test/Transforms/RewriteStatepointsForGC/basics.ll b/llvm/test/Transforms/RewriteStatepointsForGC/basics.ll
index 252d5f1a71e..c1a1e4eba55 100644
--- a/llvm/test/Transforms/RewriteStatepointsForGC/basics.ll
+++ b/llvm/test/Transforms/RewriteStatepointsForGC/basics.ll
@@ -35,14 +35,14 @@ define i8 @test3(i8 addrspace(1)* %obj) gc "statepoint-example" {
; CHECK-NEXT: gc.statepoint
; CHECK-NEXT: %derived.relocated = call coldcc i8 addrspace(1)*
; CHECK-NEXT: %obj.relocated = call coldcc i8 addrspace(1)*
-; CHECK-NEXT: load i8 addrspace(1)* %derived.relocated
-; CHECK-NEXT: load i8 addrspace(1)* %obj.relocated
+; CHECK-NEXT: load i8, i8 addrspace(1)* %derived.relocated
+; CHECK-NEXT: load i8, i8 addrspace(1)* %obj.relocated
entry:
%derived = getelementptr i8, i8 addrspace(1)* %obj, i64 10
call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @foo, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
- %a = load i8 addrspace(1)* %derived
- %b = load i8 addrspace(1)* %obj
+ %a = load i8, i8 addrspace(1)* %derived
+ %b = load i8, i8 addrspace(1)* %obj
%c = sub i8 %a, %b
ret i8 %c
}
diff --git a/llvm/test/Transforms/SCCP/2003-06-24-OverdefinedPHIValue.ll b/llvm/test/Transforms/SCCP/2003-06-24-OverdefinedPHIValue.ll
index 8cdc0778521..e5a1d679fc2 100644
--- a/llvm/test/Transforms/SCCP/2003-06-24-OverdefinedPHIValue.ll
+++ b/llvm/test/Transforms/SCCP/2003-06-24-OverdefinedPHIValue.ll
@@ -3,7 +3,7 @@
define void @cprop_test11(i32* %data.1) {
entry:
- %tmp.1 = load i32* %data.1 ; <i32> [#uses=3]
+ %tmp.1 = load i32, i32* %data.1 ; <i32> [#uses=3]
%tmp.41 = icmp sgt i32 %tmp.1, 1 ; <i1> [#uses=1]
br i1 %tmp.41, label %no_exit, label %loopexit
no_exit: ; preds = %endif, %then, %entry
diff --git a/llvm/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll b/llvm/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll
index a353e98a7f4..9724e6ebc24 100644
--- a/llvm/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll
+++ b/llvm/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll
@@ -37,7 +37,7 @@ cond_next50: ; preds = %entry
%tmp52 = icmp sgt i32 %D, 0 ; <i1> [#uses=1]
br i1 %tmp52, label %cond_true53, label %cond_next71
cond_true53: ; preds = %cond_next50
- %tmp54 = load i32* @JUMP ; <i32> [#uses=1]
+ %tmp54 = load i32, i32* @JUMP ; <i32> [#uses=1]
%tmp55 = icmp eq i32 %tmp54, 1 ; <i1> [#uses=1]
br i1 %tmp55, label %cond_true56, label %cond_next63
cond_true56: ; preds = %cond_true53
@@ -68,8 +68,8 @@ bb217: ; preds = %cond_true260
cond_next252: ; preds = %cond_next208, %entry
%D.0.0 = phi i32 [ 0, %entry ], [ %tmp229, %cond_next208 ] ; <i32> [#uses=1]
%tmp254 = getelementptr i8*, i8** null, i32 1 ; <i8**> [#uses=1]
- %tmp256 = load i8** %tmp254 ; <i8*> [#uses=1]
- %tmp258 = load i8* %tmp256 ; <i8> [#uses=1]
+ %tmp256 = load i8*, i8** %tmp254 ; <i8*> [#uses=1]
+ %tmp258 = load i8, i8* %tmp256 ; <i8> [#uses=1]
%tmp259 = icmp eq i8 %tmp258, 45 ; <i1> [#uses=1]
br i1 %tmp259, label %cond_true260, label %bb263
cond_true260: ; preds = %cond_next252
diff --git a/llvm/test/Transforms/SCCP/2006-12-04-PackedType.ll b/llvm/test/Transforms/SCCP/2006-12-04-PackedType.ll
index 34a5fb22cd2..05cf5fffd2b 100644
--- a/llvm/test/Transforms/SCCP/2006-12-04-PackedType.ll
+++ b/llvm/test/Transforms/SCCP/2006-12-04-PackedType.ll
@@ -104,13 +104,13 @@ target triple = "powerpc-apple-darwin8"
define void @gldLLVMVecPointRender(%struct.GLDContextRec* %ctx) {
entry:
%tmp.uip = getelementptr %struct.GLDContextRec, %struct.GLDContextRec* %ctx, i32 0, i32 22 ; <i32*> [#uses=1]
- %tmp = load i32* %tmp.uip ; <i32> [#uses=3]
+ %tmp = load i32, i32* %tmp.uip ; <i32> [#uses=3]
%tmp91 = lshr i32 %tmp, 5 ; <i32> [#uses=1]
%tmp92 = trunc i32 %tmp91 to i1 ; <i1> [#uses=1]
br i1 %tmp92, label %cond_true93, label %cond_next116
cond_true93: ; preds = %entry
%tmp.upgrd.1 = getelementptr %struct.GLDContextRec, %struct.GLDContextRec* %ctx, i32 0, i32 31, i32 14 ; <i32*> [#uses=1]
- %tmp95 = load i32* %tmp.upgrd.1 ; <i32> [#uses=1]
+ %tmp95 = load i32, i32* %tmp.upgrd.1 ; <i32> [#uses=1]
%tmp95.upgrd.2 = sitofp i32 %tmp95 to float ; <float> [#uses=1]
%tmp108 = fmul float undef, %tmp95.upgrd.2 ; <float> [#uses=1]
br label %cond_next116
diff --git a/llvm/test/Transforms/SCCP/apint-array.ll b/llvm/test/Transforms/SCCP/apint-array.ll
index 2cb420a2c3f..eff6cc997e3 100644
--- a/llvm/test/Transforms/SCCP/apint-array.ll
+++ b/llvm/test/Transforms/SCCP/apint-array.ll
@@ -8,14 +8,14 @@ define i101 @array()
Head:
%A = getelementptr [6 x i101], [6 x i101]* @Y, i32 0, i32 1
- %B = load i101* %A
+ %B = load i101, i101* %A
%C = icmp sge i101 %B, 1
br i1 %C, label %True, label %False
True:
%D = and i101 %B, 1
%E = trunc i101 %D to i32
%F = getelementptr [6 x i101], [6 x i101]* @Y, i32 0, i32 %E
- %G = load i101* %F
+ %G = load i101, i101* %F
br label %False
False:
%H = phi i101 [%G, %True], [-1, %Head]
diff --git a/llvm/test/Transforms/SCCP/apint-bigarray.ll b/llvm/test/Transforms/SCCP/apint-bigarray.ll
index 082bd90c21b..e0231997f1b 100644
--- a/llvm/test/Transforms/SCCP/apint-bigarray.ll
+++ b/llvm/test/Transforms/SCCP/apint-bigarray.ll
@@ -11,13 +11,13 @@ define internal i10000* @test(i10000 %Arg) {
define i10000 @caller()
{
%Y = call i10000* @test(i10000 -1)
- %Z = load i10000* %Y
+ %Z = load i10000, i10000* %Y
ret i10000 %Z
}
define i10000 @caller2()
{
%Y = call i10000* @test(i10000 1)
- %Z = load i10000* %Y
+ %Z = load i10000, i10000* %Y
ret i10000 %Z
}
diff --git a/llvm/test/Transforms/SCCP/apint-bigint2.ll b/llvm/test/Transforms/SCCP/apint-bigint2.ll
index 639e07c3c40..f28b966aceb 100644
--- a/llvm/test/Transforms/SCCP/apint-bigint2.ll
+++ b/llvm/test/Transforms/SCCP/apint-bigint2.ll
@@ -7,12 +7,12 @@ define i101 @array()
{
Head:
%A = getelementptr [6 x i101], [6 x i101]* @Y, i32 0, i32 1
- %B = load i101* %A
+ %B = load i101, i101* %A
%D = and i101 %B, 1
%DD = or i101 %D, 1
%E = trunc i101 %DD to i32
%F = getelementptr [6 x i101], [6 x i101]* @Y, i32 0, i32 %E
- %G = load i101* %F
+ %G = load i101, i101* %F
ret i101 %G
}
diff --git a/llvm/test/Transforms/SCCP/apint-ipsccp3.ll b/llvm/test/Transforms/SCCP/apint-ipsccp3.ll
index 68987aee249..c99ae5820b2 100644
--- a/llvm/test/Transforms/SCCP/apint-ipsccp3.ll
+++ b/llvm/test/Transforms/SCCP/apint-ipsccp3.ll
@@ -5,13 +5,13 @@
define void @foo() {
- %X = load i66* @G
+ %X = load i66, i66* @G
store i66 %X, i66* @G
ret void
}
define i66 @bar() {
- %V = load i66* @G
+ %V = load i66, i66* @G
%C = icmp eq i66 %V, 17
br i1 %C, label %T, label %F
T:
diff --git a/llvm/test/Transforms/SCCP/apint-ipsccp4.ll b/llvm/test/Transforms/SCCP/apint-ipsccp4.ll
index 33c5aadb6ac..be06d03f391 100644
--- a/llvm/test/Transforms/SCCP/apint-ipsccp4.ll
+++ b/llvm/test/Transforms/SCCP/apint-ipsccp4.ll
@@ -10,13 +10,13 @@
define internal float @test2() {
%A = getelementptr [2 x { i212, float}], [2 x { i212, float}]* @Y, i32 0, i32 1, i32 1
- %B = load float* %A
+ %B = load float, float* %A
ret float %B
}
define internal float @test3() {
%A = getelementptr [2 x { i212, float}], [2 x { i212, float}]* @Y, i32 0, i32 0, i32 1
- %B = load float* %A
+ %B = load float, float* %A
ret float %B
}
diff --git a/llvm/test/Transforms/SCCP/apint-load.ll b/llvm/test/Transforms/SCCP/apint-load.ll
index 407237b6dd0..17506fc043c 100644
--- a/llvm/test/Transforms/SCCP/apint-load.ll
+++ b/llvm/test/Transforms/SCCP/apint-load.ll
@@ -7,19 +7,19 @@
@Y = constant [2 x { i212, float }] [ { i212, float } { i212 12, float 1.0 },
{ i212, float } { i212 37, float 0x3FF3B2FEC0000000 } ]
define i212 @test1() {
- %B = load i212* @X
+ %B = load i212, i212* @X
ret i212 %B
}
define internal float @test2() {
%A = getelementptr [2 x { i212, float}], [2 x { i212, float}]* @Y, i32 0, i32 1, i32 1
- %B = load float* %A
+ %B = load float, float* %A
ret float %B
}
define internal i212 @test3() {
%A = getelementptr [2 x { i212, float}], [2 x { i212, float}]* @Y, i32 0, i32 0, i32 0
- %B = load i212* %A
+ %B = load i212, i212* %A
ret i212 %B
}
diff --git a/llvm/test/Transforms/SCCP/atomic-load-store.ll b/llvm/test/Transforms/SCCP/atomic-load-store.ll
index 53e4c10a720..45b5d7c80fe 100644
--- a/llvm/test/Transforms/SCCP/atomic-load-store.ll
+++ b/llvm/test/Transforms/SCCP/atomic-load-store.ll
@@ -6,7 +6,7 @@
@C = internal constant i32 222
define i32 @test1() {
- %V = load atomic i32* @G seq_cst, align 4
+ %V = load atomic i32, i32* @G seq_cst, align 4
%C = icmp eq i32 %V, 17
br i1 %C, label %T, label %F
T:
@@ -21,7 +21,7 @@ F:
; CHECK: ret i32 17
define i32 @test2() {
- %V = load atomic i32* @C seq_cst, align 4
+ %V = load atomic i32, i32* @C seq_cst, align 4
ret i32 %V
}
diff --git a/llvm/test/Transforms/SCCP/ipsccp-basic.ll b/llvm/test/Transforms/SCCP/ipsccp-basic.ll
index 107b7af2c1d..c74063f3300 100644
--- a/llvm/test/Transforms/SCCP/ipsccp-basic.ll
+++ b/llvm/test/Transforms/SCCP/ipsccp-basic.ll
@@ -50,7 +50,7 @@ define i32 @test2b() {
@G = internal global i32 undef
define void @test3a() {
- %X = load i32* @G
+ %X = load i32, i32* @G
store i32 %X, i32* @G
ret void
}
@@ -59,7 +59,7 @@ define void @test3a() {
define i32 @test3b() {
- %V = load i32* @G
+ %V = load i32, i32* @G
%C = icmp eq i32 %V, 17
br i1 %C, label %T, label %F
T:
@@ -203,7 +203,7 @@ define void @test8b(i32* %P) {
define void @test9() {
entry:
%local_foo = alloca { }
- load { }* @test9g
+ load { }, { }* @test9g
store { } %0, { }* %local_foo
ret void
}
diff --git a/llvm/test/Transforms/SCCP/loadtest.ll b/llvm/test/Transforms/SCCP/loadtest.ll
index 3a8ffe61b20..b88b44b7604 100644
--- a/llvm/test/Transforms/SCCP/loadtest.ll
+++ b/llvm/test/Transforms/SCCP/loadtest.ll
@@ -10,25 +10,25 @@
@Y = constant [2 x { i32, float }] [ { i32, float } { i32 12, float 1.000000e+00 }, { i32, float } { i32 37, float 0x3FF3B2FEC0000000 } ] ; <[2 x { i32, float }]*> [#uses=2]
define i32 @test1() {
- %B = load i32* @X ; <i32> [#uses=1]
+ %B = load i32, i32* @X ; <i32> [#uses=1]
ret i32 %B
}
define float @test2() {
%A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 1, i32 1 ; <float*> [#uses=1]
- %B = load float* %A ; <float> [#uses=1]
+ %B = load float, float* %A ; <float> [#uses=1]
ret float %B
}
define i32 @test3() {
%A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 0, i32 0 ; <i32*> [#uses=1]
- %B = load i32* %A
+ %B = load i32, i32* %A
ret i32 %B
}
define i8 @test4() {
%A = bitcast i32* @X to i8*
- %B = load i8* %A
+ %B = load i8, i8* %A
ret i8 %B
}
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/commute.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/commute.ll
index cee51fd74de..1cff73d9f69 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/commute.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/commute.ll
@@ -9,7 +9,7 @@ define void @test1(%structA* nocapture readonly %J, i32 %xmin, i32 %ymin) {
; CHECK: %arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
; CHECK: %arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
; CHECK: %3 = bitcast float* %arrayidx4 to <2 x float>*
-; CHECK: %4 = load <2 x float>* %3, align 4
+; CHECK: %4 = load <2 x float>, <2 x float>* %3, align 4
; CHECK: %5 = fsub fast <2 x float> %2, %4
; CHECK: %6 = fmul fast <2 x float> %5, %5
; CHECK: %7 = extractelement <2 x float> %6, i32 0
@@ -24,10 +24,10 @@ for.body3.lr.ph:
%conv5 = sitofp i32 %ymin to float
%conv = sitofp i32 %xmin to float
%arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
- %0 = load float* %arrayidx4, align 4
+ %0 = load float, float* %arrayidx4, align 4
%sub = fsub fast float %conv, %0
%arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
- %1 = load float* %arrayidx9, align 4
+ %1 = load float, float* %arrayidx9, align 4
%sub10 = fsub fast float %conv5, %1
%mul11 = fmul fast float %sub, %sub
%mul12 = fmul fast float %sub10, %sub10
@@ -44,7 +44,7 @@ define void @test2(%structA* nocapture readonly %J, i32 %xmin, i32 %ymin) {
; CHECK: %arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
; CHECK: %arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
; CHECK: %3 = bitcast float* %arrayidx4 to <2 x float>*
-; CHECK: %4 = load <2 x float>* %3, align 4
+; CHECK: %4 = load <2 x float>, <2 x float>* %3, align 4
; CHECK: %5 = fsub fast <2 x float> %2, %4
; CHECK: %6 = fmul fast <2 x float> %5, %5
; CHECK: %7 = extractelement <2 x float> %6, i32 0
@@ -59,10 +59,10 @@ for.body3.lr.ph:
%conv5 = sitofp i32 %ymin to float
%conv = sitofp i32 %xmin to float
%arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
- %0 = load float* %arrayidx4, align 4
+ %0 = load float, float* %arrayidx4, align 4
%sub = fsub fast float %conv, %0
%arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
- %1 = load float* %arrayidx9, align 4
+ %1 = load float, float* %arrayidx9, align 4
%sub10 = fsub fast float %conv5, %1
%mul11 = fmul fast float %sub, %sub
%mul12 = fmul fast float %sub10, %sub10
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/load-store-q.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/load-store-q.ll
index d3afc056d87..6ff1118d3cc 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/load-store-q.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/load-store-q.ll
@@ -15,8 +15,8 @@ target triple = "arm64-apple-ios5.0.0"
define void @f(double* %p, double* %q) {
%addr2 = getelementptr double, double* %q, i32 1
%addr = getelementptr double, double* %p, i32 1
- %x = load double* %p
- %y = load double* %addr
+ %x = load double, double* %p
+ %y = load double, double* %addr
call void @g()
store double %x, double* %q
store double %y, double* %addr2
@@ -40,7 +40,7 @@ loop:
store double %p1, double* %q
store double %p2, double* %addr2
- %x = load double* %p
- %y = load double* %addr
+ %x = load double, double* %p
+ %y = load double, double* %addr
br label %loop
}
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll
index dddcf3c94e7..72c70823e69 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll
@@ -9,31 +9,31 @@ target triple = "aarch64--linux-gnu"
define void @test1(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* noalias nocapture readonly %c) {
entry:
- %0 = load i32* %b, align 4
- %1 = load i32* %c, align 4
+ %0 = load i32, i32* %b, align 4
+ %1 = load i32, i32* %c, align 4
%add = add nsw i32 %1, %0
%div = sdiv i32 %add, 2
store i32 %div, i32* %a, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i64 1
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %c, i64 1
- %3 = load i32* %arrayidx4, align 4
+ %3 = load i32, i32* %arrayidx4, align 4
%add5 = add nsw i32 %3, %2
%div6 = sdiv i32 %add5, 2
%arrayidx7 = getelementptr inbounds i32, i32* %a, i64 1
store i32 %div6, i32* %arrayidx7, align 4
%arrayidx8 = getelementptr inbounds i32, i32* %b, i64 2
- %4 = load i32* %arrayidx8, align 4
+ %4 = load i32, i32* %arrayidx8, align 4
%arrayidx9 = getelementptr inbounds i32, i32* %c, i64 2
- %5 = load i32* %arrayidx9, align 4
+ %5 = load i32, i32* %arrayidx9, align 4
%add10 = add nsw i32 %5, %4
%div11 = sdiv i32 %add10, 2
%arrayidx12 = getelementptr inbounds i32, i32* %a, i64 2
store i32 %div11, i32* %arrayidx12, align 4
%arrayidx13 = getelementptr inbounds i32, i32* %b, i64 3
- %6 = load i32* %arrayidx13, align 4
+ %6 = load i32, i32* %arrayidx13, align 4
%arrayidx14 = getelementptr inbounds i32, i32* %c, i64 3
- %7 = load i32* %arrayidx14, align 4
+ %7 = load i32, i32* %arrayidx14, align 4
%add15 = add nsw i32 %7, %6
%div16 = sdiv i32 %add15, 2
%arrayidx17 = getelementptr inbounds i32, i32* %a, i64 3
diff --git a/llvm/test/Transforms/SLPVectorizer/ARM/memory.ll b/llvm/test/Transforms/SLPVectorizer/ARM/memory.ll
index b8e9a038386..57d7cceac6b 100644
--- a/llvm/test/Transforms/SLPVectorizer/ARM/memory.ll
+++ b/llvm/test/Transforms/SLPVectorizer/ARM/memory.ll
@@ -10,10 +10,10 @@ target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-
; CHECK-NOT: store <2 x double>
define void @expensive_double_store(double* noalias %dst, double* noalias %src, i64 %count) {
entry:
- %0 = load double* %src, align 8
+ %0 = load double, double* %src, align 8
store double %0, double* %dst, align 8
%arrayidx2 = getelementptr inbounds double, double* %src, i64 1
- %1 = load double* %arrayidx2, align 8
+ %1 = load double, double* %arrayidx2, align 8
%arrayidx3 = getelementptr inbounds double, double* %dst, i64 1
store double %1, double* %arrayidx3, align 8
ret void
diff --git a/llvm/test/Transforms/SLPVectorizer/R600/simplebb.ll b/llvm/test/Transforms/SLPVectorizer/R600/simplebb.ll
index f88b86de539..9ed86f88147 100644
--- a/llvm/test/Transforms/SLPVectorizer/R600/simplebb.ll
+++ b/llvm/test/Transforms/SLPVectorizer/R600/simplebb.ll
@@ -6,17 +6,17 @@ target datalayout = "e-p:32:32:32-p3:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-
; Simple 3-pair chain with loads and stores
define void @test1_as_3_3_3(double addrspace(3)* %a, double addrspace(3)* %b, double addrspace(3)* %c) {
; CHECK-LABEL: @test1_as_3_3_3(
-; CHECK: load <2 x double> addrspace(3)*
-; CHECK: load <2 x double> addrspace(3)*
+; CHECK: load <2 x double>, <2 x double> addrspace(3)*
+; CHECK: load <2 x double>, <2 x double> addrspace(3)*
; CHECK: store <2 x double> %{{.*}}, <2 x double> addrspace(3)* %
; CHECK: ret
- %i0 = load double addrspace(3)* %a, align 8
- %i1 = load double addrspace(3)* %b, align 8
+ %i0 = load double, double addrspace(3)* %a, align 8
+ %i1 = load double, double addrspace(3)* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double addrspace(3)* %a, i64 1
- %i3 = load double addrspace(3)* %arrayidx3, align 8
+ %i3 = load double, double addrspace(3)* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double addrspace(3)* %b, i64 1
- %i4 = load double addrspace(3)* %arrayidx4, align 8
+ %i4 = load double, double addrspace(3)* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double addrspace(3)* %c, align 8
%arrayidx5 = getelementptr inbounds double, double addrspace(3)* %c, i64 1
@@ -26,17 +26,17 @@ define void @test1_as_3_3_3(double addrspace(3)* %a, double addrspace(3)* %b, do
define void @test1_as_3_0_0(double addrspace(3)* %a, double* %b, double* %c) {
; CHECK-LABEL: @test1_as_3_0_0(
-; CHECK: load <2 x double> addrspace(3)*
-; CHECK: load <2 x double>*
+; CHECK: load <2 x double>, <2 x double> addrspace(3)*
+; CHECK: load <2 x double>, <2 x double>*
; CHECK: store <2 x double> %{{.*}}, <2 x double>* %
; CHECK: ret
- %i0 = load double addrspace(3)* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double addrspace(3)* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double addrspace(3)* %a, i64 1
- %i3 = load double addrspace(3)* %arrayidx3, align 8
+ %i3 = load double, double addrspace(3)* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
@@ -46,17 +46,17 @@ define void @test1_as_3_0_0(double addrspace(3)* %a, double* %b, double* %c) {
define void @test1_as_0_0_3(double* %a, double* %b, double addrspace(3)* %c) {
; CHECK-LABEL: @test1_as_0_0_3(
-; CHECK: load <2 x double>*
-; CHECK: load <2 x double>*
+; CHECK: load <2 x double>, <2 x double>*
+; CHECK: load <2 x double>, <2 x double>*
; CHECK: store <2 x double> %{{.*}}, <2 x double> addrspace(3)* %
; CHECK: ret
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double addrspace(3)* %c, align 8
%arrayidx5 = getelementptr inbounds double, double addrspace(3)* %c, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll b/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll
index 8c2777a71f7..bc0beec1bab 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll
@@ -21,35 +21,35 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
define void @addsub() #0 {
entry:
- %0 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 0), align 4
- %1 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 0), align 4
+ %0 = load i32, i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 0), align 4
+ %1 = load i32, i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 0), align 4
%add = add nsw i32 %0, %1
- %2 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 0), align 4
- %3 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 0), align 4
+ %2 = load i32, i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 0), align 4
+ %3 = load i32, i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 0), align 4
%add1 = add nsw i32 %2, %3
%add2 = add nsw i32 %add, %add1
store i32 %add2, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 0), align 4
- %4 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 1), align 4
- %5 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 1), align 4
+ %4 = load i32, i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 1), align 4
+ %5 = load i32, i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 1), align 4
%add3 = add nsw i32 %4, %5
- %6 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 1), align 4
- %7 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 1), align 4
+ %6 = load i32, i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 1), align 4
+ %7 = load i32, i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 1), align 4
%add4 = add nsw i32 %6, %7
%sub = sub nsw i32 %add3, %add4
store i32 %sub, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 1), align 4
- %8 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 2), align 4
- %9 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 2), align 4
+ %8 = load i32, i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 2), align 4
+ %9 = load i32, i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 2), align 4
%add5 = add nsw i32 %8, %9
- %10 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 2), align 4
- %11 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 2), align 4
+ %10 = load i32, i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 2), align 4
+ %11 = load i32, i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 2), align 4
%add6 = add nsw i32 %10, %11
%add7 = add nsw i32 %add5, %add6
store i32 %add7, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 2), align 4
- %12 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 3), align 4
- %13 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 3), align 4
+ %12 = load i32, i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 3), align 4
+ %13 = load i32, i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 3), align 4
%add8 = add nsw i32 %12, %13
- %14 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 3), align 4
- %15 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 3), align 4
+ %14 = load i32, i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 3), align 4
+ %15 = load i32, i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 3), align 4
%add9 = add nsw i32 %14, %15
%sub10 = sub nsw i32 %add8, %add9
store i32 %sub10, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 3), align 4
@@ -65,35 +65,35 @@ entry:
; Function Attrs: nounwind uwtable
define void @subadd() #0 {
entry:
- %0 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 0), align 4
- %1 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 0), align 4
+ %0 = load i32, i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 0), align 4
+ %1 = load i32, i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 0), align 4
%add = add nsw i32 %0, %1
- %2 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 0), align 4
- %3 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 0), align 4
+ %2 = load i32, i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 0), align 4
+ %3 = load i32, i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 0), align 4
%add1 = add nsw i32 %2, %3
%sub = sub nsw i32 %add, %add1
store i32 %sub, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 0), align 4
- %4 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 1), align 4
- %5 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 1), align 4
+ %4 = load i32, i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 1), align 4
+ %5 = load i32, i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 1), align 4
%add2 = add nsw i32 %4, %5
- %6 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 1), align 4
- %7 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 1), align 4
+ %6 = load i32, i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 1), align 4
+ %7 = load i32, i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 1), align 4
%add3 = add nsw i32 %6, %7
%add4 = add nsw i32 %add2, %add3
store i32 %add4, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 1), align 4
- %8 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 2), align 4
- %9 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 2), align 4
+ %8 = load i32, i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 2), align 4
+ %9 = load i32, i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 2), align 4
%add5 = add nsw i32 %8, %9
- %10 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 2), align 4
- %11 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 2), align 4
+ %10 = load i32, i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 2), align 4
+ %11 = load i32, i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 2), align 4
%add6 = add nsw i32 %10, %11
%sub7 = sub nsw i32 %add5, %add6
store i32 %sub7, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 2), align 4
- %12 = load i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 3), align 4
- %13 = load i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 3), align 4
+ %12 = load i32, i32* getelementptr inbounds ([4 x i32]* @b, i32 0, i64 3), align 4
+ %13 = load i32, i32* getelementptr inbounds ([4 x i32]* @c, i32 0, i64 3), align 4
%add8 = add nsw i32 %12, %13
- %14 = load i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 3), align 4
- %15 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 3), align 4
+ %14 = load i32, i32* getelementptr inbounds ([4 x i32]* @d, i32 0, i64 3), align 4
+ %15 = load i32, i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i64 3), align 4
%add9 = add nsw i32 %14, %15
%add10 = add nsw i32 %add8, %add9
store i32 %add10, i32* getelementptr inbounds ([4 x i32]* @a, i32 0, i64 3), align 4
@@ -107,20 +107,20 @@ entry:
; Function Attrs: nounwind uwtable
define void @faddfsub() #0 {
entry:
- %0 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
- %1 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
+ %0 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
+ %1 = load float, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
%add = fadd float %0, %1
store float %add, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
- %3 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
+ %2 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+ %3 = load float, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
%sub = fsub float %2, %3
store float %sub, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
- %4 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
- %5 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
+ %4 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+ %5 = load float, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
%add1 = fadd float %4, %5
store float %add1, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
- %6 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
- %7 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+ %6 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+ %7 = load float, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
%sub2 = fsub float %6, %7
store float %sub2, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
ret void
@@ -133,20 +133,20 @@ entry:
; Function Attrs: nounwind uwtable
define void @fsubfadd() #0 {
entry:
- %0 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
- %1 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
+ %0 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
+ %1 = load float, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
%sub = fsub float %0, %1
store float %sub, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
- %3 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
+ %2 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+ %3 = load float, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
%add = fadd float %2, %3
store float %add, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
- %4 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
- %5 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
+ %4 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+ %5 = load float, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
%sub1 = fsub float %4, %5
store float %sub1, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
- %6 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
- %7 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+ %6 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+ %7 = load float, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
%add2 = fadd float %6, %7
store float %add2, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
ret void
@@ -159,20 +159,20 @@ entry:
; Function Attrs: nounwind uwtable
define void @No_faddfsub() #0 {
entry:
- %0 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
- %1 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
+ %0 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
+ %1 = load float, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
%add = fadd float %0, %1
store float %add, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
- %3 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
+ %2 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+ %3 = load float, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
%add1 = fadd float %2, %3
store float %add1, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
- %4 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
- %5 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
+ %4 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+ %5 = load float, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
%add2 = fadd float %4, %5
store float %add2, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
- %6 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
- %7 = load float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
+ %6 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+ %7 = load float, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
%sub = fsub float %6, %7
store float %sub, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
ret void
@@ -189,20 +189,20 @@ entry:
; CHECK: %4 = fsub <4 x float> %1, %2
; CHECK: %5 = shufflevector <4 x float> %3, <4 x float> %4, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
define void @reorder_alt() #0 {
- %1 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
+ %1 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
+ %2 = load float, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
%3 = fadd float %1, %2
store float %3, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
- %4 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
- %5 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+ %4 = load float, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
+ %5 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
%6 = fsub float %4, %5
store float %6, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
- %7 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
- %8 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+ %7 = load float, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
+ %8 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
%9 = fadd float %7, %8
store float %9, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
- %10 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
- %11 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+ %10 = load float, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
+ %11 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
%12 = fsub float %10, %11
store float %12, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
ret void
@@ -222,27 +222,27 @@ define void @reorder_alt() #0 {
; CHECK: %8 = fsub <4 x float> %1, %6
; CHECK: %9 = shufflevector <4 x float> %7, <4 x float> %8, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
define void @reorder_alt_subTree() #0 {
- %1 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
- %3 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 0), align 4
+ %1 = load float, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
+ %2 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
+ %3 = load float, float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 0), align 4
%4 = fsub float %2, %3
%5 = fadd float %1, %4
store float %5, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
- %6 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
- %7 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
- %8 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 1), align 4
+ %6 = load float, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
+ %7 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+ %8 = load float, float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 1), align 4
%9 = fadd float %7, %8
%10 = fsub float %6, %9
store float %10, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
- %11 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
- %12 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
- %13 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 2), align 4
+ %11 = load float, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
+ %12 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+ %13 = load float, float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 2), align 4
%14 = fsub float %12, %13
%15 = fadd float %11, %14
store float %15, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
- %16 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
- %17 = load float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 3), align 4
- %18 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+ %16 = load float, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
+ %17 = load float, float* getelementptr inbounds ([4 x float]* @fd, i32 0, i64 3), align 4
+ %18 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
%19 = fadd float %17, %18
%20 = fsub float %16, %19
store float %20, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
@@ -258,18 +258,18 @@ define void @reorder_alt_subTree() #0 {
; CHECK: fsub <2 x double>
; CHECK: shufflevector <2 x double>
define void @reorder_alt_rightsubTree(double* nocapture %c, double* noalias nocapture readonly %a, double* noalias nocapture readonly %b, double* noalias nocapture readonly %d) {
- %1 = load double* %a
- %2 = load double* %b
+ %1 = load double, double* %a
+ %2 = load double, double* %b
%3 = fadd double %1, %2
- %4 = load double* %d
+ %4 = load double, double* %d
%5 = fsub double %3, %4
store double %5, double* %c
%6 = getelementptr inbounds double, double* %d, i64 1
- %7 = load double* %6
+ %7 = load double, double* %6
%8 = getelementptr inbounds double, double* %a, i64 1
- %9 = load double* %8
+ %9 = load double, double* %8
%10 = getelementptr inbounds double, double* %b, i64 1
- %11 = load double* %10
+ %11 = load double, double* %10
%12 = fadd double %9, %11
%13 = fadd double %7, %12
%14 = getelementptr inbounds double, double* %c, i64 1
@@ -290,20 +290,20 @@ define void @reorder_alt_rightsubTree(double* nocapture %c, double* noalias noca
; CHECK-NOT: fsub <4 x float>
; CHECK-NOT: shufflevector
define void @no_vec_shuff_reorder() #0 {
- %1 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
+ %1 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 0), align 4
+ %2 = load float, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 0), align 4
%3 = fadd float %1, %2
store float %3, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 0), align 4
- %4 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
- %5 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
+ %4 = load float, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 1), align 4
+ %5 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 1), align 4
%6 = fsub float %4, %5
store float %6, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 1), align 4
- %7 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
- %8 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
+ %7 = load float, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 2), align 4
+ %8 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 2), align 4
%9 = fadd float %7, %8
store float %9, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 2), align 4
- %10 = load float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
- %11 = load float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
+ %10 = load float, float* getelementptr inbounds ([4 x float]* @fb, i32 0, i64 3), align 4
+ %11 = load float, float* getelementptr inbounds ([4 x float]* @fa, i32 0, i64 3), align 4
%12 = fsub float %10, %11
store float %12, float* getelementptr inbounds ([4 x float]* @fc, i32 0, i64 3), align 4
ret void
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/align.ll b/llvm/test/Transforms/SLPVectorizer/X86/align.ll
index 2d6afaf3c04..b74b70900ee 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/align.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/align.ll
@@ -8,16 +8,16 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @test1(double* %a, double* %b, double* %c) {
entry:
%agg.tmp.i.i.sroa.0 = alloca [3 x double], align 16
-; CHECK: %[[V0:[0-9]+]] = load <2 x double>* %[[V2:[0-9]+]], align 8
- %i0 = load double* %a
- %i1 = load double* %b
+; CHECK: %[[V0:[0-9]+]] = load <2 x double>, <2 x double>* %[[V2:[0-9]+]], align 8
+ %i0 = load double, double* %a
+ %i1 = load double, double* %b
%mul = fmul double %i0, %i1
%store1 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 1
%store2 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 2
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
; CHECK: store <2 x double> %[[V1:[0-9]+]], <2 x double>* %[[V2:[0-9]+]], align 8
store double %mul, double* %store1
@@ -37,13 +37,13 @@ entry:
define void @test2(float * %a, float * %b) {
entry:
- %l0 = load float* %a
+ %l0 = load float, float* %a
%a1 = getelementptr inbounds float, float* %a, i64 1
- %l1 = load float* %a1
+ %l1 = load float, float* %a1
%a2 = getelementptr inbounds float, float* %a, i64 2
- %l2 = load float* %a2
+ %l2 = load float, float* %a2
%a3 = getelementptr inbounds float, float* %a, i64 3
- %l3 = load float* %a3
+ %l3 = load float, float* %a3
store float %l0, float* %b
%b1 = getelementptr inbounds float, float* %b, i64 1
store float %l1, float* %b1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/call.ll b/llvm/test/Transforms/SLPVectorizer/X86/call.ll
index 1bb0382e6b1..b76ac2c15c6 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/call.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/call.ll
@@ -15,14 +15,14 @@ declare i64 @round(i64)
; CHECK: ret void
define void @sin_libm(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @sin(double %mul) nounwind readnone
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @sin(double %mul5) nounwind readnone
store double %call, double* %c, align 8
@@ -36,14 +36,14 @@ entry:
; CHECK: ret void
define void @cos_libm(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @cos(double %mul) nounwind readnone
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @cos(double %mul5) nounwind readnone
store double %call, double* %c, align 8
@@ -57,14 +57,14 @@ entry:
; CHECK: ret void
define void @pow_libm(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @pow(double %mul,double %mul) nounwind readnone
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @pow(double %mul5,double %mul5) nounwind readnone
store double %call, double* %c, align 8
@@ -79,14 +79,14 @@ entry:
; CHECK: ret void
define void @exp2_libm(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @exp2(double %mul) nounwind readnone
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @exp2(double %mul5) nounwind readnone
store double %call, double* %c, align 8
@@ -102,14 +102,14 @@ entry:
; CHECK: ret void
define void @round_custom(i64* %a, i64* %b, i64* %c) {
entry:
- %i0 = load i64* %a, align 8
- %i1 = load i64* %b, align 8
+ %i0 = load i64, i64* %a, align 8
+ %i1 = load i64, i64* %b, align 8
%mul = mul i64 %i0, %i1
%call = tail call i64 @round(i64 %mul) nounwind readnone
%arrayidx3 = getelementptr inbounds i64, i64* %a, i64 1
- %i3 = load i64* %arrayidx3, align 8
+ %i3 = load i64, i64* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds i64, i64* %b, i64 1
- %i4 = load i64* %arrayidx4, align 8
+ %i4 = load i64, i64* %arrayidx4, align 8
%mul5 = mul i64 %i3, %i4
%call5 = tail call i64 @round(i64 %mul5) nounwind readnone
store i64 %call, i64* %c, align 8
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/cast.ll b/llvm/test/Transforms/SLPVectorizer/X86/cast.ll
index 357efc569e6..044db5d694b 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/cast.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/cast.ll
@@ -15,21 +15,21 @@ target triple = "x86_64-apple-macosx10.9.0"
;CHECK: store <4 x i32>
define i32 @foo(i32* noalias nocapture %A, i8* noalias nocapture %B) {
entry:
- %0 = load i8* %B, align 1
+ %0 = load i8, i8* %B, align 1
%conv = sext i8 %0 to i32
store i32 %conv, i32* %A, align 4
%arrayidx2 = getelementptr inbounds i8, i8* %B, i64 1
- %1 = load i8* %arrayidx2, align 1
+ %1 = load i8, i8* %arrayidx2, align 1
%conv3 = sext i8 %1 to i32
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
store i32 %conv3, i32* %arrayidx4, align 4
%arrayidx5 = getelementptr inbounds i8, i8* %B, i64 2
- %2 = load i8* %arrayidx5, align 1
+ %2 = load i8, i8* %arrayidx5, align 1
%conv6 = sext i8 %2 to i32
%arrayidx7 = getelementptr inbounds i32, i32* %A, i64 2
store i32 %conv6, i32* %arrayidx7, align 4
%arrayidx8 = getelementptr inbounds i8, i8* %B, i64 3
- %3 = load i8* %arrayidx8, align 1
+ %3 = load i8, i8* %arrayidx8, align 1
%conv9 = sext i8 %3 to i32
%arrayidx10 = getelementptr inbounds i32, i32* %A, i64 3
store i32 %conv9, i32* %arrayidx10, align 4
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/cmp_sel.ll b/llvm/test/Transforms/SLPVectorizer/X86/cmp_sel.ll
index 92efaa1eea3..a3e2b21ea6e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/cmp_sel.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/cmp_sel.ll
@@ -17,12 +17,12 @@ target triple = "x86_64-apple-macosx10.8.0"
define i32 @foo(double* noalias nocapture %A, double* noalias nocapture %B, double %G) {
entry:
%arrayidx = getelementptr inbounds double, double* %B, i64 10
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%tobool = fcmp une double %0, 0.000000e+00
%cond = select i1 %tobool, double %G, double 1.000000e+00
store double %cond, double* %A, align 8
%arrayidx2 = getelementptr inbounds double, double* %B, i64 11
- %1 = load double* %arrayidx2, align 8
+ %1 = load double, double* %arrayidx2, align 8
%tobool3 = fcmp une double %1, 0.000000e+00
%cond7 = select i1 %tobool3, double %G, double 1.000000e+00
%arrayidx8 = getelementptr inbounds double, double* %A, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll b/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
index 4b78ac3e183..8555fe0df8f 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
@@ -22,13 +22,13 @@ for.body: ; preds = %for.inc, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%0 = shl nsw i64 %indvars.iv, 1
%arrayidx = getelementptr inbounds double, double* %A, i64 %0
- %1 = load double* %arrayidx, align 8
+ %1 = load double, double* %arrayidx, align 8
%mul1 = fmul double %conv, %1
%mul2 = fmul double %mul1, 7.000000e+00
%add = fadd double %mul2, 5.000000e+00
%2 = or i64 %0, 1
%arrayidx6 = getelementptr inbounds double, double* %A, i64 %2
- %3 = load double* %arrayidx6, align 8
+ %3 = load double, double* %arrayidx6, align 8
%mul8 = fmul double %conv, %3
%mul9 = fmul double %mul8, 4.000000e+00
%add10 = fadd double %mul9, 9.000000e+00
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll b/llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
index ab7380af3b5..1ad4d694d34 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
@@ -21,25 +21,25 @@ entry:
%mul = mul nsw i32 %u, 3
%idxprom = sext i32 %mul to i64
%arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
- %1 = load double* %arrayidx4, align 8
+ %1 = load double, double* %arrayidx4, align 8
%add5 = fadd double %0, %1
store double %add5, double* %arrayidx, align 8
%add11 = add nsw i32 %mul, 1
%idxprom12 = sext i32 %add11 to i64
%arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
- %2 = load double* %arrayidx13, align 8
+ %2 = load double, double* %arrayidx13, align 8
%arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
- %3 = load double* %arrayidx17, align 8
+ %3 = load double, double* %arrayidx17, align 8
%add18 = fadd double %2, %3
store double %add18, double* %arrayidx13, align 8
%add24 = add nsw i32 %mul, 2
%idxprom25 = sext i32 %add24 to i64
%arrayidx26 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom25
- %4 = load double* %arrayidx26, align 8
+ %4 = load double, double* %arrayidx26, align 8
%arrayidx30 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom25
- %5 = load double* %arrayidx30, align 8
+ %5 = load double, double* %arrayidx30, align 8
%add31 = fadd double %4, %5
store double %add31, double* %arrayidx26, align 8
ret void
@@ -58,17 +58,17 @@ entry:
%mul = mul nsw i32 %u, 2
%idxprom = sext i32 %mul to i64
%arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
- %1 = load double* %arrayidx4, align 8
+ %1 = load double, double* %arrayidx4, align 8
%add5 = fadd double %0, %1
store double %add5, double* %arrayidx, align 8
%add11 = add nsw i32 %mul, 1
%idxprom12 = sext i32 %add11 to i64
%arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
- %2 = load double* %arrayidx13, align 8
+ %2 = load double, double* %arrayidx13, align 8
%arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
- %3 = load double* %arrayidx17, align 8
+ %3 = load double, double* %arrayidx17, align 8
%add18 = fadd double %2, %3
store double %add18, double* %arrayidx13, align 8
ret void
@@ -85,33 +85,33 @@ entry:
%mul = mul nsw i32 %u, 4
%idxprom = sext i32 %mul to i64
%arrayidx = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%arrayidx4 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom
- %1 = load float* %arrayidx4, align 4
+ %1 = load float, float* %arrayidx4, align 4
%add5 = fadd float %0, %1
store float %add5, float* %arrayidx, align 4
%add11 = add nsw i32 %mul, 1
%idxprom12 = sext i32 %add11 to i64
%arrayidx13 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom12
- %2 = load float* %arrayidx13, align 4
+ %2 = load float, float* %arrayidx13, align 4
%arrayidx17 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom12
- %3 = load float* %arrayidx17, align 4
+ %3 = load float, float* %arrayidx17, align 4
%add18 = fadd float %2, %3
store float %add18, float* %arrayidx13, align 4
%add24 = add nsw i32 %mul, 2
%idxprom25 = sext i32 %add24 to i64
%arrayidx26 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom25
- %4 = load float* %arrayidx26, align 4
+ %4 = load float, float* %arrayidx26, align 4
%arrayidx30 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom25
- %5 = load float* %arrayidx30, align 4
+ %5 = load float, float* %arrayidx30, align 4
%add31 = fadd float %4, %5
store float %add31, float* %arrayidx26, align 4
%add37 = add nsw i32 %mul, 3
%idxprom38 = sext i32 %add37 to i64
%arrayidx39 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom38
- %6 = load float* %arrayidx39, align 4
+ %6 = load float, float* %arrayidx39, align 4
%arrayidx43 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom38
- %7 = load float* %arrayidx43, align 4
+ %7 = load float, float* %arrayidx43, align 4
%add44 = fadd float %6, %7
store float %add44, float* %arrayidx39, align 4
ret void
@@ -143,12 +143,12 @@ for.body: ; preds = %for.body.lr.ph, %fo
%mul = mul nsw i32 %0, 2
%idxprom = sext i32 %mul to i64
%arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom
- %2 = load double* %arrayidx, align 8
+ %2 = load double, double* %arrayidx, align 8
%mul1 = fmul double 7.000000e+00, %2
%add = add nsw i32 %mul, 1
%idxprom3 = sext i32 %add to i64
%arrayidx4 = getelementptr inbounds double, double* %A, i64 %idxprom3
- %3 = load double* %arrayidx4, align 8
+ %3 = load double, double* %arrayidx4, align 8
%mul5 = fmul double 7.000000e+00, %3
%add6 = fadd double %mul1, %mul5
%add7 = fadd double %1, %add6
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll b/llvm/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
index b53169c3d01..ecae70ecc91 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
@@ -9,21 +9,21 @@ target triple = "x86_64-apple-macosx10.8.0"
; CHECK: ret
define void @test1(double* %a, double* %b, double* %c, double* %d) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
%0 = bitcast double* %a to <4 x i32>*
- %1 = load <4 x i32>* %0, align 8
+ %1 = load <4 x i32>, <4 x i32>* %0, align 8
%2 = bitcast double* %b to <4 x i32>*
- %3 = load <4 x i32>* %2, align 8
+ %3 = load <4 x i32>, <4 x i32>* %2, align 8
%4 = mul <4 x i32> %1, %3
%5 = bitcast double* %d to <4 x i32>*
store <4 x i32> %4, <4 x i32>* %5, align 8
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll
index dc99366e1da..9046c356282 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-apple-darwin13.3.0"
define i32 @fn1() {
entry:
- %init = load double* @a, align 8
+ %init = load double, double* @a, align 8
br label %loop
loop:
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_bullet.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_bullet.ll
index b5b2f262c47..1bad671fd82 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/crash_bullet.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_bullet.ll
@@ -45,7 +45,7 @@ define void @_ZN30GIM_TRIANGLE_CALCULATION_CACHE18triangle_collisionERK9btVector
entry:
%arrayidx26 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332, %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 1
%arrayidx36 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332, %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 2
- %0 = load float* %arrayidx36, align 4
+ %0 = load float, float* %arrayidx36, align 4
%add587 = fadd float undef, undef
%sub600 = fsub float %add587, undef
store float %sub600, float* undef, align 4
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
index 8ca63945929..f10c8626d41 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
@@ -13,7 +13,7 @@ for.body:
%s1.055 = phi float [ 0.000000e+00, %entry ], [ %cond.i40, %for.body ]
%s0.054 = phi float [ 0.000000e+00, %entry ], [ %cond.i44, %for.body ]
%arrayidx = getelementptr inbounds float, float* %src, i64 %indvars.iv
- %0 = load float* %arrayidx, align 4
+ %0 = load float, float* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%arrayidx2 = getelementptr inbounds float, float* %dest, i64 %indvars.iv
store float %acc1.056, float* %arrayidx2, align 4
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll
index f1ef9572564..28b7aa3c4de 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll
@@ -8,12 +8,12 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @_ZSt6uniqueISt15_Deque_iteratorIdRdPdEET_S4_S4_(%"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* nocapture %__last) {
entry:
%_M_cur2.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 0
- %0 = load double** %_M_cur2.i.i, align 8
+ %0 = load double*, double** %_M_cur2.i.i, align 8
%_M_first3.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 1
%_M_cur2.i.i81 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 0
- %1 = load double** %_M_cur2.i.i81, align 8
+ %1 = load double*, double** %_M_cur2.i.i81, align 8
%_M_first3.i.i83 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 1
- %2 = load double** %_M_first3.i.i83, align 8
+ %2 = load double*, double** %_M_first3.i.i83, align 8
br i1 undef, label %_ZSt13adjacent_findISt15_Deque_iteratorIdRdPdEET_S4_S4_.exit, label %while.cond.i.preheader
while.cond.i.preheader: ; preds = %entry
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_gep.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_gep.ll
index aa1857233af..bd1e8f7cc19 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/crash_gep.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_gep.ll
@@ -8,7 +8,7 @@ target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
define i32 @fn1() {
entry:
- %0 = load i64** @a, align 8
+ %0 = load i64*, i64** @a, align 8
%add.ptr = getelementptr inbounds i64, i64* %0, i64 1
%1 = ptrtoint i64* %add.ptr to i64
%arrayidx = getelementptr inbounds i64, i64* %0, i64 2
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_lencod.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_lencod.ll
index 4ddb27a0be9..70b13fd75f1 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/crash_lencod.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_lencod.ll
@@ -81,10 +81,10 @@ define fastcc void @dct36(double* %inbuf) {
entry:
%arrayidx41 = getelementptr inbounds double, double* %inbuf, i64 2
%arrayidx44 = getelementptr inbounds double, double* %inbuf, i64 1
- %0 = load double* %arrayidx44, align 8
+ %0 = load double, double* %arrayidx44, align 8
%add46 = fadd double %0, undef
store double %add46, double* %arrayidx41, align 8
- %1 = load double* %inbuf, align 8
+ %1 = load double, double* %inbuf, align 8
%add49 = fadd double %1, %0
store double %add49, double* %arrayidx44, align 8
ret void
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
index 109c3c93d5d..f82343fb433 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
@@ -55,9 +55,9 @@ for.end48: ; preds = %for.end44
define void @zot(%struct.hoge* %arg) {
bb:
- %tmp = load double* undef, align 8
+ %tmp = load double, double* undef, align 8
%tmp1 = fsub double %tmp, undef
- %tmp2 = load double* undef, align 8
+ %tmp2 = load double, double* undef, align 8
%tmp3 = fsub double %tmp2, undef
%tmp4 = fmul double %tmp3, undef
%tmp5 = fmul double %tmp3, undef
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll
index 8da3c34a027..9a5eb12c0aa 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll
@@ -13,14 +13,14 @@ target triple = "x86_64-apple-macosx10.8.0"
define i32 @fn1() {
entry:
- %0 = load i32* getelementptr inbounds (%struct.DState* @b, i32 0, i32 0), align 4
- %1 = load i32* getelementptr inbounds (%struct.DState* @b, i32 0, i32 1), align 4
- %2 = load i32* @d, align 4
+ %0 = load i32, i32* getelementptr inbounds (%struct.DState* @b, i32 0, i32 0), align 4
+ %1 = load i32, i32* getelementptr inbounds (%struct.DState* @b, i32 0, i32 1), align 4
+ %2 = load i32, i32* @d, align 4
%cond = icmp eq i32 %2, 0
br i1 %cond, label %sw.bb, label %save_state_and_return
sw.bb: ; preds = %entry
- %3 = load i32* @c, align 4
+ %3 = load i32, i32* @c, align 4
%and = and i32 %3, 7
store i32 %and, i32* @a, align 4
switch i32 %and, label %if.end [
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll
index 8f023f80a67..45ca99a3ea1 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll
@@ -30,8 +30,8 @@ define void @bar() {
%9 = phi double [ 1.800000e+01, %0 ], [ %10, %18 ], [ %10, %17 ], [ %10, %17 ]
store double %9, double* %1, align 8
store double %8, double* %2, align 8
- %10 = load double* %3, align 8
- %11 = load double* %4, align 8
+ %10 = load double, double* %3, align 8
+ %11 = load double, double* %4, align 8
br i1 undef, label %12, label %13
; <label>:12 ; preds = %7
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll b/llvm/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll
index 1f78f925f39..ea0064d4682 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll
@@ -26,9 +26,9 @@ target triple = "x86_64-apple-macosx10.8.0"
;CHECK: ret
define i32 @foo(double* nocapture %A, float* nocapture %B, i32 %g) {
entry:
- %0 = load float* %B, align 4
+ %0 = load float, float* %B, align 4
%arrayidx1 = getelementptr inbounds float, float* %B, i64 1
- %1 = load float* %arrayidx1, align 4
+ %1 = load float, float* %arrayidx1, align 4
%add = fadd float %0, 5.000000e+00
%add2 = fadd float %1, 8.000000e+00
%tobool = icmp eq i32 %g, 0
@@ -40,12 +40,12 @@ if.then:
if.end:
%conv = fpext float %add to double
- %2 = load double* %A, align 8
+ %2 = load double, double* %A, align 8
%add4 = fadd double %conv, %2
store double %add4, double* %A, align 8
%conv5 = fpext float %add2 to double
%arrayidx6 = getelementptr inbounds double, double* %A, i64 1
- %3 = load double* %arrayidx6, align 8
+ %3 = load double, double* %arrayidx6, align 8
%add7 = fadd double %conv5, %3
store double %add7, double* %arrayidx6, align 8
ret i32 undef
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/cse.ll b/llvm/test/Transforms/SLPVectorizer/X86/cse.ll
index a0db8865e12..9f56e219599 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/cse.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/cse.ll
@@ -22,12 +22,12 @@ target triple = "i386-apple-macosx10.8.0"
define i32 @test(double* nocapture %G) {
entry:
%arrayidx = getelementptr inbounds double, double* %G, i64 5
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%mul = fmul double %0, 4.000000e+00
%add = fadd double %mul, 1.000000e+00
store double %add, double* %G, align 8
%arrayidx2 = getelementptr inbounds double, double* %G, i64 6
- %1 = load double* %arrayidx2, align 8
+ %1 = load double, double* %arrayidx2, align 8
%mul3 = fmul double %1, 3.000000e+00
%add4 = fadd double %mul3, 6.000000e+00
%arrayidx5 = getelementptr inbounds double, double* %G, i64 1
@@ -55,26 +55,26 @@ entry:
;CHECK: ret
define i32 @foo(double* nocapture %A, i32 %n) {
entry:
- %0 = load double* %A, align 8
+ %0 = load double, double* %A, align 8
%mul = fmul double %0, 7.900000e+00
%conv = sitofp i32 %n to double
%mul1 = fmul double %conv, %mul
%add = fadd double %mul1, 6.000000e+00
store double %add, double* %A, align 8
%arrayidx3 = getelementptr inbounds double, double* %A, i64 1
- %1 = load double* %arrayidx3, align 8
+ %1 = load double, double* %arrayidx3, align 8
%mul4 = fmul double %1, 7.700000e+00
%mul6 = fmul double %conv, %mul4
%add7 = fadd double %mul6, 2.000000e+00
store double %add7, double* %arrayidx3, align 8
%arrayidx9 = getelementptr inbounds double, double* %A, i64 2
- %2 = load double* %arrayidx9, align 8
+ %2 = load double, double* %arrayidx9, align 8
%mul10 = fmul double %2, 7.600000e+00
%mul12 = fmul double %conv, %mul10
%add13 = fadd double %mul12, 3.000000e+00
store double %add13, double* %arrayidx9, align 8
%arrayidx15 = getelementptr inbounds double, double* %A, i64 3
- %3 = load double* %arrayidx15, align 8
+ %3 = load double, double* %arrayidx15, align 8
%mul16 = fmul double %3, 7.400000e+00
%mul18 = fmul double %conv, %mul16
%add19 = fadd double %mul18, 4.000000e+00
@@ -102,7 +102,7 @@ entry:
define i32 @test2(double* nocapture %G, i32 %k) {
%1 = icmp eq i32 %k, 0
%2 = getelementptr inbounds double, double* %G, i64 5
- %3 = load double* %2, align 8
+ %3 = load double, double* %2, align 8
%4 = fmul double %3, 4.000000e+00
br i1 %1, label %12, label %5
@@ -110,7 +110,7 @@ define i32 @test2(double* nocapture %G, i32 %k) {
%6 = fadd double %4, 1.000000e+00
store double %6, double* %G, align 8
%7 = getelementptr inbounds double, double* %G, i64 6
- %8 = load double* %7, align 8
+ %8 = load double, double* %7, align 8
%9 = fmul double %8, 3.000000e+00
%10 = fadd double %9, 6.000000e+00
%11 = getelementptr inbounds double, double* %G, i64 1
@@ -122,7 +122,7 @@ define i32 @test2(double* nocapture %G, i32 %k) {
%14 = getelementptr inbounds double, double* %G, i64 2
store double %13, double* %14, align 8
%15 = getelementptr inbounds double, double* %G, i64 6
- %16 = load double* %15, align 8
+ %16 = load double, double* %15, align 8
%17 = fmul double %16, 3.000000e+00
%18 = fadd double %17, 8.000000e+00
%19 = getelementptr inbounds double, double* %G, i64 3
@@ -147,26 +147,26 @@ define i32 @test2(double* nocapture %G, i32 %k) {
;CHECK: ret
define i32 @foo4(double* nocapture %A, i32 %n) {
entry:
- %0 = load double* %A, align 8
+ %0 = load double, double* %A, align 8
%mul = fmul double %0, 7.900000e+00
%conv = sitofp i32 %n to double
%mul1 = fmul double %conv, %mul
%add = fadd double %mul1, 6.000000e+00
store double %add, double* %A, align 8
%arrayidx3 = getelementptr inbounds double, double* %A, i64 1
- %1 = load double* %arrayidx3, align 8
+ %1 = load double, double* %arrayidx3, align 8
%mul4 = fmul double %1, 7.900000e+00
%mul6 = fmul double %conv, %mul4
%add7 = fadd double %mul6, 6.000000e+00
store double %add7, double* %arrayidx3, align 8
%arrayidx9 = getelementptr inbounds double, double* %A, i64 2
- %2 = load double* %arrayidx9, align 8
+ %2 = load double, double* %arrayidx9, align 8
%mul10 = fmul double %2, 7.900000e+00
%mul12 = fmul double %conv, %mul10
%add13 = fadd double %mul12, 6.000000e+00
store double %add13, double* %arrayidx9, align 8
%arrayidx15 = getelementptr inbounds double, double* %A, i64 3
- %3 = load double* %arrayidx15, align 8
+ %3 = load double, double* %arrayidx15, align 8
%mul16 = fmul double %3, 7.900000e+00
%mul18 = fmul double %conv, %mul16
%add19 = fadd double %mul18, 6.000000e+00
@@ -189,12 +189,12 @@ entry:
;CHECK: ret
define i32 @partial_mrg(double* nocapture %A, i32 %n) {
entry:
- %0 = load double* %A, align 8
+ %0 = load double, double* %A, align 8
%conv = sitofp i32 %n to double
%mul = fmul double %conv, %0
store double %mul, double* %A, align 8
%arrayidx2 = getelementptr inbounds double, double* %A, i64 1
- %1 = load double* %arrayidx2, align 8
+ %1 = load double, double* %arrayidx2, align 8
%mul4 = fmul double %conv, %1
store double %mul4, double* %arrayidx2, align 8
%cmp = icmp slt i32 %n, 4
@@ -202,11 +202,11 @@ entry:
if.end: ; preds = %entry
%arrayidx7 = getelementptr inbounds double, double* %A, i64 2
- %2 = load double* %arrayidx7, align 8
+ %2 = load double, double* %arrayidx7, align 8
%mul9 = fmul double %conv, %2
store double %mul9, double* %arrayidx7, align 8
%arrayidx11 = getelementptr inbounds double, double* %A, i64 3
- %3 = load double* %arrayidx11, align 8
+ %3 = load double, double* %arrayidx11, align 8
%add = add nsw i32 %n, 4
%conv12 = sitofp i32 %add to double
%mul13 = fmul double %conv12, %3
@@ -228,18 +228,18 @@ entry:
sw.epilog7: ; No predecessors!
%.in = getelementptr inbounds %class.B.53.55, %class.B.53.55* %this, i64 0, i32 0, i32 1
- %0 = load double* %.in, align 8
+ %0 = load double, double* %.in, align 8
%add = fadd double undef, 0.000000e+00
%add6 = fadd double %add, %0
- %1 = load double* @a, align 8
+ %1 = load double, double* @a, align 8
%add8 = fadd double %1, 0.000000e+00
%_dy = getelementptr inbounds %class.B.53.55, %class.B.53.55* %this, i64 0, i32 0, i32 2
- %2 = load double* %_dy, align 8
+ %2 = load double, double* %_dy, align 8
%add10 = fadd double %add8, %2
br i1 undef, label %if.then12, label %if.end13
if.then12: ; preds = %sw.epilog7
- %3 = load double* undef, align 8
+ %3 = load double, double* undef, align 8
br label %if.end13
if.end13: ; preds = %if.then12, %sw.epilog7, %entry
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/cycle_dup.ll b/llvm/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
index 59f2923261f..0a4e961c2e8 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
@@ -23,15 +23,15 @@ target triple = "x86_64-apple-macosx10.9.0"
;CHECK-NEXT:ret i32 undef
define i32 @foo(i32* nocapture %A) #0 {
entry:
- %0 = load i32* %A, align 4
+ %0 = load i32, i32* %A, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 1
- %1 = load i32* %arrayidx1, align 4
+ %1 = load i32, i32* %arrayidx1, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 2
- %2 = load i32* %arrayidx2, align 4
+ %2 = load i32, i32* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %A, i64 3
- %3 = load i32* %arrayidx3, align 4
+ %3 = load i32, i32* %arrayidx3, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 13
- %4 = load i32* %arrayidx4, align 4
+ %4 = load i32, i32* %arrayidx4, align 4
%cmp24 = icmp sgt i32 %4, 0
br i1 %cmp24, label %for.body, label %for.end
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/debug_info.ll b/llvm/test/Transforms/SLPVectorizer/X86/debug_info.ll
index d145a7d67ab..c28ccc5902d 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/debug_info.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/debug_info.ll
@@ -15,7 +15,7 @@ target triple = "x86_64-apple-macosx10.7.0"
;CHECK: @depth
;CHECK: getelementptr inbounds {{.*}}, !dbg ![[LOC:[0-9]+]]
;CHECK: bitcast double* {{.*}}, !dbg ![[LOC]]
-;CHECK: load <2 x double>* {{.*}}, !dbg ![[LOC]]
+;CHECK: load <2 x double>, <2 x double>* {{.*}}, !dbg ![[LOC]]
;CHECK: store <2 x double> {{.*}}, !dbg ![[LOC2:[0-9]+]]
;CHECK: ret
;CHECK: ![[LOC]] = !MDLocation(line: 4, scope:
@@ -33,9 +33,9 @@ entry:
for.body.lr.ph: ; preds = %entry
%arrayidx = getelementptr inbounds double, double* %A, i64 4, !dbg !24
- %0 = load double* %arrayidx, align 8, !dbg !24
+ %0 = load double, double* %arrayidx, align 8, !dbg !24
%arrayidx1 = getelementptr inbounds double, double* %A, i64 5, !dbg !29
- %1 = load double* %arrayidx1, align 8, !dbg !29
+ %1 = load double, double* %arrayidx1, align 8, !dbg !29
br label %for.end, !dbg !23
for.end: ; preds = %for.body.lr.ph, %entry
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/diamond.ll b/llvm/test/Transforms/SLPVectorizer/X86/diamond.ll
index 692c0f633d5..4e2c02f6965 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/diamond.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/diamond.ll
@@ -18,22 +18,22 @@ target triple = "x86_64-apple-macosx10.8.0"
; CHECK: ret
define i32 @foo(i32* noalias nocapture %B, i32* noalias nocapture %A, i32 %n, i32 %m) #0 {
entry:
- %0 = load i32* %A, align 4
+ %0 = load i32, i32* %A, align 4
%mul238 = add i32 %m, %n
%add = mul i32 %0, %mul238
store i32 %add, i32* %B, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
- %1 = load i32* %arrayidx4, align 4
+ %1 = load i32, i32* %arrayidx4, align 4
%add8 = mul i32 %1, %mul238
%arrayidx9 = getelementptr inbounds i32, i32* %B, i64 1
store i32 %add8, i32* %arrayidx9, align 4
%arrayidx10 = getelementptr inbounds i32, i32* %A, i64 2
- %2 = load i32* %arrayidx10, align 4
+ %2 = load i32, i32* %arrayidx10, align 4
%add14 = mul i32 %2, %mul238
%arrayidx15 = getelementptr inbounds i32, i32* %B, i64 2
store i32 %add14, i32* %arrayidx15, align 4
%arrayidx16 = getelementptr inbounds i32, i32* %A, i64 3
- %3 = load i32* %arrayidx16, align 4
+ %3 = load i32, i32* %arrayidx16, align 4
%add20 = mul i32 %3, %mul238
%arrayidx21 = getelementptr inbounds i32, i32* %B, i64 3
store i32 %add20, i32* %arrayidx21, align 4
@@ -56,22 +56,22 @@ entry:
; CHECK-NEXT: ret
define i32 @extr_user(i32* noalias nocapture %B, i32* noalias nocapture %A, i32 %n, i32 %m) {
entry:
- %0 = load i32* %A, align 4
+ %0 = load i32, i32* %A, align 4
%mul238 = add i32 %m, %n
%add = mul i32 %0, %mul238
store i32 %add, i32* %B, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
- %1 = load i32* %arrayidx4, align 4
+ %1 = load i32, i32* %arrayidx4, align 4
%add8 = mul i32 %1, %mul238
%arrayidx9 = getelementptr inbounds i32, i32* %B, i64 1
store i32 %add8, i32* %arrayidx9, align 4
%arrayidx10 = getelementptr inbounds i32, i32* %A, i64 2
- %2 = load i32* %arrayidx10, align 4
+ %2 = load i32, i32* %arrayidx10, align 4
%add14 = mul i32 %2, %mul238
%arrayidx15 = getelementptr inbounds i32, i32* %B, i64 2
store i32 %add14, i32* %arrayidx15, align 4
%arrayidx16 = getelementptr inbounds i32, i32* %A, i64 3
- %3 = load i32* %arrayidx16, align 4
+ %3 = load i32, i32* %arrayidx16, align 4
%add20 = mul i32 %3, %mul238
%arrayidx21 = getelementptr inbounds i32, i32* %B, i64 3
store i32 %add20, i32* %arrayidx21, align 4
@@ -86,22 +86,22 @@ entry:
; CHECK-NEXT: ret
define i32 @extr_user1(i32* noalias nocapture %B, i32* noalias nocapture %A, i32 %n, i32 %m) {
entry:
- %0 = load i32* %A, align 4
+ %0 = load i32, i32* %A, align 4
%mul238 = add i32 %m, %n
%add = mul i32 %0, %mul238
store i32 %add, i32* %B, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
- %1 = load i32* %arrayidx4, align 4
+ %1 = load i32, i32* %arrayidx4, align 4
%add8 = mul i32 %1, %mul238
%arrayidx9 = getelementptr inbounds i32, i32* %B, i64 1
store i32 %add8, i32* %arrayidx9, align 4
%arrayidx10 = getelementptr inbounds i32, i32* %A, i64 2
- %2 = load i32* %arrayidx10, align 4
+ %2 = load i32, i32* %arrayidx10, align 4
%add14 = mul i32 %2, %mul238
%arrayidx15 = getelementptr inbounds i32, i32* %B, i64 2
store i32 %add14, i32* %arrayidx15, align 4
%arrayidx16 = getelementptr inbounds i32, i32* %A, i64 3
- %3 = load i32* %arrayidx16, align 4
+ %3 = load i32, i32* %arrayidx16, align 4
%add20 = mul i32 %3, %mul238
%arrayidx21 = getelementptr inbounds i32, i32* %B, i64 3
store i32 %add20, i32* %arrayidx21, align 4
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/external_user.ll b/llvm/test/Transforms/SLPVectorizer/X86/external_user.ll
index 68cef94ba74..bf2febda86b 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/external_user.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/external_user.ll
@@ -34,8 +34,8 @@ target triple = "x86_64-apple-macosx10.8.0"
define double @ext_user(double* noalias nocapture %B, double* noalias nocapture %A, i32 %n, i32 %m) {
entry:
%arrayidx = getelementptr inbounds double, double* %A, i64 1
- %0 = load double* %arrayidx, align 8
- %1 = load double* %A, align 8
+ %0 = load double, double* %arrayidx, align 8
+ %1 = load double, double* %A, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
@@ -69,9 +69,9 @@ for.end: ; preds = %for.body
define i32 @needtogather(double *noalias %a, i32 *noalias %b, float * noalias %c,
i32 * noalias %d) {
entry:
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv = sitofp i32 %0 to float
- %1 = load float* %c
+ %1 = load float, float* %c
%sub = fsub float 0.000000e+00, %1
%mul = fmul float %sub, 0.000000e+00
%add = fadd float %conv, %mul
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/extract.ll b/llvm/test/Transforms/SLPVectorizer/X86/extract.ll
index 5ac07a7683b..9a6ee2afc8e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/extract.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/extract.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-macosx10.8.0"
;CHECK: ret void
define void @fextr(double* %ptr) {
entry:
- %LD = load <2 x double>* undef
+ %LD = load <2 x double>, <2 x double>* undef
%V0 = extractelement <2 x double> %LD, i32 0
%V1 = extractelement <2 x double> %LD, i32 1
%P0 = getelementptr inbounds double, double* %ptr, i64 0
@@ -27,7 +27,7 @@ entry:
;CHECK: ret void
define void @fextr1(double* %ptr) {
entry:
- %LD = load <2 x double>* undef
+ %LD = load <2 x double>, <2 x double>* undef
%V0 = extractelement <2 x double> %LD, i32 0
%V1 = extractelement <2 x double> %LD, i32 1
%P0 = getelementptr inbounds double, double* %ptr, i64 1 ; <--- incorrect order
@@ -45,7 +45,7 @@ entry:
;CHECK: ret void
define void @fextr2(double* %ptr) {
entry:
- %LD = load <4 x double>* undef
+ %LD = load <4 x double>, <4 x double>* undef
%V0 = extractelement <4 x double> %LD, i32 0 ; <--- invalid size.
%V1 = extractelement <4 x double> %LD, i32 1
%P0 = getelementptr inbounds double, double* %ptr, i64 0
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll b/llvm/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll
index a68ac7d2197..6e5415b3c70 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll
@@ -7,7 +7,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; Function Attrs: nounwind ssp uwtable
define i32 @fn1() {
entry:
- %0 = load i64** @a, align 8
+ %0 = load i64*, i64** @a, align 8
%add.ptr = getelementptr inbounds i64, i64* %0, i64 11
%1 = ptrtoint i64* %add.ptr to i64
store i64 %1, i64* %add.ptr, align 8
@@ -25,32 +25,32 @@ entry:
declare float @llvm.powi.f32(float, i32)
define void @fn2(i32* %a, i32* %b, float* %c) {
entry:
- %i0 = load i32* %a, align 4
- %i1 = load i32* %b, align 4
+ %i0 = load i32, i32* %a, align 4
+ %i1 = load i32, i32* %b, align 4
%add1 = add i32 %i0, %i1
%fp1 = sitofp i32 %add1 to float
%call1 = tail call float @llvm.powi.f32(float %fp1,i32 %add1) nounwind readnone
%arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
- %i2 = load i32* %arrayidx2, align 4
+ %i2 = load i32, i32* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
- %i3 = load i32* %arrayidx3, align 4
+ %i3 = load i32, i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%fp2 = sitofp i32 %add2 to float
%call2 = tail call float @llvm.powi.f32(float %fp2,i32 %add1) nounwind readnone
%arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
- %i4 = load i32* %arrayidx4, align 4
+ %i4 = load i32, i32* %arrayidx4, align 4
%arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
- %i5 = load i32* %arrayidx5, align 4
+ %i5 = load i32, i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%fp3 = sitofp i32 %add3 to float
%call3 = tail call float @llvm.powi.f32(float %fp3,i32 %add1) nounwind readnone
%arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
- %i6 = load i32* %arrayidx6, align 4
+ %i6 = load i32, i32* %arrayidx6, align 4
%arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
- %i7 = load i32* %arrayidx7, align 4
+ %i7 = load i32, i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%fp4 = sitofp i32 %add4 to float
%call4 = tail call float @llvm.powi.f32(float %fp4,i32 %add1) nounwind readnone
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/flag.ll b/llvm/test/Transforms/SLPVectorizer/X86/flag.ll
index 2890c9f41aa..7db8d75c20a 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/flag.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/flag.ll
@@ -16,16 +16,16 @@ define i32 @rollable(i32* noalias nocapture %in, i32* noalias nocapture %out, i6
%i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
%2 = shl i64 %i.019, 2
%3 = getelementptr inbounds i32, i32* %in, i64 %2
- %4 = load i32* %3, align 4
+ %4 = load i32, i32* %3, align 4
%5 = or i64 %2, 1
%6 = getelementptr inbounds i32, i32* %in, i64 %5
- %7 = load i32* %6, align 4
+ %7 = load i32, i32* %6, align 4
%8 = or i64 %2, 2
%9 = getelementptr inbounds i32, i32* %in, i64 %8
- %10 = load i32* %9, align 4
+ %10 = load i32, i32* %9, align 4
%11 = or i64 %2, 3
%12 = getelementptr inbounds i32, i32* %in, i64 %11
- %13 = load i32* %12, align 4
+ %13 = load i32, i32* %12, align 4
%14 = mul i32 %4, 7
%15 = add i32 %14, 7
%16 = mul i32 %7, 7
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/gep.ll b/llvm/test/Transforms/SLPVectorizer/X86/gep.ll
index 6c6a7bd3319..3f952d7b242 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/gep.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/gep.ll
@@ -10,12 +10,12 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
; CHECK: <2 x i32*>
define void @foo1 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y) {
%1 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 0
- %2 = load i32** %1, align 8
+ %2 = load i32*, i32** %1, align 8
%3 = getelementptr inbounds i32, i32* %2, i64 16
%4 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 0
store i32* %3, i32** %4, align 8
%5 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 1
- %6 = load i32** %5, align 8
+ %6 = load i32*, i32** %5, align 8
%7 = getelementptr inbounds i32, i32* %6, i64 16
%8 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 1
store i32* %7, i32** %8, align 8
@@ -28,12 +28,12 @@ define void @foo1 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y) {
; CHECK-NOT: <2 x i32*>
define void @foo2 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y, i32 %i) {
%1 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 0
- %2 = load i32** %1, align 8
+ %2 = load i32*, i32** %1, align 8
%3 = getelementptr inbounds i32, i32* %2, i32 %i
%4 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 0
store i32* %3, i32** %4, align 8
%5 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 1
- %6 = load i32** %5, align 8
+ %6 = load i32*, i32** %5, align 8
%7 = getelementptr inbounds i32, i32* %6, i32 %i
%8 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 1
store i32* %7, i32** %8, align 8
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/hoist.ll b/llvm/test/Transforms/SLPVectorizer/X86/hoist.ll
index c5e5b25b902..36c939b597e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/hoist.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/hoist.ll
@@ -31,22 +31,22 @@ entry:
for.body: ; preds = %entry, %for.body
%i.024 = phi i32 [ 0, %entry ], [ %add10, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.024
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %n
store i32 %add, i32* %arrayidx, align 4
%add121 = or i32 %i.024, 1
%arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %add121
- %1 = load i32* %arrayidx2, align 4
+ %1 = load i32, i32* %arrayidx2, align 4
%add3 = add nsw i32 %1, %k
store i32 %add3, i32* %arrayidx2, align 4
%add422 = or i32 %i.024, 2
%arrayidx5 = getelementptr inbounds i32, i32* %A, i32 %add422
- %2 = load i32* %arrayidx5, align 4
+ %2 = load i32, i32* %arrayidx5, align 4
%add6 = add nsw i32 %2, %n
store i32 %add6, i32* %arrayidx5, align 4
%add723 = or i32 %i.024, 3
%arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add723
- %3 = load i32* %arrayidx8, align 4
+ %3 = load i32, i32* %arrayidx8, align 4
%add9 = add nsw i32 %3, %k
store i32 %add9, i32* %arrayidx8, align 4
%add10 = add nsw i32 %i.024, 4
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll b/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll
index 21d38c4de40..83b2e01f04e 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll
@@ -33,21 +33,21 @@ for.body:
%sum.032 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add17, %for.body ]
%mul = shl nsw i64 %i.033, 2
%arrayidx = getelementptr inbounds float, float* %A, i64 %mul
- %1 = load float* %arrayidx, align 4
+ %1 = load float, float* %arrayidx, align 4
%mul2 = fmul float %1, 7.000000e+00
%add28 = or i64 %mul, 1
%arrayidx4 = getelementptr inbounds float, float* %A, i64 %add28
- %2 = load float* %arrayidx4, align 4
+ %2 = load float, float* %arrayidx4, align 4
%mul5 = fmul float %2, 7.000000e+00
%add6 = fadd fast float %mul2, %mul5
%add829 = or i64 %mul, 2
%arrayidx9 = getelementptr inbounds float, float* %A, i64 %add829
- %3 = load float* %arrayidx9, align 4
+ %3 = load float, float* %arrayidx9, align 4
%mul10 = fmul float %3, 7.000000e+00
%add11 = fadd fast float %add6, %mul10
%add1330 = or i64 %mul, 3
%arrayidx14 = getelementptr inbounds float, float* %A, i64 %add1330
- %4 = load float* %arrayidx14, align 4
+ %4 = load float, float* %arrayidx14, align 4
%mul15 = fmul float %4, 7.000000e+00
%add16 = fadd fast float %add11, %mul15
%add17 = fadd fast float %sum.032, %add16
@@ -85,13 +85,13 @@ entry:
br i1 %cmp38, label %for.body.lr.ph, label %for.end
for.body.lr.ph:
- %0 = load float* %B, align 4
+ %0 = load float, float* %B, align 4
%arrayidx4 = getelementptr inbounds float, float* %B, i64 1
- %1 = load float* %arrayidx4, align 4
+ %1 = load float, float* %arrayidx4, align 4
%arrayidx9 = getelementptr inbounds float, float* %B, i64 2
- %2 = load float* %arrayidx9, align 4
+ %2 = load float, float* %arrayidx9, align 4
%arrayidx15 = getelementptr inbounds float, float* %B, i64 3
- %3 = load float* %arrayidx15, align 4
+ %3 = load float, float* %arrayidx15, align 4
%4 = sext i32 %n to i64
br label %for.body
@@ -100,21 +100,21 @@ for.body:
%sum.039 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %mul21, %for.body ]
%mul = shl nsw i64 %i.040, 2
%arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
- %5 = load float* %arrayidx2, align 4
+ %5 = load float, float* %arrayidx2, align 4
%mul3 = fmul float %0, %5
%add35 = or i64 %mul, 1
%arrayidx6 = getelementptr inbounds float, float* %A, i64 %add35
- %6 = load float* %arrayidx6, align 4
+ %6 = load float, float* %arrayidx6, align 4
%mul7 = fmul float %1, %6
%add8 = fadd fast float %mul3, %mul7
%add1136 = or i64 %mul, 2
%arrayidx12 = getelementptr inbounds float, float* %A, i64 %add1136
- %7 = load float* %arrayidx12, align 4
+ %7 = load float, float* %arrayidx12, align 4
%mul13 = fmul float %2, %7
%add14 = fadd fast float %add8, %mul13
%add1737 = or i64 %mul, 3
%arrayidx18 = getelementptr inbounds float, float* %A, i64 %add1737
- %8 = load float* %arrayidx18, align 4
+ %8 = load float, float* %arrayidx18, align 4
%mul19 = fmul float %3, %8
%add20 = fadd fast float %add14, %mul19
%mul21 = fmul float %sum.039, %add20
@@ -157,23 +157,23 @@ entry:
br i1 %cmp81, label %for.body.lr.ph, label %for.end
for.body.lr.ph:
- %0 = load float* %B, align 4
+ %0 = load float, float* %B, align 4
%arrayidx4 = getelementptr inbounds float, float* %B, i64 1
- %1 = load float* %arrayidx4, align 4
+ %1 = load float, float* %arrayidx4, align 4
%arrayidx9 = getelementptr inbounds float, float* %B, i64 2
- %2 = load float* %arrayidx9, align 4
+ %2 = load float, float* %arrayidx9, align 4
%arrayidx15 = getelementptr inbounds float, float* %B, i64 3
- %3 = load float* %arrayidx15, align 4
+ %3 = load float, float* %arrayidx15, align 4
%arrayidx21 = getelementptr inbounds float, float* %B, i64 4
- %4 = load float* %arrayidx21, align 4
+ %4 = load float, float* %arrayidx21, align 4
%arrayidx27 = getelementptr inbounds float, float* %B, i64 5
- %5 = load float* %arrayidx27, align 4
+ %5 = load float, float* %arrayidx27, align 4
%arrayidx33 = getelementptr inbounds float, float* %B, i64 6
- %6 = load float* %arrayidx33, align 4
+ %6 = load float, float* %arrayidx33, align 4
%arrayidx39 = getelementptr inbounds float, float* %B, i64 7
- %7 = load float* %arrayidx39, align 4
+ %7 = load float, float* %arrayidx39, align 4
%arrayidx45 = getelementptr inbounds float, float* %B, i64 8
- %8 = load float* %arrayidx45, align 4
+ %8 = load float, float* %arrayidx45, align 4
%9 = sext i32 %n to i64
br label %for.body
@@ -182,46 +182,46 @@ for.body:
%sum.082 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add51, %for.body ]
%mul = mul nsw i64 %i.083, 6
%arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
- %10 = load float* %arrayidx2, align 4
+ %10 = load float, float* %arrayidx2, align 4
%mul3 = fmul fast float %0, %10
%add80 = or i64 %mul, 1
%arrayidx6 = getelementptr inbounds float, float* %A, i64 %add80
- %11 = load float* %arrayidx6, align 4
+ %11 = load float, float* %arrayidx6, align 4
%mul7 = fmul fast float %1, %11
%add8 = fadd fast float %mul3, %mul7
%add11 = add nsw i64 %mul, 2
%arrayidx12 = getelementptr inbounds float, float* %A, i64 %add11
- %12 = load float* %arrayidx12, align 4
+ %12 = load float, float* %arrayidx12, align 4
%mul13 = fmul fast float %2, %12
%add14 = fadd fast float %add8, %mul13
%add17 = add nsw i64 %mul, 3
%arrayidx18 = getelementptr inbounds float, float* %A, i64 %add17
- %13 = load float* %arrayidx18, align 4
+ %13 = load float, float* %arrayidx18, align 4
%mul19 = fmul fast float %3, %13
%add20 = fadd fast float %add14, %mul19
%add23 = add nsw i64 %mul, 4
%arrayidx24 = getelementptr inbounds float, float* %A, i64 %add23
- %14 = load float* %arrayidx24, align 4
+ %14 = load float, float* %arrayidx24, align 4
%mul25 = fmul fast float %4, %14
%add26 = fadd fast float %add20, %mul25
%add29 = add nsw i64 %mul, 5
%arrayidx30 = getelementptr inbounds float, float* %A, i64 %add29
- %15 = load float* %arrayidx30, align 4
+ %15 = load float, float* %arrayidx30, align 4
%mul31 = fmul fast float %5, %15
%add32 = fadd fast float %add26, %mul31
%add35 = add nsw i64 %mul, 6
%arrayidx36 = getelementptr inbounds float, float* %A, i64 %add35
- %16 = load float* %arrayidx36, align 4
+ %16 = load float, float* %arrayidx36, align 4
%mul37 = fmul fast float %6, %16
%add38 = fadd fast float %add32, %mul37
%add41 = add nsw i64 %mul, 7
%arrayidx42 = getelementptr inbounds float, float* %A, i64 %add41
- %17 = load float* %arrayidx42, align 4
+ %17 = load float, float* %arrayidx42, align 4
%mul43 = fmul fast float %7, %17
%add44 = fadd fast float %add38, %mul43
%add47 = add nsw i64 %mul, 8
%arrayidx48 = getelementptr inbounds float, float* %A, i64 %add47
- %18 = load float* %arrayidx48, align 4
+ %18 = load float, float* %arrayidx48, align 4
%mul49 = fmul fast float %8, %18
%add50 = fadd fast float %add44, %mul49
%add51 = fadd fast float %sum.082, %add50
@@ -259,13 +259,13 @@ entry:
br i1 %cmp41, label %for.body.lr.ph, label %for.end
for.body.lr.ph:
- %0 = load float* %B, align 4
+ %0 = load float, float* %B, align 4
%arrayidx4 = getelementptr inbounds float, float* %B, i64 1
- %1 = load float* %arrayidx4, align 4
+ %1 = load float, float* %arrayidx4, align 4
%arrayidx10 = getelementptr inbounds float, float* %B, i64 2
- %2 = load float* %arrayidx10, align 4
+ %2 = load float, float* %arrayidx10, align 4
%arrayidx16 = getelementptr inbounds float, float* %B, i64 3
- %3 = load float* %arrayidx16, align 4
+ %3 = load float, float* %arrayidx16, align 4
%4 = sext i32 %n to i64
br label %for.body
@@ -274,22 +274,22 @@ for.body:
%sum.042 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add21, %for.body ]
%mul = shl nsw i64 %i.043, 2
%arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
- %5 = load float* %arrayidx2, align 4
+ %5 = load float, float* %arrayidx2, align 4
%mul3 = fmul fast float %0, %5
%add = fadd fast float %sum.042, %mul3
%add638 = or i64 %mul, 1
%arrayidx7 = getelementptr inbounds float, float* %A, i64 %add638
- %6 = load float* %arrayidx7, align 4
+ %6 = load float, float* %arrayidx7, align 4
%mul8 = fmul fast float %1, %6
%add9 = fadd fast float %add, %mul8
%add1239 = or i64 %mul, 2
%arrayidx13 = getelementptr inbounds float, float* %A, i64 %add1239
- %7 = load float* %arrayidx13, align 4
+ %7 = load float, float* %arrayidx13, align 4
%mul14 = fmul fast float %2, %7
%add15 = fadd fast float %add9, %mul14
%add1840 = or i64 %mul, 3
%arrayidx19 = getelementptr inbounds float, float* %A, i64 %add1840
- %8 = load float* %arrayidx19, align 4
+ %8 = load float, float* %arrayidx19, align 4
%mul20 = fmul fast float %3, %8
%add21 = fadd fast float %add15, %mul20
%inc = add nsw i64 %i.043, 1
@@ -335,27 +335,27 @@ for.body.lr.ph:
for.body:
%i.039 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%C.addr.038 = phi float* [ %C, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
- %1 = load float* %B, align 4
+ %1 = load float, float* %B, align 4
%mul = shl nsw i64 %i.039, 2
%arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
- %2 = load float* %arrayidx2, align 4
+ %2 = load float, float* %arrayidx2, align 4
%mul3 = fmul fast float %1, %2
- %3 = load float* %arrayidx4, align 4
+ %3 = load float, float* %arrayidx4, align 4
%add34 = or i64 %mul, 1
%arrayidx6 = getelementptr inbounds float, float* %A, i64 %add34
- %4 = load float* %arrayidx6, align 4
+ %4 = load float, float* %arrayidx6, align 4
%mul7 = fmul fast float %3, %4
%add8 = fadd fast float %mul3, %mul7
- %5 = load float* %arrayidx9, align 4
+ %5 = load float, float* %arrayidx9, align 4
%add1135 = or i64 %mul, 2
%arrayidx12 = getelementptr inbounds float, float* %A, i64 %add1135
- %6 = load float* %arrayidx12, align 4
+ %6 = load float, float* %arrayidx12, align 4
%mul13 = fmul fast float %5, %6
%add14 = fadd fast float %add8, %mul13
- %7 = load float* %arrayidx15, align 4
+ %7 = load float, float* %arrayidx15, align 4
%add1736 = or i64 %mul, 3
%arrayidx18 = getelementptr inbounds float, float* %A, i64 %add1736
- %8 = load float* %arrayidx18, align 4
+ %8 = load float, float* %arrayidx18, align 4
%mul19 = fmul fast float %7, %8
%add20 = fadd fast float %add14, %mul19
store float %add20, float* %C.addr.038, align 4
@@ -389,9 +389,9 @@ entry:
br i1 %cmp17, label %for.body.lr.ph, label %for.end
for.body.lr.ph:
- %0 = load double* %B, align 8
+ %0 = load double, double* %B, align 8
%arrayidx4 = getelementptr inbounds double, double* %B, i64 1
- %1 = load double* %arrayidx4, align 8
+ %1 = load double, double* %arrayidx4, align 8
%2 = sext i32 %n to i64
br label %for.body
@@ -399,11 +399,11 @@ for.body:
%i.018 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%mul = shl nsw i64 %i.018, 2
%arrayidx2 = getelementptr inbounds double, double* %A, i64 %mul
- %3 = load double* %arrayidx2, align 8
+ %3 = load double, double* %arrayidx2, align 8
%mul3 = fmul fast double %0, %3
%add16 = or i64 %mul, 1
%arrayidx6 = getelementptr inbounds double, double* %A, i64 %add16
- %4 = load double* %arrayidx6, align 8
+ %4 = load double, double* %arrayidx6, align 8
%mul7 = fmul fast double %1, %4
%add8 = fadd fast double %mul3, %mul7
%arrayidx9 = getelementptr inbounds double, double* %C, i64 %i.018
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/implicitfloat.ll b/llvm/test/Transforms/SLPVectorizer/X86/implicitfloat.ll
index 3b80472ce3c..f7283f0d027 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/implicitfloat.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/implicitfloat.ll
@@ -9,13 +9,13 @@ target triple = "x86_64-apple-macosx10.8.0"
; CHECK: ret
define void @test1(double* %a, double* %b, double* %c) noimplicitfloat { ; <------ noimplicitfloat attribute here!
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll b/llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
index 9bc44f21097..26f0b9bfabe 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
@@ -20,14 +20,14 @@ for.body: ; preds = %for.inc, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%0 = shl nsw i64 %indvars.iv, 1
%arrayidx = getelementptr inbounds double, double* %A, i64 %0
- %1 = load double* %arrayidx, align 8
+ %1 = load double, double* %arrayidx, align 8
%mul1 = fmul double %conv, %1
%mul2 = fmul double %mul1, 7.000000e+00
%add = fadd double %mul2, 5.000000e+00
%InTreeUser = fadd double %add, %add ; <------------------ In tree user.
%2 = or i64 %0, 1
%arrayidx6 = getelementptr inbounds double, double* %A, i64 %2
- %3 = load double* %arrayidx6, align 8
+ %3 = load double, double* %arrayidx6, align 8
%mul8 = fmul double %conv, %3
%mul9 = fmul double %mul8, 4.000000e+00
%add10 = fadd double %mul9, 9.000000e+00
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/intrinsic.ll b/llvm/test/Transforms/SLPVectorizer/X86/intrinsic.ll
index 974d7e64b7b..cc5a4afe43d 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/intrinsic.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/intrinsic.ll
@@ -13,14 +13,14 @@ declare double @llvm.fabs.f64(double) nounwind readnone
;CHECK: ret
define void @vec_fabs_f64(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @llvm.fabs.f64(double %mul) nounwind readnone
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @llvm.fabs.f64(double %mul5) nounwind readnone
store double %call, double* %c, align 8
@@ -39,31 +39,31 @@ declare float @llvm.copysign.f32(float, float) nounwind readnone
;CHECK: ret
define void @vec_copysign_f32(float* %a, float* %b, float* noalias %c) {
entry:
- %0 = load float* %a, align 4
- %1 = load float* %b, align 4
+ %0 = load float, float* %a, align 4
+ %1 = load float, float* %b, align 4
%call0 = tail call float @llvm.copysign.f32(float %0, float %1) nounwind readnone
store float %call0, float* %c, align 4
%ix2 = getelementptr inbounds float, float* %a, i64 1
- %2 = load float* %ix2, align 4
+ %2 = load float, float* %ix2, align 4
%ix3 = getelementptr inbounds float, float* %b, i64 1
- %3 = load float* %ix3, align 4
+ %3 = load float, float* %ix3, align 4
%call1 = tail call float @llvm.copysign.f32(float %2, float %3) nounwind readnone
%c1 = getelementptr inbounds float, float* %c, i64 1
store float %call1, float* %c1, align 4
%ix4 = getelementptr inbounds float, float* %a, i64 2
- %4 = load float* %ix4, align 4
+ %4 = load float, float* %ix4, align 4
%ix5 = getelementptr inbounds float, float* %b, i64 2
- %5 = load float* %ix5, align 4
+ %5 = load float, float* %ix5, align 4
%call2 = tail call float @llvm.copysign.f32(float %4, float %5) nounwind readnone
%c2 = getelementptr inbounds float, float* %c, i64 2
store float %call2, float* %c2, align 4
%ix6 = getelementptr inbounds float, float* %a, i64 3
- %6 = load float* %ix6, align 4
+ %6 = load float, float* %ix6, align 4
%ix7 = getelementptr inbounds float, float* %b, i64 3
- %7 = load float* %ix7, align 4
+ %7 = load float, float* %ix7, align 4
%call3 = tail call float @llvm.copysign.f32(float %6, float %7) nounwind readnone
%c3 = getelementptr inbounds float, float* %c, i64 3
store float %call3, float* %c3, align 4
@@ -75,29 +75,29 @@ declare i32 @llvm.bswap.i32(i32) nounwind readnone
define void @vec_bswap_i32(i32* %a, i32* %b, i32* %c) {
entry:
- %i0 = load i32* %a, align 4
- %i1 = load i32* %b, align 4
+ %i0 = load i32, i32* %a, align 4
+ %i1 = load i32, i32* %b, align 4
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.bswap.i32(i32 %add1) nounwind readnone
%arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
- %i2 = load i32* %arrayidx2, align 4
+ %i2 = load i32, i32* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
- %i3 = load i32* %arrayidx3, align 4
+ %i3 = load i32, i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.bswap.i32(i32 %add2) nounwind readnone
%arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
- %i4 = load i32* %arrayidx4, align 4
+ %i4 = load i32, i32* %arrayidx4, align 4
%arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
- %i5 = load i32* %arrayidx5, align 4
+ %i5 = load i32, i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.bswap.i32(i32 %add3) nounwind readnone
%arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
- %i6 = load i32* %arrayidx6, align 4
+ %i6 = load i32, i32* %arrayidx6, align 4
%arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
- %i7 = load i32* %arrayidx7, align 4
+ %i7 = load i32, i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.bswap.i32(i32 %add4) nounwind readnone
@@ -122,29 +122,29 @@ declare i32 @llvm.ctlz.i32(i32,i1) nounwind readnone
define void @vec_ctlz_i32(i32* %a, i32* %b, i32* %c, i1) {
entry:
- %i0 = load i32* %a, align 4
- %i1 = load i32* %b, align 4
+ %i0 = load i32, i32* %a, align 4
+ %i1 = load i32, i32* %b, align 4
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.ctlz.i32(i32 %add1,i1 true) nounwind readnone
%arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
- %i2 = load i32* %arrayidx2, align 4
+ %i2 = load i32, i32* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
- %i3 = load i32* %arrayidx3, align 4
+ %i3 = load i32, i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.ctlz.i32(i32 %add2,i1 true) nounwind readnone
%arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
- %i4 = load i32* %arrayidx4, align 4
+ %i4 = load i32, i32* %arrayidx4, align 4
%arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
- %i5 = load i32* %arrayidx5, align 4
+ %i5 = load i32, i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.ctlz.i32(i32 %add3,i1 true) nounwind readnone
%arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
- %i6 = load i32* %arrayidx6, align 4
+ %i6 = load i32, i32* %arrayidx6, align 4
%arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
- %i7 = load i32* %arrayidx7, align 4
+ %i7 = load i32, i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.ctlz.i32(i32 %add4,i1 true) nounwind readnone
@@ -167,29 +167,29 @@ entry:
define void @vec_ctlz_i32_neg(i32* %a, i32* %b, i32* %c, i1) {
entry:
- %i0 = load i32* %a, align 4
- %i1 = load i32* %b, align 4
+ %i0 = load i32, i32* %a, align 4
+ %i1 = load i32, i32* %b, align 4
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.ctlz.i32(i32 %add1,i1 true) nounwind readnone
%arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
- %i2 = load i32* %arrayidx2, align 4
+ %i2 = load i32, i32* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
- %i3 = load i32* %arrayidx3, align 4
+ %i3 = load i32, i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.ctlz.i32(i32 %add2,i1 false) nounwind readnone
%arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
- %i4 = load i32* %arrayidx4, align 4
+ %i4 = load i32, i32* %arrayidx4, align 4
%arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
- %i5 = load i32* %arrayidx5, align 4
+ %i5 = load i32, i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.ctlz.i32(i32 %add3,i1 true) nounwind readnone
%arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
- %i6 = load i32* %arrayidx6, align 4
+ %i6 = load i32, i32* %arrayidx6, align 4
%arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
- %i7 = load i32* %arrayidx7, align 4
+ %i7 = load i32, i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.ctlz.i32(i32 %add4,i1 false) nounwind readnone
@@ -212,29 +212,29 @@ declare i32 @llvm.cttz.i32(i32,i1) nounwind readnone
define void @vec_cttz_i32(i32* %a, i32* %b, i32* %c, i1) {
entry:
- %i0 = load i32* %a, align 4
- %i1 = load i32* %b, align 4
+ %i0 = load i32, i32* %a, align 4
+ %i1 = load i32, i32* %b, align 4
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.cttz.i32(i32 %add1,i1 true) nounwind readnone
%arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
- %i2 = load i32* %arrayidx2, align 4
+ %i2 = load i32, i32* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
- %i3 = load i32* %arrayidx3, align 4
+ %i3 = load i32, i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.cttz.i32(i32 %add2,i1 true) nounwind readnone
%arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
- %i4 = load i32* %arrayidx4, align 4
+ %i4 = load i32, i32* %arrayidx4, align 4
%arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
- %i5 = load i32* %arrayidx5, align 4
+ %i5 = load i32, i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.cttz.i32(i32 %add3,i1 true) nounwind readnone
%arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
- %i6 = load i32* %arrayidx6, align 4
+ %i6 = load i32, i32* %arrayidx6, align 4
%arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
- %i7 = load i32* %arrayidx7, align 4
+ %i7 = load i32, i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.cttz.i32(i32 %add4,i1 true) nounwind readnone
@@ -257,29 +257,29 @@ entry:
define void @vec_cttz_i32_neg(i32* %a, i32* %b, i32* %c, i1) {
entry:
- %i0 = load i32* %a, align 4
- %i1 = load i32* %b, align 4
+ %i0 = load i32, i32* %a, align 4
+ %i1 = load i32, i32* %b, align 4
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.cttz.i32(i32 %add1,i1 true) nounwind readnone
%arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
- %i2 = load i32* %arrayidx2, align 4
+ %i2 = load i32, i32* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
- %i3 = load i32* %arrayidx3, align 4
+ %i3 = load i32, i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.cttz.i32(i32 %add2,i1 false) nounwind readnone
%arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
- %i4 = load i32* %arrayidx4, align 4
+ %i4 = load i32, i32* %arrayidx4, align 4
%arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
- %i5 = load i32* %arrayidx5, align 4
+ %i5 = load i32, i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.cttz.i32(i32 %add3,i1 true) nounwind readnone
%arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
- %i6 = load i32* %arrayidx6, align 4
+ %i6 = load i32, i32* %arrayidx6, align 4
%arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
- %i7 = load i32* %arrayidx7, align 4
+ %i7 = load i32, i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.cttz.i32(i32 %add4,i1 false) nounwind readnone
@@ -300,29 +300,29 @@ entry:
declare float @llvm.powi.f32(float, i32)
define void @vec_powi_f32(float* %a, float* %b, float* %c, i32 %P) {
entry:
- %i0 = load float* %a, align 4
- %i1 = load float* %b, align 4
+ %i0 = load float, float* %a, align 4
+ %i1 = load float, float* %b, align 4
%add1 = fadd float %i0, %i1
%call1 = tail call float @llvm.powi.f32(float %add1,i32 %P) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %a, i32 1
- %i2 = load float* %arrayidx2, align 4
+ %i2 = load float, float* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds float, float* %b, i32 1
- %i3 = load float* %arrayidx3, align 4
+ %i3 = load float, float* %arrayidx3, align 4
%add2 = fadd float %i2, %i3
%call2 = tail call float @llvm.powi.f32(float %add2,i32 %P) nounwind readnone
%arrayidx4 = getelementptr inbounds float, float* %a, i32 2
- %i4 = load float* %arrayidx4, align 4
+ %i4 = load float, float* %arrayidx4, align 4
%arrayidx5 = getelementptr inbounds float, float* %b, i32 2
- %i5 = load float* %arrayidx5, align 4
+ %i5 = load float, float* %arrayidx5, align 4
%add3 = fadd float %i4, %i5
%call3 = tail call float @llvm.powi.f32(float %add3,i32 %P) nounwind readnone
%arrayidx6 = getelementptr inbounds float, float* %a, i32 3
- %i6 = load float* %arrayidx6, align 4
+ %i6 = load float, float* %arrayidx6, align 4
%arrayidx7 = getelementptr inbounds float, float* %b, i32 3
- %i7 = load float* %arrayidx7, align 4
+ %i7 = load float, float* %arrayidx7, align 4
%add4 = fadd float %i6, %i7
%call4 = tail call float @llvm.powi.f32(float %add4,i32 %P) nounwind readnone
@@ -346,29 +346,29 @@ entry:
define void @vec_powi_f32_neg(float* %a, float* %b, float* %c, i32 %P, i32 %Q) {
entry:
- %i0 = load float* %a, align 4
- %i1 = load float* %b, align 4
+ %i0 = load float, float* %a, align 4
+ %i1 = load float, float* %b, align 4
%add1 = fadd float %i0, %i1
%call1 = tail call float @llvm.powi.f32(float %add1,i32 %P) nounwind readnone
%arrayidx2 = getelementptr inbounds float, float* %a, i32 1
- %i2 = load float* %arrayidx2, align 4
+ %i2 = load float, float* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds float, float* %b, i32 1
- %i3 = load float* %arrayidx3, align 4
+ %i3 = load float, float* %arrayidx3, align 4
%add2 = fadd float %i2, %i3
%call2 = tail call float @llvm.powi.f32(float %add2,i32 %Q) nounwind readnone
%arrayidx4 = getelementptr inbounds float, float* %a, i32 2
- %i4 = load float* %arrayidx4, align 4
+ %i4 = load float, float* %arrayidx4, align 4
%arrayidx5 = getelementptr inbounds float, float* %b, i32 2
- %i5 = load float* %arrayidx5, align 4
+ %i5 = load float, float* %arrayidx5, align 4
%add3 = fadd float %i4, %i5
%call3 = tail call float @llvm.powi.f32(float %add3,i32 %P) nounwind readnone
%arrayidx6 = getelementptr inbounds float, float* %a, i32 3
- %i6 = load float* %arrayidx6, align 4
+ %i6 = load float, float* %arrayidx6, align 4
%arrayidx7 = getelementptr inbounds float, float* %b, i32 3
- %i7 = load float* %arrayidx7, align 4
+ %i7 = load float, float* %arrayidx7, align 4
%add4 = fadd float %i6, %i7
%call4 = tail call float @llvm.powi.f32(float %add4,i32 %Q) nounwind readnone
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/long_chains.ll b/llvm/test/Transforms/SLPVectorizer/X86/long_chains.ll
index cd9d59f5c34..f87dabf4c9f 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/long_chains.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/long_chains.ll
@@ -13,9 +13,9 @@ target triple = "x86_64-apple-macosx10.8.0"
; CHECK: ret
define i32 @test(double* nocapture %A, i8* nocapture %B) {
entry:
- %0 = load i8* %B, align 1
+ %0 = load i8, i8* %B, align 1
%arrayidx1 = getelementptr inbounds i8, i8* %B, i64 1
- %1 = load i8* %arrayidx1, align 1
+ %1 = load i8, i8* %arrayidx1, align 1
%add = add i8 %0, 3
%add4 = add i8 %1, 3
%conv6 = sitofp i8 %add to double
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/loopinvariant.ll b/llvm/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
index c113d89140e..0c16c34a188 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
@@ -19,42 +19,42 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add1 = add nsw i32 %0, %n
store i32 %add1, i32* %arrayidx, align 4
%1 = or i64 %indvars.iv, 1
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %1
- %2 = load i32* %arrayidx4, align 4
+ %2 = load i32, i32* %arrayidx4, align 4
%add5 = add nsw i32 %2, %n
store i32 %add5, i32* %arrayidx4, align 4
%3 = or i64 %indvars.iv, 2
%arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %3
- %4 = load i32* %arrayidx8, align 4
+ %4 = load i32, i32* %arrayidx8, align 4
%add9 = add nsw i32 %4, %n
store i32 %add9, i32* %arrayidx8, align 4
%5 = or i64 %indvars.iv, 3
%arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %5
- %6 = load i32* %arrayidx12, align 4
+ %6 = load i32, i32* %arrayidx12, align 4
%add13 = add nsw i32 %6, %n
store i32 %add13, i32* %arrayidx12, align 4
%7 = or i64 %indvars.iv, 4
%arrayidx16 = getelementptr inbounds i32, i32* %A, i64 %7
- %8 = load i32* %arrayidx16, align 4
+ %8 = load i32, i32* %arrayidx16, align 4
%add17 = add nsw i32 %8, %n
store i32 %add17, i32* %arrayidx16, align 4
%9 = or i64 %indvars.iv, 5
%arrayidx20 = getelementptr inbounds i32, i32* %A, i64 %9
- %10 = load i32* %arrayidx20, align 4
+ %10 = load i32, i32* %arrayidx20, align 4
%add21 = add nsw i32 %10, %n
store i32 %add21, i32* %arrayidx20, align 4
%11 = or i64 %indvars.iv, 6
%arrayidx24 = getelementptr inbounds i32, i32* %A, i64 %11
- %12 = load i32* %arrayidx24, align 4
+ %12 = load i32, i32* %arrayidx24, align 4
%add25 = add nsw i32 %12, %n
store i32 %add25, i32* %arrayidx24, align 4
%13 = or i64 %indvars.iv, 7
%arrayidx28 = getelementptr inbounds i32, i32* %A, i64 %13
- %14 = load i32* %arrayidx28, align 4
+ %14 = load i32, i32* %arrayidx28, align 4
%add29 = add nsw i32 %14, %n
store i32 %add29, i32* %arrayidx28, align 4
%indvars.iv.next = add i64 %indvars.iv, 8
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/metadata.ll b/llvm/test/Transforms/SLPVectorizer/X86/metadata.ll
index 0aa1d12c674..ebef6b53c6a 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/metadata.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/metadata.ll
@@ -12,13 +12,13 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @test1(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8, !tbaa !4
- %i1 = load double* %b, align 8, !tbaa !4
+ %i0 = load double, double* %a, align 8, !tbaa !4
+ %i1 = load double, double* %b, align 8, !tbaa !4
%mul = fmul double %i0, %i1, !fpmath !0
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8, !tbaa !4
+ %i3 = load double, double* %arrayidx3, align 8, !tbaa !4
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8, !tbaa !4
+ %i4 = load double, double* %arrayidx4, align 8, !tbaa !4
%mul5 = fmul double %i3, %i4, !fpmath !0
store double %mul, double* %c, align 8, !tbaa !4
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
@@ -35,13 +35,13 @@ entry:
define void @test2(double* %a, double* %b, i8* %e) {
entry:
- %i0 = load double* %a, align 8, !tbaa !4
- %i1 = load double* %b, align 8, !tbaa !4
+ %i0 = load double, double* %a, align 8, !tbaa !4
+ %i1 = load double, double* %b, align 8, !tbaa !4
%mul = fmul double %i0, %i1, !fpmath !1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8, !tbaa !4
+ %i3 = load double, double* %arrayidx3, align 8, !tbaa !4
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8, !tbaa !4
+ %i4 = load double, double* %arrayidx4, align 8, !tbaa !4
%mul5 = fmul double %i3, %i4, !fpmath !1
%c = bitcast i8* %e to double*
store double %mul, double* %c, align 8, !tbaa !4
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/multi_block.ll b/llvm/test/Transforms/SLPVectorizer/X86/multi_block.ll
index 91f9ad53d33..993054a090c 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/multi_block.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/multi_block.ll
@@ -25,9 +25,9 @@ target triple = "x86_64-apple-macosx10.7.0"
;CHECK: store <2 x double>
;CHECK: ret
define i32 @bar(double* nocapture %A, i32 %d) {
- %1 = load double* %A, align 8
+ %1 = load double, double* %A, align 8
%2 = getelementptr inbounds double, double* %A, i64 1
- %3 = load double* %2, align 8
+ %3 = load double, double* %2, align 8
%4 = fptrunc double %1 to float
%5 = fptrunc double %3 to float
%6 = icmp eq i32 %d, 0
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/multi_user.ll b/llvm/test/Transforms/SLPVectorizer/X86/multi_user.ll
index 93204e92982..3197f6db266 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/multi_user.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/multi_user.ll
@@ -20,27 +20,27 @@ target triple = "x86_64-apple-macosx10.7.0"
define i32 @foo(i32* nocapture %A, i32 %n) {
%1 = mul nsw i32 %n, 5
%2 = add nsw i32 %1, 7
- %3 = load i32* %A, align 4
+ %3 = load i32, i32* %A, align 4
%4 = add nsw i32 %2, %3
store i32 %4, i32* %A, align 4
%5 = add nsw i32 %1, 8
%6 = getelementptr inbounds i32, i32* %A, i64 1
- %7 = load i32* %6, align 4
+ %7 = load i32, i32* %6, align 4
%8 = add nsw i32 %5, %7
store i32 %8, i32* %6, align 4
%9 = add nsw i32 %1, 9
%10 = getelementptr inbounds i32, i32* %A, i64 2
- %11 = load i32* %10, align 4
+ %11 = load i32, i32* %10, align 4
%12 = add nsw i32 %9, %11
store i32 %12, i32* %10, align 4
%13 = add nsw i32 %1, 10
%14 = getelementptr inbounds i32, i32* %A, i64 3
- %15 = load i32* %14, align 4
+ %15 = load i32, i32* %14, align 4
%16 = add nsw i32 %13, %15
store i32 %16, i32* %14, align 4
%17 = add nsw i32 %1, 11
%18 = getelementptr inbounds i32, i32* %A, i64 4
- %19 = load i32* %18, align 4
+ %19 = load i32, i32* %18, align 4
%20 = add nsw i32 %17, %19
store i32 %20, i32* %18, align 4
ret i32 undef
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/odd_store.ll b/llvm/test/Transforms/SLPVectorizer/X86/odd_store.ll
index 6c8beb8d45c..25f049adf47 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/odd_store.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/odd_store.ll
@@ -19,14 +19,14 @@ target triple = "x86_64-apple-macosx10.8.0"
;CHECK: ret
define i32 @foo(i8* noalias nocapture %A, float* noalias nocapture %B, float %T) {
%1 = getelementptr inbounds float, float* %B, i64 10
- %2 = load float* %1, align 4
+ %2 = load float, float* %1, align 4
%3 = fmul float %2, %T
%4 = fpext float %3 to double
%5 = fadd double %4, 4.000000e+00
%6 = fptosi double %5 to i8
store i8 %6, i8* %A, align 1
%7 = getelementptr inbounds float, float* %B, i64 11
- %8 = load float* %7, align 4
+ %8 = load float, float* %7, align 4
%9 = fmul float %8, %T
%10 = fpext float %9 to double
%11 = fadd double %10, 5.000000e+00
@@ -34,7 +34,7 @@ define i32 @foo(i8* noalias nocapture %A, float* noalias nocapture %B, float %T)
%13 = getelementptr inbounds i8, i8* %A, i64 1
store i8 %12, i8* %13, align 1
%14 = getelementptr inbounds float, float* %B, i64 12
- %15 = load float* %14, align 4
+ %15 = load float, float* %14, align 4
%16 = fmul float %15, %T
%17 = fpext float %16 to double
%18 = fadd double %17, 6.000000e+00
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll b/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll
index 929c3c21c25..4a88dbf0de4 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll
@@ -14,8 +14,8 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
define void @shuffle_operands1(double * noalias %from, double * noalias %to,
double %v1, double %v2) {
%from_1 = getelementptr double, double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %v0_1, %v1
%v1_2 = fadd double %v2, %v0_2
%to_2 = getelementptr double, double * %to, i64 1
@@ -36,8 +36,8 @@ br label %lp
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
%from_1 = getelementptr double, double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %v0_1, %p
%v1_2 = fadd double %v0_1, %v0_2
%to_2 = getelementptr double, double * %to, i64 1
@@ -61,8 +61,8 @@ br label %lp
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
%from_1 = getelementptr double, double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %p, %v0_1
%v1_2 = fadd double %v0_2, %v0_1
%to_2 = getelementptr double, double * %to, i64 1
@@ -86,8 +86,8 @@ br label %lp
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
%from_1 = getelementptr double, double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %p, %v0_1
%v1_2 = fadd double %v0_1, %v0_2
%to_2 = getelementptr double, double * %to, i64 1
@@ -112,8 +112,8 @@ br label %lp
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
%from_1 = getelementptr double, double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %v0_2, %v0_1
%v1_2 = fadd double %p, %v0_1
%to_2 = getelementptr double, double * %to, i64 1
@@ -137,8 +137,8 @@ br label %lp
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
%from_1 = getelementptr double, double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %v0_1, %v0_2
%v1_2 = fadd double %p, %v0_1
%to_2 = getelementptr double, double * %to, i64 1
@@ -163,8 +163,8 @@ br label %lp
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
%from_1 = getelementptr double, double *%from, i64 1
- %v0_1 = load double * %from
- %v0_2 = load double * %from_1
+ %v0_1 = load double , double * %from
+ %v0_2 = load double , double * %from_1
%v1_1 = fadd double %v0_1, %v0_2
%v1_2 = fadd double %v0_1, %p
%to_2 = getelementptr double, double * %to, i64 1
@@ -181,7 +181,7 @@ ext:
; CHECK-LABEL: good_load_order
-; CHECK: %[[V1:[0-9]+]] = load <4 x float>*
+; CHECK: %[[V1:[0-9]+]] = load <4 x float>, <4 x float>*
; CHECK: %[[V2:[0-9]+]] = insertelement <4 x float> undef, float %1, i32 0
; CHECK: %[[V3:[0-9]+]] = shufflevector <4 x float> %[[V2]], <4 x float> %[[V1]], <4 x i32> <i32 0, i32 4, i32 5, i32 6>
; CHECK: = fmul <4 x float> %[[V1]], %[[V3]]
@@ -193,7 +193,7 @@ entry:
br label %for.cond1.preheader
for.cond1.preheader:
- %0 = load float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), align 16
+ %0 = load float, float* getelementptr inbounds ([32000 x float]* @a, i64 0, i64 0), align 16
br label %for.body3
for.body3:
@@ -201,28 +201,28 @@ for.body3:
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
%2 = add nsw i64 %indvars.iv, 1
%arrayidx = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %2
- %3 = load float* %arrayidx, align 4
+ %3 = load float, float* %arrayidx, align 4
%arrayidx5 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv
%mul6 = fmul float %3, %1
store float %mul6, float* %arrayidx5, align 4
%4 = add nsw i64 %indvars.iv, 2
%arrayidx11 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %4
- %5 = load float* %arrayidx11, align 4
+ %5 = load float, float* %arrayidx11, align 4
%mul15 = fmul float %5, %3
store float %mul15, float* %arrayidx, align 4
%6 = add nsw i64 %indvars.iv, 3
%arrayidx21 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %6
- %7 = load float* %arrayidx21, align 4
+ %7 = load float, float* %arrayidx21, align 4
%mul25 = fmul float %7, %5
store float %mul25, float* %arrayidx11, align 4
%8 = add nsw i64 %indvars.iv, 4
%arrayidx31 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %8
- %9 = load float* %arrayidx31, align 4
+ %9 = load float, float* %arrayidx31, align 4
%mul35 = fmul float %9, %7
store float %mul35, float* %arrayidx21, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 5
%arrayidx41 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.next
- %10 = load float* %arrayidx41, align 4
+ %10 = load float, float* %arrayidx41, align 4
%mul45 = fmul float %10, %9
store float %mul45, float* %arrayidx31, align 4
%11 = trunc i64 %indvars.iv.next to i32
@@ -238,17 +238,17 @@ for.end:
; c[1] = b[1]+a[1]; // swapped b[1] and a[1]
; CHECK-LABEL: load_reorder_double
-; CHECK: load <2 x double>*
+; CHECK: load <2 x double>, <2 x double>*
; CHECK: fadd <2 x double>
define void @load_reorder_double(double* nocapture %c, double* noalias nocapture readonly %a, double* noalias nocapture readonly %b){
- %1 = load double* %a
- %2 = load double* %b
+ %1 = load double, double* %a
+ %2 = load double, double* %b
%3 = fadd double %1, %2
store double %3, double* %c
%4 = getelementptr inbounds double, double* %b, i64 1
- %5 = load double* %4
+ %5 = load double, double* %4
%6 = getelementptr inbounds double, double* %a, i64 1
- %7 = load double* %6
+ %7 = load double, double* %6
%8 = fadd double %5, %7
%9 = getelementptr inbounds double, double* %c, i64 1
store double %8, double* %9
@@ -262,31 +262,31 @@ define void @load_reorder_double(double* nocapture %c, double* noalias nocapture
; c[3] = a[3]+b[3];
; CHECK-LABEL: load_reorder_float
-; CHECK: load <4 x float>*
+; CHECK: load <4 x float>, <4 x float>*
; CHECK: fadd <4 x float>
define void @load_reorder_float(float* nocapture %c, float* noalias nocapture readonly %a, float* noalias nocapture readonly %b){
- %1 = load float* %a
- %2 = load float* %b
+ %1 = load float, float* %a
+ %2 = load float, float* %b
%3 = fadd float %1, %2
store float %3, float* %c
%4 = getelementptr inbounds float, float* %b, i64 1
- %5 = load float* %4
+ %5 = load float, float* %4
%6 = getelementptr inbounds float, float* %a, i64 1
- %7 = load float* %6
+ %7 = load float, float* %6
%8 = fadd float %5, %7
%9 = getelementptr inbounds float, float* %c, i64 1
store float %8, float* %9
%10 = getelementptr inbounds float, float* %a, i64 2
- %11 = load float* %10
+ %11 = load float, float* %10
%12 = getelementptr inbounds float, float* %b, i64 2
- %13 = load float* %12
+ %13 = load float, float* %12
%14 = fadd float %11, %13
%15 = getelementptr inbounds float, float* %c, i64 2
store float %14, float* %15
%16 = getelementptr inbounds float, float* %a, i64 3
- %17 = load float* %16
+ %17 = load float, float* %16
%18 = getelementptr inbounds float, float* %b, i64 3
- %19 = load float* %18
+ %19 = load float, float* %18
%20 = fadd float %17, %19
%21 = getelementptr inbounds float, float* %c, i64 3
store float %20, float* %21
@@ -300,43 +300,43 @@ define void @load_reorder_float(float* nocapture %c, float* noalias nocapture re
; a[3] = (b[3]+c[3])+d[3];
; CHECK-LABEL: opcode_reorder
-; CHECK: load <4 x float>*
+; CHECK: load <4 x float>, <4 x float>*
; CHECK: fadd <4 x float>
define void @opcode_reorder(float* noalias nocapture %a, float* noalias nocapture readonly %b,
float* noalias nocapture readonly %c,float* noalias nocapture readonly %d){
- %1 = load float* %b
- %2 = load float* %c
+ %1 = load float, float* %b
+ %2 = load float, float* %c
%3 = fadd float %1, %2
- %4 = load float* %d
+ %4 = load float, float* %d
%5 = fadd float %3, %4
store float %5, float* %a
%6 = getelementptr inbounds float, float* %d, i64 1
- %7 = load float* %6
+ %7 = load float, float* %6
%8 = getelementptr inbounds float, float* %b, i64 1
- %9 = load float* %8
+ %9 = load float, float* %8
%10 = getelementptr inbounds float, float* %c, i64 1
- %11 = load float* %10
+ %11 = load float, float* %10
%12 = fadd float %9, %11
%13 = fadd float %7, %12
%14 = getelementptr inbounds float, float* %a, i64 1
store float %13, float* %14
%15 = getelementptr inbounds float, float* %b, i64 2
- %16 = load float* %15
+ %16 = load float, float* %15
%17 = getelementptr inbounds float, float* %c, i64 2
- %18 = load float* %17
+ %18 = load float, float* %17
%19 = fadd float %16, %18
%20 = getelementptr inbounds float, float* %d, i64 2
- %21 = load float* %20
+ %21 = load float, float* %20
%22 = fadd float %19, %21
%23 = getelementptr inbounds float, float* %a, i64 2
store float %22, float* %23
%24 = getelementptr inbounds float, float* %b, i64 3
- %25 = load float* %24
+ %25 = load float, float* %24
%26 = getelementptr inbounds float, float* %c, i64 3
- %27 = load float* %26
+ %27 = load float, float* %26
%28 = fadd float %25, %27
%29 = getelementptr inbounds float, float* %d, i64 3
- %30 = load float* %29
+ %30 = load float, float* %29
%31 = fadd float %28, %30
%32 = getelementptr inbounds float, float* %a, i64 3
store float %31, float* %32
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/opt.ll b/llvm/test/Transforms/SLPVectorizer/X86/opt.ll
index d6954caadc5..824e9992af0 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/opt.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/opt.ll
@@ -15,13 +15,13 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @test1(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/ordering.ll b/llvm/test/Transforms/SLPVectorizer/X86/ordering.ll
index d2ecd4546dd..0fa72c94c27 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/ordering.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/ordering.ll
@@ -5,7 +5,7 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @updateModelQPFrame(i32 %m_Bits) {
entry:
- %0 = load double* undef, align 8
+ %0 = load double, double* undef, align 8
%mul = fmul double undef, %0
%mul2 = fmul double undef, %mul
%mul4 = fmul double %0, %mul2
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/phi.ll b/llvm/test/Transforms/SLPVectorizer/X86/phi.ll
index 7654577c365..ef94467f509 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/phi.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/phi.ll
@@ -30,9 +30,9 @@ entry:
if.else: ; preds = %entry
%arrayidx = getelementptr inbounds double, double* %A, i64 10
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
%arrayidx1 = getelementptr inbounds double, double* %A, i64 11
- %1 = load double* %arrayidx1, align 8
+ %1 = load double, double* %arrayidx1, align 8
br label %if.end
if.end: ; preds = %entry, %if.else
@@ -70,8 +70,8 @@ if.end: ; preds = %entry, %if.else
define i32 @foo2(double* noalias nocapture %B, double* noalias nocapture %A, i32 %n, i32 %m) #0 {
entry:
%arrayidx = getelementptr inbounds double, double* %A, i64 1
- %0 = load double* %arrayidx, align 8
- %1 = load double* %A, align 8
+ %0 = load double, double* %arrayidx, align 8
+ %1 = load double, double* %A, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
@@ -123,15 +123,15 @@ for.end: ; preds = %for.body
define float @foo3(float* nocapture readonly %A) #0 {
entry:
- %0 = load float* %A, align 4
+ %0 = load float, float* %A, align 4
%arrayidx1 = getelementptr inbounds float, float* %A, i64 1
- %1 = load float* %arrayidx1, align 4
+ %1 = load float, float* %arrayidx1, align 4
%arrayidx2 = getelementptr inbounds float, float* %A, i64 2
- %2 = load float* %arrayidx2, align 4
+ %2 = load float, float* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds float, float* %A, i64 3
- %3 = load float* %arrayidx3, align 4
+ %3 = load float, float* %arrayidx3, align 4
%arrayidx4 = getelementptr inbounds float, float* %A, i64 4
- %4 = load float* %arrayidx4, align 4
+ %4 = load float, float* %arrayidx4, align 4
br label %for.body
for.body: ; preds = %for.body, %entry
@@ -149,17 +149,17 @@ for.body: ; preds = %for.body, %entry
%add11 = fadd float %G.053, %mul10
%7 = add nsw i64 %indvars.iv, 2
%arrayidx14 = getelementptr inbounds float, float* %A, i64 %7
- %8 = load float* %arrayidx14, align 4
+ %8 = load float, float* %arrayidx14, align 4
%mul15 = fmul float %8, 9.000000e+00
%add16 = fadd float %B.054, %mul15
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 3
%arrayidx19 = getelementptr inbounds float, float* %A, i64 %indvars.iv.next
- %9 = load float* %arrayidx19, align 4
+ %9 = load float, float* %arrayidx19, align 4
%mul20 = fmul float %9, 1.000000e+01
%add21 = fadd float %Y.055, %mul20
%10 = add nsw i64 %indvars.iv, 4
%arrayidx24 = getelementptr inbounds float, float* %A, i64 %10
- %11 = load float* %arrayidx24, align 4
+ %11 = load float, float* %arrayidx24, align 4
%mul25 = fmul float %11, 1.100000e+01
%add26 = fadd float %P.056, %mul25
%12 = trunc i64 %indvars.iv.next to i32
@@ -215,22 +215,22 @@ define void @test(x86_fp80* %i1, x86_fp80* %i2, x86_fp80* %o) {
; We disable the vectorization of x86_fp80 for now.
entry:
- %i1.0 = load x86_fp80* %i1, align 16
+ %i1.0 = load x86_fp80, x86_fp80* %i1, align 16
%i1.gep1 = getelementptr x86_fp80, x86_fp80* %i1, i64 1
- %i1.1 = load x86_fp80* %i1.gep1, align 16
-; CHECK: load x86_fp80*
-; CHECK: load x86_fp80*
+ %i1.1 = load x86_fp80, x86_fp80* %i1.gep1, align 16
+; CHECK: load x86_fp80, x86_fp80*
+; CHECK: load x86_fp80, x86_fp80*
; CHECK-NOT: insertelement <2 x x86_fp80>
; CHECK-NOT: insertelement <2 x x86_fp80>
br i1 undef, label %then, label %end
then:
%i2.gep0 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 0
- %i2.0 = load x86_fp80* %i2.gep0, align 16
+ %i2.0 = load x86_fp80, x86_fp80* %i2.gep0, align 16
%i2.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 1
- %i2.1 = load x86_fp80* %i2.gep1, align 16
-; CHECK: load x86_fp80*
-; CHECK: load x86_fp80*
+ %i2.1 = load x86_fp80, x86_fp80* %i2.gep1, align 16
+; CHECK: load x86_fp80, x86_fp80*
+; CHECK: load x86_fp80, x86_fp80*
; CHECK-NOT: insertelement <2 x x86_fp80>
; CHECK-NOT: insertelement <2 x x86_fp80>
br label %end
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/phi3.ll b/llvm/test/Transforms/SLPVectorizer/X86/phi3.ll
index fd8d3613720..61628301aec 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/phi3.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/phi3.ll
@@ -12,7 +12,7 @@ declare %struct.GPar.0.16.26* @Rf_gpptr(...)
define void @Rf_GReset() {
entry:
%sub = fsub double -0.000000e+00, undef
- %0 = load double* @d, align 8
+ %0 = load double, double* @d, align 8
%sub1 = fsub double -0.000000e+00, %0
br i1 icmp eq (%struct.GPar.0.16.26* (...)* inttoptr (i64 115 to %struct.GPar.0.16.26* (...)*), %struct.GPar.0.16.26* (...)* @Rf_gpptr), label %if.then, label %if.end7
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll b/llvm/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll
index 3da83f9502f..fa08effcd64 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll
@@ -12,22 +12,22 @@ define void @test(double* %i1, double* %i2, double* %o) {
; size is less than the alignment, and through various different GEP formations.
entry:
- %i1.0 = load double* %i1, align 16
+ %i1.0 = load double, double* %i1, align 16
%i1.gep1 = getelementptr double, double* %i1, i64 1
- %i1.1 = load double* %i1.gep1, align 16
-; CHECK: load double*
-; CHECK: load double*
+ %i1.1 = load double, double* %i1.gep1, align 16
+; CHECK: load double, double*
+; CHECK: load double, double*
; CHECK: insertelement <2 x double>
; CHECK: insertelement <2 x double>
br i1 undef, label %then, label %end
then:
%i2.gep0 = getelementptr inbounds double, double* %i2, i64 0
- %i2.0 = load double* %i2.gep0, align 16
+ %i2.0 = load double, double* %i2.gep0, align 16
%i2.gep1 = getelementptr inbounds double, double* %i2, i64 1
- %i2.1 = load double* %i2.gep1, align 16
-; CHECK: load double*
-; CHECK: load double*
+ %i2.1 = load double, double* %i2.gep1, align 16
+; CHECK: load double, double*
+; CHECK: load double, double*
; CHECK: insertelement <2 x double>
; CHECK: insertelement <2 x double>
br label %end
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/powof2div.ll b/llvm/test/Transforms/SLPVectorizer/X86/powof2div.ll
index 02512b33795..a97b870f3f4 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/powof2div.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/powof2div.ll
@@ -4,36 +4,36 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
;CHECK-LABEL: @powof2div(
-;CHECK: load <4 x i32>*
+;CHECK: load <4 x i32>, <4 x i32>*
;CHECK: add nsw <4 x i32>
;CHECK: sdiv <4 x i32>
define void @powof2div(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* noalias nocapture readonly %c){
entry:
- %0 = load i32* %b, align 4
- %1 = load i32* %c, align 4
+ %0 = load i32, i32* %b, align 4
+ %1 = load i32, i32* %c, align 4
%add = add nsw i32 %1, %0
%div = sdiv i32 %add, 2
store i32 %div, i32* %a, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %b, i64 1
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %c, i64 1
- %3 = load i32* %arrayidx4, align 4
+ %3 = load i32, i32* %arrayidx4, align 4
%add5 = add nsw i32 %3, %2
%div6 = sdiv i32 %add5, 2
%arrayidx7 = getelementptr inbounds i32, i32* %a, i64 1
store i32 %div6, i32* %arrayidx7, align 4
%arrayidx8 = getelementptr inbounds i32, i32* %b, i64 2
- %4 = load i32* %arrayidx8, align 4
+ %4 = load i32, i32* %arrayidx8, align 4
%arrayidx9 = getelementptr inbounds i32, i32* %c, i64 2
- %5 = load i32* %arrayidx9, align 4
+ %5 = load i32, i32* %arrayidx9, align 4
%add10 = add nsw i32 %5, %4
%div11 = sdiv i32 %add10, 2
%arrayidx12 = getelementptr inbounds i32, i32* %a, i64 2
store i32 %div11, i32* %arrayidx12, align 4
%arrayidx13 = getelementptr inbounds i32, i32* %b, i64 3
- %6 = load i32* %arrayidx13, align 4
+ %6 = load i32, i32* %arrayidx13, align 4
%arrayidx14 = getelementptr inbounds i32, i32* %c, i64 3
- %7 = load i32* %arrayidx14, align 4
+ %7 = load i32, i32* %arrayidx14, align 4
%add15 = add nsw i32 %7, %6
%div16 = sdiv i32 %add15, 2
%arrayidx17 = getelementptr inbounds i32, i32* %a, i64 3
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr16628.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr16628.ll
index 3f9d775eeeb..c22ed34d33d 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr16628.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/pr16628.ll
@@ -11,10 +11,10 @@ target triple = "x86_64-apple-macosx10.9.0"
define void @f() {
entry:
%call = tail call i32 (...)* @g()
- %0 = load i32* @c, align 4
+ %0 = load i32, i32* @c, align 4
%lnot = icmp eq i32 %0, 0
%lnot.ext = zext i1 %lnot to i32
- %1 = load i16* @a, align 2
+ %1 = load i16, i16* @a, align 2
%lnot2 = icmp eq i16 %1, 0
%lnot.ext3 = zext i1 %lnot2 to i32
%or = or i32 %lnot.ext3, %lnot.ext
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr16899.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr16899.ll
index 5fe038eb814..0de14ec3585 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr16899.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/pr16899.ll
@@ -7,10 +7,10 @@ target triple = "i386--netbsd"
; Function Attrs: noreturn nounwind readonly
define i32 @fn1() #0 {
entry:
- %0 = load i32** @a, align 4, !tbaa !4
- %1 = load i32* %0, align 4, !tbaa !5
+ %0 = load i32*, i32** @a, align 4, !tbaa !4
+ %1 = load i32, i32* %0, align 4, !tbaa !5
%arrayidx1 = getelementptr inbounds i32, i32* %0, i32 1
- %2 = load i32* %arrayidx1, align 4, !tbaa !5
+ %2 = load i32, i32* %arrayidx1, align 4, !tbaa !5
br label %do.body
do.body: ; preds = %do.body, %entry
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr19657.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr19657.ll
index f5e24674e8f..a687aec7610 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr19657.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/pr19657.ll
@@ -3,7 +3,7 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-;CHECK: load <2 x double>*
+;CHECK: load <2 x double>, <2 x double>*
;CHECK: fadd <2 x double>
;CHECK: store <2 x double>
@@ -11,60 +11,60 @@ target triple = "x86_64-unknown-linux-gnu"
define void @foo(double* %x) #0 {
%1 = alloca double*, align 8
store double* %x, double** %1, align 8
- %2 = load double** %1, align 8
+ %2 = load double*, double** %1, align 8
%3 = getelementptr inbounds double, double* %2, i64 0
- %4 = load double* %3, align 8
- %5 = load double** %1, align 8
+ %4 = load double, double* %3, align 8
+ %5 = load double*, double** %1, align 8
%6 = getelementptr inbounds double, double* %5, i64 0
- %7 = load double* %6, align 8
+ %7 = load double, double* %6, align 8
%8 = fadd double %4, %7
- %9 = load double** %1, align 8
+ %9 = load double*, double** %1, align 8
%10 = getelementptr inbounds double, double* %9, i64 0
- %11 = load double* %10, align 8
+ %11 = load double, double* %10, align 8
%12 = fadd double %8, %11
- %13 = load double** %1, align 8
+ %13 = load double*, double** %1, align 8
%14 = getelementptr inbounds double, double* %13, i64 0
store double %12, double* %14, align 8
- %15 = load double** %1, align 8
+ %15 = load double*, double** %1, align 8
%16 = getelementptr inbounds double, double* %15, i64 1
- %17 = load double* %16, align 8
- %18 = load double** %1, align 8
+ %17 = load double, double* %16, align 8
+ %18 = load double*, double** %1, align 8
%19 = getelementptr inbounds double, double* %18, i64 1
- %20 = load double* %19, align 8
+ %20 = load double, double* %19, align 8
%21 = fadd double %17, %20
- %22 = load double** %1, align 8
+ %22 = load double*, double** %1, align 8
%23 = getelementptr inbounds double, double* %22, i64 1
- %24 = load double* %23, align 8
+ %24 = load double, double* %23, align 8
%25 = fadd double %21, %24
- %26 = load double** %1, align 8
+ %26 = load double*, double** %1, align 8
%27 = getelementptr inbounds double, double* %26, i64 1
store double %25, double* %27, align 8
- %28 = load double** %1, align 8
+ %28 = load double*, double** %1, align 8
%29 = getelementptr inbounds double, double* %28, i64 2
- %30 = load double* %29, align 8
- %31 = load double** %1, align 8
+ %30 = load double, double* %29, align 8
+ %31 = load double*, double** %1, align 8
%32 = getelementptr inbounds double, double* %31, i64 2
- %33 = load double* %32, align 8
+ %33 = load double, double* %32, align 8
%34 = fadd double %30, %33
- %35 = load double** %1, align 8
+ %35 = load double*, double** %1, align 8
%36 = getelementptr inbounds double, double* %35, i64 2
- %37 = load double* %36, align 8
+ %37 = load double, double* %36, align 8
%38 = fadd double %34, %37
- %39 = load double** %1, align 8
+ %39 = load double*, double** %1, align 8
%40 = getelementptr inbounds double, double* %39, i64 2
store double %38, double* %40, align 8
- %41 = load double** %1, align 8
+ %41 = load double*, double** %1, align 8
%42 = getelementptr inbounds double, double* %41, i64 3
- %43 = load double* %42, align 8
- %44 = load double** %1, align 8
+ %43 = load double, double* %42, align 8
+ %44 = load double*, double** %1, align 8
%45 = getelementptr inbounds double, double* %44, i64 3
- %46 = load double* %45, align 8
+ %46 = load double, double* %45, align 8
%47 = fadd double %43, %46
- %48 = load double** %1, align 8
+ %48 = load double*, double** %1, align 8
%49 = getelementptr inbounds double, double* %48, i64 3
- %50 = load double* %49, align 8
+ %50 = load double, double* %49, align 8
%51 = fadd double %47, %50
- %52 = load double** %1, align 8
+ %52 = load double*, double** %1, align 8
%53 = getelementptr inbounds double, double* %52, i64 3
store double %51, double* %53, align 8
ret void
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll b/llvm/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll
index 49ddd9bf8d6..a3b0c8ff027 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll
@@ -15,10 +15,10 @@ define void @exact(i32* %x) {
%idx3 = getelementptr inbounds i32, i32* %x, i64 2
%idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = lshr exact i32 %load1, 1
%op2 = lshr exact i32 %load2, 1
@@ -41,10 +41,10 @@ define void @not_exact(i32* %x) {
%idx3 = getelementptr inbounds i32, i32* %x, i64 2
%idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = lshr exact i32 %load1, 1
%op2 = lshr i32 %load2, 1
@@ -67,10 +67,10 @@ define void @nsw(i32* %x) {
%idx3 = getelementptr inbounds i32, i32* %x, i64 2
%idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add nsw i32 %load1, 1
%op2 = add nsw i32 %load2, 1
@@ -93,10 +93,10 @@ define void @not_nsw(i32* %x) {
%idx3 = getelementptr inbounds i32, i32* %x, i64 2
%idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add nsw i32 %load1, 1
%op2 = add nsw i32 %load2, 1
@@ -119,10 +119,10 @@ define void @nuw(i32* %x) {
%idx3 = getelementptr inbounds i32, i32* %x, i64 2
%idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add nuw i32 %load1, 1
%op2 = add nuw i32 %load2, 1
@@ -145,10 +145,10 @@ define void @not_nuw(i32* %x) {
%idx3 = getelementptr inbounds i32, i32* %x, i64 2
%idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add nuw i32 %load1, 1
%op2 = add i32 %load2, 1
@@ -171,10 +171,10 @@ define void @nnan(float* %x) {
%idx3 = getelementptr inbounds float, float* %x, i64 2
%idx4 = getelementptr inbounds float, float* %x, i64 3
- %load1 = load float* %idx1, align 4
- %load2 = load float* %idx2, align 4
- %load3 = load float* %idx3, align 4
- %load4 = load float* %idx4, align 4
+ %load1 = load float, float* %idx1, align 4
+ %load2 = load float, float* %idx2, align 4
+ %load3 = load float, float* %idx3, align 4
+ %load4 = load float, float* %idx4, align 4
%op1 = fadd fast nnan float %load1, 1.0
%op2 = fadd nnan ninf float %load2, 1.0
@@ -197,10 +197,10 @@ define void @not_nnan(float* %x) {
%idx3 = getelementptr inbounds float, float* %x, i64 2
%idx4 = getelementptr inbounds float, float* %x, i64 3
- %load1 = load float* %idx1, align 4
- %load2 = load float* %idx2, align 4
- %load3 = load float* %idx3, align 4
- %load4 = load float* %idx4, align 4
+ %load1 = load float, float* %idx1, align 4
+ %load2 = load float, float* %idx2, align 4
+ %load3 = load float, float* %idx3, align 4
+ %load4 = load float, float* %idx4, align 4
%op1 = fadd nnan float %load1, 1.0
%op2 = fadd ninf float %load2, 1.0
@@ -223,10 +223,10 @@ define void @only_fast(float* %x) {
%idx3 = getelementptr inbounds float, float* %x, i64 2
%idx4 = getelementptr inbounds float, float* %x, i64 3
- %load1 = load float* %idx1, align 4
- %load2 = load float* %idx2, align 4
- %load3 = load float* %idx3, align 4
- %load4 = load float* %idx4, align 4
+ %load1 = load float, float* %idx1, align 4
+ %load2 = load float, float* %idx2, align 4
+ %load3 = load float, float* %idx3, align 4
+ %load4 = load float, float* %idx4, align 4
%op1 = fadd fast nnan float %load1, 1.0
%op2 = fadd fast nnan ninf float %load2, 1.0
@@ -249,10 +249,10 @@ define void @only_arcp(float* %x) {
%idx3 = getelementptr inbounds float, float* %x, i64 2
%idx4 = getelementptr inbounds float, float* %x, i64 3
- %load1 = load float* %idx1, align 4
- %load2 = load float* %idx2, align 4
- %load3 = load float* %idx3, align 4
- %load4 = load float* %idx4, align 4
+ %load1 = load float, float* %idx1, align 4
+ %load2 = load float, float* %idx2, align 4
+ %load3 = load float, float* %idx3, align 4
+ %load4 = load float, float* %idx4, align 4
%op1 = fadd fast float %load1, 1.0
%op2 = fadd fast float %load2, 1.0
@@ -276,10 +276,10 @@ define void @addsub_all_nsw(i32* %x) {
%idx3 = getelementptr inbounds i32, i32* %x, i64 2
%idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add nsw i32 %load1, 1
%op2 = sub nsw i32 %load2, 1
@@ -303,10 +303,10 @@ define void @addsub_some_nsw(i32* %x) {
%idx3 = getelementptr inbounds i32, i32* %x, i64 2
%idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add nsw i32 %load1, 1
%op2 = sub nsw i32 %load2, 1
@@ -330,10 +330,10 @@ define void @addsub_no_nsw(i32* %x) {
%idx3 = getelementptr inbounds i32, i32* %x, i64 2
%idx4 = getelementptr inbounds i32, i32* %x, i64 3
- %load1 = load i32* %idx1, align 4
- %load2 = load i32* %idx2, align 4
- %load3 = load i32* %idx3, align 4
- %load4 = load i32* %idx4, align 4
+ %load1 = load i32, i32* %idx1, align 4
+ %load2 = load i32, i32* %idx2, align 4
+ %load3 = load i32, i32* %idx3, align 4
+ %load4 = load i32, i32* %idx4, align 4
%op1 = add i32 %load1, 1
%op2 = sub nsw i32 %load2, 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduction.ll
index 1dc63563c02..4c5f1266336 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduction.ll
@@ -24,11 +24,11 @@ for.body: ; preds = %entry, %for.body
%sum.014 = phi double [ %add6, %for.body ], [ 0.000000e+00, %entry ]
%mul = shl nsw i32 %i.015, 1
%arrayidx = getelementptr inbounds double, double* %A, i32 %mul
- %0 = load double* %arrayidx, align 4
+ %0 = load double, double* %arrayidx, align 4
%mul1 = fmul double %0, 7.000000e+00
%add12 = or i32 %mul, 1
%arrayidx3 = getelementptr inbounds double, double* %A, i32 %add12
- %1 = load double* %arrayidx3, align 4
+ %1 = load double, double* %arrayidx3, align 4
%mul4 = fmul double %1, 7.000000e+00
%add5 = fadd double %mul1, %mul4
%add6 = fadd double %sum.014, %add5
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll
index cd3175c41d5..507a61aa16f 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduction2.ll
@@ -14,12 +14,12 @@ define double @foo(double* nocapture %D) {
%sum.01 = phi double [ 0.000000e+00, %0 ], [ %9, %1 ]
%2 = shl nsw i32 %i.02, 1
%3 = getelementptr inbounds double, double* %D, i32 %2
- %4 = load double* %3, align 4
+ %4 = load double, double* %3, align 4
%A4 = fmul double %4, %4
%A42 = fmul double %A4, %A4
%5 = or i32 %2, 1
%6 = getelementptr inbounds double, double* %D, i32 %5
- %7 = load double* %6, align 4
+ %7 = load double, double* %6, align 4
%A7 = fmul double %7, %7
%A72 = fmul double %A7, %A7
%8 = fadd double %A42, %A72
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/return.ll b/llvm/test/Transforms/SLPVectorizer/X86/return.ll
index 13a6cf4df66..242edf5885f 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/return.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/return.ll
@@ -13,17 +13,17 @@ target triple = "x86_64--linux-gnu"
; }
; CHECK-LABEL: @return1
-; CHECK: %0 = load <2 x double>*
-; CHECK: %1 = load <2 x double>*
+; CHECK: %0 = load <2 x double>, <2 x double>*
+; CHECK: %1 = load <2 x double>, <2 x double>*
; CHECK: %2 = fadd <2 x double>
define double @return1() {
entry:
- %a0 = load double* getelementptr inbounds ([4 x double]* @a, i32 0, i32 0), align 8
- %b0 = load double* getelementptr inbounds ([4 x double]* @b, i32 0, i32 0), align 8
+ %a0 = load double, double* getelementptr inbounds ([4 x double]* @a, i32 0, i32 0), align 8
+ %b0 = load double, double* getelementptr inbounds ([4 x double]* @b, i32 0, i32 0), align 8
%add0 = fadd double %a0, %b0
- %a1 = load double* getelementptr inbounds ([4 x double]* @a, i32 0, i32 1), align 8
- %b1 = load double* getelementptr inbounds ([4 x double]* @b, i32 0, i32 1), align 8
+ %a1 = load double, double* getelementptr inbounds ([4 x double]* @a, i32 0, i32 1), align 8
+ %b1 = load double, double* getelementptr inbounds ([4 x double]* @b, i32 0, i32 1), align 8
%add1 = fadd double %a1, %b1
%add2 = fadd double %add0, %add1
ret double %add2
@@ -34,20 +34,20 @@ entry:
; }
; CHECK-LABEL: @return2
-; CHECK: %1 = load <2 x double>*
-; CHECK: %3 = load <2 x double>* %2
+; CHECK: %1 = load <2 x double>, <2 x double>*
+; CHECK: %3 = load <2 x double>, <2 x double>* %2
; CHECK: %4 = fadd <2 x double> %1, %3
define double @return2(double* nocapture readonly %x) {
entry:
- %x0 = load double* %x, align 4
+ %x0 = load double, double* %x, align 4
%arrayidx1 = getelementptr inbounds double, double* %x, i32 2
- %x2 = load double* %arrayidx1, align 4
+ %x2 = load double, double* %arrayidx1, align 4
%add3 = fadd double %x0, %x2
%arrayidx2 = getelementptr inbounds double, double* %x, i32 1
- %x1 = load double* %arrayidx2, align 4
+ %x1 = load double, double* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds double, double* %x, i32 3
- %x3 = load double* %arrayidx3, align 4
+ %x3 = load double, double* %arrayidx3, align 4
%add4 = fadd double %x1, %x3
%add5 = fadd double %add3, %add4
ret double %add5
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/rgb_phi.ll b/llvm/test/Transforms/SLPVectorizer/X86/rgb_phi.ll
index 2a3cc6dc40f..0bdb7dab172 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/rgb_phi.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/rgb_phi.ll
@@ -33,11 +33,11 @@ target triple = "i386-apple-macosx10.9.0"
define float @foo(float* nocapture readonly %A) {
entry:
- %0 = load float* %A, align 4
+ %0 = load float, float* %A, align 4
%arrayidx1 = getelementptr inbounds float, float* %A, i64 1
- %1 = load float* %arrayidx1, align 4
+ %1 = load float, float* %arrayidx1, align 4
%arrayidx2 = getelementptr inbounds float, float* %A, i64 2
- %2 = load float* %arrayidx2, align 4
+ %2 = load float, float* %arrayidx2, align 4
br label %for.body
for.body: ; preds = %for.body.for.body_crit_edge, %entry
@@ -50,12 +50,12 @@ for.body: ; preds = %for.body.for.body_c
%add4 = fadd float %R.030, %mul
%4 = add nsw i64 %indvars.iv, 1
%arrayidx7 = getelementptr inbounds float, float* %A, i64 %4
- %5 = load float* %arrayidx7, align 4
+ %5 = load float, float* %arrayidx7, align 4
%mul8 = fmul float %5, 8.000000e+00
%add9 = fadd float %G.031, %mul8
%6 = add nsw i64 %indvars.iv, 2
%arrayidx12 = getelementptr inbounds float, float* %A, i64 %6
- %7 = load float* %arrayidx12, align 4
+ %7 = load float, float* %arrayidx12, align 4
%mul13 = fmul float %7, 9.000000e+00
%add14 = fadd float %B.032, %mul13
%indvars.iv.next = add i64 %indvars.iv, 3
@@ -65,7 +65,7 @@ for.body: ; preds = %for.body.for.body_c
for.body.for.body_crit_edge: ; preds = %for.body
%arrayidx3.phi.trans.insert = getelementptr inbounds float, float* %A, i64 %indvars.iv.next
- %.pre = load float* %arrayidx3.phi.trans.insert, align 4
+ %.pre = load float, float* %arrayidx3.phi.trans.insert, align 4
br label %for.body
for.end: ; preds = %for.body
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/saxpy.ll b/llvm/test/Transforms/SLPVectorizer/X86/saxpy.ll
index da2654ad4bd..a9ca093c0cd 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/saxpy.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/saxpy.ll
@@ -10,34 +10,34 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @SAXPY(i32* noalias nocapture %x, i32* noalias nocapture %y, i32 %a, i64 %i) {
%1 = getelementptr inbounds i32, i32* %x, i64 %i
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = mul nsw i32 %2, %a
%4 = getelementptr inbounds i32, i32* %y, i64 %i
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = add nsw i32 %3, %5
store i32 %6, i32* %1, align 4
%7 = add i64 %i, 1
%8 = getelementptr inbounds i32, i32* %x, i64 %7
- %9 = load i32* %8, align 4
+ %9 = load i32, i32* %8, align 4
%10 = mul nsw i32 %9, %a
%11 = getelementptr inbounds i32, i32* %y, i64 %7
- %12 = load i32* %11, align 4
+ %12 = load i32, i32* %11, align 4
%13 = add nsw i32 %10, %12
store i32 %13, i32* %8, align 4
%14 = add i64 %i, 2
%15 = getelementptr inbounds i32, i32* %x, i64 %14
- %16 = load i32* %15, align 4
+ %16 = load i32, i32* %15, align 4
%17 = mul nsw i32 %16, %a
%18 = getelementptr inbounds i32, i32* %y, i64 %14
- %19 = load i32* %18, align 4
+ %19 = load i32, i32* %18, align 4
%20 = add nsw i32 %17, %19
store i32 %20, i32* %15, align 4
%21 = add i64 %i, 3
%22 = getelementptr inbounds i32, i32* %x, i64 %21
- %23 = load i32* %22, align 4
+ %23 = load i32, i32* %22, align 4
%24 = mul nsw i32 %23, %a
%25 = getelementptr inbounds i32, i32* %y, i64 %21
- %26 = load i32* %25, align 4
+ %26 = load i32, i32* %25, align 4
%27 = add nsw i32 %24, %26
store i32 %27, i32* %22, align 4
ret void
@@ -48,13 +48,13 @@ define void @SAXPY_crash(i32* noalias nocapture %x, i32* noalias nocapture %y, i
%1 = add i64 %i, 1
%2 = getelementptr inbounds i32, i32* %x, i64 %1
%3 = getelementptr inbounds i32, i32* %y, i64 %1
- %4 = load i32* %3, align 4
+ %4 = load i32, i32* %3, align 4
%5 = add nsw i32 undef, %4
store i32 %5, i32* %2, align 4
%6 = add i64 %i, 2
%7 = getelementptr inbounds i32, i32* %x, i64 %6
%8 = getelementptr inbounds i32, i32* %y, i64 %6
- %9 = load i32* %8, align 4
+ %9 = load i32, i32* %8, align 4
%10 = add nsw i32 undef, %9
store i32 %10, i32* %7, align 4
ret void
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll b/llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll
index 9e23a6a42bf..33bdc6a6b04 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll
@@ -25,40 +25,40 @@ for.body: ; preds = %for.body, %entry
%a.088 = phi i32 [ 0, %entry ], [ %add52, %for.body ]
%1 = shl i64 %indvars.iv, 3
%arrayidx = getelementptr inbounds i32, i32* %diff, i64 %1
- %2 = load i32* %arrayidx, align 4
+ %2 = load i32, i32* %arrayidx, align 4
%3 = or i64 %1, 4
%arrayidx2 = getelementptr inbounds i32, i32* %diff, i64 %3
- %4 = load i32* %arrayidx2, align 4
+ %4 = load i32, i32* %arrayidx2, align 4
%add3 = add nsw i32 %4, %2
%arrayidx6 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 0
store i32 %add3, i32* %arrayidx6, align 16
%add10 = add nsw i32 %add3, %a.088
%5 = or i64 %1, 1
%arrayidx13 = getelementptr inbounds i32, i32* %diff, i64 %5
- %6 = load i32* %arrayidx13, align 4
+ %6 = load i32, i32* %arrayidx13, align 4
%7 = or i64 %1, 5
%arrayidx16 = getelementptr inbounds i32, i32* %diff, i64 %7
- %8 = load i32* %arrayidx16, align 4
+ %8 = load i32, i32* %arrayidx16, align 4
%add17 = add nsw i32 %8, %6
%arrayidx20 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 1
store i32 %add17, i32* %arrayidx20, align 4
%add24 = add nsw i32 %add10, %add17
%9 = or i64 %1, 2
%arrayidx27 = getelementptr inbounds i32, i32* %diff, i64 %9
- %10 = load i32* %arrayidx27, align 4
+ %10 = load i32, i32* %arrayidx27, align 4
%11 = or i64 %1, 6
%arrayidx30 = getelementptr inbounds i32, i32* %diff, i64 %11
- %12 = load i32* %arrayidx30, align 4
+ %12 = load i32, i32* %arrayidx30, align 4
%add31 = add nsw i32 %12, %10
%arrayidx34 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 2
store i32 %add31, i32* %arrayidx34, align 8
%add38 = add nsw i32 %add24, %add31
%13 = or i64 %1, 3
%arrayidx41 = getelementptr inbounds i32, i32* %diff, i64 %13
- %14 = load i32* %arrayidx41, align 4
+ %14 = load i32, i32* %arrayidx41, align 4
%15 = or i64 %1, 7
%arrayidx44 = getelementptr inbounds i32, i32* %diff, i64 %15
- %16 = load i32* %arrayidx44, align 4
+ %16 = load i32, i32* %arrayidx44, align 4
%add45 = add nsw i32 %16, %14
%arrayidx48 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 3
store i32 %add45, i32* %arrayidx48, align 4
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/simple-loop.ll b/llvm/test/Transforms/SLPVectorizer/X86/simple-loop.ll
index ccb165fec66..c9bb88497ac 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/simple-loop.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/simple-loop.ll
@@ -13,16 +13,16 @@ define i32 @rollable(i32* noalias nocapture %in, i32* noalias nocapture %out, i6
%2 = shl i64 %i.019, 2
%3 = getelementptr inbounds i32, i32* %in, i64 %2
;CHECK:load <4 x i32>
- %4 = load i32* %3, align 4
+ %4 = load i32, i32* %3, align 4
%5 = or i64 %2, 1
%6 = getelementptr inbounds i32, i32* %in, i64 %5
- %7 = load i32* %6, align 4
+ %7 = load i32, i32* %6, align 4
%8 = or i64 %2, 2
%9 = getelementptr inbounds i32, i32* %in, i64 %8
- %10 = load i32* %9, align 4
+ %10 = load i32, i32* %9, align 4
%11 = or i64 %2, 3
%12 = getelementptr inbounds i32, i32* %in, i64 %11
- %13 = load i32* %12, align 4
+ %13 = load i32, i32* %12, align 4
;CHECK:mul <4 x i32>
%14 = mul i32 %4, 7
;CHECK:add <4 x i32>
@@ -62,16 +62,16 @@ define i32 @unrollable(i32* %in, i32* %out, i64 %n) nounwind ssp uwtable {
%i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
%2 = shl i64 %i.019, 2
%3 = getelementptr inbounds i32, i32* %in, i64 %2
- %4 = load i32* %3, align 4
+ %4 = load i32, i32* %3, align 4
%5 = or i64 %2, 1
%6 = getelementptr inbounds i32, i32* %in, i64 %5
- %7 = load i32* %6, align 4
+ %7 = load i32, i32* %6, align 4
%8 = or i64 %2, 2
%9 = getelementptr inbounds i32, i32* %in, i64 %8
- %10 = load i32* %9, align 4
+ %10 = load i32, i32* %9, align 4
%11 = or i64 %2, 3
%12 = getelementptr inbounds i32, i32* %in, i64 %11
- %13 = load i32* %12, align 4
+ %13 = load i32, i32* %12, align 4
%14 = mul i32 %4, 7
%15 = add i32 %14, 7
%16 = mul i32 %7, 7
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/simplebb.ll b/llvm/test/Transforms/SLPVectorizer/X86/simplebb.ll
index 83c0e82e990..a5d9ad9685c 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/simplebb.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/simplebb.ll
@@ -9,13 +9,13 @@ target triple = "x86_64-apple-macosx10.8.0"
; CHECK: ret
define void @test1(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
@@ -29,13 +29,13 @@ entry:
; CHECK: ret
define void @test2(double* %a, double* %b, i8* %e) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%c = bitcast i8* %e to double*
store double %mul, double* %c, align 8
@@ -52,13 +52,13 @@ entry:
; CHECK: ret
define void @test_volatile_load(double* %a, double* %b, double* %c) {
entry:
- %i0 = load volatile double* %a, align 8
- %i1 = load volatile double* %b, align 8
+ %i0 = load volatile double, double* %a, align 8
+ %i1 = load volatile double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
@@ -72,13 +72,13 @@ entry:
; CHECK: ret
define void @test_volatile_store(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store volatile double %mul, double* %c, align 8
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/tiny-tree.ll b/llvm/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
index cbce6877a86..6c93222ef93 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
@@ -17,10 +17,10 @@ for.body: ; preds = %entry, %for.body
%i.015 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%dst.addr.014 = phi double* [ %add.ptr4, %for.body ], [ %dst, %entry ]
%src.addr.013 = phi double* [ %add.ptr, %for.body ], [ %src, %entry ]
- %0 = load double* %src.addr.013, align 8
+ %0 = load double, double* %src.addr.013, align 8
store double %0, double* %dst.addr.014, align 8
%arrayidx2 = getelementptr inbounds double, double* %src.addr.013, i64 1
- %1 = load double* %arrayidx2, align 8
+ %1 = load double, double* %arrayidx2, align 8
%arrayidx3 = getelementptr inbounds double, double* %dst.addr.014, i64 1
store double %1, double* %arrayidx3, align 8
%add.ptr = getelementptr inbounds double, double* %src.addr.013, i64 %i.015
@@ -47,18 +47,18 @@ for.body: ; preds = %entry, %for.body
%i.023 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%dst.addr.022 = phi float* [ %add.ptr8, %for.body ], [ %dst, %entry ]
%src.addr.021 = phi float* [ %add.ptr, %for.body ], [ %src, %entry ]
- %0 = load float* %src.addr.021, align 4
+ %0 = load float, float* %src.addr.021, align 4
store float %0, float* %dst.addr.022, align 4
%arrayidx2 = getelementptr inbounds float, float* %src.addr.021, i64 1
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds float, float* %dst.addr.022, i64 1
store float %1, float* %arrayidx3, align 4
%arrayidx4 = getelementptr inbounds float, float* %src.addr.021, i64 2
- %2 = load float* %arrayidx4, align 4
+ %2 = load float, float* %arrayidx4, align 4
%arrayidx5 = getelementptr inbounds float, float* %dst.addr.022, i64 2
store float %2, float* %arrayidx5, align 4
%arrayidx6 = getelementptr inbounds float, float* %src.addr.021, i64 3
- %3 = load float* %arrayidx6, align 4
+ %3 = load float, float* %arrayidx6, align 4
%arrayidx7 = getelementptr inbounds float, float* %dst.addr.022, i64 3
store float %3, float* %arrayidx7, align 4
%add.ptr = getelementptr inbounds float, float* %src.addr.021, i64 %i.023
@@ -85,10 +85,10 @@ for.body: ; preds = %entry, %for.body
%i.015 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%dst.addr.014 = phi double* [ %add.ptr4, %for.body ], [ %dst, %entry ]
%src.addr.013 = phi double* [ %add.ptr, %for.body ], [ %src, %entry ]
- %0 = load double* %src.addr.013, align 8
+ %0 = load double, double* %src.addr.013, align 8
store double %0, double* %dst.addr.014, align 8
%arrayidx2 = getelementptr inbounds double, double* %src.addr.013, i64 2
- %1 = load double* %arrayidx2, align 8
+ %1 = load double, double* %arrayidx2, align 8
%arrayidx3 = getelementptr inbounds double, double* %dst.addr.014, i64 1
store double %1, double* %arrayidx3, align 8
%add.ptr = getelementptr inbounds double, double* %src.addr.013, i64 %i.015
@@ -115,18 +115,18 @@ for.body: ; preds = %entry, %for.body
%i.023 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%dst.addr.022 = phi float* [ %add.ptr8, %for.body ], [ %dst, %entry ]
%src.addr.021 = phi float* [ %add.ptr, %for.body ], [ %src, %entry ]
- %0 = load float* %src.addr.021, align 4
+ %0 = load float, float* %src.addr.021, align 4
store float %0, float* %dst.addr.022, align 4
%arrayidx2 = getelementptr inbounds float, float* %src.addr.021, i64 4
- %1 = load float* %arrayidx2, align 4
+ %1 = load float, float* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds float, float* %dst.addr.022, i64 1
store float %1, float* %arrayidx3, align 4
%arrayidx4 = getelementptr inbounds float, float* %src.addr.021, i64 2
- %2 = load float* %arrayidx4, align 4
+ %2 = load float, float* %arrayidx4, align 4
%arrayidx5 = getelementptr inbounds float, float* %dst.addr.022, i64 2
store float %2, float* %arrayidx5, align 4
%arrayidx6 = getelementptr inbounds float, float* %src.addr.021, i64 3
- %3 = load float* %arrayidx6, align 4
+ %3 = load float, float* %arrayidx6, align 4
%arrayidx7 = getelementptr inbounds float, float* %dst.addr.022, i64 3
store float %3, float* %arrayidx7, align 4
%add.ptr = getelementptr inbounds float, float* %src.addr.021, i64 %i.023
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/unreachable.ll b/llvm/test/Transforms/SLPVectorizer/X86/unreachable.ll
index b1d23e9cc0e..f29f69d7e82 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/unreachable.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/unreachable.ll
@@ -12,15 +12,15 @@ entry:
bb1: ; an unreachable block
%t3 = getelementptr inbounds i32, i32* %x, i64 4
- %t4 = load i32* %t3, align 4
+ %t4 = load i32, i32* %t3, align 4
%t5 = getelementptr inbounds i32, i32* %x, i64 5
- %t6 = load i32* %t5, align 4
+ %t6 = load i32, i32* %t5, align 4
%bad = fadd float %bad, 0.000000e+00 ; <- an instruction with self dependency,
; but legal in unreachable code
%t7 = getelementptr inbounds i32, i32* %x, i64 6
- %t8 = load i32* %t7, align 4
+ %t8 = load i32, i32* %t7, align 4
%t9 = getelementptr inbounds i32, i32* %x, i64 7
- %t10 = load i32* %t9, align 4
+ %t10 = load i32, i32* %t9, align 4
br label %bb2
bb2:
diff --git a/llvm/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll b/llvm/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll
index cca309bd87f..efd5386f520 100644
--- a/llvm/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll
+++ b/llvm/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll
@@ -8,13 +8,13 @@ target triple = "xcore"
; CHECK-NOT: <2 x double>
define void @test1(double* %a, double* %b, double* %c) {
entry:
- %i0 = load double* %a, align 8
- %i1 = load double* %b, align 8
+ %i0 = load double, double* %a, align 8
+ %i1 = load double, double* %b, align 8
%mul = fmul double %i0, %i1
%arrayidx3 = getelementptr inbounds double, double* %a, i64 1
- %i3 = load double* %arrayidx3, align 8
+ %i3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %b, i64 1
- %i4 = load double* %arrayidx4, align 8
+ %i4 = load double, double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
%arrayidx5 = getelementptr inbounds double, double* %c, i64 1
diff --git a/llvm/test/Transforms/SROA/address-spaces.ll b/llvm/test/Transforms/SROA/address-spaces.ll
index c709834daa4..004695dcedd 100644
--- a/llvm/test/Transforms/SROA/address-spaces.ll
+++ b/llvm/test/Transforms/SROA/address-spaces.ll
@@ -10,7 +10,7 @@ declare void @llvm.memcpy.p1i8.p1i8.i32(i8 addrspace(1)* nocapture, i8 addrspace
; Make sure an illegal bitcast isn't introduced
define void @test_address_space_1_1(<2 x i64> addrspace(1)* %a, i16 addrspace(1)* %b) {
; CHECK-LABEL: @test_address_space_1_1(
-; CHECK: load <2 x i64> addrspace(1)* %a, align 2
+; CHECK: load <2 x i64>, <2 x i64> addrspace(1)* %a, align 2
; CHECK: store <2 x i64> {{.*}}, <2 x i64> addrspace(1)* {{.*}}, align 2
; CHECK: ret void
%aa = alloca <2 x i64>, align 16
@@ -24,7 +24,7 @@ define void @test_address_space_1_1(<2 x i64> addrspace(1)* %a, i16 addrspace(1)
define void @test_address_space_1_0(<2 x i64> addrspace(1)* %a, i16* %b) {
; CHECK-LABEL: @test_address_space_1_0(
-; CHECK: load <2 x i64> addrspace(1)* %a, align 2
+; CHECK: load <2 x i64>, <2 x i64> addrspace(1)* %a, align 2
; CHECK: store <2 x i64> {{.*}}, <2 x i64>* {{.*}}, align 2
; CHECK: ret void
%aa = alloca <2 x i64>, align 16
@@ -38,7 +38,7 @@ define void @test_address_space_1_0(<2 x i64> addrspace(1)* %a, i16* %b) {
define void @test_address_space_0_1(<2 x i64>* %a, i16 addrspace(1)* %b) {
; CHECK-LABEL: @test_address_space_0_1(
-; CHECK: load <2 x i64>* %a, align 2
+; CHECK: load <2 x i64>, <2 x i64>* %a, align 2
; CHECK: store <2 x i64> {{.*}}, <2 x i64> addrspace(1)* {{.*}}, align 2
; CHECK: ret void
%aa = alloca <2 x i64>, align 16
diff --git a/llvm/test/Transforms/SROA/alignment.ll b/llvm/test/Transforms/SROA/alignment.ll
index e631bbac266..455d14246e5 100644
--- a/llvm/test/Transforms/SROA/alignment.ll
+++ b/llvm/test/Transforms/SROA/alignment.ll
@@ -6,9 +6,9 @@ declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
define void @test1({ i8, i8 }* %a, { i8, i8 }* %b) {
; CHECK-LABEL: @test1(
; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 0
-; CHECK: %[[a0:.*]] = load i8* %[[gep_a0]], align 16
+; CHECK: %[[a0:.*]] = load i8, i8* %[[gep_a0]], align 16
; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 1
-; CHECK: %[[a1:.*]] = load i8* %[[gep_a1]], align 1
+; CHECK: %[[a1:.*]] = load i8, i8* %[[gep_a1]], align 1
; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 0
; CHECK: store i8 %[[a0]], i8* %[[gep_b0]], align 16
; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 1
@@ -31,7 +31,7 @@ entry:
define void @test2() {
; CHECK-LABEL: @test2(
; CHECK: alloca i16
-; CHECK: load i8* %{{.*}}
+; CHECK: load i8, i8* %{{.*}}
; CHECK: store i8 42, i8* %{{.*}}
; CHECK: ret void
@@ -41,7 +41,7 @@ entry:
%cast1 = bitcast i8* %gep1 to i16*
store volatile i16 0, i16* %cast1
%gep2 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 2
- %result = load i8* %gep2
+ %result = load i8, i8* %gep2
store i8 42, i8* %gep2
ret void
}
@@ -49,7 +49,7 @@ entry:
define void @PR13920(<2 x i64>* %a, i16* %b) {
; Test that alignments on memcpy intrinsics get propagated to loads and stores.
; CHECK-LABEL: @PR13920(
-; CHECK: load <2 x i64>* %a, align 2
+; CHECK: load <2 x i64>, <2 x i64>* %a, align 2
; CHECK: store <2 x i64> {{.*}}, <2 x i64>* {{.*}}, align 2
; CHECK: ret void
@@ -93,10 +93,10 @@ define void @test5() {
; CHECK: alloca [9 x i8]
; CHECK: alloca [9 x i8]
; CHECK: store volatile double 0.0{{.*}}, double* %{{.*}}, align 1
-; CHECK: load volatile i16* %{{.*}}, align 1
-; CHECK: load double* %{{.*}}, align 1
+; CHECK: load volatile i16, i16* %{{.*}}, align 1
+; CHECK: load double, double* %{{.*}}, align 1
; CHECK: store volatile double %{{.*}}, double* %{{.*}}, align 1
-; CHECK: load volatile i16* %{{.*}}, align 1
+; CHECK: load volatile i16, i16* %{{.*}}, align 1
; CHECK: ret void
entry:
@@ -106,15 +106,15 @@ entry:
store volatile double 0.0, double* %ptr1, align 1
%weird_gep1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 7
%weird_cast1 = bitcast i8* %weird_gep1 to i16*
- %weird_load1 = load volatile i16* %weird_cast1, align 1
+ %weird_load1 = load volatile i16, i16* %weird_cast1, align 1
%raw2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 9
%ptr2 = bitcast i8* %raw2 to double*
- %d1 = load double* %ptr1, align 1
+ %d1 = load double, double* %ptr1, align 1
store volatile double %d1, double* %ptr2, align 1
%weird_gep2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 16
%weird_cast2 = bitcast i8* %weird_gep2 to i16*
- %weird_load2 = load volatile i16* %weird_cast2, align 1
+ %weird_load2 = load volatile i16, i16* %weird_cast2, align 1
ret void
}
@@ -136,7 +136,7 @@ entry:
%raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
%ptr2 = bitcast i8* %raw2 to double*
- %val = load double* %ptr1, align 1
+ %val = load double, double* %ptr1, align 1
store volatile double %val, double* %ptr2, align 1
ret void
@@ -156,11 +156,11 @@ entry:
%ptr2 = bitcast i8* %raw2 to double*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %raw1, i8* %out, i32 16, i32 0, i1 false)
-; CHECK: %[[val2:.*]] = load double* %{{.*}}, align 1
-; CHECK: %[[val1:.*]] = load double* %{{.*}}, align 1
+; CHECK: %[[val2:.*]] = load double, double* %{{.*}}, align 1
+; CHECK: %[[val1:.*]] = load double, double* %{{.*}}, align 1
- %val1 = load double* %ptr2, align 1
- %val2 = load double* %ptr1, align 1
+ %val1 = load double, double* %ptr2, align 1
+ %val2 = load double, double* %ptr1, align 1
store double %val1, double* %ptr1, align 1
store double %val2, double* %ptr2, align 1
diff --git a/llvm/test/Transforms/SROA/basictest.ll b/llvm/test/Transforms/SROA/basictest.ll
index f6b1c2787ee..a59192d718c 100644
--- a/llvm/test/Transforms/SROA/basictest.ll
+++ b/llvm/test/Transforms/SROA/basictest.ll
@@ -19,7 +19,7 @@ entry:
call void @llvm.lifetime.start(i64 4, i8* %a1.i8)
store i32 0, i32* %a1
- %v1 = load i32* %a1
+ %v1 = load i32, i32* %a1
call void @llvm.lifetime.end(i64 4, i8* %a1.i8)
@@ -27,7 +27,7 @@ entry:
call void @llvm.lifetime.start(i64 4, i8* %a2.i8)
store float 0.0, float* %a2
- %v2 = load float * %a2
+ %v2 = load float , float * %a2
%v2.int = bitcast float %v2 to i32
%sum1 = add i32 %v1, %v2.int
@@ -45,7 +45,7 @@ entry:
%X = alloca { i32, float }
%Y = getelementptr { i32, float }, { i32, float }* %X, i64 0, i32 0
store i32 0, i32* %Y
- %Z = load i32* %Y
+ %Z = load i32, i32* %Y
ret i32 %Z
}
@@ -61,7 +61,7 @@ entry:
br label %L2
L2:
- %Z = load i64* %B
+ %Z = load i64, i64* %B
ret i64 %Z
}
@@ -84,7 +84,7 @@ entry:
; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a1]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %src, i32 42
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 42
-; CHECK-NEXT: %[[test3_r1:.*]] = load i8* %[[gep]]
+; CHECK-NEXT: %[[test3_r1:.*]] = load i8, i8* %[[gep]]
; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 43
; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [99 x i8], [99 x i8]* %[[test3_a2]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 99
@@ -98,7 +98,7 @@ entry:
; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 207
-; CHECK-NEXT: %[[test3_r2:.*]] = load i8* %[[gep]]
+; CHECK-NEXT: %[[test3_r2:.*]] = load i8, i8* %[[gep]]
; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 208
; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
@@ -320,9 +320,9 @@ entry:
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep]], i8* %src, i32 20
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 20
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
-; CHECK-NEXT: %[[test4_r1:.*]] = load i16* %[[bitcast]]
+; CHECK-NEXT: %[[test4_r1:.*]] = load i16, i16* %[[bitcast]]
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 22
-; CHECK-NEXT: %[[test4_r2:.*]] = load i8* %[[gep]]
+; CHECK-NEXT: %[[test4_r2:.*]] = load i8, i8* %[[gep]]
; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 23
; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a2]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
@@ -331,17 +331,17 @@ entry:
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 10
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 40
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
-; CHECK-NEXT: %[[test4_r3:.*]] = load i16* %[[bitcast]]
+; CHECK-NEXT: %[[test4_r3:.*]] = load i16, i16* %[[bitcast]]
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 42
-; CHECK-NEXT: %[[test4_r4:.*]] = load i8* %[[gep]]
+; CHECK-NEXT: %[[test4_r4:.*]] = load i8, i8* %[[gep]]
; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 43
; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 50
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
-; CHECK-NEXT: %[[test4_r5:.*]] = load i16* %[[bitcast]]
+; CHECK-NEXT: %[[test4_r5:.*]] = load i16, i16* %[[bitcast]]
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 52
-; CHECK-NEXT: %[[test4_r6:.*]] = load i8* %[[gep]]
+; CHECK-NEXT: %[[test4_r6:.*]] = load i8, i8* %[[gep]]
; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 53
; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a5]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
@@ -422,7 +422,7 @@ entry:
store float 0.0, float* %fptr
%ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 2
%iptr = bitcast i8* %ptr to i16*
- %val = load i16* %iptr
+ %val = load i16, i16* %iptr
ret i16 %val
}
@@ -430,7 +430,7 @@ define i32 @test6() {
; CHECK-LABEL: @test6(
; CHECK: alloca i32
; CHECK-NEXT: store volatile i32
-; CHECK-NEXT: load i32*
+; CHECK-NEXT: load i32, i32*
; CHECK-NEXT: ret i32
entry:
@@ -438,7 +438,7 @@ entry:
%ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %ptr, i8 42, i32 4, i32 1, i1 true)
%iptr = bitcast i8* %ptr to i32*
- %val = load i32* %iptr
+ %val = load i32, i32* %iptr
ret i32 %val
}
@@ -446,10 +446,10 @@ define void @test7(i8* %src, i8* %dst) {
; CHECK-LABEL: @test7(
; CHECK: alloca i32
; CHECK-NEXT: bitcast i8* %src to i32*
-; CHECK-NEXT: load volatile i32*
+; CHECK-NEXT: load volatile i32, i32*
; CHECK-NEXT: store volatile i32
; CHECK-NEXT: bitcast i8* %dst to i32*
-; CHECK-NEXT: load volatile i32*
+; CHECK-NEXT: load volatile i32, i32*
; CHECK-NEXT: store volatile i32
; CHECK-NEXT: ret
@@ -472,27 +472,27 @@ entry:
; CHECK-NOT: alloca
%s2.next.ptr = getelementptr %S2, %S2* %s2, i64 0, i32 1
- %s2.next = load %S2** %s2.next.ptr
+ %s2.next = load %S2*, %S2** %s2.next.ptr
; CHECK: %[[gep:.*]] = getelementptr %S2, %S2* %s2, i64 0, i32 1
-; CHECK-NEXT: %[[next:.*]] = load %S2** %[[gep]]
+; CHECK-NEXT: %[[next:.*]] = load %S2*, %S2** %[[gep]]
%s2.next.s1.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 0
- %s2.next.s1 = load %S1** %s2.next.s1.ptr
+ %s2.next.s1 = load %S1*, %S1** %s2.next.s1.ptr
%new.s1.ptr = getelementptr %S2, %S2* %new, i64 0, i32 0
store %S1* %s2.next.s1, %S1** %new.s1.ptr
%s2.next.next.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 1
- %s2.next.next = load %S2** %s2.next.next.ptr
+ %s2.next.next = load %S2*, %S2** %s2.next.next.ptr
%new.next.ptr = getelementptr %S2, %S2* %new, i64 0, i32 1
store %S2* %s2.next.next, %S2** %new.next.ptr
; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2, %S2* %[[next]], i64 0, i32 0
-; CHECK-NEXT: %[[next_s1:.*]] = load %S1** %[[gep]]
+; CHECK-NEXT: %[[next_s1:.*]] = load %S1*, %S1** %[[gep]]
; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2, %S2* %[[next]], i64 0, i32 1
-; CHECK-NEXT: %[[next_next:.*]] = load %S2** %[[gep]]
+; CHECK-NEXT: %[[next_next:.*]] = load %S2*, %S2** %[[gep]]
- %new.s1 = load %S1** %new.s1.ptr
+ %new.s1 = load %S1*, %S1** %new.s1.ptr
%result1 = insertvalue %S2 undef, %S1* %new.s1, 0
; CHECK-NEXT: %[[result1:.*]] = insertvalue %S2 undef, %S1* %[[next_s1]], 0
- %new.next = load %S2** %new.next.ptr
+ %new.next = load %S2*, %S2** %new.next.ptr
%result2 = insertvalue %S2 %result1, %S2* %new.next, 1
; CHECK-NEXT: %[[result2:.*]] = insertvalue %S2 %[[result1]], %S2* %[[next_next]], 1
ret %S2 %result2
@@ -530,7 +530,7 @@ entry:
store i8 26, i8* %gep3, align 1
%cast = bitcast { [3 x i8] }* %a to { i64 }*
%elt = getelementptr inbounds { i64 }, { i64 }* %cast, i32 0, i32 0
- %load = load i64* %elt
+ %load = load i64, i64* %elt
%result = and i64 %load, 16777215
ret i64 %result
}
@@ -545,7 +545,7 @@ entry:
%ptr = getelementptr [8 x i8], [8 x i8]* %a, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %ptr, i8 0, i32 8, i32 1, i1 false)
%s2ptrptr = bitcast i8* %ptr to %S2**
- %s2ptr = load %S2** %s2ptrptr
+ %s2ptr = load %S2*, %S2** %s2ptrptr
ret %S2* %s2ptr
}
@@ -561,13 +561,13 @@ entry:
good:
%Y = getelementptr i32, i32* %X, i64 0
store i32 0, i32* %Y
- %Z = load i32* %Y
+ %Z = load i32, i32* %Y
ret i32 %Z
bad:
%Y2 = getelementptr i32, i32* %X, i64 1
store i32 0, i32* %Y2
- %Z2 = load i32* %Y2
+ %Z2 = load i32, i32* %Y2
ret i32 %Z2
}
@@ -589,7 +589,7 @@ entry:
%a2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
store i8 0, i8* %a2ptr
%aiptr = bitcast [3 x i8]* %a to i24*
- %ai = load i24* %aiptr
+ %ai = load i24, i24* %aiptr
; CHECK-NOT: store
; CHECK-NOT: load
; CHECK: %[[ext2:.*]] = zext i8 0 to i24
@@ -607,11 +607,11 @@ entry:
%biptr = bitcast [3 x i8]* %b to i24*
store i24 %ai, i24* %biptr
%b0ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 0
- %b0 = load i8* %b0ptr
+ %b0 = load i8, i8* %b0ptr
%b1ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 1
- %b1 = load i8* %b1ptr
+ %b1 = load i8, i8* %b1ptr
%b2ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 2
- %b2 = load i8* %b2ptr
+ %b2 = load i8, i8* %b2ptr
; CHECK-NOT: store
; CHECK-NOT: load
; CHECK: %[[trunc0:.*]] = trunc i24 %[[insert0]] to i8
@@ -646,7 +646,7 @@ entry:
store i8 0, i8* %b2ptr
%iptrcast = bitcast [3 x i8]* %a to i16*
%iptrgep = getelementptr i16, i16* %iptrcast, i64 1
- %i = load i16* %iptrgep
+ %i = load i16, i16* %iptrgep
%ret = zext i16 %i to i32
ret i32 %ret
}
@@ -672,15 +672,15 @@ entry:
%4 = getelementptr inbounds %test14.struct, %test14.struct* %a, i32 0, i32 0
%5 = bitcast [3 x i32]* %3 to i32*
%6 = bitcast [3 x i32]* %4 to i32*
- %7 = load i32* %6, align 4
+ %7 = load i32, i32* %6, align 4
store i32 %7, i32* %5, align 4
%8 = getelementptr inbounds i32, i32* %5, i32 1
%9 = getelementptr inbounds i32, i32* %6, i32 1
- %10 = load i32* %9, align 4
+ %10 = load i32, i32* %9, align 4
store i32 %10, i32* %8, align 4
%11 = getelementptr inbounds i32, i32* %5, i32 2
%12 = getelementptr inbounds i32, i32* %6, i32 2
- %13 = load i32* %12, align 4
+ %13 = load i32, i32* %12, align 4
store i32 %13, i32* %11, align 4
ret void
}
@@ -735,7 +735,7 @@ define void @test16(i8* %src, i8* %dst) {
; CHECK-LABEL: @test16(
; CHECK-NOT: alloca
; CHECK: %[[srccast:.*]] = bitcast i8* %src to i24*
-; CHECK-NEXT: load i24* %[[srccast]]
+; CHECK-NEXT: load i24, i24* %[[srccast]]
; CHECK-NEXT: %[[dstcast:.*]] = bitcast i8* %dst to i24*
; CHECK-NEXT: store i24 0, i24* %[[dstcast]]
; CHECK-NEXT: ret void
@@ -776,7 +776,7 @@ define void @test18(i8* %src, i8* %dst, i32 %size) {
; CHECK: %[[a:.*]] = alloca [34 x i8]
; CHECK: %[[srcgep1:.*]] = getelementptr inbounds i8, i8* %src, i64 4
; CHECK-NEXT: %[[srccast1:.*]] = bitcast i8* %[[srcgep1]] to i32*
-; CHECK-NEXT: %[[srcload:.*]] = load i32* %[[srccast1]]
+; CHECK-NEXT: %[[srcload:.*]] = load i32, i32* %[[srccast1]]
; CHECK-NEXT: %[[agep1:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[agep1]], i8* %src, i32 %size,
; CHECK-NEXT: %[[agep2:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
@@ -821,7 +821,7 @@ entry:
%cast2 = bitcast { i64, i8* }* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast2, i8* %cast1, i32 16, i32 1, i1 false)
%gep = getelementptr inbounds { i64, i8* }, { i64, i8* }* %a, i32 0, i32 0
- %val = load i64* %gep
+ %val = load i64, i64* %gep
ret i32 undef
}
@@ -845,9 +845,9 @@ entry:
%gep3.2 = getelementptr i32, i32* %gep3.1, i32 -12
store i32 3, i32* %gep3.2
- %load1 = load i32* %gep1
- %load2 = load i32* %gep2.2
- %load3 = load i32* %gep3.2
+ %load1 = load i32, i32* %gep1
+ %load2 = load i32, i32* %gep2.2
+ %load3 = load i32, i32* %gep3.2
%sum1 = add i32 %load1, %load2
%sum2 = add i32 %sum1, %load3
ret i32 %sum2
@@ -876,9 +876,9 @@ entry:
store i8 255, i8* %gep5
%cast1 = bitcast i8* %gep4 to i32*
store i32 0, i32* %cast1
- %load = load i8* %gep0
+ %load = load i8, i8* %gep0
%gep6 = getelementptr i8, i8* %gep0, i32 1
- %load2 = load i8* %gep6
+ %load2 = load i8, i8* %gep6
%result = or i8 %load, %load2
ret i8 %result
}
@@ -895,7 +895,7 @@ define void @PR13916.1() {
entry:
%a = alloca i8
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a, i8* %a, i32 1, i32 1, i1 false)
- %tmp2 = load i8* %a
+ %tmp2 = load i8, i8* %a
ret void
}
@@ -919,7 +919,7 @@ if.then:
if.end:
%gep = getelementptr %PR13916.struct, %PR13916.struct* %a, i32 0, i32 0
- %tmp2 = load i8* %gep
+ %tmp2 = load i8, i8* %gep
ret void
}
@@ -968,7 +968,7 @@ entry:
store double* %c, double** %b
store double* %a, double** %b
store double %x, double* %c
- %ret = load double* %a
+ %ret = load double, double* %a
; CHECK-NOT: store
; CHECK-NOT: load
@@ -1013,7 +1013,7 @@ entry:
%gep2 = getelementptr { { [1 x { i32 }] } }, { { [1 x { i32 }] } }* %a1, i32 0, i32 0
%ptrcast1 = bitcast { [1 x { i32 }] }* %gep2 to { [1 x { float }] }*
- %load1 = load { [1 x { float }] }* %ptrcast1
+ %load1 = load { [1 x { float }] }, { [1 x { float }] }* %ptrcast1
%unwrap1 = extractvalue { [1 x { float }] } %load1, 0, 0
%wrap2 = insertvalue { {}, { float }, [0 x i8] } undef, { float } %unwrap1, 1
@@ -1021,7 +1021,7 @@ entry:
%gep3 = getelementptr { {}, { float }, [0 x i8] }, { {}, { float }, [0 x i8] }* %a2, i32 0, i32 1, i32 0
%ptrcast2 = bitcast float* %gep3 to <4 x i8>*
- %load3 = load <4 x i8>* %ptrcast2
+ %load3 = load <4 x i8>, <4 x i8>* %ptrcast2
%valcast1 = bitcast <4 x i8> %load3 to i32
%wrap3 = insertvalue [1 x [1 x i32]] undef, i32 %valcast1, 0, 0
@@ -1032,7 +1032,7 @@ entry:
%gep5 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }, { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1, i32 1, i32 0
%ptrcast4 = bitcast [1 x <4 x i8>]* %gep5 to { {}, float, {} }*
- %load4 = load { {}, float, {} }* %ptrcast4
+ %load4 = load { {}, float, {} }, { {}, float, {} }* %ptrcast4
%unwrap2 = extractvalue { {}, float, {} } %load4, 1
%valcast2 = bitcast float %unwrap2 to i32
@@ -1080,8 +1080,8 @@ entry:
store i32 1072693248, i32* %X.sroa.0.4.cast5.i, align 4
; Do the actual math...
- %X.sroa.0.0.load1.i = load double* %X.sroa.0.i, align 8
- %accum.real.i = load double* %d, align 8
+ %X.sroa.0.0.load1.i = load double, double* %X.sroa.0.i, align 8
+ %accum.real.i = load double, double* %d, align 8
%add.r.i = fadd double %accum.real.i, %X.sroa.0.0.load1.i
store double %add.r.i, double* %d, align 8
call void @llvm.lifetime.end(i64 -1, i8* %0)
@@ -1104,13 +1104,13 @@ entry:
; CHECK-NOT: store
%phi.realp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 0
- %phi.real = load float* %phi.realp
+ %phi.real = load float, float* %phi.realp
%phi.imagp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 1
- %phi.imag = load float* %phi.imagp
+ %phi.imag = load float, float* %phi.imagp
; CHECK: %[[realp:.*]] = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 0
- ; CHECK-NEXT: %[[real:.*]] = load float* %[[realp]]
+ ; CHECK-NEXT: %[[real:.*]] = load float, float* %[[realp]]
; CHECK-NEXT: %[[imagp:.*]] = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 1
- ; CHECK-NEXT: %[[imag:.*]] = load float* %[[imagp]]
+ ; CHECK-NEXT: %[[imag:.*]] = load float, float* %[[imagp]]
%real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
%imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
@@ -1126,7 +1126,7 @@ entry:
; CHECK-NEXT: %[[real_mask:.*]] = and i64 %[[imag_insert]], -4294967296
; CHECK-NEXT: %[[real_insert:.*]] = or i64 %[[real_mask]], %[[real_ext]]
- %1 = load i64* %0, align 1
+ %1 = load i64, i64* %0, align 1
ret i64 %1
; CHECK-NEXT: ret i64 %[[real_insert]]
}
@@ -1199,18 +1199,18 @@ entry:
%b.i1 = bitcast <{ i1 }>* %b to i1*
store i1 %x, i1* %b.i1, align 8
%b.i8 = bitcast <{ i1 }>* %b to i8*
- %foo = load i8* %b.i8, align 1
+ %foo = load i8, i8* %b.i8, align 1
; CHECK-NEXT: %[[ext:.*]] = zext i1 %x to i8
; CHECK-NEXT: store i8 %[[ext]], i8* %[[a]], align 8
-; CHECK-NEXT: {{.*}} = load i8* %[[a]], align 8
+; CHECK-NEXT: {{.*}} = load i8, i8* %[[a]], align 8
%a.i8 = bitcast <{ i1 }>* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.i8, i8* %b.i8, i32 1, i32 1, i1 false) nounwind
- %bar = load i8* %a.i8, align 1
+ %bar = load i8, i8* %a.i8, align 1
%a.i1 = getelementptr inbounds <{ i1 }>, <{ i1 }>* %a, i32 0, i32 0
- %baz = load i1* %a.i1, align 1
+ %baz = load i1, i1* %a.i1, align 1
; CHECK-NEXT: %[[a_cast:.*]] = bitcast i8* %[[a]] to i1*
-; CHECK-NEXT: {{.*}} = load i1* %[[a_cast]], align 8
+; CHECK-NEXT: {{.*}} = load i1, i1* %[[a_cast]], align 8
ret void
}
@@ -1226,7 +1226,7 @@ entry:
%cast = bitcast <3 x i8>* %a to i32*
store i32 %x, i32* %cast, align 1
- %y = load <3 x i8>* %a, align 4
+ %y = load <3 x i8>, <3 x i8>* %a, align 4
ret <3 x i8> %y
; CHECK: ret <3 x i8>
}
@@ -1242,7 +1242,7 @@ entry:
store <3 x i8> %x, <3 x i8>* %a, align 1
%cast = bitcast <3 x i8>* %a to i32*
- %y = load i32* %cast, align 4
+ %y = load i32, i32* %cast, align 4
ret i32 %y
; CHECK: ret i32
}
@@ -1258,7 +1258,7 @@ entry:
%a.i8 = bitcast i32* %a to i8*
call void @llvm.memset.p0i8.i32(i8* %a.i8, i8 0, i32 %x, i32 1, i1 false)
- %v = load i32* %a
+ %v = load i32, i32* %a
ret i32 %v
}
@@ -1283,7 +1283,7 @@ entry:
bb4:
%src.gep3 = getelementptr inbounds i8, i8* %src, i32 3
- %src.3 = load i8* %src.gep3
+ %src.3 = load i8, i8* %src.gep3
%tmp.gep3 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 3
store i8 %src.3, i8* %tmp.gep3
; CHECK: store i8
@@ -1292,7 +1292,7 @@ bb4:
bb3:
%src.gep2 = getelementptr inbounds i8, i8* %src, i32 2
- %src.2 = load i8* %src.gep2
+ %src.2 = load i8, i8* %src.gep2
%tmp.gep2 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 2
store i8 %src.2, i8* %tmp.gep2
; CHECK: store i8
@@ -1301,7 +1301,7 @@ bb3:
bb2:
%src.gep1 = getelementptr inbounds i8, i8* %src, i32 1
- %src.1 = load i8* %src.gep1
+ %src.1 = load i8, i8* %src.gep1
%tmp.gep1 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 1
store i8 %src.1, i8* %tmp.gep1
; CHECK: store i8
@@ -1310,7 +1310,7 @@ bb2:
bb1:
%src.gep0 = getelementptr inbounds i8, i8* %src, i32 0
- %src.0 = load i8* %src.gep0
+ %src.0 = load i8, i8* %src.gep0
%tmp.gep0 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 0
store i8 %src.0, i8* %tmp.gep0
; CHECK: store i8
@@ -1332,7 +1332,7 @@ define void @PR15805(i1 %a, i1 %b) {
%c = alloca i64, align 8
%p.0.c = select i1 undef, i64* %c, i64* %c
%cond.in = select i1 undef, i64* %p.0.c, i64* %c
- %cond = load i64* %cond.in, align 8
+ %cond = load i64, i64* %cond.in, align 8
ret void
}
@@ -1351,7 +1351,7 @@ define void @PR15805.1(i1 %a, i1 %b) {
loop:
%cond.in = select i1 undef, i64* %c, i64* %p.0.c
%p.0.c = select i1 undef, i64* %c, i64* %c
- %cond = load i64* %cond.in, align 8
+ %cond = load i64, i64* %cond.in, align 8
br i1 undef, label %loop, label %exit
exit:
@@ -1374,7 +1374,7 @@ entry:
%b.cast = bitcast i32* %b to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b.cast, i8* %a, i32 4, i32 4, i1 true)
%b.gep = getelementptr inbounds i8, i8* %b.cast, i32 2
- load i8* %b.gep, align 2
+ load i8, i8* %b.gep, align 2
unreachable
}
@@ -1394,7 +1394,7 @@ entry:
store <2 x float> undef, <2 x float>* %0, align 8
%1 = getelementptr inbounds { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1, i64 0
%cond105.in.i.i = select i1 undef, float* null, float* %1
- %cond105.i.i = load float* %cond105.in.i.i, align 8
+ %cond105.i.i = load float, float* %cond105.in.i.i, align 8
ret void
}
@@ -1427,9 +1427,9 @@ entry:
define void @test24(i8* %src, i8* %dst) {
; CHECK-LABEL: @test24(
; CHECK: alloca i64, align 16
-; CHECK: load volatile i64* %{{[^,]*}}, align 1
+; CHECK: load volatile i64, i64* %{{[^,]*}}, align 1
; CHECK: store volatile i64 %{{[^,]*}}, i64* %{{[^,]*}}, align 16
-; CHECK: load volatile i64* %{{[^,]*}}, align 16
+; CHECK: load volatile i64, i64* %{{[^,]*}}, align 16
; CHECK: store volatile i64 %{{[^,]*}}, i64* %{{[^,]*}}, align 1
entry:
@@ -1466,10 +1466,10 @@ entry:
%b.gep2 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 1
store float 0.0, float* %a.gep1
store float 1.0, float* %a.gep2
- %v = load i64* %a
+ %v = load i64, i64* %a
store i64 %v, i64* %b
- %f1 = load float* %b.gep1
- %f2 = load float* %b.gep2
+ %f1 = load float, float* %b.gep1
+ %f2 = load float, float* %b.gep2
%ret = fadd float %f1, %f2
ret float %ret
}
@@ -1482,8 +1482,8 @@ define void @test26() {
;
; CHECK-LABEL: @test26(
; CHECK-NOT: alloca
-; CHECK: %[[L1:.*]] = load i32* bitcast
-; CHECK: %[[L2:.*]] = load i32* bitcast
+; CHECK: %[[L1:.*]] = load i32, i32* bitcast
+; CHECK: %[[L2:.*]] = load i32, i32* bitcast
; CHECK: %[[F1:.*]] = bitcast i32 %[[L1]] to float
; CHECK: %[[F2:.*]] = bitcast i32 %[[L2]] to float
; CHECK: %[[SUM:.*]] = fadd float %[[F1]], %[[F2]]
@@ -1498,14 +1498,14 @@ entry:
%a.cast = bitcast i64* %a to [2 x float]*
%a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
%a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
- %v1 = load i64* bitcast ([2 x float]* @complex1 to i64*)
+ %v1 = load i64, i64* bitcast ([2 x float]* @complex1 to i64*)
store i64 %v1, i64* %a
- %f1 = load float* %a.gep1
- %f2 = load float* %a.gep2
+ %f1 = load float, float* %a.gep1
+ %f2 = load float, float* %a.gep2
%sum = fadd float %f1, %f2
store float %sum, float* %a.gep1
store float %sum, float* %a.gep2
- %v2 = load i64* %a
+ %v2 = load i64, i64* %a
store i64 %v2, i64* bitcast ([2 x float]* @complex2 to i64*)
ret void
}
@@ -1534,10 +1534,10 @@ entry:
%fptr3 = bitcast i8* %gep3 to float*
store float 0.0, float* %fptr1
store float 1.0, float* %fptr2
- %v = load i64* %iptr1
+ %v = load i64, i64* %iptr1
store i64 %v, i64* %iptr2
- %f1 = load float* %fptr2
- %f2 = load float* %fptr3
+ %f1 = load float, float* %fptr2
+ %f2 = load float, float* %fptr3
%ret = fadd float %f1, %f2
ret float %ret
}
@@ -1560,7 +1560,7 @@ entry:
%a = alloca i32
%a.cast = bitcast i32* %a to i16*
store volatile i16 42, i16* %a.cast
- %load = load i32* %a
+ %load = load i32, i32* %a
store i32 %load, i32* %a
ret i32 %load
}
@@ -1587,7 +1587,7 @@ entry:
%a.cast1 = bitcast i64* %a to i32*
%a.cast2 = bitcast i64* %a to i16*
store volatile i16 42, i16* %a.cast2
- %load = load i32* %a.cast1
+ %load = load i32, i32* %a.cast1
store i32 %load, i32* %a.cast1
%a.gep1 = getelementptr i32, i32* %a.cast1, i32 1
%a.cast3 = bitcast i32* %a.gep1 to i8*
diff --git a/llvm/test/Transforms/SROA/big-endian.ll b/llvm/test/Transforms/SROA/big-endian.ll
index 7ba7bee542d..b5a04ca8e64 100644
--- a/llvm/test/Transforms/SROA/big-endian.ll
+++ b/llvm/test/Transforms/SROA/big-endian.ll
@@ -23,7 +23,7 @@ entry:
%a2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
store i8 0, i8* %a2ptr
%aiptr = bitcast [3 x i8]* %a to i24*
- %ai = load i24* %aiptr
+ %ai = load i24, i24* %aiptr
; CHECK-NOT: store
; CHECK-NOT: load
; CHECK: %[[ext2:.*]] = zext i8 0 to i24
@@ -41,11 +41,11 @@ entry:
%biptr = bitcast [3 x i8]* %b to i24*
store i24 %ai, i24* %biptr
%b0ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 0
- %b0 = load i8* %b0ptr
+ %b0 = load i8, i8* %b0ptr
%b1ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 1
- %b1 = load i8* %b1ptr
+ %b1 = load i8, i8* %b1ptr
%b2ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 2
- %b2 = load i8* %b2ptr
+ %b2 = load i8, i8* %b2ptr
; CHECK-NOT: store
; CHECK-NOT: load
; CHECK: %[[shift0:.*]] = lshr i24 %[[insert0]], 16
@@ -102,7 +102,7 @@ entry:
; CHECK-NOT: load
%aiptr = bitcast [7 x i8]* %a to i56*
- %ai = load i56* %aiptr
+ %ai = load i56, i56* %aiptr
%ret = zext i56 %ai to i64
ret i64 %ret
; CHECK-NEXT: %[[ext4:.*]] = zext i16 1 to i56
diff --git a/llvm/test/Transforms/SROA/fca.ll b/llvm/test/Transforms/SROA/fca.ll
index fbadcf81e03..6eaa73f5318 100644
--- a/llvm/test/Transforms/SROA/fca.ll
+++ b/llvm/test/Transforms/SROA/fca.ll
@@ -19,7 +19,7 @@ entry:
%gep2 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 1
store i32 %y, i32* %gep2
- %result = load { i32, i32 }* %a
+ %result = load { i32, i32 }, { i32, i32 }* %a
ret { i32, i32 } %result
}
@@ -30,7 +30,7 @@ define { i32, i32 } @test1(i32 %x, i32 %y) {
; CHECK-LABEL: @test1(
; CHECK: alloca
; CHECK: alloca
-; CHECK: load volatile { i32, i32 }*
+; CHECK: load volatile { i32, i32 }, { i32, i32 }*
; CHECK: store volatile { i32, i32 }
; CHECK: ret { i32, i32 }
@@ -43,7 +43,7 @@ entry:
%gep2 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 1
store i32 %y, i32* %gep2
- %result = load volatile { i32, i32 }* %a
+ %result = load volatile { i32, i32 }, { i32, i32 }* %a
store volatile { i32, i32 } %result, { i32, i32 }* %b
ret { i32, i32 } %result
}
diff --git a/llvm/test/Transforms/SROA/phi-and-select.ll b/llvm/test/Transforms/SROA/phi-and-select.ll
index 883b2fb3297..e97bd66d052 100644
--- a/llvm/test/Transforms/SROA/phi-and-select.ll
+++ b/llvm/test/Transforms/SROA/phi-and-select.ll
@@ -11,8 +11,8 @@ entry:
%a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
store i32 0, i32* %a0
store i32 1, i32* %a1
- %v0 = load i32* %a0
- %v1 = load i32* %a1
+ %v0 = load i32, i32* %a0
+ %v1 = load i32, i32* %a1
; CHECK-NOT: store
; CHECK-NOT: load
@@ -26,7 +26,7 @@ exit:
%phi = phi i32* [ %a1, %then ], [ %a0, %entry ]
; CHECK: phi i32 [ 1, %{{.*}} ], [ 0, %{{.*}} ]
- %result = load i32* %phi
+ %result = load i32, i32* %phi
ret i32 %result
}
@@ -40,8 +40,8 @@ entry:
%a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
store i32 0, i32* %a0
store i32 1, i32* %a1
- %v0 = load i32* %a0
- %v1 = load i32* %a1
+ %v0 = load i32, i32* %a0
+ %v1 = load i32, i32* %a1
; CHECK-NOT: store
; CHECK-NOT: load
@@ -49,7 +49,7 @@ entry:
%select = select i1 %cond, i32* %a1, i32* %a0
; CHECK: select i1 %{{.*}}, i32 1, i32 0
- %result = load i32* %select
+ %result = load i32, i32* %select
ret i32 %result
}
@@ -100,7 +100,7 @@ exit:
[ %a1b, %bb4 ], [ %a0b, %bb5 ], [ %a0b, %bb6 ], [ %a1b, %bb7 ]
; CHECK: phi i32 [ 1, %{{.*}} ], [ 0, %{{.*}} ], [ 0, %{{.*}} ], [ 1, %{{.*}} ], [ 1, %{{.*}} ], [ 0, %{{.*}} ], [ 0, %{{.*}} ], [ 1, %{{.*}} ]
- %result = load i32* %phi
+ %result = load i32, i32* %phi
ret i32 %result
}
@@ -114,8 +114,8 @@ entry:
%a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
store i32 0, i32* %a0
store i32 1, i32* %a1
- %v0 = load i32* %a0
- %v1 = load i32* %a1
+ %v0 = load i32, i32* %a0
+ %v1 = load i32, i32* %a1
; CHECK-NOT: store
; CHECK-NOT: load
@@ -123,7 +123,7 @@ entry:
%select = select i1 %cond, i32* %a0, i32* %a0
; CHECK-NOT: select
- %result = load i32* %select
+ %result = load i32, i32* %select
ret i32 %result
; CHECK: ret i32 0
}
@@ -141,7 +141,7 @@ entry:
%select = select i1 true, i32* %a1, i32* %b
; CHECK-NOT: select
- %result = load i32* %select
+ %result = load i32, i32* %select
; CHECK-NOT: load
ret i32 %result
@@ -172,10 +172,10 @@ entry:
; CHECK: call void @f(i32* %[[select2]], i32* %[[select3]])
- %result = load i32* %select
+ %result = load i32, i32* %select
; CHECK-NOT: load
- %dead = load i32* %c
+ %dead = load i32, i32* %c
ret i32 %result
; CHECK: ret i32 1
@@ -202,7 +202,7 @@ bad:
exit:
%P = phi i32* [ %Y1, %good ], [ %Y2, %bad ]
; CHECK: %[[phi:.*]] = phi i32 [ 0, %good ],
- %Z2 = load i32* %P
+ %Z2 = load i32, i32* %P
ret i32 %Z2
; CHECK: ret i32 %[[phi]]
}
@@ -213,7 +213,7 @@ define i32 @test8(i32 %b, i32* %ptr) {
; CHECK-LABEL: @test8(
; CHECK-NOT: alloca
; CHECK-NOT: load
-; CHECK: %[[value:.*]] = load i32* %ptr
+; CHECK: %[[value:.*]] = load i32, i32* %ptr
; CHECK-NOT: load
; CHECK: %[[result:.*]] = phi i32 [ undef, %else ], [ %[[value]], %then ]
; CHECK-NEXT: ret i32 %[[result]]
@@ -232,7 +232,7 @@ else:
exit:
%phi = phi i32* [ %bitcast, %else ], [ %ptr, %then ]
- %loaded = load i32* %phi, align 4
+ %loaded = load i32, i32* %phi, align 4
ret i32 %loaded
}
@@ -241,7 +241,7 @@ define i32 @test9(i32 %b, i32* %ptr) {
; CHECK-LABEL: @test9(
; CHECK-NOT: alloca
; CHECK-NOT: load
-; CHECK: %[[value:.*]] = load i32* %ptr
+; CHECK: %[[value:.*]] = load i32, i32* %ptr
; CHECK-NOT: load
; CHECK: %[[result:.*]] = select i1 %{{.*}}, i32 undef, i32 %[[value]]
; CHECK-NEXT: ret i32 %[[result]]
@@ -252,7 +252,7 @@ entry:
%test = icmp ne i32 %b, 0
%bitcast = bitcast float* %f to i32*
%select = select i1 %test, i32* %bitcast, i32* %ptr
- %loaded = load i32* %select, align 4
+ %loaded = load i32, i32* %select, align 4
ret i32 %loaded
}
@@ -262,9 +262,9 @@ define float @test10(i32 %b, float* %ptr) {
; node.
; CHECK-LABEL: @test10(
; CHECK: %[[alloca:.*]] = alloca
-; CHECK: %[[argvalue:.*]] = load float* %ptr
+; CHECK: %[[argvalue:.*]] = load float, float* %ptr
; CHECK: %[[cast:.*]] = bitcast double* %[[alloca]] to float*
-; CHECK: %[[allocavalue:.*]] = load float* %[[cast]]
+; CHECK: %[[allocavalue:.*]] = load float, float* %[[cast]]
; CHECK: %[[result:.*]] = phi float [ %[[allocavalue]], %else ], [ %[[argvalue]], %then ]
; CHECK-NEXT: ret float %[[result]]
@@ -283,7 +283,7 @@ else:
exit:
%phi = phi float* [ %bitcast, %else ], [ %ptr, %then ]
- %loaded = load float* %phi, align 4
+ %loaded = load float, float* %phi, align 4
ret float %loaded
}
@@ -292,8 +292,8 @@ define float @test11(i32 %b, float* %ptr) {
; CHECK-LABEL: @test11(
; CHECK: %[[alloca:.*]] = alloca
; CHECK: %[[cast:.*]] = bitcast double* %[[alloca]] to float*
-; CHECK: %[[allocavalue:.*]] = load float* %[[cast]]
-; CHECK: %[[argvalue:.*]] = load float* %ptr
+; CHECK: %[[allocavalue:.*]] = load float, float* %[[cast]]
+; CHECK: %[[argvalue:.*]] = load float, float* %ptr
; CHECK: %[[result:.*]] = select i1 %{{.*}}, float %[[allocavalue]], float %[[argvalue]]
; CHECK-NEXT: ret float %[[result]]
@@ -304,7 +304,7 @@ entry:
%test = icmp ne i32 %b, 0
%bitcast = bitcast double* %f to float*
%select = select i1 %test, float* %bitcast, float* %ptr
- %loaded = load float* %select, align 4
+ %loaded = load float, float* %select, align 4
ret float %loaded
}
@@ -320,7 +320,7 @@ entry:
%a = alloca i32
store i32 %x, i32* %a
%dead = select i1 undef, i32* %a, i32* %p
- %load = load i32* %a
+ %load = load i32, i32* %a
ret i32 %load
}
@@ -342,7 +342,7 @@ loop:
br i1 undef, label %loop, label %exit
exit:
- %load = load i32* %a
+ %load = load i32, i32* %a
ret i32 %load
}
@@ -376,9 +376,9 @@ else:
exit:
%f.phi = phi i32* [ %f, %then ], [ %f.select, %else ]
%g.phi = phi i32* [ %g, %then ], [ %ptr, %else ]
- %f.loaded = load i32* %f.phi
+ %f.loaded = load i32, i32* %f.phi
%g.select = select i1 %b1, i32* %g, i32* %g.phi
- %g.loaded = load i32* %g.select
+ %g.loaded = load i32, i32* %g.select
%result = add i32 %f.loaded, %g.loaded
ret i32 %result
}
@@ -456,8 +456,8 @@ if.then:
; CHECK: %[[ext:.*]] = zext i8 1 to i64
if.end:
- %tmp = load i64** %ptr
- %result = load i64* %tmp
+ %tmp = load i64*, i64** %ptr
+ %result = load i64, i64* %tmp
; CHECK-NOT: load
; CHECK: %[[result:.*]] = phi i64 [ %[[ext]], %if.then ], [ 0, %entry ]
@@ -495,7 +495,7 @@ else:
end:
%a.phi.f = phi float* [ %a.f, %then ], [ %a.raw.4.f, %else ]
- %f = load float* %a.phi.f
+ %f = load float, float* %a.phi.f
ret float %f
; CHECK: %[[phi:.*]] = phi float [ %[[lo_cast]], %then ], [ %[[hi_cast]], %else ]
; CHECK-NOT: load
@@ -528,7 +528,7 @@ else:
merge:
%2 = phi float* [ %0, %then ], [ %1, %else ]
store float 0.000000e+00, float* %temp, align 4
- %3 = load float* %2, align 4
+ %3 = load float, float* %2, align 4
ret float %3
}
@@ -563,7 +563,7 @@ else:
merge:
%3 = phi float* [ %1, %then2 ], [ %2, %else ]
store float 0.000000e+00, float* %temp, align 4
- %4 = load float* %3, align 4
+ %4 = load float, float* %3, align 4
ret float %4
}
diff --git a/llvm/test/Transforms/SROA/slice-order-independence.ll b/llvm/test/Transforms/SROA/slice-order-independence.ll
index 6b38f4c6d55..7d57be6d81c 100644
--- a/llvm/test/Transforms/SROA/slice-order-independence.ll
+++ b/llvm/test/Transforms/SROA/slice-order-independence.ll
@@ -15,9 +15,9 @@ define void @skipped_inttype_first({ i16*, i32 }*) {
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %3, i8* %2, i32 16, i32 8, i1 false)
%b = getelementptr inbounds { i16*, i32 }, { i16*, i32 }* %arg, i64 0, i32 0
%pb0 = bitcast i16** %b to i63*
- %b0 = load i63* %pb0
+ %b0 = load i63, i63* %pb0
%pb1 = bitcast i16** %b to i8**
- %b1 = load i8** %pb1
+ %b1 = load i8*, i8** %pb1
ret void
}
@@ -30,8 +30,8 @@ define void @skipped_inttype_last({ i16*, i32 }*) {
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %3, i8* %2, i32 16, i32 8, i1 false)
%b = getelementptr inbounds { i16*, i32 }, { i16*, i32 }* %arg, i64 0, i32 0
%pb1 = bitcast i16** %b to i8**
- %b1 = load i8** %pb1
+ %b1 = load i8*, i8** %pb1
%pb0 = bitcast i16** %b to i63*
- %b0 = load i63* %pb0
+ %b0 = load i63, i63* %pb0
ret void
}
diff --git a/llvm/test/Transforms/SROA/slice-width.ll b/llvm/test/Transforms/SROA/slice-width.ll
index 4d2a56f64ad..a37a15aad90 100644
--- a/llvm/test/Transforms/SROA/slice-width.ll
+++ b/llvm/test/Transforms/SROA/slice-width.ll
@@ -14,14 +14,14 @@ load_i32:
; CHECK-LABEL: load_i32:
; CHECK-NOT: bitcast {{.*}} to i1
; CHECK-NOT: zext i1
- %r0 = load i32* %arg
+ %r0 = load i32, i32* %arg
br label %load_i1
load_i1:
; CHECK-LABEL: load_i1:
; CHECK: bitcast {{.*}} to i1
%p1 = bitcast i32* %arg to i1*
- %t1 = load i1* %p1
+ %t1 = load i1, i1* %p1
ret void
}
@@ -43,15 +43,15 @@ define void @memcpy_fp80_padding() {
; Access a slice of the alloca to trigger SROA.
%mid_p = getelementptr %union.Foo, %union.Foo* %x, i32 0, i32 1
- %elt = load i64* %mid_p
+ %elt = load i64, i64* %mid_p
store i64 %elt, i64* @i64_sink
ret void
}
; CHECK-LABEL: define void @memcpy_fp80_padding
; CHECK: alloca x86_fp80
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i32
-; CHECK: load i64* getelementptr inbounds (%union.Foo* @foo_copy_source, i64 0, i32 1)
-; CHECK: load i64* getelementptr inbounds (%union.Foo* @foo_copy_source, i64 0, i32 2)
+; CHECK: load i64, i64* getelementptr inbounds (%union.Foo* @foo_copy_source, i64 0, i32 1)
+; CHECK: load i64, i64* getelementptr inbounds (%union.Foo* @foo_copy_source, i64 0, i32 2)
define void @memset_fp80_padding() {
%x = alloca %union.Foo
@@ -62,7 +62,7 @@ define void @memset_fp80_padding() {
; Access a slice of the alloca to trigger SROA.
%mid_p = getelementptr %union.Foo, %union.Foo* %x, i32 0, i32 1
- %elt = load i64* %mid_p
+ %elt = load i64, i64* %mid_p
store i64 %elt, i64* @i64_sink
ret void
}
@@ -90,7 +90,7 @@ entry:
; The following block does nothing; but appears to confuse SROA
%unused1 = bitcast %S.vec3float* %tmp1 to %U.vec3float*
%unused2 = getelementptr inbounds %U.vec3float, %U.vec3float* %unused1, i32 0, i32 0
- %unused3 = load <4 x float>* %unused2, align 1
+ %unused3 = load <4 x float>, <4 x float>* %unused2, align 1
; Create a second temporary and copy %tmp1 into it
%tmp2 = alloca %S.vec3float, align 4
diff --git a/llvm/test/Transforms/SROA/vector-conversion.ll b/llvm/test/Transforms/SROA/vector-conversion.ll
index 08d79608731..91ae5be6c3d 100644
--- a/llvm/test/Transforms/SROA/vector-conversion.ll
+++ b/llvm/test/Transforms/SROA/vector-conversion.ll
@@ -10,7 +10,7 @@ define <4 x i64> @vector_ptrtoint({<2 x i32*>, <2 x i32*>} %x) {
; CHECK-NOT: store
%cast = bitcast {<2 x i32*>, <2 x i32*>}* %a to <4 x i64>*
- %vec = load <4 x i64>* %cast
+ %vec = load <4 x i64>, <4 x i64>* %cast
; CHECK-NOT: load
; CHECK: ptrtoint
@@ -26,7 +26,7 @@ define <4 x i32*> @vector_inttoptr({<2 x i64>, <2 x i64>} %x) {
; CHECK-NOT: store
%cast = bitcast {<2 x i64>, <2 x i64>}* %a to <4 x i32*>*
- %vec = load <4 x i32*>* %cast
+ %vec = load <4 x i32*>, <4 x i32*>* %cast
; CHECK-NOT: load
; CHECK: inttoptr
@@ -42,7 +42,7 @@ define <2 x i64> @vector_ptrtointbitcast({<1 x i32*>, <1 x i32*>} %x) {
; CHECK-NOT: store
%cast = bitcast {<1 x i32*>, <1 x i32*>}* %a to <2 x i64>*
- %vec = load <2 x i64>* %cast
+ %vec = load <2 x i64>, <2 x i64>* %cast
; CHECK-NOT: load
; CHECK: ptrtoint
; CHECK: bitcast
diff --git a/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll b/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll
index 30c93b054ec..37cf394382a 100644
--- a/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll
+++ b/llvm/test/Transforms/SROA/vector-lifetime-intrinsic.ll
@@ -18,7 +18,7 @@ bb:
call void @llvm.lifetime.start(i64 16, i8* %tmp8)
store <4 x float> %arg1, <4 x float>* %tmp, align 16
%tmp17 = bitcast <4 x float>* %tmp to <3 x float>*
- %tmp18 = load <3 x float>* %tmp17
+ %tmp18 = load <3 x float>, <3 x float>* %tmp17
%tmp20 = bitcast <4 x float>* %tmp to i8*
call void @llvm.lifetime.end(i64 16, i8* %tmp20)
call void @wombat3(<3 x float> %tmp18)
diff --git a/llvm/test/Transforms/SROA/vector-promotion.ll b/llvm/test/Transforms/SROA/vector-promotion.ll
index 73f5ba04ec1..2d9b26b5309 100644
--- a/llvm/test/Transforms/SROA/vector-promotion.ll
+++ b/llvm/test/Transforms/SROA/vector-promotion.ll
@@ -16,11 +16,11 @@ entry:
; CHECK-NOT: store
%a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
- %tmp1 = load i32* %a.tmp1
+ %tmp1 = load i32, i32* %a.tmp1
%a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
- %tmp2 = load i32* %a.tmp2
+ %tmp2 = load i32, i32* %a.tmp2
%a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
- %tmp3 = load i32* %a.tmp3
+ %tmp3 = load i32, i32* %a.tmp3
; CHECK-NOT: load
; CHECK: extractelement <4 x i32> %x, i32 2
; CHECK-NEXT: extractelement <4 x i32> %y, i32 3
@@ -47,12 +47,12 @@ entry:
; CHECK-NOT: store
%a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
- %tmp1 = load i32* %a.tmp1
+ %tmp1 = load i32, i32* %a.tmp1
%a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
- %tmp2 = load i32* %a.tmp2
+ %tmp2 = load i32, i32* %a.tmp2
%a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
%a.tmp3.cast = bitcast i32* %a.tmp3 to <2 x i32>*
- %tmp3.vec = load <2 x i32>* %a.tmp3.cast
+ %tmp3.vec = load <2 x i32>, <2 x i32>* %a.tmp3.cast
%tmp3 = extractelement <2 x i32> %tmp3.vec, i32 0
; CHECK-NOT: load
; CHECK: %[[extract1:.*]] = extractelement <4 x i32> %x, i32 2
@@ -87,11 +87,11 @@ entry:
%a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
%a.tmp1.cast = bitcast i32* %a.tmp1 to i8*
call void @llvm.memset.p0i8.i32(i8* %a.tmp1.cast, i8 -1, i32 4, i32 1, i1 false)
- %tmp1 = load i32* %a.tmp1
+ %tmp1 = load i32, i32* %a.tmp1
%a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
- %tmp2 = load i32* %a.tmp2
+ %tmp2 = load i32, i32* %a.tmp2
%a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
- %tmp3 = load i32* %a.tmp3
+ %tmp3 = load i32, i32* %a.tmp3
; CHECK-NOT: load
; CHECK: %[[insert:.*]] = insertelement <4 x i32> %x, i32 -1, i32 2
; CHECK-NEXT: extractelement <4 x i32> %[[insert]], i32 2
@@ -128,15 +128,15 @@ entry:
%z.tmp1 = getelementptr inbounds <4 x i32>, <4 x i32>* %z, i64 0, i64 2
%z.tmp1.cast = bitcast i32* %z.tmp1 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.tmp1.cast, i8* %z.tmp1.cast, i32 4, i32 1, i1 false)
- %tmp1 = load i32* %a.tmp1
+ %tmp1 = load i32, i32* %a.tmp1
%a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
- %tmp2 = load i32* %a.tmp2
+ %tmp2 = load i32, i32* %a.tmp2
%a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
- %tmp3 = load i32* %a.tmp3
+ %tmp3 = load i32, i32* %a.tmp3
; CHECK-NOT: memcpy
-; CHECK: %[[load:.*]] = load <4 x i32>* %z
+; CHECK: %[[load:.*]] = load <4 x i32>, <4 x i32>* %z
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* %z, i64 0, i64 2
-; CHECK-NEXT: %[[element_load:.*]] = load i32* %[[gep]]
+; CHECK-NEXT: %[[element_load:.*]] = load i32, i32* %[[gep]]
; CHECK-NEXT: %[[insert:.*]] = insertelement <4 x i32> %x, i32 %[[element_load]], i32 2
; CHECK-NEXT: extractelement <4 x i32> %[[insert]], i32 2
; CHECK-NEXT: extractelement <4 x i32> %[[load]], i32 3
@@ -175,15 +175,15 @@ entry:
%z.tmp1 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %z, i16 0, i16 2
%z.tmp1.cast = bitcast i32 addrspace(1)* %z.tmp1 to i8 addrspace(1)*
call void @llvm.memcpy.p0i8.p1i8.i32(i8* %a.tmp1.cast, i8 addrspace(1)* %z.tmp1.cast, i32 4, i32 1, i1 false)
- %tmp1 = load i32* %a.tmp1
+ %tmp1 = load i32, i32* %a.tmp1
%a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
- %tmp2 = load i32* %a.tmp2
+ %tmp2 = load i32, i32* %a.tmp2
%a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
- %tmp3 = load i32* %a.tmp3
+ %tmp3 = load i32, i32* %a.tmp3
; CHECK-NOT: memcpy
-; CHECK: %[[load:.*]] = load <4 x i32> addrspace(1)* %z
+; CHECK: %[[load:.*]] = load <4 x i32>, <4 x i32> addrspace(1)* %z
; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %z, i64 0, i64 2
-; CHECK-NEXT: %[[element_load:.*]] = load i32 addrspace(1)* %[[gep]]
+; CHECK-NEXT: %[[element_load:.*]] = load i32, i32 addrspace(1)* %[[gep]]
; CHECK-NEXT: %[[insert:.*]] = insertelement <4 x i32> %x, i32 %[[element_load]], i32 2
; CHECK-NEXT: extractelement <4 x i32> %[[insert]], i32 2
; CHECK-NEXT: extractelement <4 x i32> %[[load]], i32 3
@@ -221,11 +221,11 @@ entry:
%z.tmp1 = getelementptr inbounds <4 x i32>, <4 x i32>* %z, i64 0, i64 2
%z.tmp1.cast = bitcast i32* %z.tmp1 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %z.tmp1.cast, i8* %a.tmp1.cast, i32 4, i32 1, i1 false)
- %tmp1 = load i32* %a.tmp1
+ %tmp1 = load i32, i32* %a.tmp1
%a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
- %tmp2 = load i32* %a.tmp2
+ %tmp2 = load i32, i32* %a.tmp2
%a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
- %tmp3 = load i32* %a.tmp3
+ %tmp3 = load i32, i32* %a.tmp3
; CHECK-NOT: memcpy
; CHECK: %[[gep:.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* %z, i64 0, i64 2
; CHECK-NEXT: %[[extract:.*]] = extractelement <4 x i32> %y, i32 2
@@ -257,7 +257,7 @@ define i64 @test6(<4 x i64> %x, <4 x i64> %y, i64 %n) {
store <4 x i64> %y, <4 x i64>* %p1
; CHECK: store <4 x i64> %y,
%addr = getelementptr inbounds { <4 x i64>, <4 x i64> }, { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 0, i64 %n
- %res = load i64* %addr, align 4
+ %res = load i64, i64* %addr, align 4
ret i64 %res
}
@@ -287,7 +287,7 @@ entry:
store i32 3, i32* %a.gep3
; CHECK-NEXT: insertelement <4 x i32>
- %ret = load <4 x i32>* %a
+ %ret = load <4 x i32>, <4 x i32>* %a
ret <4 x i32> %ret
; CHECK-NEXT: ret <4 x i32>
@@ -303,18 +303,18 @@ entry:
%a.gep0 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 0
%a.cast0 = bitcast i32* %a.gep0 to <2 x i32>*
- %first = load <2 x i32>* %a.cast0
+ %first = load <2 x i32>, <2 x i32>* %a.cast0
; CHECK-NOT: load
; CHECK: %[[extract1:.*]] = shufflevector <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
%a.gep1 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 1
%a.cast1 = bitcast i32* %a.gep1 to <2 x i32>*
- %second = load <2 x i32>* %a.cast1
+ %second = load <2 x i32>, <2 x i32>* %a.cast1
; CHECK-NEXT: %[[extract2:.*]] = shufflevector <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> undef, <2 x i32> <i32 1, i32 2>
%a.gep2 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 2
%a.cast2 = bitcast i32* %a.gep2 to <2 x i32>*
- %third = load <2 x i32>* %a.cast2
+ %third = load <2 x i32>, <2 x i32>* %a.cast2
; CHECK-NEXT: %[[extract3:.*]] = shufflevector <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%tmp = shufflevector <2 x i32> %first, <2 x i32> %second, <2 x i32> <i32 0, i32 2>
@@ -355,7 +355,7 @@ entry:
call void @llvm.memset.p0i8.i32(i8* %a.cast3, i8 7, i32 4, i32 0, i1 false)
; CHECK-NEXT: insertelement <4 x float>
- %ret = load <4 x float>* %a
+ %ret = load <4 x float>, <4 x float>* %a
ret <4 x float> %ret
; CHECK-NEXT: ret <4 x float>
@@ -371,7 +371,7 @@ entry:
%a.cast0 = bitcast float* %a.gep0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast0, i8* %x, i32 8, i32 0, i1 false)
; CHECK: %[[xptr:.*]] = bitcast i8* %x to <2 x float>*
-; CHECK-NEXT: %[[x:.*]] = load <2 x float>* %[[xptr]]
+; CHECK-NEXT: %[[x:.*]] = load <2 x float>, <2 x float>* %[[xptr]]
; CHECK-NEXT: %[[expand_x:.*]] = shufflevector <2 x float> %[[x]], <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
; CHECK-NEXT: select <4 x i1> <i1 true, i1 true, i1 false, i1 false>
@@ -379,7 +379,7 @@ entry:
%a.cast1 = bitcast float* %a.gep1 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast1, i8* %y, i32 8, i32 0, i1 false)
; CHECK-NEXT: %[[yptr:.*]] = bitcast i8* %y to <2 x float>*
-; CHECK-NEXT: %[[y:.*]] = load <2 x float>* %[[yptr]]
+; CHECK-NEXT: %[[y:.*]] = load <2 x float>, <2 x float>* %[[yptr]]
; CHECK-NEXT: %[[expand_y:.*]] = shufflevector <2 x float> %[[y]], <2 x float> undef, <4 x i32> <i32 undef, i32 0, i32 1, i32 undef>
; CHECK-NEXT: select <4 x i1> <i1 false, i1 true, i1 true, i1 false>
@@ -387,7 +387,7 @@ entry:
%a.cast2 = bitcast float* %a.gep2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast2, i8* %z, i32 8, i32 0, i1 false)
; CHECK-NEXT: %[[zptr:.*]] = bitcast i8* %z to <2 x float>*
-; CHECK-NEXT: %[[z:.*]] = load <2 x float>* %[[zptr]]
+; CHECK-NEXT: %[[z:.*]] = load <2 x float>, <2 x float>* %[[zptr]]
; CHECK-NEXT: %[[expand_z:.*]] = shufflevector <2 x float> %[[z]], <2 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
; CHECK-NEXT: select <4 x i1> <i1 false, i1 false, i1 true, i1 true>
@@ -395,7 +395,7 @@ entry:
%a.cast3 = bitcast float* %a.gep3 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast3, i8* %f, i32 4, i32 0, i1 false)
; CHECK-NEXT: %[[fptr:.*]] = bitcast i8* %f to float*
-; CHECK-NEXT: %[[f:.*]] = load float* %[[fptr]]
+; CHECK-NEXT: %[[f:.*]] = load float, float* %[[fptr]]
; CHECK-NEXT: %[[insert_f:.*]] = insertelement <4 x float>
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %out, i8* %a.cast2, i32 8, i32 0, i1 false)
@@ -403,7 +403,7 @@ entry:
; CHECK-NEXT: %[[extract_out:.*]] = shufflevector <4 x float> %[[insert_f]], <4 x float> undef, <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: store <2 x float> %[[extract_out]], <2 x float>* %[[outptr]]
- %ret = load <4 x float>* %a
+ %ret = load <4 x float>, <4 x float>* %a
ret <4 x float> %ret
; CHECK-NEXT: ret <4 x float> %[[insert_f]]
@@ -419,7 +419,7 @@ entry:
store <3 x i8> undef, <3 x i8>* %retval, align 4
%cast = bitcast <3 x i8>* %retval to i32*
- %load = load i32* %cast, align 4
+ %load = load i32, i32* %cast, align 4
ret i32 %load
; CHECK: ret i32
}
@@ -437,7 +437,7 @@ entry:
; CHECK-NOT: store
%cast = bitcast i32* %a to <2 x i8>*
- %vec = load <2 x i8>* %cast
+ %vec = load <2 x i8>, <2 x i8>* %cast
; CHECK-NOT: load
ret <2 x i8> %vec
@@ -459,7 +459,7 @@ entry:
store <2 x i8> %x, <2 x i8>* %cast
; CHECK-NOT: store
- %int = load i32* %a
+ %int = load i32, i32* %a
; CHECK-NOT: load
ret i32 %int
@@ -484,11 +484,11 @@ entry:
; CHECK-NOT: store
%a.tmp1 = getelementptr inbounds [2 x <2 x i32>], [2 x <2 x i32>]* %a.cast, i64 0, i64 0, i64 1
- %tmp1 = load i32* %a.tmp1
+ %tmp1 = load i32, i32* %a.tmp1
%a.tmp2 = getelementptr inbounds [2 x <2 x i32>], [2 x <2 x i32>]* %a.cast, i64 0, i64 1, i64 1
- %tmp2 = load i32* %a.tmp2
+ %tmp2 = load i32, i32* %a.tmp2
%a.tmp3 = getelementptr inbounds [2 x <2 x i32>], [2 x <2 x i32>]* %a.cast, i64 0, i64 1, i64 0
- %tmp3 = load i32* %a.tmp3
+ %tmp3 = load i32, i32* %a.tmp3
; CHECK-NOT: load
; CHECK: extractelement <2 x i32> %x, i32 1
; CHECK-NEXT: extractelement <2 x i32> %y, i32 1
@@ -515,9 +515,9 @@ entry:
store <2 x i32> %x, <2 x i32>* %a.vec
; CHECK-NOT: store
- %tmp1 = load i32* %a.i32
+ %tmp1 = load i32, i32* %a.i32
%a.tmp2 = getelementptr inbounds i32, i32* %a.i32, i64 1
- %tmp2 = load i32* %a.tmp2
+ %tmp2 = load i32, i32* %a.tmp2
; CHECK-NOT: load
; CHECK: extractelement <2 x i32> %x, i32 0
; CHECK-NEXT: extractelement <2 x i32> %x, i32 1
@@ -545,7 +545,7 @@ entry:
; CHECK: %[[V1:.*]] = insertelement <2 x i32> undef, i32 %x, i32 0
; CHECK-NEXT: %[[V2:.*]] = insertelement <2 x i32> %[[V1]], i32 %y, i32 1
- %result = load <2 x i32>* %a.vec
+ %result = load <2 x i32>, <2 x i32>* %a.vec
; CHECK-NOT: load
ret <2 x i32> %result
@@ -570,7 +570,7 @@ entry:
; CHECK: %[[V1:.*]] = bitcast <4 x i16> %x to <2 x i32>
; CHECK-NEXT: %[[V2:.*]] = insertelement <2 x i32> %[[V1]], i32 %y, i32 1
- %result = load <2 x i32>* %a.vec1
+ %result = load <2 x i32>, <2 x i32>* %a.vec1
; CHECK-NOT: load
ret <2 x i32> %result
@@ -598,7 +598,7 @@ entry:
; CHECK-NEXT: %[[V3:.*]] = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x i16> %[[V2]], <4 x i16> %x
; CHECK-NEXT: %[[V4:.*]] = bitcast <4 x i16> %[[V3]] to <2 x float>
- %result = load <2 x float>* %a.vec1
+ %result = load <2 x float>, <2 x float>* %a.vec1
; CHECK-NOT: load
ret <2 x float> %result
@@ -616,7 +616,7 @@ define <4 x float> @test12() {
%cast2 = bitcast <3 x i32>* %a to <3 x float>*
%cast3 = bitcast <3 x float>* %cast2 to <4 x float>*
- %vec = load <4 x float>* %cast3
+ %vec = load <4 x float>, <4 x float>* %cast3
; CHECK-NOT: load
; CHECK: %[[ret:.*]] = bitcast <4 x i32> undef to <4 x float>
diff --git a/llvm/test/Transforms/SROA/vectors-of-pointers.ll b/llvm/test/Transforms/SROA/vectors-of-pointers.ll
index 7e995b9e447..ff09e959896 100644
--- a/llvm/test/Transforms/SROA/vectors-of-pointers.ll
+++ b/llvm/test/Transforms/SROA/vectors-of-pointers.ll
@@ -20,6 +20,6 @@ if.then.i.i.i.i.i237:
unreachable
bb0.exit257:
- %0 = load <2 x i32*>* %Args.i, align 16
+ %0 = load <2 x i32*>, <2 x i32*>* %Args.i, align 16
unreachable
}
diff --git a/llvm/test/Transforms/SampleProfile/branch.ll b/llvm/test/Transforms/SampleProfile/branch.ll
index ef39093fdf2..13e362de7ca 100644
--- a/llvm/test/Transforms/SampleProfile/branch.ll
+++ b/llvm/test/Transforms/SampleProfile/branch.ll
@@ -41,7 +41,7 @@ entry:
if.end: ; preds = %entry
%arrayidx = getelementptr inbounds i8*, i8** %argv, i64 1, !dbg !30
- %0 = load i8** %arrayidx, align 8, !dbg !30, !tbaa !31
+ %0 = load i8*, i8** %arrayidx, align 8, !dbg !30, !tbaa !31
%call = tail call i32 @atoi(i8* %0) #4, !dbg !30
tail call void @llvm.dbg.value(metadata i32 %call, i64 0, metadata !17, metadata !{}), !dbg !30
%cmp1 = icmp sgt i32 %call, 100, !dbg !35
diff --git a/llvm/test/Transforms/SampleProfile/calls.ll b/llvm/test/Transforms/SampleProfile/calls.ll
index d56660937bc..3194e62f494 100644
--- a/llvm/test/Transforms/SampleProfile/calls.ll
+++ b/llvm/test/Transforms/SampleProfile/calls.ll
@@ -30,8 +30,8 @@ entry:
%y.addr = alloca i32, align 4
store i32 %x, i32* %x.addr, align 4
store i32 %y, i32* %y.addr, align 4
- %0 = load i32* %x.addr, align 4, !dbg !11
- %1 = load i32* %y.addr, align 4, !dbg !11
+ %0 = load i32, i32* %x.addr, align 4, !dbg !11
+ %1 = load i32, i32* %y.addr, align 4, !dbg !11
%add = add nsw i32 %0, %1, !dbg !11
ret i32 %add, !dbg !11
}
@@ -47,7 +47,7 @@ entry:
br label %while.cond, !dbg !13
while.cond: ; preds = %if.end, %entry
- %0 = load i32* %i, align 4, !dbg !14
+ %0 = load i32, i32* %i, align 4, !dbg !14
%inc = add nsw i32 %0, 1, !dbg !14
store i32 %inc, i32* %i, align 4, !dbg !14
%cmp = icmp slt i32 %0, 400000000, !dbg !14
@@ -56,7 +56,7 @@ while.cond: ; preds = %if.end, %entry
; CHECK: edge while.cond -> while.end probability is 1 / 5392 = 0.018546%
while.body: ; preds = %while.cond
- %1 = load i32* %i, align 4, !dbg !16
+ %1 = load i32, i32* %i, align 4, !dbg !16
%cmp1 = icmp ne i32 %1, 100, !dbg !16
br i1 %cmp1, label %if.then, label %if.else, !dbg !16
; Without discriminator information, the profiler used to think that
@@ -68,8 +68,8 @@ while.body: ; preds = %while.cond
if.then: ; preds = %while.body
- %2 = load i32* %i, align 4, !dbg !18
- %3 = load i32* %s, align 4, !dbg !18
+ %2 = load i32, i32* %i, align 4, !dbg !18
+ %3 = load i32, i32* %s, align 4, !dbg !18
%call = call i32 @_Z3sumii(i32 %2, i32 %3), !dbg !18
store i32 %call, i32* %s, align 4, !dbg !18
br label %if.end, !dbg !18
@@ -82,7 +82,7 @@ if.end: ; preds = %if.else, %if.then
br label %while.cond, !dbg !22
while.end: ; preds = %while.cond
- %4 = load i32* %s, align 4, !dbg !24
+ %4 = load i32, i32* %s, align 4, !dbg !24
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0), i32 %4), !dbg !24
ret i32 0, !dbg !25
}
diff --git a/llvm/test/Transforms/SampleProfile/discriminator.ll b/llvm/test/Transforms/SampleProfile/discriminator.ll
index cafc69d1492..704a407fcf6 100644
--- a/llvm/test/Transforms/SampleProfile/discriminator.ll
+++ b/llvm/test/Transforms/SampleProfile/discriminator.ll
@@ -31,33 +31,33 @@ entry:
br label %while.cond, !dbg !11
while.cond: ; preds = %if.end, %entry
- %0 = load i32* %i.addr, align 4, !dbg !12
+ %0 = load i32, i32* %i.addr, align 4, !dbg !12
%cmp = icmp slt i32 %0, 100, !dbg !12
br i1 %cmp, label %while.body, label %while.end, !dbg !12
; CHECK: edge while.cond -> while.body probability is 100 / 101 = 99.0099% [HOT edge]
; CHECK: edge while.cond -> while.end probability is 1 / 101 = 0.990099%
while.body: ; preds = %while.cond
- %1 = load i32* %i.addr, align 4, !dbg !14
+ %1 = load i32, i32* %i.addr, align 4, !dbg !14
%cmp1 = icmp slt i32 %1, 50, !dbg !14
br i1 %cmp1, label %if.then, label %if.end, !dbg !14
; CHECK: edge while.body -> if.then probability is 5 / 100 = 5%
; CHECK: edge while.body -> if.end probability is 95 / 100 = 95% [HOT edge]
if.then: ; preds = %while.body
- %2 = load i32* %x, align 4, !dbg !17
+ %2 = load i32, i32* %x, align 4, !dbg !17
%dec = add nsw i32 %2, -1, !dbg !17
store i32 %dec, i32* %x, align 4, !dbg !17
br label %if.end, !dbg !17
if.end: ; preds = %if.then, %while.body
- %3 = load i32* %i.addr, align 4, !dbg !19
+ %3 = load i32, i32* %i.addr, align 4, !dbg !19
%inc = add nsw i32 %3, 1, !dbg !19
store i32 %inc, i32* %i.addr, align 4, !dbg !19
br label %while.cond, !dbg !20
while.end: ; preds = %while.cond
- %4 = load i32* %x, align 4, !dbg !21
+ %4 = load i32, i32* %x, align 4, !dbg !21
ret i32 %4, !dbg !21
}
diff --git a/llvm/test/Transforms/SampleProfile/propagate.ll b/llvm/test/Transforms/SampleProfile/propagate.ll
index 594645f7f2b..520ab04ab3c 100644
--- a/llvm/test/Transforms/SampleProfile/propagate.ll
+++ b/llvm/test/Transforms/SampleProfile/propagate.ll
@@ -51,14 +51,14 @@ entry:
store i32 %x, i32* %x.addr, align 4
store i32 %y, i32* %y.addr, align 4
store i64 %N, i64* %N.addr, align 8
- %0 = load i32* %x.addr, align 4, !dbg !11
- %1 = load i32* %y.addr, align 4, !dbg !11
+ %0 = load i32, i32* %x.addr, align 4, !dbg !11
+ %1 = load i32, i32* %y.addr, align 4, !dbg !11
%cmp = icmp slt i32 %0, %1, !dbg !11
br i1 %cmp, label %if.then, label %if.else, !dbg !11
if.then: ; preds = %entry
- %2 = load i32* %y.addr, align 4, !dbg !13
- %3 = load i32* %x.addr, align 4, !dbg !13
+ %2 = load i32, i32* %y.addr, align 4, !dbg !13
+ %3 = load i32, i32* %x.addr, align 4, !dbg !13
%sub = sub nsw i32 %2, %3, !dbg !13
%conv = sext i32 %sub to i64, !dbg !13
store i64 %conv, i64* %retval, !dbg !13
@@ -69,16 +69,16 @@ if.else: ; preds = %entry
br label %for.cond, !dbg !15
for.cond: ; preds = %for.inc16, %if.else
- %4 = load i64* %i, align 8, !dbg !15
- %5 = load i64* %N.addr, align 8, !dbg !15
+ %4 = load i64, i64* %i, align 8, !dbg !15
+ %5 = load i64, i64* %N.addr, align 8, !dbg !15
%cmp1 = icmp slt i64 %4, %5, !dbg !15
br i1 %cmp1, label %for.body, label %for.end18, !dbg !15
; CHECK: edge for.cond -> for.body probability is 10 / 11 = 90.9091% [HOT edge]
; CHECK: edge for.cond -> for.end18 probability is 1 / 11 = 9.09091%
for.body: ; preds = %for.cond
- %6 = load i64* %i, align 8, !dbg !18
- %7 = load i64* %N.addr, align 8, !dbg !18
+ %6 = load i64, i64* %i, align 8, !dbg !18
+ %7 = load i64, i64* %N.addr, align 8, !dbg !18
%div = sdiv i64 %7, 3, !dbg !18
%cmp2 = icmp sgt i64 %6, %div, !dbg !18
br i1 %cmp2, label %if.then3, label %if.end, !dbg !18
@@ -86,14 +86,14 @@ for.body: ; preds = %for.cond
; CHECK: edge for.body -> if.end probability is 4 / 5 = 80%
if.then3: ; preds = %for.body
- %8 = load i32* %x.addr, align 4, !dbg !21
+ %8 = load i32, i32* %x.addr, align 4, !dbg !21
%dec = add nsw i32 %8, -1, !dbg !21
store i32 %dec, i32* %x.addr, align 4, !dbg !21
br label %if.end, !dbg !21
if.end: ; preds = %if.then3, %for.body
- %9 = load i64* %i, align 8, !dbg !22
- %10 = load i64* %N.addr, align 8, !dbg !22
+ %9 = load i64, i64* %i, align 8, !dbg !22
+ %10 = load i64, i64* %N.addr, align 8, !dbg !22
%div4 = sdiv i64 %10, 4, !dbg !22
%cmp5 = icmp sgt i64 %9, %div4, !dbg !22
br i1 %cmp5, label %if.then6, label %if.else7, !dbg !22
@@ -101,10 +101,10 @@ if.end: ; preds = %if.then3, %for.body
; CHECK: edge if.end -> if.else7 probability is 6339 / 6342 = 99.9527% [HOT edge]
if.then6: ; preds = %if.end
- %11 = load i32* %y.addr, align 4, !dbg !24
+ %11 = load i32, i32* %y.addr, align 4, !dbg !24
%inc = add nsw i32 %11, 1, !dbg !24
store i32 %inc, i32* %y.addr, align 4, !dbg !24
- %12 = load i32* %x.addr, align 4, !dbg !26
+ %12 = load i32, i32* %x.addr, align 4, !dbg !26
%add = add nsw i32 %12, 3, !dbg !26
store i32 %add, i32* %x.addr, align 4, !dbg !26
br label %if.end15, !dbg !27
@@ -114,26 +114,26 @@ if.else7: ; preds = %if.end
br label %for.cond8, !dbg !28
for.cond8: ; preds = %for.inc, %if.else7
- %13 = load i32* %j, align 4, !dbg !28
+ %13 = load i32, i32* %j, align 4, !dbg !28
%conv9 = zext i32 %13 to i64, !dbg !28
- %14 = load i64* %i, align 8, !dbg !28
+ %14 = load i64, i64* %i, align 8, !dbg !28
%cmp10 = icmp slt i64 %conv9, %14, !dbg !28
br i1 %cmp10, label %for.body11, label %for.end, !dbg !28
; CHECK: edge for.cond8 -> for.body11 probability is 16191 / 16192 = 99.9938% [HOT edge]
; CHECK: edge for.cond8 -> for.end probability is 1 / 16192 = 0.00617589%
for.body11: ; preds = %for.cond8
- %15 = load i32* %j, align 4, !dbg !31
- %16 = load i32* %x.addr, align 4, !dbg !31
+ %15 = load i32, i32* %j, align 4, !dbg !31
+ %16 = load i32, i32* %x.addr, align 4, !dbg !31
%add12 = add i32 %16, %15, !dbg !31
store i32 %add12, i32* %x.addr, align 4, !dbg !31
- %17 = load i32* %y.addr, align 4, !dbg !33
+ %17 = load i32, i32* %y.addr, align 4, !dbg !33
%sub13 = sub nsw i32 %17, 3, !dbg !33
store i32 %sub13, i32* %y.addr, align 4, !dbg !33
br label %for.inc, !dbg !34
for.inc: ; preds = %for.body11
- %18 = load i32* %j, align 4, !dbg !28
+ %18 = load i32, i32* %j, align 4, !dbg !28
%inc14 = add i32 %18, 1, !dbg !28
store i32 %inc14, i32* %j, align 4, !dbg !28
br label %for.cond8, !dbg !28
@@ -145,7 +145,7 @@ if.end15: ; preds = %for.end, %if.then6
br label %for.inc16, !dbg !35
for.inc16: ; preds = %if.end15
- %19 = load i64* %i, align 8, !dbg !15
+ %19 = load i64, i64* %i, align 8, !dbg !15
%inc17 = add nsw i64 %19, 1, !dbg !15
store i64 %inc17, i64* %i, align 8, !dbg !15
br label %for.cond, !dbg !15
@@ -154,15 +154,15 @@ for.end18: ; preds = %for.cond
br label %if.end19
if.end19: ; preds = %for.end18
- %20 = load i32* %y.addr, align 4, !dbg !36
- %21 = load i32* %x.addr, align 4, !dbg !36
+ %20 = load i32, i32* %y.addr, align 4, !dbg !36
+ %21 = load i32, i32* %x.addr, align 4, !dbg !36
%mul = mul nsw i32 %20, %21, !dbg !36
%conv20 = sext i32 %mul to i64, !dbg !36
store i64 %conv20, i64* %retval, !dbg !36
br label %return, !dbg !36
return: ; preds = %if.end19, %if.then
- %22 = load i64* %retval, !dbg !37
+ %22 = load i64, i64* %retval, !dbg !37
ret i64 %22, !dbg !37
}
@@ -177,12 +177,12 @@ entry:
store i32 5678, i32* %x, align 4, !dbg !38
store i32 1234, i32* %y, align 4, !dbg !39
store i64 999999, i64* %N, align 8, !dbg !40
- %0 = load i32* %x, align 4, !dbg !41
- %1 = load i32* %y, align 4, !dbg !41
- %2 = load i64* %N, align 8, !dbg !41
- %3 = load i32* %x, align 4, !dbg !41
- %4 = load i32* %y, align 4, !dbg !41
- %5 = load i64* %N, align 8, !dbg !41
+ %0 = load i32, i32* %x, align 4, !dbg !41
+ %1 = load i32, i32* %y, align 4, !dbg !41
+ %2 = load i64, i64* %N, align 8, !dbg !41
+ %3 = load i32, i32* %x, align 4, !dbg !41
+ %4 = load i32, i32* %y, align 4, !dbg !41
+ %5 = load i64, i64* %N, align 8, !dbg !41
%call = call i64 @_Z3fooiil(i32 %3, i32 %4, i64 %5), !dbg !41
%call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([24 x i8]* @.str, i32 0, i32 0), i32 %0, i32 %1, i64 %2, i64 %call), !dbg !41
ret i32 0, !dbg !42
diff --git a/llvm/test/Transforms/ScalarRepl/2003-05-29-ArrayFail.ll b/llvm/test/Transforms/ScalarRepl/2003-05-29-ArrayFail.ll
index e5a6be99b50..336c0a9dfa6 100644
--- a/llvm/test/Transforms/ScalarRepl/2003-05-29-ArrayFail.ll
+++ b/llvm/test/Transforms/ScalarRepl/2003-05-29-ArrayFail.ll
@@ -8,6 +8,6 @@ define i32 @test() nounwind {
%Y = getelementptr [4 x i32], [4 x i32]* %X, i64 0, i64 0 ; <i32*> [#uses=1]
; Must preserve arrayness!
%Z = getelementptr i32, i32* %Y, i64 1 ; <i32*> [#uses=1]
- %A = load i32* %Z ; <i32> [#uses=1]
+ %A = load i32, i32* %Z ; <i32> [#uses=1]
ret i32 %A
}
diff --git a/llvm/test/Transforms/ScalarRepl/2006-11-07-InvalidArrayPromote.ll b/llvm/test/Transforms/ScalarRepl/2006-11-07-InvalidArrayPromote.ll
index a455e4f0d16..2701fdaea51 100644
--- a/llvm/test/Transforms/ScalarRepl/2006-11-07-InvalidArrayPromote.ll
+++ b/llvm/test/Transforms/ScalarRepl/2006-11-07-InvalidArrayPromote.ll
@@ -12,7 +12,7 @@ define i32 @func(<4 x float> %v0, <4 x float> %v1) nounwind {
%tmp14 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %vsiidx, i32 0, i32 1 ; <<4 x i32>*> [#uses=1]
store <4 x i32> %tmp10, <4 x i32>* %tmp14
%tmp15 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %vsiidx, i32 0, i32 0, i32 4 ; <i32*> [#uses=1]
- %tmp.upgrd.4 = load i32* %tmp15 ; <i32> [#uses=1]
+ %tmp.upgrd.4 = load i32, i32* %tmp15 ; <i32> [#uses=1]
ret i32 %tmp.upgrd.4
}
diff --git a/llvm/test/Transforms/ScalarRepl/2007-05-29-MemcpyPreserve.ll b/llvm/test/Transforms/ScalarRepl/2007-05-29-MemcpyPreserve.ll
index 9e73452f1cc..966b17939fe 100644
--- a/llvm/test/Transforms/ScalarRepl/2007-05-29-MemcpyPreserve.ll
+++ b/llvm/test/Transforms/ScalarRepl/2007-05-29-MemcpyPreserve.ll
@@ -14,7 +14,7 @@ entry:
%tmp2 = getelementptr %struct.UnionType, %struct.UnionType* %tmp, i32 0, i32 0, i32 0
%tmp13 = getelementptr %struct.UnionType, %struct.UnionType* %p, i32 0, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* %tmp13, i32 8, i32 0, i1 false)
- %tmp5 = load %struct.UnionType** %pointerToUnion
+ %tmp5 = load %struct.UnionType*, %struct.UnionType** %pointerToUnion
%tmp56 = getelementptr %struct.UnionType, %struct.UnionType* %tmp5, i32 0, i32 0, i32 0
%tmp7 = getelementptr %struct.UnionType, %struct.UnionType* %tmp, i32 0, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp56, i8* %tmp7, i32 8, i32 0, i1 false)
diff --git a/llvm/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll b/llvm/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll
index 52bd2c7b6e3..2de2f6717b5 100644
--- a/llvm/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll
+++ b/llvm/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll
@@ -11,20 +11,20 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store i16 %b, i16* %b_addr
%tmp1 = getelementptr %struct.S, %struct.S* %s, i32 0, i32 0 ; <i16*> [#uses=1]
- %tmp2 = load i16* %b_addr, align 2 ; <i16> [#uses=1]
+ %tmp2 = load i16, i16* %b_addr, align 2 ; <i16> [#uses=1]
store i16 %tmp2, i16* %tmp1, align 2
%tmp3 = getelementptr %struct.S, %struct.S* %s, i32 0, i32 0 ; <i16*> [#uses=1]
%tmp34 = bitcast i16* %tmp3 to [2 x i1]* ; <[2 x i1]*> [#uses=1]
%tmp5 = getelementptr [2 x i1], [2 x i1]* %tmp34, i32 0, i32 1 ; <i1*> [#uses=1]
- %tmp6 = load i1* %tmp5, align 1 ; <i1> [#uses=1]
+ %tmp6 = load i1, i1* %tmp5, align 1 ; <i1> [#uses=1]
%tmp67 = zext i1 %tmp6 to i32 ; <i32> [#uses=1]
store i32 %tmp67, i32* %tmp, align 4
- %tmp8 = load i32* %tmp, align 4 ; <i32> [#uses=1]
+ %tmp8 = load i32, i32* %tmp, align 4 ; <i32> [#uses=1]
store i32 %tmp8, i32* %retval, align 4
br label %return
return: ; preds = %entry
- %retval9 = load i32* %retval ; <i32> [#uses=1]
+ %retval9 = load i32, i32* %retval ; <i32> [#uses=1]
%retval910 = trunc i32 %retval9 to i1 ; <i1> [#uses=1]
ret i1 %retval910
}
diff --git a/llvm/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll b/llvm/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll
index 343fa1da98f..99366b36442 100644
--- a/llvm/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll
+++ b/llvm/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll
@@ -13,7 +13,7 @@ entry:
%tmp16 = getelementptr [1 x %struct.T], [1 x %struct.T]* %s, i32 0, i32 0 ; <%struct.T*> [#uses=1]
%tmp17 = getelementptr %struct.T, %struct.T* %tmp16, i32 0, i32 1 ; <[3 x i8]*> [#uses=1]
%tmp1718 = bitcast [3 x i8]* %tmp17 to i32* ; <i32*> [#uses=1]
- %tmp19 = load i32* %tmp1718, align 4 ; <i32> [#uses=1]
+ %tmp19 = load i32, i32* %tmp1718, align 4 ; <i32> [#uses=1]
%mask = and i32 %tmp19, 16777215 ; <i32> [#uses=2]
%mask2324 = trunc i32 %mask to i8 ; <i8> [#uses=1]
ret i8 %mask2324
diff --git a/llvm/test/Transforms/ScalarRepl/2008-02-28-SubElementExtractCrash.ll b/llvm/test/Transforms/ScalarRepl/2008-02-28-SubElementExtractCrash.ll
index d66070b4eae..f37b6529a54 100644
--- a/llvm/test/Transforms/ScalarRepl/2008-02-28-SubElementExtractCrash.ll
+++ b/llvm/test/Transforms/ScalarRepl/2008-02-28-SubElementExtractCrash.ll
@@ -11,6 +11,6 @@ entry:
%tmp7 = getelementptr %struct..0anon, %struct..0anon* %c, i32 0, i32 0 ; <<1 x i64>*> [#uses=1]
%tmp78 = bitcast <1 x i64>* %tmp7 to [2 x i32]* ; <[2 x i32]*> [#uses=1]
%tmp9 = getelementptr [2 x i32], [2 x i32]* %tmp78, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp10 = load i32* %tmp9, align 4 ; <i32> [#uses=0]
+ %tmp10 = load i32, i32* %tmp9, align 4 ; <i32> [#uses=0]
unreachable
}
diff --git a/llvm/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll b/llvm/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll
index ad4918d2927..d1f33121174 100644
--- a/llvm/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll
+++ b/llvm/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll
@@ -15,7 +15,7 @@ define i32 @foo() {
store { i32, i32 } %res2, { i32, i32 }* %target
; Actually use %target, so it doesn't get removed altogether
%ptr = getelementptr { i32, i32 }, { i32, i32 }* %target, i32 0, i32 0
- %val = load i32* %ptr
+ %val = load i32, i32* %ptr
ret i32 %val
}
@@ -28,6 +28,6 @@ define i32 @bar() {
store [ 2 x i32 ] %res2, [ 2 x i32 ]* %target
; Actually use %target, so it doesn't get removed altogether
%ptr = getelementptr [ 2 x i32 ], [ 2 x i32 ]* %target, i32 0, i32 0
- %val = load i32* %ptr
+ %val = load i32, i32* %ptr
ret i32 %val
}
diff --git a/llvm/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll b/llvm/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll
index 3ddb67d1953..c0ff25f3541 100644
--- a/llvm/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll
+++ b/llvm/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll
@@ -14,7 +14,7 @@ entry:
%s2 = bitcast %struct.x* %s to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %r1, i8* %s2, i32 12, i32 8, i1 false)
%1 = getelementptr %struct.x, %struct.x* %r, i32 0, i32 0, i32 1
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
ret i32 %2
}
diff --git a/llvm/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll b/llvm/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll
index e60a2d0ff53..f0af1caa461 100644
--- a/llvm/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll
+++ b/llvm/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll
@@ -11,6 +11,6 @@ define i32 @f(i32 %x, i32 %y) {
store i32 %x, i32* %cast
%second = getelementptr %pair, %pair* %instance, i32 0, i32 1
store i32 %y, i32* %second
- %v = load i32* %cast
+ %v = load i32, i32* %cast
ret i32 %v
}
diff --git a/llvm/test/Transforms/ScalarRepl/2009-02-05-LoadFCA.ll b/llvm/test/Transforms/ScalarRepl/2009-02-05-LoadFCA.ll
index 67228a7a3ce..56375ffe793 100644
--- a/llvm/test/Transforms/ScalarRepl/2009-02-05-LoadFCA.ll
+++ b/llvm/test/Transforms/ScalarRepl/2009-02-05-LoadFCA.ll
@@ -9,7 +9,7 @@ entry:
%tmp = alloca { i64, i64 }, align 8 ; <{ i64, i64 }*> [#uses=2]
store { i64, i64 } %0, { i64, i64 }* %tmp
%1 = bitcast { i64, i64 }* %tmp to %struct.anon* ; <%struct.anon*> [#uses=1]
- %2 = load %struct.anon* %1, align 8 ; <%struct.anon> [#uses=1]
+ %2 = load %struct.anon, %struct.anon* %1, align 8 ; <%struct.anon> [#uses=1]
%tmp3 = extractvalue %struct.anon %2, 0
ret i32 %tmp3
}
diff --git a/llvm/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll b/llvm/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll
index 218d3d57da1..d1cc4244ccf 100644
--- a/llvm/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll
+++ b/llvm/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll
@@ -30,16 +30,16 @@ entry:
store %struct.int16x8x2_t* %dst, %struct.int16x8x2_t** %dst_addr
%2 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %__ax, i32 0, i32 0
%3 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %tmp_addr, i32 0, i32 0
- %4 = load <8 x i16>* %3, align 16
+ %4 = load <8 x i16>, <8 x i16>* %3, align 16
store <8 x i16> %4, <8 x i16>* %2, align 16
%5 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %__bx, i32 0, i32 0
%6 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %tmp_addr, i32 0, i32 0
- %7 = load <8 x i16>* %6, align 16
+ %7 = load <8 x i16>, <8 x i16>* %6, align 16
store <8 x i16> %7, <8 x i16>* %5, align 16
%8 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %__ax, i32 0, i32 0
- %9 = load <8 x i16>* %8, align 16
+ %9 = load <8 x i16>, <8 x i16>* %8, align 16
%10 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %__bx, i32 0, i32 0
- %11 = load <8 x i16>* %10, align 16
+ %11 = load <8 x i16>, <8 x i16>* %10, align 16
%12 = getelementptr inbounds %union..0anon, %union..0anon* %__rv, i32 0, i32 0
%13 = bitcast %struct.int16x8x2_t* %12 to %struct.__neon_int16x8x2_t*
%14 = shufflevector <8 x i16> %9, <8 x i16> %11, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
@@ -55,7 +55,7 @@ entry:
%tmp21 = bitcast %struct.int16x8x2_t* %tmp2 to i8*
%21 = bitcast %struct.int16x8x2_t* %0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp21, i8* %21, i32 32, i32 16, i1 false)
- %22 = load %struct.int16x8x2_t** %dst_addr, align 4
+ %22 = load %struct.int16x8x2_t*, %struct.int16x8x2_t** %dst_addr, align 4
%23 = bitcast %struct.int16x8x2_t* %22 to i8*
%tmp22 = bitcast %struct.int16x8x2_t* %tmp2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %23, i8* %tmp22, i32 32, i32 16, i1 false)
diff --git a/llvm/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll b/llvm/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll
index 1f5a2d8da88..dee27f8e306 100644
--- a/llvm/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll
+++ b/llvm/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll
@@ -21,7 +21,7 @@ entry:
%1 = getelementptr inbounds %struct.Point_3, %struct.Point_3* %tmpcast, i64 0, i32 0
%base.i.i.i = getelementptr inbounds %struct.PointC3, %struct.PointC3* %1, i64 0, i32 0
%arrayidx.i.i.i.i = getelementptr inbounds %struct.array, %struct.array* %base.i.i.i, i64 0, i32 0, i64 0
- %tmp5.i.i = load float* %arrayidx.i.i.i.i, align 4
+ %tmp5.i.i = load float, float* %arrayidx.i.i.i.i, align 4
ret void
}
@@ -35,7 +35,7 @@ entry:
%tmpcast = bitcast {<2 x float>, float}* %ref.tmp2 to float*
%0 = getelementptr {<2 x float>, float}, {<2 x float>, float}* %ref.tmp2, i64 0, i32 0
store <2 x float> zeroinitializer, <2 x float>* %0, align 16
- %tmp5.i.i = load float* %tmpcast, align 4
+ %tmp5.i.i = load float, float* %tmpcast, align 4
ret void
}
@@ -54,8 +54,8 @@ entry:
%0 = getelementptr {<2 x float>, float}, {<2 x float>, float}* %ref.tmp2, i64 0, i32 0
store <2 x float> zeroinitializer, <2 x float>* %0, align 16
store float 1.0, float* %tmpcast2, align 4
- %r1 = load float* %tmpcast, align 4
- %r2 = load float* %tmpcast2, align 4
+ %r1 = load float, float* %tmpcast, align 4
+ %r2 = load float, float* %tmpcast2, align 4
%r = fadd float %r1, %r2
ret float %r
}
@@ -70,6 +70,6 @@ entry:
store { <2 x float>, <2 x float> } {<2 x float> <float 0.0, float 1.0>, <2 x float> <float 2.0, float 3.0>}, { <2 x float>, <2 x float> }* %ai, align 8
%tmpcast = bitcast { <2 x float>, <2 x float> }* %ai to [4 x float]*
%arrayidx = getelementptr inbounds [4 x float], [4 x float]* %tmpcast, i64 0, i64 3
- %f = load float* %arrayidx, align 4
+ %f = load float, float* %arrayidx, align 4
ret float %f
}
diff --git a/llvm/test/Transforms/ScalarRepl/2011-06-17-VectorPartialMemset.ll b/llvm/test/Transforms/ScalarRepl/2011-06-17-VectorPartialMemset.ll
index 9e531638ab3..af6d1f36fae 100644
--- a/llvm/test/Transforms/ScalarRepl/2011-06-17-VectorPartialMemset.ll
+++ b/llvm/test/Transforms/ScalarRepl/2011-06-17-VectorPartialMemset.ll
@@ -14,7 +14,7 @@ entry:
%a = alloca <4 x float>, align 16
%p = bitcast <4 x float>* %a to i8*
call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 3, i32 16, i1 false)
- %vec = load <4 x float>* %a, align 8
+ %vec = load <4 x float>, <4 x float>* %a, align 8
%val = extractelement <4 x float> %vec, i32 0
ret float %val
}
diff --git a/llvm/test/Transforms/ScalarRepl/2011-09-22-PHISpeculateInvoke.ll b/llvm/test/Transforms/ScalarRepl/2011-09-22-PHISpeculateInvoke.ll
index 5f4d0fc7e1c..9a24662f411 100644
--- a/llvm/test/Transforms/ScalarRepl/2011-09-22-PHISpeculateInvoke.ll
+++ b/llvm/test/Transforms/ScalarRepl/2011-09-22-PHISpeculateInvoke.ll
@@ -25,7 +25,7 @@ else: ; preds = %2
join: ; preds = %then, %else
%storemerge.in = phi i32* [ %retptr2, %else ], [ %retptr1, %then ]
- %storemerge = load i32* %storemerge.in
+ %storemerge = load i32, i32* %storemerge.in
%x3 = call i32 @extern_fn2(i32 %storemerge)
ret void
diff --git a/llvm/test/Transforms/ScalarRepl/2011-11-11-EmptyStruct.ll b/llvm/test/Transforms/ScalarRepl/2011-11-11-EmptyStruct.ll
index af8d55c72e2..51d1d146a90 100644
--- a/llvm/test/Transforms/ScalarRepl/2011-11-11-EmptyStruct.ll
+++ b/llvm/test/Transforms/ScalarRepl/2011-11-11-EmptyStruct.ll
@@ -19,7 +19,7 @@ entry:
%1 = bitcast %struct.S* %ret to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 8, i32 8, i1 false)
%2 = bitcast %struct.S* %retval to double*
- %3 = load double* %2, align 1
+ %3 = load double, double* %2, align 1
ret double %3
}
diff --git a/llvm/test/Transforms/ScalarRepl/AggregatePromote.ll b/llvm/test/Transforms/ScalarRepl/AggregatePromote.ll
index a3386f65224..f6dfdf55346 100644
--- a/llvm/test/Transforms/ScalarRepl/AggregatePromote.ll
+++ b/llvm/test/Transforms/ScalarRepl/AggregatePromote.ll
@@ -10,7 +10,7 @@ define i64 @test1(i64 %X) {
%B = bitcast i64* %A to i32* ; <i32*> [#uses=1]
%C = bitcast i32* %B to i8* ; <i8*> [#uses=1]
store i8 0, i8* %C
- %Y = load i64* %A ; <i64> [#uses=1]
+ %Y = load i64, i64* %A ; <i64> [#uses=1]
ret i64 %Y
}
@@ -21,7 +21,7 @@ define i8 @test2(i64 %X) {
%tmp.1 = getelementptr i32, i32* %tmp.0, i32 1 ; <i32*> [#uses=1]
%tmp.2 = bitcast i32* %tmp.1 to i8* ; <i8*> [#uses=1]
%tmp.3 = getelementptr i8, i8* %tmp.2, i32 3 ; <i8*> [#uses=1]
- %tmp.2.upgrd.1 = load i8* %tmp.3 ; <i8> [#uses=1]
+ %tmp.2.upgrd.1 = load i8, i8* %tmp.3 ; <i8> [#uses=1]
ret i8 %tmp.2.upgrd.1
}
@@ -31,9 +31,9 @@ define i16 @crafty(i64 %X) {
store i64 %X, i64* %tmp.0
%tmp.3 = bitcast { i64 }* %a to [4 x i16]* ; <[4 x i16]*> [#uses=2]
%tmp.4 = getelementptr [4 x i16], [4 x i16]* %tmp.3, i32 0, i32 3 ; <i16*> [#uses=1]
- %tmp.5 = load i16* %tmp.4 ; <i16> [#uses=1]
+ %tmp.5 = load i16, i16* %tmp.4 ; <i16> [#uses=1]
%tmp.8 = getelementptr [4 x i16], [4 x i16]* %tmp.3, i32 0, i32 2 ; <i16*> [#uses=1]
- %tmp.9 = load i16* %tmp.8 ; <i16> [#uses=1]
+ %tmp.9 = load i16, i16* %tmp.8 ; <i16> [#uses=1]
%tmp.10 = or i16 %tmp.9, %tmp.5 ; <i16> [#uses=1]
ret i16 %tmp.10
}
@@ -43,9 +43,9 @@ define i16 @crafty2(i64 %X) {
store i64 %X, i64* %a
%tmp.3 = bitcast i64* %a to [4 x i16]* ; <[4 x i16]*> [#uses=2]
%tmp.4 = getelementptr [4 x i16], [4 x i16]* %tmp.3, i32 0, i32 3 ; <i16*> [#uses=1]
- %tmp.5 = load i16* %tmp.4 ; <i16> [#uses=1]
+ %tmp.5 = load i16, i16* %tmp.4 ; <i16> [#uses=1]
%tmp.8 = getelementptr [4 x i16], [4 x i16]* %tmp.3, i32 0, i32 2 ; <i16*> [#uses=1]
- %tmp.9 = load i16* %tmp.8 ; <i16> [#uses=1]
+ %tmp.9 = load i16, i16* %tmp.8 ; <i16> [#uses=1]
%tmp.10 = or i16 %tmp.9, %tmp.5 ; <i16> [#uses=1]
ret i16 %tmp.10
}
diff --git a/llvm/test/Transforms/ScalarRepl/DifferingTypes.ll b/llvm/test/Transforms/ScalarRepl/DifferingTypes.ll
index 933c47f7987..3860f6cd757 100644
--- a/llvm/test/Transforms/ScalarRepl/DifferingTypes.ll
+++ b/llvm/test/Transforms/ScalarRepl/DifferingTypes.ll
@@ -10,7 +10,7 @@ define i32 @testfunc(i32 %i, i8 %j) {
store i32 %i, i32* %I
%P = bitcast i32* %I to i8* ; <i8*> [#uses=1]
store i8 %j, i8* %P
- %t = load i32* %I ; <i32> [#uses=1]
+ %t = load i32, i32* %I ; <i32> [#uses=1]
ret i32 %t
}
diff --git a/llvm/test/Transforms/ScalarRepl/address-space.ll b/llvm/test/Transforms/ScalarRepl/address-space.ll
index d6e3b74c214..b8b90efefc3 100644
--- a/llvm/test/Transforms/ScalarRepl/address-space.ll
+++ b/llvm/test/Transforms/ScalarRepl/address-space.ll
@@ -7,7 +7,7 @@ target triple = "x86_64-apple-darwin10"
%struct.anon = type { [1 x float] }
; CHECK-LABEL: define void @Test(
-; CHECK: load float addrspace(2)*
+; CHECK: load float, float addrspace(2)*
; CHECK-NEXT: fsub float
; CHECK: store float {{.*}}, float addrspace(2)*
define void @Test(%struct.anon addrspace(2)* %pPtr) nounwind {
@@ -19,7 +19,7 @@ entry:
call void @llvm.memcpy.p0i8.p2i8.i64(i8* %tmp1, i8 addrspace(2)* %tmp2, i64 4, i32 4, i1 false)
%tmp3 = getelementptr inbounds %struct.anon, %struct.anon* %s, i32 0, i32 0 ; <[1 x float]*> [#uses=1]
%arrayidx4 = getelementptr inbounds [1 x float], [1 x float]* %tmp3, i32 0, i64 0 ; <float*> [#uses=2]
- %tmp5 = load float* %arrayidx4 ; <float> [#uses=1]
+ %tmp5 = load float, float* %arrayidx4 ; <float> [#uses=1]
%sub = fsub float %tmp5, 5.000000e+00 ; <float> [#uses=1]
store float %sub, float* %arrayidx4
%arrayidx7 = getelementptr inbounds %struct.anon, %struct.anon addrspace(2)* %pPtr, i64 0 ; <%struct.anon addrspace(2)*> [#uses=1]
diff --git a/llvm/test/Transforms/ScalarRepl/arraytest.ll b/llvm/test/Transforms/ScalarRepl/arraytest.ll
index d95d5a13270..486e725fa6a 100644
--- a/llvm/test/Transforms/ScalarRepl/arraytest.ll
+++ b/llvm/test/Transforms/ScalarRepl/arraytest.ll
@@ -5,7 +5,7 @@ define i32 @test() {
%X = alloca [4 x i32] ; <[4 x i32]*> [#uses=1]
%Y = getelementptr [4 x i32], [4 x i32]* %X, i64 0, i64 0 ; <i32*> [#uses=2]
store i32 0, i32* %Y
- %Z = load i32* %Y ; <i32> [#uses=1]
+ %Z = load i32, i32* %Y ; <i32> [#uses=1]
ret i32 %Z
}
diff --git a/llvm/test/Transforms/ScalarRepl/badarray.ll b/llvm/test/Transforms/ScalarRepl/badarray.ll
index a0017c472c0..6f5bc95b43b 100644
--- a/llvm/test/Transforms/ScalarRepl/badarray.ll
+++ b/llvm/test/Transforms/ScalarRepl/badarray.ll
@@ -12,7 +12,7 @@ define i32 @test1() {
%X = alloca [4 x i32]
%Y = getelementptr [4 x i32], [4 x i32]* %X, i64 0, i64 6 ; <i32*> [#uses=2]
store i32 0, i32* %Y
- %Z = load i32* %Y ; <i32> [#uses=1]
+ %Z = load i32, i32* %Y ; <i32> [#uses=1]
ret i32 %Z
}
@@ -24,7 +24,7 @@ entry:
; CHECK-NOT: = alloca
%yx2.i = alloca float, align 4 ; <float*> [#uses=1]
%yx26.i = bitcast float* %yx2.i to i64* ; <i64*> [#uses=1]
- %0 = load i64* %yx26.i, align 8 ; <i64> [#uses=0]
+ %0 = load i64, i64* %yx26.i, align 8 ; <i64> [#uses=0]
unreachable
}
diff --git a/llvm/test/Transforms/ScalarRepl/basictest.ll b/llvm/test/Transforms/ScalarRepl/basictest.ll
index 7f5d235898f..35d4d3ba86f 100644
--- a/llvm/test/Transforms/ScalarRepl/basictest.ll
+++ b/llvm/test/Transforms/ScalarRepl/basictest.ll
@@ -5,7 +5,7 @@ define i32 @test1() {
%X = alloca { i32, float } ; <{ i32, float }*> [#uses=1]
%Y = getelementptr { i32, float }, { i32, float }* %X, i64 0, i32 0 ; <i32*> [#uses=2]
store i32 0, i32* %Y
- %Z = load i32* %Y ; <i32> [#uses=1]
+ %Z = load i32, i32* %Y ; <i32> [#uses=1]
ret i32 %Z
; CHECK-LABEL: @test1(
; CHECK-NOT: alloca
@@ -21,7 +21,7 @@ define i64 @test2(i64 %X) {
br label %L2
L2:
- %Z = load i64* %B ; <i32> [#uses=1]
+ %Z = load i64, i64* %B ; <i32> [#uses=1]
ret i64 %Z
; CHECK-LABEL: @test2(
; CHECK-NOT: alloca
diff --git a/llvm/test/Transforms/ScalarRepl/bitfield-sroa.ll b/llvm/test/Transforms/ScalarRepl/bitfield-sroa.ll
index 07b522b7564..52986b0a49d 100644
--- a/llvm/test/Transforms/ScalarRepl/bitfield-sroa.ll
+++ b/llvm/test/Transforms/ScalarRepl/bitfield-sroa.ll
@@ -9,9 +9,9 @@ define i8 @foo(i64 %A) {
store i64 %A, i64* %tmp59172, align 8
%C = getelementptr %t, %t* %ALL, i32 0, i32 0, i32 1
%D = bitcast i16* %C to i32*
- %E = load i32* %D, align 4
+ %E = load i32, i32* %D, align 4
%F = bitcast %t* %ALL to i8*
- %G = load i8* %F, align 8
+ %G = load i8, i8* %F, align 8
ret i8 %G
}
diff --git a/llvm/test/Transforms/ScalarRepl/copy-aggregate.ll b/llvm/test/Transforms/ScalarRepl/copy-aggregate.ll
index b464947cc3f..97977dbf11f 100644
--- a/llvm/test/Transforms/ScalarRepl/copy-aggregate.ll
+++ b/llvm/test/Transforms/ScalarRepl/copy-aggregate.ll
@@ -12,8 +12,8 @@ define i32 @test1(i64 %V) nounwind {
%A = getelementptr {{i32,i32}}, {{i32,i32}}* %X, i32 0, i32 0, i32 0
%B = getelementptr {{i32,i32}}, {{i32,i32}}* %X, i32 0, i32 0, i32 1
- %a = load i32* %A
- %b = load i32* %B
+ %a = load i32, i32* %A
+ %b = load i32, i32* %B
%c = add i32 %a, %b
ret i32 %c
}
@@ -28,8 +28,8 @@ define float @test2(i128 %V) nounwind {
%A = getelementptr {[4 x float]}, {[4 x float]}* %X, i32 0, i32 0, i32 0
%B = getelementptr {[4 x float]}, {[4 x float]}* %X, i32 0, i32 0, i32 3
- %a = load float* %A
- %b = load float* %B
+ %a = load float, float* %A
+ %b = load float, float* %B
%c = fadd float %a, %b
ret float %c
}
@@ -46,7 +46,7 @@ define i64 @test3(i32 %a, i32 %b) nounwind {
store i32 %b, i32* %B
%Y = bitcast {{i32,i32}}* %X to i64*
- %Z = load i64* %Y
+ %Z = load i64, i64* %Y
ret i64 %Z
}
@@ -61,7 +61,7 @@ define i128 @test4(float %a, float %b) nounwind {
store float %b, float* %B
%Y = bitcast {[4 x float]}* %X to i128*
- %V = load i128* %Y
+ %V = load i128, i128* %Y
ret i128 %V
}
diff --git a/llvm/test/Transforms/ScalarRepl/crash.ll b/llvm/test/Transforms/ScalarRepl/crash.ll
index f0a50f85417..72e9f090fb6 100644
--- a/llvm/test/Transforms/ScalarRepl/crash.ll
+++ b/llvm/test/Transforms/ScalarRepl/crash.ll
@@ -11,7 +11,7 @@ entry:
unreachable
for.cond: ; preds = %for.cond
- %tmp1.i = load i32** %l_72, align 8
+ %tmp1.i = load i32*, i32** %l_72, align 8
store i32* %tmp1.i, i32** %l_72, align 8
br label %for.cond
@@ -30,7 +30,7 @@ define i32 @test3() {
%X = alloca { [4 x i32] } ; <{ [4 x i32] }*> [#uses=1]
%Y = getelementptr { [4 x i32] }, { [4 x i32] }* %X, i64 0, i32 0, i64 2 ; <i32*> [#uses=2]
store i32 4, i32* %Y
- %Z = load i32* %Y ; <i32> [#uses=1]
+ %Z = load i32, i32* %Y ; <i32> [#uses=1]
ret i32 %Z
}
@@ -102,11 +102,11 @@ bb9131: ; preds = %bb1365
ret void
bb9875: ; preds = %bb1365
%source_ptr9884 = bitcast i8** %source_ptr to i8** ; <i8**> [#uses=1]
- %tmp9885 = load i8** %source_ptr9884 ; <i8*> [#uses=0]
+ %tmp9885 = load i8*, i8** %source_ptr9884 ; <i8*> [#uses=0]
ret void
bb10249: ; preds = %bb1365
%source_ptr10257 = bitcast i8** %source_ptr to i16** ; <i16**> [#uses=1]
- %tmp10258 = load i16** %source_ptr10257 ; <i16*> [#uses=0]
+ %tmp10258 = load i16*, i16** %source_ptr10257 ; <i16*> [#uses=0]
ret void
cond_next10377: ; preds = %bb1365
ret void
@@ -125,9 +125,9 @@ entry:
%this_addr.i = alloca %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"* ; <%"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"**> [#uses=3]
%tmp = alloca %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>", align 4 ; <%"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"*> [#uses=1]
store %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"* %tmp, %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"** %this_addr.i
- %tmp.i = load %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"** %this_addr.i ; <%"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"*> [#uses=1]
+ %tmp.i = load %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"*, %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"** %this_addr.i ; <%"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"*> [#uses=1]
%tmp.i.upgrd.1 = bitcast %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"* %tmp.i to %"struct.__gnu_cxx::bitmap_allocator<char>"* ; <%"struct.__gnu_cxx::bitmap_allocator<char>"*> [#uses=0]
- %tmp1.i = load %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"** %this_addr.i ; <%"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"*> [#uses=1]
+ %tmp1.i = load %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"*, %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"** %this_addr.i ; <%"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"*> [#uses=1]
%tmp.i.upgrd.2 = getelementptr %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>", %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"* %tmp1.i, i32 0, i32 0 ; <%"struct.__gnu_cxx::bitmap_allocator<char>::_Alloc_block"**> [#uses=0]
unreachable
}
@@ -161,7 +161,7 @@ cond_next: ; preds = %cond_true
br i1 false, label %cond_next34, label %cond_next79
cond_next34: ; preds = %cond_next
- %i.2.reload22 = load i32* null ; <i32> [#uses=1]
+ %i.2.reload22 = load i32, i32* null ; <i32> [#uses=1]
%tmp51 = getelementptr %struct.aal_spanbucket_t, %struct.aal_spanbucket_t* %SB, i32 0, i32 2, i32 0, i32 0, i32 %i.2.reload22, i32 1
; <i16*> [#uses=0]
ret void
@@ -221,7 +221,7 @@ entry:
%storetmp.i = bitcast %struct.singlebool* %a to i1* ; <i1*> [#uses=1]
store i1 true, i1* %storetmp.i
%tmp = getelementptr %struct.singlebool, %struct.singlebool* %a, i64 0, i32 0 ; <i8*> [#uses=1]
- %tmp1 = load i8* %tmp ; <i8> [#uses=1]
+ %tmp1 = load i8, i8* %tmp ; <i8> [#uses=1]
ret i8 %tmp1
}
diff --git a/llvm/test/Transforms/ScalarRepl/debuginfo-preserved.ll b/llvm/test/Transforms/ScalarRepl/debuginfo-preserved.ll
index b0c459e21b1..bb83185c4a2 100644
--- a/llvm/test/Transforms/ScalarRepl/debuginfo-preserved.ll
+++ b/llvm/test/Transforms/ScalarRepl/debuginfo-preserved.ll
@@ -21,18 +21,18 @@ entry:
store i32 %b, i32* %b.addr, align 4
call void @llvm.dbg.declare(metadata i32* %b.addr, metadata !8, metadata !{}), !dbg !9
call void @llvm.dbg.declare(metadata i32* %c, metadata !10, metadata !{}), !dbg !12
- %tmp = load i32* %a.addr, align 4, !dbg !13
+ %tmp = load i32, i32* %a.addr, align 4, !dbg !13
store i32 %tmp, i32* %c, align 4, !dbg !13
- %tmp1 = load i32* %a.addr, align 4, !dbg !14
- %tmp2 = load i32* %b.addr, align 4, !dbg !14
+ %tmp1 = load i32, i32* %a.addr, align 4, !dbg !14
+ %tmp2 = load i32, i32* %b.addr, align 4, !dbg !14
%add = add nsw i32 %tmp1, %tmp2, !dbg !14
store i32 %add, i32* %a.addr, align 4, !dbg !14
- %tmp3 = load i32* %c, align 4, !dbg !15
- %tmp4 = load i32* %b.addr, align 4, !dbg !15
+ %tmp3 = load i32, i32* %c, align 4, !dbg !15
+ %tmp4 = load i32, i32* %b.addr, align 4, !dbg !15
%sub = sub nsw i32 %tmp3, %tmp4, !dbg !15
store i32 %sub, i32* %b.addr, align 4, !dbg !15
- %tmp5 = load i32* %a.addr, align 4, !dbg !16
- %tmp6 = load i32* %b.addr, align 4, !dbg !16
+ %tmp5 = load i32, i32* %a.addr, align 4, !dbg !16
+ %tmp6 = load i32, i32* %b.addr, align 4, !dbg !16
%add7 = add nsw i32 %tmp5, %tmp6, !dbg !16
ret i32 %add7, !dbg !16
}
diff --git a/llvm/test/Transforms/ScalarRepl/inline-vector.ll b/llvm/test/Transforms/ScalarRepl/inline-vector.ll
index d118be0705d..5d856c6d8fb 100644
--- a/llvm/test/Transforms/ScalarRepl/inline-vector.ll
+++ b/llvm/test/Transforms/ScalarRepl/inline-vector.ll
@@ -30,20 +30,20 @@ for.body: ; preds = %for.cond
%tmp3 = bitcast %struct.Vector4* %vector to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* %tmp3, i32 16, i32 16, i1 false)
%0 = bitcast %struct.Vector4* %agg.tmp to [2 x i64]*
- %1 = load [2 x i64]* %0, align 16
+ %1 = load [2 x i64], [2 x i64]* %0, align 16
%tmp2.i = extractvalue [2 x i64] %1, 0
%tmp3.i = zext i64 %tmp2.i to i128
%tmp10.i = bitcast i128 %tmp3.i to <4 x float>
%sub.i.i = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %tmp10.i
%2 = bitcast %struct.Vector4* %vector to <4 x float>*
store <4 x float> %sub.i.i, <4 x float>* %2, align 16
- %tmp4 = load i32* %i, align 4
+ %tmp4 = load i32, i32* %i, align 4
%inc = add nsw i32 %tmp4, 1
br label %for.cond
for.end: ; preds = %for.cond
%x = getelementptr inbounds %struct.Vector4, %struct.Vector4* %vector, i32 0, i32 0
- %tmp5 = load float* %x, align 16
+ %tmp5 = load float, float* %x, align 16
%conv = fpext float %tmp5 to double
%call = call i32 (...)* @printf(double %conv) nounwind
ret void
diff --git a/llvm/test/Transforms/ScalarRepl/lifetime.ll b/llvm/test/Transforms/ScalarRepl/lifetime.ll
index b6627dd1f18..c0ddfb58bbd 100644
--- a/llvm/test/Transforms/ScalarRepl/lifetime.ll
+++ b/llvm/test/Transforms/ScalarRepl/lifetime.ll
@@ -30,7 +30,7 @@ define void @test2() {
%B = bitcast i32* %A2 to i8*
store i32 0, i32* %A2
call void @llvm.lifetime.start(i64 -1, i8* %B)
- %C = load i32* %A2
+ %C = load i32, i32* %A2
ret void
; CHECK: ret void
}
@@ -44,7 +44,7 @@ define void @test3() {
%B = bitcast i32* %A2 to i8*
store i32 0, i32* %A2
call void @llvm.lifetime.start(i64 6, i8* %B)
- %C = load i32* %A2
+ %C = load i32, i32* %A2
ret void
; CHECK-NEXT: ret void
}
@@ -58,7 +58,7 @@ define void @test4() {
%B = bitcast i32* %A2 to i8*
store i32 0, i32* %A2
call void @llvm.lifetime.start(i64 1, i8* %B)
- %C = load i32* %A2
+ %C = load i32, i32* %A2
ret void
; CHECK-NEXT: ret void
}
@@ -90,7 +90,7 @@ define void @test5() {
; CHECK: llvm.lifetime{{.*}}i64 1
; CHECK: llvm.lifetime{{.*}}i64 1
; CHECK: llvm.lifetime{{.*}}i64 1
- %C = load i8* %A2
+ %C = load i8, i8* %A2
ret void
}
diff --git a/llvm/test/Transforms/ScalarRepl/load-store-aggregate.ll b/llvm/test/Transforms/ScalarRepl/load-store-aggregate.ll
index f414234db25..88299f3679c 100644
--- a/llvm/test/Transforms/ScalarRepl/load-store-aggregate.ll
+++ b/llvm/test/Transforms/ScalarRepl/load-store-aggregate.ll
@@ -11,11 +11,11 @@ target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
define i32 @test(%struct.foo* %P) {
entry:
%L = alloca %struct.foo, align 8 ; <%struct.foo*> [#uses=2]
- %V = load %struct.foo* %P
+ %V = load %struct.foo, %struct.foo* %P
store %struct.foo %V, %struct.foo* %L
%tmp4 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp5 = load i32* %tmp4 ; <i32> [#uses=1]
+ %tmp5 = load i32, i32* %tmp4 ; <i32> [#uses=1]
ret i32 %tmp5
}
@@ -26,6 +26,6 @@ entry:
store i32 %A, i32* %L.0
%L.1 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 1
store i32 %B, i32* %L.1
- %V = load %struct.foo* %L
+ %V = load %struct.foo, %struct.foo* %L
ret %struct.foo %V
}
diff --git a/llvm/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll b/llvm/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll
index 7ab466679a2..e8088c121fb 100644
--- a/llvm/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll
+++ b/llvm/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll
@@ -15,7 +15,7 @@ entry:
%L2 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 0 ; <i8*> [#uses=2]
%tmp13 = getelementptr %struct.foo, %struct.foo* %P, i32 0, i32 0 ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i32( i8* %L2, i8* %tmp13, i32 2, i32 1, i1 false)
- %tmp5 = load i8* %L2 ; <i8> [#uses=1]
+ %tmp5 = load i8, i8* %L2 ; <i8> [#uses=1]
%tmp56 = sext i8 %tmp5 to i32 ; <i32> [#uses=1]
ret i32 %tmp56
}
diff --git a/llvm/test/Transforms/ScalarRepl/memset-aggregate.ll b/llvm/test/Transforms/ScalarRepl/memset-aggregate.ll
index 981ace61c1b..98e2dddefe7 100644
--- a/llvm/test/Transforms/ScalarRepl/memset-aggregate.ll
+++ b/llvm/test/Transforms/ScalarRepl/memset-aggregate.ll
@@ -16,7 +16,7 @@ entry:
%tmp13 = bitcast %struct.foo* %P to i8* ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %L2, i8* %tmp13, i32 8, i32 4, i1 false)
%tmp4 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp5 = load i32* %tmp4 ; <i32> [#uses=1]
+ %tmp5 = load i32, i32* %tmp4 ; <i32> [#uses=1]
ret i32 %tmp5
}
@@ -27,7 +27,7 @@ entry:
%L12 = bitcast [4 x %struct.foo]* %L to i8* ; <i8*> [#uses=1]
call void @llvm.memset.p0i8.i32(i8* %L12, i8 0, i32 32, i32 16, i1 false)
%tmp4 = getelementptr [4 x %struct.foo], [4 x %struct.foo]* %L, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp5 = load i32* %tmp4 ; <i32> [#uses=1]
+ %tmp5 = load i32, i32* %tmp4 ; <i32> [#uses=1]
ret i32 %tmp5
}
@@ -42,7 +42,7 @@ entry:
%tmp4 = getelementptr %struct.bar, %struct.bar* %B, i32 0, i32 2 ; <double*> [#uses=1]
store double 1.000000e+01, double* %tmp4
%tmp6 = getelementptr %struct.bar, %struct.bar* %B, i32 0, i32 0, i32 1 ; <i32*> [#uses=1]
- %tmp7 = load i32* %tmp6 ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* %tmp6 ; <i32> [#uses=1]
ret i32 %tmp7
}
@@ -58,7 +58,7 @@ entry:
%2 = bitcast i32* %1 to i8* ; <i8*> [#uses=1]
call void @llvm.memset.p0i8.i32(i8* %2, i8 2, i32 12, i32 4, i1 false)
%3 = getelementptr %struct.f, %struct.f* %A, i32 0, i32 2 ; <i32*> [#uses=1]
- %4 = load i32* %3, align 8 ; <i32> [#uses=1]
+ %4 = load i32, i32* %3, align 8 ; <i32> [#uses=1]
%retval12 = trunc i32 %4 to i16 ; <i16> [#uses=1]
ret i16 %retval12
}
diff --git a/llvm/test/Transforms/ScalarRepl/nonzero-first-index.ll b/llvm/test/Transforms/ScalarRepl/nonzero-first-index.ll
index 5de6eca66ae..da757b08d45 100644
--- a/llvm/test/Transforms/ScalarRepl/nonzero-first-index.ll
+++ b/llvm/test/Transforms/ScalarRepl/nonzero-first-index.ll
@@ -14,7 +14,7 @@ define i32 @test1() {
%A = alloca %nested
%B = getelementptr %nested, %nested* %A, i32 0, i32 1, i32 0
%C = getelementptr i32, i32* %B, i32 2
- %D = load i32* %C
+ %D = load i32, i32* %C
ret i32 %D
}
@@ -25,7 +25,7 @@ define i32 @test2() {
%A = alloca %nested
%B = getelementptr %nested, %nested* %A, i32 0, i32 1, i32 0
%C = getelementptr i32, i32* %B, i32 4
- %D = load i32* %C
+ %D = load i32, i32* %C
ret i32 %D
}
@@ -37,7 +37,7 @@ define i32 @test3() {
%A = alloca %nested
%B = bitcast %nested* %A to i32*
%C = getelementptr i32, i32* %B, i32 2
- %D = load i32* %C
+ %D = load i32, i32* %C
ret i32 %D
}
@@ -48,6 +48,6 @@ define i32 @test4() {
%A = alloca %nested
%B = bitcast %nested* %A to i32*
%C = getelementptr i32, i32* %B, i32 -1
- %D = load i32* %C
+ %D = load i32, i32* %C
ret i32 %D
}
diff --git a/llvm/test/Transforms/ScalarRepl/not-a-vector.ll b/llvm/test/Transforms/ScalarRepl/not-a-vector.ll
index 8a4f2d42392..04c1f93617b 100644
--- a/llvm/test/Transforms/ScalarRepl/not-a-vector.ll
+++ b/llvm/test/Transforms/ScalarRepl/not-a-vector.ll
@@ -13,7 +13,7 @@ define double @test(double %A, double %B) {
store double %B, double* %E
%F = getelementptr double, double* %C, i32 4
- %G = load double* %F
+ %G = load double, double* %F
ret double %G
}
diff --git a/llvm/test/Transforms/ScalarRepl/phi-cycle.ll b/llvm/test/Transforms/ScalarRepl/phi-cycle.ll
index 13b7d4ed29f..dd451b73cbe 100644
--- a/llvm/test/Transforms/ScalarRepl/phi-cycle.ll
+++ b/llvm/test/Transforms/ScalarRepl/phi-cycle.ll
@@ -69,7 +69,7 @@ while.cond.backedge.i: ; preds = %if.end.i, %while.bo
; CHECK-NOT: load
; CHECK: %call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([6 x i8]* @.str, i64 0, i64 0), i32 %tmp) [[NUW:#[0-9]+]]
func.exit: ; preds = %while.body.i.func.exit_crit_edge, %while.cond.i.func.exit_crit_edge
- %tmp3 = load i32* %x.i, align 4
+ %tmp3 = load i32, i32* %x.i, align 4
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([6 x i8]* @.str, i64 0, i64 0), i32 %tmp3) nounwind
ret i32 0
}
diff --git a/llvm/test/Transforms/ScalarRepl/phi-select.ll b/llvm/test/Transforms/ScalarRepl/phi-select.ll
index a17a5b126d1..a6c7135a492 100644
--- a/llvm/test/Transforms/ScalarRepl/phi-select.ll
+++ b/llvm/test/Transforms/ScalarRepl/phi-select.ll
@@ -20,7 +20,7 @@ entry:
%2 = icmp eq i32 %x, 0 ; <i1> [#uses=1]
%p.0 = select i1 %2, %struct.X* %b, %struct.X* %a ; <%struct.X*> [#uses=1]
%3 = getelementptr inbounds %struct.X, %struct.X* %p.0, i64 0, i32 0 ; <i32*> [#uses=1]
- %4 = load i32* %3, align 8 ; <i32> [#uses=1]
+ %4 = load i32, i32* %3, align 8 ; <i32> [#uses=1]
ret i32 %4
}
@@ -39,7 +39,7 @@ T:
br label %F
F:
%X = phi i32* [%B, %entry], [%C, %T]
- %Q = load i32* %X
+ %Q = load i32, i32* %X
ret i32 %Q
}
@@ -55,7 +55,7 @@ define i32 @test3(i1 %c) {
store i32 2, i32* %C
%X = select i1 %c, i32* %B, i32* %C
- %Q = load i32* %X
+ %Q = load i32, i32* %X
ret i32 %Q
}
@@ -72,7 +72,7 @@ entry:
%X = select i1 %c, i32* %B, i32* %C
%Y = bitcast i32* %X to i64*
- %Q = load i64* %Y
+ %Q = load i64, i64* %Y
ret i64 %Q
}
@@ -91,7 +91,7 @@ entry:
%p.0 = select i1 false, i32* %b, i32* %P
store i32 123, i32* %p.0
- %r = load i32* %b, align 8
+ %r = load i32, i32* %b, align 8
ret i32 %r
; CHECK-LABEL: @test5(
@@ -105,7 +105,7 @@ define i32 @test6(i32 %x, i1 %c) nounwind readnone ssp {
store i32 1, i32* %a, align 8
store i32 2, i32* %b, align 8
%p.0 = select i1 %c, i32* %b, i32* %a
- %r = load i32* %p.0, align 8
+ %r = load i32, i32* %p.0, align 8
ret i32 %r
; CHECK-LABEL: @test6(
; CHECK-NEXT: %r = select i1 %c, i32 2, i32 1
@@ -122,7 +122,7 @@ define i32 @test7(i32 %x, i1 %c) nounwind readnone ssp {
store i32 0, i32* %a
- %r = load i32* %p.0, align 8
+ %r = load i32, i32* %p.0, align 8
ret i32 %r
; CHECK-LABEL: @test7(
; CHECK-NOT: alloca i32
@@ -148,6 +148,6 @@ T:
br label %Cont
Cont:
%p.0 = phi i32* [%b, %entry],[%a, %T]
- %r = load i32* %p.0, align 8
+ %r = load i32, i32* %p.0, align 8
ret i32 %r
}
diff --git a/llvm/test/Transforms/ScalarRepl/phinodepromote.ll b/llvm/test/Transforms/ScalarRepl/phinodepromote.ll
index 9c6e8b92f3e..c3af62485db 100644
--- a/llvm/test/Transforms/ScalarRepl/phinodepromote.ll
+++ b/llvm/test/Transforms/ScalarRepl/phinodepromote.ll
@@ -21,14 +21,14 @@ entry:
%mem_tmp.1 = alloca i32 ; <i32*> [#uses=3]
store i32 0, i32* %mem_tmp.0
store i32 1, i32* %mem_tmp.1
- %tmp.1.i = load i32* %mem_tmp.1 ; <i32> [#uses=1]
- %tmp.3.i = load i32* %mem_tmp.0 ; <i32> [#uses=1]
+ %tmp.1.i = load i32, i32* %mem_tmp.1 ; <i32> [#uses=1]
+ %tmp.3.i = load i32, i32* %mem_tmp.0 ; <i32> [#uses=1]
%tmp.4.i = icmp sle i32 %tmp.1.i, %tmp.3.i ; <i1> [#uses=1]
br i1 %tmp.4.i, label %cond_true.i, label %cond_continue.i
cond_true.i: ; preds = %entry
br label %cond_continue.i
cond_continue.i: ; preds = %cond_true.i, %entry
%mem_tmp.i.0 = phi i32* [ %mem_tmp.1, %cond_true.i ], [ %mem_tmp.0, %entry ] ; <i32*> [#uses=1]
- %tmp.3 = load i32* %mem_tmp.i.0 ; <i32> [#uses=1]
+ %tmp.3 = load i32, i32* %mem_tmp.i.0 ; <i32> [#uses=1]
ret i32 %tmp.3
}
diff --git a/llvm/test/Transforms/ScalarRepl/select_promote.ll b/llvm/test/Transforms/ScalarRepl/select_promote.ll
index d6b2b75327c..b4ef8c46fa0 100644
--- a/llvm/test/Transforms/ScalarRepl/select_promote.ll
+++ b/llvm/test/Transforms/ScalarRepl/select_promote.ll
@@ -8,11 +8,11 @@ define i32 @main() {
%mem_tmp.1 = alloca i32 ; <i32*> [#uses=3]
store i32 0, i32* %mem_tmp.0
store i32 1, i32* %mem_tmp.1
- %tmp.1.i = load i32* %mem_tmp.1 ; <i32> [#uses=1]
- %tmp.3.i = load i32* %mem_tmp.0 ; <i32> [#uses=1]
+ %tmp.1.i = load i32, i32* %mem_tmp.1 ; <i32> [#uses=1]
+ %tmp.3.i = load i32, i32* %mem_tmp.0 ; <i32> [#uses=1]
%tmp.4.i = icmp sle i32 %tmp.1.i, %tmp.3.i ; <i1> [#uses=1]
%mem_tmp.i.0 = select i1 %tmp.4.i, i32* %mem_tmp.1, i32* %mem_tmp.0 ; <i32*> [#uses=1]
- %tmp.3 = load i32* %mem_tmp.i.0 ; <i32> [#uses=1]
+ %tmp.3 = load i32, i32* %mem_tmp.i.0 ; <i32> [#uses=1]
ret i32 %tmp.3
}
diff --git a/llvm/test/Transforms/ScalarRepl/sroa-fca.ll b/llvm/test/Transforms/ScalarRepl/sroa-fca.ll
index 2df3b9be1e4..c6e7c23ab06 100644
--- a/llvm/test/Transforms/ScalarRepl/sroa-fca.ll
+++ b/llvm/test/Transforms/ScalarRepl/sroa-fca.ll
@@ -6,7 +6,7 @@ define i64 @test({i32, i32} %A) {
%Y = bitcast i64* %X to {i32,i32}*
store {i32,i32} %A, {i32,i32}* %Y
- %Q = load i64* %X
+ %Q = load i64, i64* %X
ret i64 %Q
}
@@ -15,7 +15,7 @@ define {i32,i32} @test2(i64 %A) {
%Y = bitcast i64* %X to {i32,i32}*
store i64 %A, i64* %X
- %Q = load {i32,i32}* %Y
+ %Q = load {i32,i32}, {i32,i32}* %Y
ret {i32,i32} %Q
}
diff --git a/llvm/test/Transforms/ScalarRepl/sroa_two.ll b/llvm/test/Transforms/ScalarRepl/sroa_two.ll
index 0ede5d2194b..f2285ef21a4 100644
--- a/llvm/test/Transforms/ScalarRepl/sroa_two.ll
+++ b/llvm/test/Transforms/ScalarRepl/sroa_two.ll
@@ -7,7 +7,7 @@ define i32 @test(i32 %X) {
%tmp.1 = getelementptr [2 x i32], [2 x i32]* %Arr, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 2, i32* %tmp.1
%tmp.3 = getelementptr [2 x i32], [2 x i32]* %Arr, i32 0, i32 %X ; <i32*> [#uses=1]
- %tmp.4 = load i32* %tmp.3 ; <i32> [#uses=1]
+ %tmp.4 = load i32, i32* %tmp.3 ; <i32> [#uses=1]
ret i32 %tmp.4
}
diff --git a/llvm/test/Transforms/ScalarRepl/union-fp-int.ll b/llvm/test/Transforms/ScalarRepl/union-fp-int.ll
index 6a499188718..fa64b60685f 100644
--- a/llvm/test/Transforms/ScalarRepl/union-fp-int.ll
+++ b/llvm/test/Transforms/ScalarRepl/union-fp-int.ll
@@ -8,7 +8,7 @@ define i32 @test(float %X) {
%X_addr = alloca float ; <float*> [#uses=2]
store float %X, float* %X_addr
%X_addr.upgrd.1 = bitcast float* %X_addr to i32* ; <i32*> [#uses=1]
- %tmp = load i32* %X_addr.upgrd.1 ; <i32> [#uses=1]
+ %tmp = load i32, i32* %X_addr.upgrd.1 ; <i32> [#uses=1]
ret i32 %tmp
}
diff --git a/llvm/test/Transforms/ScalarRepl/union-packed.ll b/llvm/test/Transforms/ScalarRepl/union-packed.ll
index b272abfc3d5..741de76d581 100644
--- a/llvm/test/Transforms/ScalarRepl/union-packed.ll
+++ b/llvm/test/Transforms/ScalarRepl/union-packed.ll
@@ -8,7 +8,7 @@ define <4 x i32> @test(<4 x float> %X) {
%X_addr = alloca <4 x float> ; <<4 x float>*> [#uses=2]
store <4 x float> %X, <4 x float>* %X_addr
%X_addr.upgrd.1 = bitcast <4 x float>* %X_addr to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %tmp = load <4 x i32>* %X_addr.upgrd.1 ; <<4 x i32>> [#uses=1]
+ %tmp = load <4 x i32>, <4 x i32>* %X_addr.upgrd.1 ; <<4 x i32>> [#uses=1]
ret <4 x i32> %tmp
}
diff --git a/llvm/test/Transforms/ScalarRepl/union-pointer.ll b/llvm/test/Transforms/ScalarRepl/union-pointer.ll
index 82a2c3b2092..6a5db1c4d6b 100644
--- a/llvm/test/Transforms/ScalarRepl/union-pointer.ll
+++ b/llvm/test/Transforms/ScalarRepl/union-pointer.ll
@@ -14,7 +14,7 @@ define i8* @test(i16* %X) {
%X_addr = alloca i16* ; <i16**> [#uses=2]
store i16* %X, i16** %X_addr
%X_addr.upgrd.1 = bitcast i16** %X_addr to i8** ; <i8**> [#uses=1]
- %tmp = load i8** %X_addr.upgrd.1 ; <i8*> [#uses=1]
+ %tmp = load i8*, i8** %X_addr.upgrd.1 ; <i8*> [#uses=1]
ret i8* %tmp
}
@@ -26,7 +26,7 @@ define i8 addrspace(1)* @test_as1(i16 addrspace(1)* %x) {
%x_addr = alloca i16 addrspace(1)*
store i16 addrspace(1)* %x, i16 addrspace(1)** %x_addr
%x_addr.upgrd.1 = bitcast i16 addrspace(1)** %x_addr to i8 addrspace(1)**
- %tmp = load i8 addrspace(1)** %x_addr.upgrd.1
+ %tmp = load i8 addrspace(1)*, i8 addrspace(1)** %x_addr.upgrd.1
ret i8 addrspace(1)* %tmp
}
@@ -39,7 +39,7 @@ define i8 addrspace(1)* @test_as1_array(i16 addrspace(1)* %x) {
%elem1 = getelementptr [4 x i16 addrspace(1)*], [4 x i16 addrspace(1)*]* %as_ptr_array, i32 0, i32 1
store i16 addrspace(1)* %x, i16 addrspace(1)** %elem1
%elem1.cast = bitcast i16 addrspace(1)** %elem1 to i8 addrspace(1)**
- %tmp = load i8 addrspace(1)** %elem1.cast
+ %tmp = load i8 addrspace(1)*, i8 addrspace(1)** %elem1.cast
ret i8 addrspace(1)* %tmp
}
@@ -56,15 +56,15 @@ define void @test2(i64 %Op.0) {
store i64 %tmp.upgrd.2, i64* %tmp1.upgrd.3
%tmp.upgrd.4 = getelementptr %struct.Val, %struct.Val* %tmp, i32 0, i32 0 ; <i32**> [#uses=1]
%tmp2 = getelementptr %struct.Val, %struct.Val* %tmp1, i32 0, i32 0 ; <i32**> [#uses=1]
- %tmp.upgrd.5 = load i32** %tmp2 ; <i32*> [#uses=1]
+ %tmp.upgrd.5 = load i32*, i32** %tmp2 ; <i32*> [#uses=1]
store i32* %tmp.upgrd.5, i32** %tmp.upgrd.4
%tmp3 = getelementptr %struct.Val, %struct.Val* %tmp, i32 0, i32 1 ; <i32*> [#uses=1]
%tmp4 = getelementptr %struct.Val, %struct.Val* %tmp1, i32 0, i32 1 ; <i32*> [#uses=1]
- %tmp.upgrd.6 = load i32* %tmp4 ; <i32> [#uses=1]
+ %tmp.upgrd.6 = load i32, i32* %tmp4 ; <i32> [#uses=1]
store i32 %tmp.upgrd.6, i32* %tmp3
%tmp7 = bitcast %struct.Val* %tmp to { i64 }* ; <{ i64 }*> [#uses=1]
%tmp8 = getelementptr { i64 }, { i64 }* %tmp7, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp9 = load i64* %tmp8 ; <i64> [#uses=1]
+ %tmp9 = load i64, i64* %tmp8 ; <i64> [#uses=1]
call void @_Z3bar3ValS_( i64 %Op.0, i64 %tmp9 )
ret void
}
diff --git a/llvm/test/Transforms/ScalarRepl/vector_memcpy.ll b/llvm/test/Transforms/ScalarRepl/vector_memcpy.ll
index dfba9e2c807..031ad5e38ca 100644
--- a/llvm/test/Transforms/ScalarRepl/vector_memcpy.ll
+++ b/llvm/test/Transforms/ScalarRepl/vector_memcpy.ll
@@ -10,7 +10,7 @@ define <16 x float> @foo(<16 x float> %A) nounwind {
%s = bitcast <16 x float>* %tmp to i8*
%s2 = bitcast <16 x float>* %tmp2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %s2, i8* %s, i64 64, i32 16, i1 false)
- %R = load <16 x float>* %tmp2
+ %R = load <16 x float>, <16 x float>* %tmp2
ret <16 x float> %R
}
@@ -20,7 +20,7 @@ define <16 x float> @foo2(<16 x float> %A) nounwind {
%s2 = bitcast <16 x float>* %tmp2 to i8*
call void @llvm.memset.p0i8.i64(i8* %s2, i8 0, i64 64, i32 16, i1 false)
- %R = load <16 x float>* %tmp2
+ %R = load <16 x float>, <16 x float>* %tmp2
ret <16 x float> %R
}
diff --git a/llvm/test/Transforms/ScalarRepl/vector_promote.ll b/llvm/test/Transforms/ScalarRepl/vector_promote.ll
index 3ee57b32bae..3c2377fc0f2 100644
--- a/llvm/test/Transforms/ScalarRepl/vector_promote.ll
+++ b/llvm/test/Transforms/ScalarRepl/vector_promote.ll
@@ -5,18 +5,18 @@ target triple = "x86_64-apple-darwin10.0.0"
define void @test1(<4 x float>* %F, float %f) {
entry:
%G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=3]
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
+ %tmp = load <4 x float>, <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
%G.upgrd.1 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 0 ; <float*> [#uses=1]
store float %f, float* %G.upgrd.1
- %tmp4 = load <4 x float>* %G ; <<4 x float>> [#uses=2]
+ %tmp4 = load <4 x float>, <4 x float>* %G ; <<4 x float>> [#uses=2]
%tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
store <4 x float> %tmp6, <4 x float>* %F
ret void
; CHECK-LABEL: @test1(
; CHECK-NOT: alloca
-; CHECK: %tmp = load <4 x float>* %F
+; CHECK: %tmp = load <4 x float>, <4 x float>* %F
; CHECK: fadd <4 x float> %tmp, %tmp
; CHECK-NEXT: insertelement <4 x float> %tmp3, float %f, i32 0
}
@@ -24,18 +24,18 @@ entry:
define void @test2(<4 x float>* %F, float %f) {
entry:
%G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=3]
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
+ %tmp = load <4 x float>, <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
%tmp.upgrd.2 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
store float %f, float* %tmp.upgrd.2
- %tmp4 = load <4 x float>* %G ; <<4 x float>> [#uses=2]
+ %tmp4 = load <4 x float>, <4 x float>* %G ; <<4 x float>> [#uses=2]
%tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
store <4 x float> %tmp6, <4 x float>* %F
ret void
; CHECK-LABEL: @test2(
; CHECK-NOT: alloca
-; CHECK: %tmp = load <4 x float>* %F
+; CHECK: %tmp = load <4 x float>, <4 x float>* %F
; CHECK: fadd <4 x float> %tmp, %tmp
; CHECK-NEXT: insertelement <4 x float> %tmp3, float %f, i32 2
}
@@ -43,16 +43,16 @@ entry:
define void @test3(<4 x float>* %F, float* %f) {
entry:
%G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=2]
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
+ %tmp = load <4 x float>, <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
%tmp.upgrd.3 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
- %tmp.upgrd.4 = load float* %tmp.upgrd.3 ; <float> [#uses=1]
+ %tmp.upgrd.4 = load float, float* %tmp.upgrd.3 ; <float> [#uses=1]
store float %tmp.upgrd.4, float* %f
ret void
; CHECK-LABEL: @test3(
; CHECK-NOT: alloca
-; CHECK: %tmp = load <4 x float>* %F
+; CHECK: %tmp = load <4 x float>, <4 x float>* %F
; CHECK: fadd <4 x float> %tmp, %tmp
; CHECK-NEXT: extractelement <4 x float> %tmp3, i32 2
}
@@ -60,16 +60,16 @@ entry:
define void @test4(<4 x float>* %F, float* %f) {
entry:
%G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=2]
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
+ %tmp = load <4 x float>, <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
%G.upgrd.5 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 0 ; <float*> [#uses=1]
- %tmp.upgrd.6 = load float* %G.upgrd.5 ; <float> [#uses=1]
+ %tmp.upgrd.6 = load float, float* %G.upgrd.5 ; <float> [#uses=1]
store float %tmp.upgrd.6, float* %f
ret void
; CHECK-LABEL: @test4(
; CHECK-NOT: alloca
-; CHECK: %tmp = load <4 x float>* %F
+; CHECK: %tmp = load <4 x float>, <4 x float>* %F
; CHECK: fadd <4 x float> %tmp, %tmp
; CHECK-NEXT: extractelement <4 x float> %tmp3, i32 0
}
@@ -79,7 +79,7 @@ define i32 @test5(float %X) { ;; should turn into bitcast.
%X1 = getelementptr [4 x float], [4 x float]* %X_addr, i32 0, i32 2
store float %X, float* %X1
%a = bitcast float* %X1 to i32*
- %tmp = load i32* %a
+ %tmp = load i32, i32* %a
ret i32 %tmp
; CHECK-LABEL: @test5(
; CHECK-NEXT: bitcast float %X to i32
@@ -90,7 +90,7 @@ define i64 @test6(<2 x float> %X) {
%X_addr = alloca <2 x float>
store <2 x float> %X, <2 x float>* %X_addr
%P = bitcast <2 x float>* %X_addr to i64*
- %tmp = load i64* %P
+ %tmp = load i64, i64* %P
ret i64 %tmp
; CHECK-LABEL: @test6(
; CHECK: bitcast <2 x float> %X to i64
@@ -121,14 +121,14 @@ entry:
%__a = alloca <1 x i64>, align 8
%tmp = alloca <1 x i64>, align 8
store <1 x i64> %a, <1 x i64>* %a.addr, align 8
- %0 = load <1 x i64>* %a.addr, align 8
+ %0 = load <1 x i64>, <1 x i64>* %a.addr, align 8
store <1 x i64> %0, <1 x i64>* %__a, align 8
- %1 = load <1 x i64>* %__a, align 8
+ %1 = load <1 x i64>, <1 x i64>* %__a, align 8
%2 = bitcast <1 x i64> %1 to <8 x i8>
%3 = bitcast <8 x i8> %2 to <1 x i64>
%vshl_n = shl <1 x i64> %3, <i64 4>
store <1 x i64> %vshl_n, <1 x i64>* %tmp
- %4 = load <1 x i64>* %tmp
+ %4 = load <1 x i64>, <1 x i64>* %tmp
ret <1 x i64> %4
; CHECK-LABEL: @test8(
; CHECK-NOT: alloca
diff --git a/llvm/test/Transforms/ScalarRepl/vectors-with-mismatched-elements.ll b/llvm/test/Transforms/ScalarRepl/vectors-with-mismatched-elements.ll
index c3fbdf5f863..154883122df 100644
--- a/llvm/test/Transforms/ScalarRepl/vectors-with-mismatched-elements.ll
+++ b/llvm/test/Transforms/ScalarRepl/vectors-with-mismatched-elements.ll
@@ -18,10 +18,10 @@ define <2 x i64> @foo() nounwind {
entry:
%retval = alloca <3 x i32>, align 16
%z = alloca <4 x i32>, align 16
- %tmp = load <4 x i32>* %z
+ %tmp = load <4 x i32>, <4 x i32>* %z
%tmp1 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
store <3 x i32> %tmp1, <3 x i32>* %retval
%0 = bitcast <3 x i32>* %retval to <2 x i64>*
- %1 = load <2 x i64>* %0, align 1
+ %1 = load <2 x i64>, <2 x i64>* %0, align 1
ret <2 x i64> %1
}
diff --git a/llvm/test/Transforms/ScalarRepl/volatile.ll b/llvm/test/Transforms/ScalarRepl/volatile.ll
index 370a6d31b50..2a600b3b279 100644
--- a/llvm/test/Transforms/ScalarRepl/volatile.ll
+++ b/llvm/test/Transforms/ScalarRepl/volatile.ll
@@ -7,7 +7,7 @@ define i32 @voltest(i32 %T) {
; CHECK: store volatile
%C = getelementptr {i32,i32}, {i32,i32}* %A, i32 0, i32 1
- %X = load volatile i32* %C
+ %X = load volatile i32, i32* %C
; CHECK: load volatile
ret i32 %X
}
diff --git a/llvm/test/Transforms/Scalarizer/basic.ll b/llvm/test/Transforms/Scalarizer/basic.ll
index 32143460e12..150eb7d43c4 100644
--- a/llvm/test/Transforms/Scalarizer/basic.ll
+++ b/llvm/test/Transforms/Scalarizer/basic.ll
@@ -21,13 +21,13 @@ define void @f1(<4 x float> %init, <4 x float> *%base, i32 %count) {
; CHECK: %nexti = sub i32 %i, 1
; CHECK: %ptr = getelementptr <4 x float>, <4 x float>* %base, i32 %i
; CHECK: %ptr.i0 = bitcast <4 x float>* %ptr to float*
-; CHECK: %val.i0 = load float* %ptr.i0, align 16
+; CHECK: %val.i0 = load float, float* %ptr.i0, align 16
; CHECK: %ptr.i1 = getelementptr float, float* %ptr.i0, i32 1
-; CHECK: %val.i1 = load float* %ptr.i1, align 4
+; CHECK: %val.i1 = load float, float* %ptr.i1, align 4
; CHECK: %ptr.i2 = getelementptr float, float* %ptr.i0, i32 2
-; CHECK: %val.i2 = load float* %ptr.i2, align 8
+; CHECK: %val.i2 = load float, float* %ptr.i2, align 8
; CHECK: %ptr.i3 = getelementptr float, float* %ptr.i0, i32 3
-; CHECK: %val.i3 = load float* %ptr.i3, align 4
+; CHECK: %val.i3 = load float, float* %ptr.i3, align 4
; CHECK: %add.i0 = fadd float %val.i0, %val.i2
; CHECK: %add.i1 = fadd float %val.i1, %val.i3
; CHECK: %add.i2 = fadd float %acc.i0, %acc.i2
@@ -66,7 +66,7 @@ loop:
%nexti = sub i32 %i, 1
%ptr = getelementptr <4 x float>, <4 x float> *%base, i32 %i
- %val = load <4 x float> *%ptr
+ %val = load <4 x float> , <4 x float> *%ptr
%dval = bitcast <4 x float> %val to <2 x double>
%dacc = bitcast <4 x float> %acc to <2 x double>
%shuffle1 = shufflevector <2 x double> %dval, <2 x double> %dacc,
@@ -107,13 +107,13 @@ define void @f2(<4 x i32> %init, <4 x i8> *%base, i32 %count) {
; CHECK: %nexti = sub i32 %i, 1
; CHECK: %ptr = getelementptr <4 x i8>, <4 x i8>* %base, i32 %i
; CHECK: %ptr.i0 = bitcast <4 x i8>* %ptr to i8*
-; CHECK: %val.i0 = load i8* %ptr.i0, align 4
+; CHECK: %val.i0 = load i8, i8* %ptr.i0, align 4
; CHECK: %ptr.i1 = getelementptr i8, i8* %ptr.i0, i32 1
-; CHECK: %val.i1 = load i8* %ptr.i1, align 1
+; CHECK: %val.i1 = load i8, i8* %ptr.i1, align 1
; CHECK: %ptr.i2 = getelementptr i8, i8* %ptr.i0, i32 2
-; CHECK: %val.i2 = load i8* %ptr.i2, align 2
+; CHECK: %val.i2 = load i8, i8* %ptr.i2, align 2
; CHECK: %ptr.i3 = getelementptr i8, i8* %ptr.i0, i32 3
-; CHECK: %val.i3 = load i8* %ptr.i3, align 1
+; CHECK: %val.i3 = load i8, i8* %ptr.i3, align 1
; CHECK: %ext.i0 = sext i8 %val.i0 to i32
; CHECK: %ext.i1 = sext i8 %val.i1 to i32
; CHECK: %ext.i2 = sext i8 %val.i2 to i32
@@ -151,7 +151,7 @@ loop:
%nexti = sub i32 %i, 1
%ptr = getelementptr <4 x i8>, <4 x i8> *%base, i32 %i
- %val = load <4 x i8> *%ptr
+ %val = load <4 x i8> , <4 x i8> *%ptr
%ext = sext <4 x i8> %val to <4 x i32>
%add = add <4 x i32> %ext, %acc
%cmp = icmp slt <4 x i32> %add, <i32 -10, i32 -11, i32 -12, i32 -13>
@@ -172,16 +172,16 @@ exit:
; Check that !tbaa information is preserved.
define void @f3(<4 x i32> *%src, <4 x i32> *%dst) {
; CHECK-LABEL: @f3(
-; CHECK: %val.i0 = load i32* %src.i0, align 16, !tbaa ![[TAG:[0-9]*]]
-; CHECK: %val.i1 = load i32* %src.i1, align 4, !tbaa ![[TAG]]
-; CHECK: %val.i2 = load i32* %src.i2, align 8, !tbaa ![[TAG]]
-; CHECK: %val.i3 = load i32* %src.i3, align 4, !tbaa ![[TAG]]
+; CHECK: %val.i0 = load i32, i32* %src.i0, align 16, !tbaa ![[TAG:[0-9]*]]
+; CHECK: %val.i1 = load i32, i32* %src.i1, align 4, !tbaa ![[TAG]]
+; CHECK: %val.i2 = load i32, i32* %src.i2, align 8, !tbaa ![[TAG]]
+; CHECK: %val.i3 = load i32, i32* %src.i3, align 4, !tbaa ![[TAG]]
; CHECK: store i32 %add.i0, i32* %dst.i0, align 16, !tbaa ![[TAG:[0-9]*]]
; CHECK: store i32 %add.i1, i32* %dst.i1, align 4, !tbaa ![[TAG]]
; CHECK: store i32 %add.i2, i32* %dst.i2, align 8, !tbaa ![[TAG]]
; CHECK: store i32 %add.i3, i32* %dst.i3, align 4, !tbaa ![[TAG]]
; CHECK: ret void
- %val = load <4 x i32> *%src, !tbaa !1
+ %val = load <4 x i32> , <4 x i32> *%src, !tbaa !1
%add = add <4 x i32> %val, %val
store <4 x i32> %add, <4 x i32> *%dst, !tbaa !2
ret void
@@ -190,16 +190,16 @@ define void @f3(<4 x i32> *%src, <4 x i32> *%dst) {
; Check that !tbaa.struct information is preserved.
define void @f4(<4 x i32> *%src, <4 x i32> *%dst) {
; CHECK-LABEL: @f4(
-; CHECK: %val.i0 = load i32* %src.i0, align 16, !tbaa.struct ![[TAG:[0-9]*]]
-; CHECK: %val.i1 = load i32* %src.i1, align 4, !tbaa.struct ![[TAG]]
-; CHECK: %val.i2 = load i32* %src.i2, align 8, !tbaa.struct ![[TAG]]
-; CHECK: %val.i3 = load i32* %src.i3, align 4, !tbaa.struct ![[TAG]]
+; CHECK: %val.i0 = load i32, i32* %src.i0, align 16, !tbaa.struct ![[TAG:[0-9]*]]
+; CHECK: %val.i1 = load i32, i32* %src.i1, align 4, !tbaa.struct ![[TAG]]
+; CHECK: %val.i2 = load i32, i32* %src.i2, align 8, !tbaa.struct ![[TAG]]
+; CHECK: %val.i3 = load i32, i32* %src.i3, align 4, !tbaa.struct ![[TAG]]
; CHECK: store i32 %add.i0, i32* %dst.i0, align 16, !tbaa.struct ![[TAG]]
; CHECK: store i32 %add.i1, i32* %dst.i1, align 4, !tbaa.struct ![[TAG]]
; CHECK: store i32 %add.i2, i32* %dst.i2, align 8, !tbaa.struct ![[TAG]]
; CHECK: store i32 %add.i3, i32* %dst.i3, align 4, !tbaa.struct ![[TAG]]
; CHECK: ret void
- %val = load <4 x i32> *%src, !tbaa.struct !5
+ %val = load <4 x i32> , <4 x i32> *%src, !tbaa.struct !5
%add = add <4 x i32> %val, %val
store <4 x i32> %add, <4 x i32> *%dst, !tbaa.struct !5
ret void
@@ -208,10 +208,10 @@ define void @f4(<4 x i32> *%src, <4 x i32> *%dst) {
; Check that llvm.mem.parallel_loop_access information is preserved.
define void @f5(i32 %count, <4 x i32> *%src, <4 x i32> *%dst) {
; CHECK-LABEL: @f5(
-; CHECK: %val.i0 = load i32* %this_src.i0, align 16, !llvm.mem.parallel_loop_access ![[TAG:[0-9]*]]
-; CHECK: %val.i1 = load i32* %this_src.i1, align 4, !llvm.mem.parallel_loop_access ![[TAG]]
-; CHECK: %val.i2 = load i32* %this_src.i2, align 8, !llvm.mem.parallel_loop_access ![[TAG]]
-; CHECK: %val.i3 = load i32* %this_src.i3, align 4, !llvm.mem.parallel_loop_access ![[TAG]]
+; CHECK: %val.i0 = load i32, i32* %this_src.i0, align 16, !llvm.mem.parallel_loop_access ![[TAG:[0-9]*]]
+; CHECK: %val.i1 = load i32, i32* %this_src.i1, align 4, !llvm.mem.parallel_loop_access ![[TAG]]
+; CHECK: %val.i2 = load i32, i32* %this_src.i2, align 8, !llvm.mem.parallel_loop_access ![[TAG]]
+; CHECK: %val.i3 = load i32, i32* %this_src.i3, align 4, !llvm.mem.parallel_loop_access ![[TAG]]
; CHECK: store i32 %add.i0, i32* %this_dst.i0, align 16, !llvm.mem.parallel_loop_access ![[TAG]]
; CHECK: store i32 %add.i1, i32* %this_dst.i1, align 4, !llvm.mem.parallel_loop_access ![[TAG]]
; CHECK: store i32 %add.i2, i32* %this_dst.i2, align 8, !llvm.mem.parallel_loop_access ![[TAG]]
@@ -224,7 +224,7 @@ loop:
%index = phi i32 [ 0, %entry ], [ %next_index, %loop ]
%this_src = getelementptr <4 x i32>, <4 x i32> *%src, i32 %index
%this_dst = getelementptr <4 x i32>, <4 x i32> *%dst, i32 %index
- %val = load <4 x i32> *%this_src, !llvm.mem.parallel_loop_access !3
+ %val = load <4 x i32> , <4 x i32> *%this_src, !llvm.mem.parallel_loop_access !3
%add = add <4 x i32> %val, %val
store <4 x i32> %add, <4 x i32> *%this_dst, !llvm.mem.parallel_loop_access !3
%next_index = add i32 %index, -1
@@ -261,7 +261,7 @@ define void @f7(<4 x i32> *%src, <4 x i32> *%dst) {
; CHECK-LABEL: @f7(
; CHECK-NOT: !foo
; CHECK: ret void
- %val = load <4 x i32> *%src, !foo !5
+ %val = load <4 x i32> , <4 x i32> *%src, !foo !5
%add = add <4 x i32> %val, %val
store <4 x i32> %add, <4 x i32> *%dst, !foo !5
ret void
@@ -305,19 +305,19 @@ define void @f9(<4 x float> *%dest, <4 x float> *%src) {
; CHECK: %dest.i2 = getelementptr float, float* %dest.i0, i32 2
; CHECK: %dest.i3 = getelementptr float, float* %dest.i0, i32 3
; CHECK: %src.i0 = bitcast <4 x float>* %src to float*
-; CHECK: %val.i0 = load float* %src.i0, align 4
+; CHECK: %val.i0 = load float, float* %src.i0, align 4
; CHECK: %src.i1 = getelementptr float, float* %src.i0, i32 1
-; CHECK: %val.i1 = load float* %src.i1, align 4
+; CHECK: %val.i1 = load float, float* %src.i1, align 4
; CHECK: %src.i2 = getelementptr float, float* %src.i0, i32 2
-; CHECK: %val.i2 = load float* %src.i2, align 4
+; CHECK: %val.i2 = load float, float* %src.i2, align 4
; CHECK: %src.i3 = getelementptr float, float* %src.i0, i32 3
-; CHECK: %val.i3 = load float* %src.i3, align 4
+; CHECK: %val.i3 = load float, float* %src.i3, align 4
; CHECK: store float %val.i0, float* %dest.i0, align 8
; CHECK: store float %val.i1, float* %dest.i1, align 4
; CHECK: store float %val.i2, float* %dest.i2, align 8
; CHECK: store float %val.i3, float* %dest.i3, align 4
; CHECK: ret void
- %val = load <4 x float> *%src, align 4
+ %val = load <4 x float> , <4 x float> *%src, align 4
store <4 x float> %val, <4 x float> *%dest, align 8
ret void
}
@@ -330,19 +330,19 @@ define void @f10(<4 x float> *%dest, <4 x float> *%src) {
; CHECK: %dest.i2 = getelementptr float, float* %dest.i0, i32 2
; CHECK: %dest.i3 = getelementptr float, float* %dest.i0, i32 3
; CHECK: %src.i0 = bitcast <4 x float>* %src to float*
-; CHECK: %val.i0 = load float* %src.i0, align 1
+; CHECK: %val.i0 = load float, float* %src.i0, align 1
; CHECK: %src.i1 = getelementptr float, float* %src.i0, i32 1
-; CHECK: %val.i1 = load float* %src.i1, align 1
+; CHECK: %val.i1 = load float, float* %src.i1, align 1
; CHECK: %src.i2 = getelementptr float, float* %src.i0, i32 2
-; CHECK: %val.i2 = load float* %src.i2, align 1
+; CHECK: %val.i2 = load float, float* %src.i2, align 1
; CHECK: %src.i3 = getelementptr float, float* %src.i0, i32 3
-; CHECK: %val.i3 = load float* %src.i3, align 1
+; CHECK: %val.i3 = load float, float* %src.i3, align 1
; CHECK: store float %val.i0, float* %dest.i0, align 2
; CHECK: store float %val.i1, float* %dest.i1, align 2
; CHECK: store float %val.i2, float* %dest.i2, align 2
; CHECK: store float %val.i3, float* %dest.i3, align 2
; CHECK: ret void
- %val = load <4 x float> *%src, align 1
+ %val = load <4 x float> , <4 x float> *%src, align 1
store <4 x float> %val, <4 x float> *%dest, align 2
ret void
}
@@ -350,13 +350,13 @@ define void @f10(<4 x float> *%dest, <4 x float> *%src) {
; Test that sub-byte loads aren't scalarized.
define void @f11(<32 x i1> *%dest, <32 x i1> *%src0) {
; CHECK: @f11(
-; CHECK: %val0 = load <32 x i1>* %src0
-; CHECK: %val1 = load <32 x i1>* %src1
+; CHECK: %val0 = load <32 x i1>, <32 x i1>* %src0
+; CHECK: %val1 = load <32 x i1>, <32 x i1>* %src1
; CHECK: store <32 x i1> %and, <32 x i1>* %dest
; CHECK: ret void
%src1 = getelementptr <32 x i1>, <32 x i1> *%src0, i32 1
- %val0 = load <32 x i1> *%src0
- %val1 = load <32 x i1> *%src1
+ %val0 = load <32 x i1> , <32 x i1> *%src0
+ %val1 = load <32 x i1> , <32 x i1> *%src1
%and = and <32 x i1> %val0, %val1
store <32 x i1> %and, <32 x i1> *%dest
ret void
@@ -375,7 +375,7 @@ define void @f12(<4 x i32> *%dest, <4 x i32> *%src, i32 %index) {
; CHECK-DAG: %val2.i2 = shl i32 3, %val1.i2
; CHECK-DAG: %val2.i3 = shl i32 4, %val1.i3
; CHECK: ret void
- %val0 = load <4 x i32> *%src
+ %val0 = load <4 x i32> , <4 x i32> *%src
%val1 = insertelement <4 x i32> %val0, i32 1, i32 %index
%val2 = shl <4 x i32> <i32 1, i32 2, i32 3, i32 4>, %val1
store <4 x i32> %val2, <4 x i32> *%dest
diff --git a/llvm/test/Transforms/Scalarizer/dbginfo.ll b/llvm/test/Transforms/Scalarizer/dbginfo.ll
index 892947d5733..0460309c834 100644
--- a/llvm/test/Transforms/Scalarizer/dbginfo.ll
+++ b/llvm/test/Transforms/Scalarizer/dbginfo.ll
@@ -19,14 +19,14 @@ define void @f1(<4 x i32>* nocapture %a, <4 x i32>* nocapture readonly %b, <4 x
; CHECK: tail call void @llvm.dbg.value(metadata <4 x i32>* %a, i64 0, metadata !{{[0-9]+}}, metadata {{.*}}), !dbg !{{[0-9]+}}
; CHECK: tail call void @llvm.dbg.value(metadata <4 x i32>* %b, i64 0, metadata !{{[0-9]+}}, metadata {{.*}}), !dbg !{{[0-9]+}}
; CHECK: tail call void @llvm.dbg.value(metadata <4 x i32>* %c, i64 0, metadata !{{[0-9]+}}, metadata {{.*}}), !dbg !{{[0-9]+}}
-; CHECK: %bval.i0 = load i32* %b.i0, align 16, !dbg ![[TAG1:[0-9]+]], !tbaa ![[TAG2:[0-9]+]]
-; CHECK: %bval.i1 = load i32* %b.i1, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
-; CHECK: %bval.i2 = load i32* %b.i2, align 8, !dbg ![[TAG1]], !tbaa ![[TAG2]]
-; CHECK: %bval.i3 = load i32* %b.i3, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
-; CHECK: %cval.i0 = load i32* %c.i0, align 16, !dbg ![[TAG1]], !tbaa ![[TAG2]]
-; CHECK: %cval.i1 = load i32* %c.i1, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
-; CHECK: %cval.i2 = load i32* %c.i2, align 8, !dbg ![[TAG1]], !tbaa ![[TAG2]]
-; CHECK: %cval.i3 = load i32* %c.i3, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %bval.i0 = load i32, i32* %b.i0, align 16, !dbg ![[TAG1:[0-9]+]], !tbaa ![[TAG2:[0-9]+]]
+; CHECK: %bval.i1 = load i32, i32* %b.i1, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %bval.i2 = load i32, i32* %b.i2, align 8, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %bval.i3 = load i32, i32* %b.i3, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %cval.i0 = load i32, i32* %c.i0, align 16, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %cval.i1 = load i32, i32* %c.i1, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %cval.i2 = load i32, i32* %c.i2, align 8, !dbg ![[TAG1]], !tbaa ![[TAG2]]
+; CHECK: %cval.i3 = load i32, i32* %c.i3, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
; CHECK: %add.i0 = add i32 %bval.i0, %cval.i0, !dbg ![[TAG1]]
; CHECK: %add.i1 = add i32 %bval.i1, %cval.i1, !dbg ![[TAG1]]
; CHECK: %add.i2 = add i32 %bval.i2, %cval.i2, !dbg ![[TAG1]]
@@ -40,8 +40,8 @@ entry:
tail call void @llvm.dbg.value(metadata <4 x i32>* %a, i64 0, metadata !15, metadata !{}), !dbg !20
tail call void @llvm.dbg.value(metadata <4 x i32>* %b, i64 0, metadata !16, metadata !{}), !dbg !20
tail call void @llvm.dbg.value(metadata <4 x i32>* %c, i64 0, metadata !17, metadata !{}), !dbg !20
- %bval = load <4 x i32>* %b, align 16, !dbg !21, !tbaa !22
- %cval = load <4 x i32>* %c, align 16, !dbg !21, !tbaa !22
+ %bval = load <4 x i32>, <4 x i32>* %b, align 16, !dbg !21, !tbaa !22
+ %cval = load <4 x i32>, <4 x i32>* %c, align 16, !dbg !21, !tbaa !22
%add = add <4 x i32> %bval, %cval, !dbg !21
store <4 x i32> %add, <4 x i32>* %a, align 16, !dbg !21, !tbaa !22
ret void, !dbg !25
diff --git a/llvm/test/Transforms/Scalarizer/no-data-layout.ll b/llvm/test/Transforms/Scalarizer/no-data-layout.ll
index 3eaf669ef9d..c89c7868c57 100644
--- a/llvm/test/Transforms/Scalarizer/no-data-layout.ll
+++ b/llvm/test/Transforms/Scalarizer/no-data-layout.ll
@@ -3,7 +3,7 @@
; Test the handling of loads and stores when no data layout is available.
define void @f1(<4 x float> *%dest, <4 x float> *%src) {
; CHECK: @f1(
-; CHECK: %val = load <4 x float>* %src, align 4
+; CHECK: %val = load <4 x float>, <4 x float>* %src, align 4
; CHECK: %val.i0 = extractelement <4 x float> %val, i32 0
; CHECK: %add.i0 = fadd float %val.i0, %val.i0
; CHECK: %val.i1 = extractelement <4 x float> %val, i32 1
@@ -18,7 +18,7 @@ define void @f1(<4 x float> *%dest, <4 x float> *%src) {
; CHECK: %add = insertelement <4 x float> %add.upto2, float %add.i3, i32 3
; CHECK: store <4 x float> %add, <4 x float>* %dest, align 8
; CHECK: ret void
- %val = load <4 x float> *%src, align 4
+ %val = load <4 x float> , <4 x float> *%src, align 4
%add = fadd <4 x float> %val, %val
store <4 x float> %add, <4 x float> *%dest, align 8
ret void
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
index 58e2d3b1f01..9ee492df94e 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
@@ -23,23 +23,23 @@ define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
%1 = sext i32 %x to i64
%2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
%3 = addrspacecast float addrspace(3)* %2 to float*
- %4 = load float* %3, align 4
+ %4 = load float, float* %3, align 4
%5 = fadd float %4, 0.000000e+00
%6 = add i32 %y, 1
%7 = sext i32 %6 to i64
%8 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
%9 = addrspacecast float addrspace(3)* %8 to float*
- %10 = load float* %9, align 4
+ %10 = load float, float* %9, align 4
%11 = fadd float %5, %10
%12 = add i32 %x, 1
%13 = sext i32 %12 to i64
%14 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
%15 = addrspacecast float addrspace(3)* %14 to float*
- %16 = load float* %15, align 4
+ %16 = load float, float* %15, align 4
%17 = fadd float %11, %16
%18 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
%19 = addrspacecast float addrspace(3)* %18 to float*
- %20 = load float* %19, align 4
+ %20 = load float, float* %19, align 4
%21 = fadd float %17, %20
store float %21, float* %output, align 4
ret void
@@ -68,21 +68,21 @@ define void @sum_of_array2(i32 %x, i32 %y, float* nocapture %output) {
%1 = sext i32 %x to i64
%2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
%3 = addrspacecast float addrspace(3)* %2 to float*
- %4 = load float* %3, align 4
+ %4 = load float, float* %3, align 4
%5 = fadd float %4, 0.000000e+00
%6 = add i64 %0, 1
%7 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
%8 = addrspacecast float addrspace(3)* %7 to float*
- %9 = load float* %8, align 4
+ %9 = load float, float* %8, align 4
%10 = fadd float %5, %9
%11 = add i64 %1, 1
%12 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
%13 = addrspacecast float addrspace(3)* %12 to float*
- %14 = load float* %13, align 4
+ %14 = load float, float* %13, align 4
%15 = fadd float %10, %14
%16 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
%17 = addrspacecast float addrspace(3)* %16 to float*
- %18 = load float* %17, align 4
+ %18 = load float, float* %17, align 4
%19 = fadd float %15, %18
store float %19, float* %output, align 4
ret void
@@ -116,23 +116,23 @@ define void @sum_of_array3(i32 %x, i32 %y, float* nocapture %output) {
%1 = zext i32 %x to i64
%2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
%3 = addrspacecast float addrspace(3)* %2 to float*
- %4 = load float* %3, align 4
+ %4 = load float, float* %3, align 4
%5 = fadd float %4, 0.000000e+00
%6 = add nuw i32 %y, 1
%7 = zext i32 %6 to i64
%8 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
%9 = addrspacecast float addrspace(3)* %8 to float*
- %10 = load float* %9, align 4
+ %10 = load float, float* %9, align 4
%11 = fadd float %5, %10
%12 = add nuw i32 %x, 1
%13 = zext i32 %12 to i64
%14 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
%15 = addrspacecast float addrspace(3)* %14 to float*
- %16 = load float* %15, align 4
+ %16 = load float, float* %15, align 4
%17 = fadd float %11, %16
%18 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
%19 = addrspacecast float addrspace(3)* %18 to float*
- %20 = load float* %19, align 4
+ %20 = load float, float* %19, align 4
%21 = fadd float %17, %20
store float %21, float* %output, align 4
ret void
@@ -164,21 +164,21 @@ define void @sum_of_array4(i32 %x, i32 %y, float* nocapture %output) {
%1 = zext i32 %x to i64
%2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
%3 = addrspacecast float addrspace(3)* %2 to float*
- %4 = load float* %3, align 4
+ %4 = load float, float* %3, align 4
%5 = fadd float %4, 0.000000e+00
%6 = add i64 %0, 1
%7 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
%8 = addrspacecast float addrspace(3)* %7 to float*
- %9 = load float* %8, align 4
+ %9 = load float, float* %8, align 4
%10 = fadd float %5, %9
%11 = add i64 %1, 1
%12 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
%13 = addrspacecast float addrspace(3)* %12 to float*
- %14 = load float* %13, align 4
+ %14 = load float, float* %13, align 4
%15 = fadd float %10, %14
%16 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
%17 = addrspacecast float addrspace(3)* %16 to float*
- %18 = load float* %17, align 4
+ %18 = load float, float* %17, align 4
%19 = fadd float %15, %18
store float %19, float* %output, align 4
ret void
diff --git a/llvm/test/Transforms/SimplifyCFG/2005-06-16-PHICrash.ll b/llvm/test/Transforms/SimplifyCFG/2005-06-16-PHICrash.ll
index 90be6803a5c..8fd1fae3450 100644
--- a/llvm/test/Transforms/SimplifyCFG/2005-06-16-PHICrash.ll
+++ b/llvm/test/Transforms/SimplifyCFG/2005-06-16-PHICrash.ll
@@ -30,7 +30,7 @@ endif.0: ; preds = %loopentry.0
%tmp.14 = sext i8 %tmp.1 to i32 ; <i32> [#uses=1]
%tmp.16 = zext i8 %l_88173906 to i32 ; <i32> [#uses=1]
%tmp.17 = icmp sgt i32 %tmp.14, %tmp.16 ; <i1> [#uses=1]
- %tmp.19 = load i32* @g_59182229 ; <i32> [#uses=2]
+ %tmp.19 = load i32, i32* @g_59182229 ; <i32> [#uses=2]
br i1 %tmp.17, label %cond_true, label %cond_false
cond_true: ; preds = %endif.0
%tmp.20 = icmp ne i32 %tmp.19, 1 ; <i1> [#uses=1]
@@ -53,7 +53,7 @@ loopentry.1: ; preds = %endif.3, %else.2
%tmp.29 = icmp sgt i32 %i.1.1, 99 ; <i1> [#uses=1]
br i1 %tmp.29, label %endif.2, label %no_exit.1
no_exit.1: ; preds = %loopentry.1
- %tmp.30 = load i32* @g_38098584 ; <i32> [#uses=1]
+ %tmp.30 = load i32, i32* @g_38098584 ; <i32> [#uses=1]
%tmp.31 = icmp eq i32 %tmp.30, 0 ; <i1> [#uses=1]
br i1 %tmp.31, label %else.3, label %then.3
then.3: ; preds = %no_exit.1
diff --git a/llvm/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll b/llvm/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll
index 231d11b5bb4..2606e08da9a 100644
--- a/llvm/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll
+++ b/llvm/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll
@@ -36,8 +36,8 @@ entry:
%guess = alloca %struct.anon* ; <%struct.anon**> [#uses=7]
%guess1 = alloca %struct.anon* ; <%struct.anon**> [#uses=7]
%point5 = alloca %struct.anon* ; <%struct.anon**> [#uses=3]
- %tmp = load %struct.anon** %num ; <%struct.anon*> [#uses=1]
- %tmp1 = load %struct.anon** @_zero_ ; <%struct.anon*> [#uses=1]
+ %tmp = load %struct.anon*, %struct.anon** %num ; <%struct.anon*> [#uses=1]
+ %tmp1 = load %struct.anon*, %struct.anon** @_zero_ ; <%struct.anon*> [#uses=1]
%tmp.upgrd.1 = call i32 @bc_compare( %struct.anon* %tmp, %struct.anon* %tmp1 ) ; <i32> [#uses=2]
%tmp.upgrd.2 = icmp slt i32 %tmp.upgrd.1, 0 ; <i1> [#uses=1]
br i1 %tmp.upgrd.2, label %cond_true, label %cond_false
@@ -48,26 +48,26 @@ cond_false: ; preds = %entry
br i1 %tmp5, label %cond_true6, label %cond_next13
cond_true6: ; preds = %cond_false
call void @free_num( %struct.anon** %num )
- %tmp8 = load %struct.anon** @_zero_ ; <%struct.anon*> [#uses=1]
+ %tmp8 = load %struct.anon*, %struct.anon** @_zero_ ; <%struct.anon*> [#uses=1]
%tmp9 = call %struct.anon* @copy_num( %struct.anon* %tmp8 ) ; <%struct.anon*> [#uses=1]
store %struct.anon* %tmp9, %struct.anon** %num
ret i32 1
cond_next13: ; preds = %cond_false
- %tmp15 = load %struct.anon** %num ; <%struct.anon*> [#uses=1]
- %tmp16 = load %struct.anon** @_one_ ; <%struct.anon*> [#uses=1]
+ %tmp15 = load %struct.anon*, %struct.anon** %num ; <%struct.anon*> [#uses=1]
+ %tmp16 = load %struct.anon*, %struct.anon** @_one_ ; <%struct.anon*> [#uses=1]
%tmp17 = call i32 @bc_compare( %struct.anon* %tmp15, %struct.anon* %tmp16 ) ; <i32> [#uses=2]
%tmp19 = icmp eq i32 %tmp17, 0 ; <i1> [#uses=1]
br i1 %tmp19, label %cond_true20, label %cond_next27
cond_true20: ; preds = %cond_next13
call void @free_num( %struct.anon** %num )
- %tmp22 = load %struct.anon** @_one_ ; <%struct.anon*> [#uses=1]
+ %tmp22 = load %struct.anon*, %struct.anon** @_one_ ; <%struct.anon*> [#uses=1]
%tmp23 = call %struct.anon* @copy_num( %struct.anon* %tmp22 ) ; <%struct.anon*> [#uses=1]
store %struct.anon* %tmp23, %struct.anon** %num
ret i32 1
cond_next27: ; preds = %cond_next13
- %tmp29 = load %struct.anon** %num ; <%struct.anon*> [#uses=1]
+ %tmp29 = load %struct.anon*, %struct.anon** %num ; <%struct.anon*> [#uses=1]
%tmp30 = getelementptr %struct.anon, %struct.anon* %tmp29, i32 0, i32 2 ; <i32*> [#uses=1]
- %tmp31 = load i32* %tmp30 ; <i32> [#uses=2]
+ %tmp31 = load i32, i32* %tmp30 ; <i32> [#uses=2]
%tmp33 = icmp sge i32 %tmp31, %scale ; <i1> [#uses=1]
%max = select i1 %tmp33, i32 %tmp31, i32 %scale ; <i32> [#uses=4]
%tmp35 = add i32 %max, 2 ; <i32> [#uses=0]
@@ -80,24 +80,24 @@ cond_next27: ; preds = %cond_next13
%tmp39 = icmp slt i32 %tmp17, 0 ; <i1> [#uses=1]
br i1 %tmp39, label %cond_true40, label %cond_false43
cond_true40: ; preds = %cond_next27
- %tmp41 = load %struct.anon** @_one_ ; <%struct.anon*> [#uses=1]
+ %tmp41 = load %struct.anon*, %struct.anon** @_one_ ; <%struct.anon*> [#uses=1]
%tmp42 = call %struct.anon* @copy_num( %struct.anon* %tmp41 ) ; <%struct.anon*> [#uses=1]
store %struct.anon* %tmp42, %struct.anon** %guess
br label %bb80.outer
cond_false43: ; preds = %cond_next27
call void @int2num( %struct.anon** %guess, i32 10 )
- %tmp45 = load %struct.anon** %num ; <%struct.anon*> [#uses=1]
+ %tmp45 = load %struct.anon*, %struct.anon** %num ; <%struct.anon*> [#uses=1]
%tmp46 = getelementptr %struct.anon, %struct.anon* %tmp45, i32 0, i32 1 ; <i32*> [#uses=1]
- %tmp47 = load i32* %tmp46 ; <i32> [#uses=1]
+ %tmp47 = load i32, i32* %tmp46 ; <i32> [#uses=1]
call void @int2num( %struct.anon** %guess1, i32 %tmp47 )
- %tmp48 = load %struct.anon** %guess1 ; <%struct.anon*> [#uses=1]
- %tmp49 = load %struct.anon** %point5 ; <%struct.anon*> [#uses=1]
+ %tmp48 = load %struct.anon*, %struct.anon** %guess1 ; <%struct.anon*> [#uses=1]
+ %tmp49 = load %struct.anon*, %struct.anon** %point5 ; <%struct.anon*> [#uses=1]
call void @bc_multiply( %struct.anon* %tmp48, %struct.anon* %tmp49, %struct.anon** %guess1, i32 %max )
- %tmp51 = load %struct.anon** %guess1 ; <%struct.anon*> [#uses=1]
+ %tmp51 = load %struct.anon*, %struct.anon** %guess1 ; <%struct.anon*> [#uses=1]
%tmp52 = getelementptr %struct.anon, %struct.anon* %tmp51, i32 0, i32 2 ; <i32*> [#uses=1]
store i32 0, i32* %tmp52
- %tmp53 = load %struct.anon** %guess ; <%struct.anon*> [#uses=1]
- %tmp54 = load %struct.anon** %guess1 ; <%struct.anon*> [#uses=1]
+ %tmp53 = load %struct.anon*, %struct.anon** %guess ; <%struct.anon*> [#uses=1]
+ %tmp54 = load %struct.anon*, %struct.anon** %guess1 ; <%struct.anon*> [#uses=1]
call void @bc_raise( %struct.anon* %tmp53, %struct.anon* %tmp54, %struct.anon** %guess, i32 %max )
br label %bb80.outer
bb80.outer: ; preds = %cond_true83, %cond_false43, %cond_true40
@@ -113,8 +113,8 @@ cond_true83: ; preds = %bb80
; CHECK: bb86
bb86: ; preds = %bb80
call void @free_num( %struct.anon** %num )
- %tmp88 = load %struct.anon** %guess ; <%struct.anon*> [#uses=1]
- %tmp89 = load %struct.anon** @_one_ ; <%struct.anon*> [#uses=1]
+ %tmp88 = load %struct.anon*, %struct.anon** %guess ; <%struct.anon*> [#uses=1]
+ %tmp89 = load %struct.anon*, %struct.anon** @_one_ ; <%struct.anon*> [#uses=1]
%tmp92 = call i32 @bc_divide( %struct.anon* %tmp88, %struct.anon* %tmp89, %struct.anon** %num, i32 %max ) ; <i32> [#uses=0]
call void @free_num( %struct.anon** %guess )
call void @free_num( %struct.anon** %guess1 )
diff --git a/llvm/test/Transforms/SimplifyCFG/2006-08-03-Crash.ll b/llvm/test/Transforms/SimplifyCFG/2006-08-03-Crash.ll
index 8fbca35a8d5..0820e9c9825 100644
--- a/llvm/test/Transforms/SimplifyCFG/2006-08-03-Crash.ll
+++ b/llvm/test/Transforms/SimplifyCFG/2006-08-03-Crash.ll
@@ -34,7 +34,7 @@ target triple = "powerpc-apple-darwin8"
define void @fold_builtin_classify() {
entry:
- %tmp63 = load i32* null ; <i32> [#uses=1]
+ %tmp63 = load i32, i32* null ; <i32> [#uses=1]
switch i32 %tmp63, label %bb276 [
i32 414, label %bb145
i32 417, label %bb
@@ -42,54 +42,54 @@ entry:
bb: ; preds = %entry
ret void
bb145: ; preds = %entry
- %tmp146 = load %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
+ %tmp146 = load %struct.tree_node*, %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
%tmp148 = getelementptr %struct.tree_node, %struct.tree_node* %tmp146, i32 0, i32 0, i32 0, i32 1 ; <%struct.tree_node**> [#uses=1]
- %tmp149 = load %struct.tree_node** %tmp148 ; <%struct.tree_node*> [#uses=1]
+ %tmp149 = load %struct.tree_node*, %struct.tree_node** %tmp148 ; <%struct.tree_node*> [#uses=1]
%tmp150 = bitcast %struct.tree_node* %tmp149 to %struct.tree_type* ; <%struct.tree_type*> [#uses=1]
%tmp151 = getelementptr %struct.tree_type, %struct.tree_type* %tmp150, i32 0, i32 6 ; <i16*> [#uses=1]
%tmp151.upgrd.1 = bitcast i16* %tmp151 to i32* ; <i32*> [#uses=1]
- %tmp152 = load i32* %tmp151.upgrd.1 ; <i32> [#uses=1]
+ %tmp152 = load i32, i32* %tmp151.upgrd.1 ; <i32> [#uses=1]
%tmp154 = lshr i32 %tmp152, 16 ; <i32> [#uses=1]
%tmp154.mask = and i32 %tmp154, 127 ; <i32> [#uses=1]
%gep.upgrd.2 = zext i32 %tmp154.mask to i64 ; <i64> [#uses=1]
%tmp155 = getelementptr [35 x i8], [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.2 ; <i8*> [#uses=1]
- %tmp156 = load i8* %tmp155 ; <i8> [#uses=1]
+ %tmp156 = load i8, i8* %tmp155 ; <i8> [#uses=1]
%tmp157 = icmp eq i8 %tmp156, 4 ; <i1> [#uses=1]
br i1 %tmp157, label %cond_next241, label %cond_true158
cond_true158: ; preds = %bb145
- %tmp172 = load %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
+ %tmp172 = load %struct.tree_node*, %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
%tmp174 = getelementptr %struct.tree_node, %struct.tree_node* %tmp172, i32 0, i32 0, i32 0, i32 1 ; <%struct.tree_node**> [#uses=1]
- %tmp175 = load %struct.tree_node** %tmp174 ; <%struct.tree_node*> [#uses=1]
+ %tmp175 = load %struct.tree_node*, %struct.tree_node** %tmp174 ; <%struct.tree_node*> [#uses=1]
%tmp176 = bitcast %struct.tree_node* %tmp175 to %struct.tree_type* ; <%struct.tree_type*> [#uses=1]
%tmp177 = getelementptr %struct.tree_type, %struct.tree_type* %tmp176, i32 0, i32 6 ; <i16*> [#uses=1]
%tmp177.upgrd.3 = bitcast i16* %tmp177 to i32* ; <i32*> [#uses=1]
- %tmp178 = load i32* %tmp177.upgrd.3 ; <i32> [#uses=1]
+ %tmp178 = load i32, i32* %tmp177.upgrd.3 ; <i32> [#uses=1]
%tmp180 = lshr i32 %tmp178, 16 ; <i32> [#uses=1]
%tmp180.mask = and i32 %tmp180, 127 ; <i32> [#uses=1]
%gep.upgrd.4 = zext i32 %tmp180.mask to i64 ; <i64> [#uses=1]
%tmp181 = getelementptr [35 x i8], [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.4 ; <i8*> [#uses=1]
- %tmp182 = load i8* %tmp181 ; <i8> [#uses=1]
+ %tmp182 = load i8, i8* %tmp181 ; <i8> [#uses=1]
%tmp183 = icmp eq i8 %tmp182, 8 ; <i1> [#uses=1]
br i1 %tmp183, label %cond_next241, label %cond_true184
cond_true184: ; preds = %cond_true158
- %tmp185 = load %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
+ %tmp185 = load %struct.tree_node*, %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
%tmp187 = getelementptr %struct.tree_node, %struct.tree_node* %tmp185, i32 0, i32 0, i32 0, i32 1 ; <%struct.tree_node**> [#uses=1]
- %tmp188 = load %struct.tree_node** %tmp187 ; <%struct.tree_node*> [#uses=1]
+ %tmp188 = load %struct.tree_node*, %struct.tree_node** %tmp187 ; <%struct.tree_node*> [#uses=1]
%tmp189 = bitcast %struct.tree_node* %tmp188 to %struct.tree_type* ; <%struct.tree_type*> [#uses=1]
%tmp190 = getelementptr %struct.tree_type, %struct.tree_type* %tmp189, i32 0, i32 6 ; <i16*> [#uses=1]
%tmp190.upgrd.5 = bitcast i16* %tmp190 to i32* ; <i32*> [#uses=1]
- %tmp191 = load i32* %tmp190.upgrd.5 ; <i32> [#uses=1]
+ %tmp191 = load i32, i32* %tmp190.upgrd.5 ; <i32> [#uses=1]
%tmp193 = lshr i32 %tmp191, 16 ; <i32> [#uses=1]
%tmp193.mask = and i32 %tmp193, 127 ; <i32> [#uses=1]
%gep.upgrd.6 = zext i32 %tmp193.mask to i64 ; <i64> [#uses=1]
%tmp194 = getelementptr [35 x i8], [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.6 ; <i8*> [#uses=1]
- %tmp195 = load i8* %tmp194 ; <i8> [#uses=1]
+ %tmp195 = load i8, i8* %tmp194 ; <i8> [#uses=1]
%tmp196 = icmp eq i8 %tmp195, 4 ; <i1> [#uses=1]
br i1 %tmp196, label %cond_next241, label %cond_true197
cond_true197: ; preds = %cond_true184
ret void
cond_next241: ; preds = %cond_true184, %cond_true158, %bb145
- %tmp245 = load i32* null ; <i32> [#uses=0]
+ %tmp245 = load i32, i32* null ; <i32> [#uses=0]
ret void
bb276: ; preds = %entry
ret void
diff --git a/llvm/test/Transforms/SimplifyCFG/2006-12-08-Ptr-ICmp-Branch.ll b/llvm/test/Transforms/SimplifyCFG/2006-12-08-Ptr-ICmp-Branch.ll
index a6217610d27..dcf241255d8 100644
--- a/llvm/test/Transforms/SimplifyCFG/2006-12-08-Ptr-ICmp-Branch.ll
+++ b/llvm/test/Transforms/SimplifyCFG/2006-12-08-Ptr-ICmp-Branch.ll
@@ -47,40 +47,40 @@ entry:
store i32 0, i32* %wstate
%tmp = getelementptr %struct.charsequence, %struct.charsequence* %cs, i64 0, i32 0 ; <i8**> [#uses=1]
%tmp1 = getelementptr %struct.charsequence, %struct.charsequence* @C.0.2294, i64 0, i32 0 ; <i8**> [#uses=1]
- %tmp.upgrd.5 = load i8** %tmp1 ; <i8*> [#uses=1]
+ %tmp.upgrd.5 = load i8*, i8** %tmp1 ; <i8*> [#uses=1]
store i8* %tmp.upgrd.5, i8** %tmp
%tmp.upgrd.6 = getelementptr %struct.charsequence, %struct.charsequence* %cs, i64 0, i32 1 ; <i32*> [#uses=1]
%tmp2 = getelementptr %struct.charsequence, %struct.charsequence* @C.0.2294, i64 0, i32 1 ; <i32*> [#uses=1]
- %tmp.upgrd.7 = load i32* %tmp2 ; <i32> [#uses=1]
+ %tmp.upgrd.7 = load i32, i32* %tmp2 ; <i32> [#uses=1]
store i32 %tmp.upgrd.7, i32* %tmp.upgrd.6
%tmp3 = getelementptr %struct.charsequence, %struct.charsequence* %cs, i64 0, i32 2 ; <i32*> [#uses=1]
%tmp4 = getelementptr %struct.charsequence, %struct.charsequence* @C.0.2294, i64 0, i32 2 ; <i32*> [#uses=1]
- %tmp5 = load i32* %tmp4 ; <i32> [#uses=1]
+ %tmp5 = load i32, i32* %tmp4 ; <i32> [#uses=1]
store i32 %tmp5, i32* %tmp3
br label %bb33
bb: ; preds = %bb33
- %tmp.upgrd.8 = load %struct.FILE** %f_addr ; <%struct.FILE*> [#uses=1]
+ %tmp.upgrd.8 = load %struct.FILE*, %struct.FILE** %f_addr ; <%struct.FILE*> [#uses=1]
%tmp.upgrd.9 = call i32 @_IO_getc( %struct.FILE* %tmp.upgrd.8 ) ; <i32> [#uses=1]
%tmp6 = call i32 @tolower( i32 %tmp.upgrd.9 ) ; <i32> [#uses=1]
%tmp6.upgrd.10 = trunc i32 %tmp6 to i8 ; <i8> [#uses=1]
store i8 %tmp6.upgrd.10, i8* %c
- %tmp7 = load i32* %wstate ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* %wstate ; <i32> [#uses=1]
%tmp.upgrd.11 = icmp ne i32 %tmp7, 0 ; <i1> [#uses=1]
br i1 %tmp.upgrd.11, label %cond_true, label %cond_false
cond_true: ; preds = %bb
- %tmp.upgrd.12 = load i8* %c ; <i8> [#uses=1]
+ %tmp.upgrd.12 = load i8, i8* %c ; <i8> [#uses=1]
%tmp8 = icmp sle i8 %tmp.upgrd.12, 96 ; <i1> [#uses=1]
br i1 %tmp8, label %cond_true9, label %cond_next
cond_true9: ; preds = %cond_true
br label %bb16
cond_next: ; preds = %cond_true
- %tmp10 = load i8* %c ; <i8> [#uses=1]
+ %tmp10 = load i8, i8* %c ; <i8> [#uses=1]
%tmp11 = icmp sgt i8 %tmp10, 122 ; <i1> [#uses=1]
br i1 %tmp11, label %cond_true12, label %cond_next13
cond_true12: ; preds = %cond_next
br label %bb16
cond_next13: ; preds = %cond_next
- %tmp14 = load i8* %c ; <i8> [#uses=1]
+ %tmp14 = load i8, i8* %c ; <i8> [#uses=1]
%tmp14.upgrd.13 = sext i8 %tmp14 to i32 ; <i32> [#uses=1]
%tmp1415 = trunc i32 %tmp14.upgrd.13 to i8 ; <i8> [#uses=1]
call void @charsequence_push( %struct.charsequence* %cs, i8 %tmp1415 )
@@ -88,26 +88,26 @@ cond_next13: ; preds = %cond_next
bb16: ; preds = %cond_true12, %cond_true9
%tmp17 = call i8* @charsequence_val( %struct.charsequence* %cs ) ; <i8*> [#uses=1]
store i8* %tmp17, i8** %str
- %tmp.upgrd.14 = load %struct.trie_s** %t_addr ; <%struct.trie_s*> [#uses=1]
- %tmp18 = load i8** %str ; <i8*> [#uses=1]
+ %tmp.upgrd.14 = load %struct.trie_s*, %struct.trie_s** %t_addr ; <%struct.trie_s*> [#uses=1]
+ %tmp18 = load i8*, i8** %str ; <i8*> [#uses=1]
%tmp19 = call %struct.trie_s* @trie_insert( %struct.trie_s* %tmp.upgrd.14, i8* %tmp18 ) ; <%struct.trie_s*> [#uses=0]
- %tmp20 = load i8** %str ; <i8*> [#uses=1]
+ %tmp20 = load i8*, i8** %str ; <i8*> [#uses=1]
call void @free( i8* %tmp20 )
store i32 0, i32* %wstate
br label %bb21
bb21: ; preds = %bb16, %cond_next13
br label %cond_next32
cond_false: ; preds = %bb
- %tmp22 = load i8* %c ; <i8> [#uses=1]
+ %tmp22 = load i8, i8* %c ; <i8> [#uses=1]
%tmp23 = icmp sgt i8 %tmp22, 96 ; <i1> [#uses=1]
br i1 %tmp23, label %cond_true24, label %cond_next31
cond_true24: ; preds = %cond_false
- %tmp25 = load i8* %c ; <i8> [#uses=1]
+ %tmp25 = load i8, i8* %c ; <i8> [#uses=1]
%tmp26 = icmp sle i8 %tmp25, 122 ; <i1> [#uses=1]
br i1 %tmp26, label %cond_true27, label %cond_next30
cond_true27: ; preds = %cond_true24
call void @charsequence_reset( %struct.charsequence* %cs )
- %tmp28 = load i8* %c ; <i8> [#uses=1]
+ %tmp28 = load i8, i8* %c ; <i8> [#uses=1]
%tmp28.upgrd.15 = sext i8 %tmp28 to i32 ; <i32> [#uses=1]
%tmp2829 = trunc i32 %tmp28.upgrd.15 to i8 ; <i8> [#uses=1]
call void @charsequence_push( %struct.charsequence* %cs, i8 %tmp2829 )
@@ -120,7 +120,7 @@ cond_next31: ; preds = %cond_next30, %cond_false
cond_next32: ; preds = %cond_next31, %bb21
br label %bb33
bb33: ; preds = %cond_next32, %entry
- %tmp34 = load %struct.FILE** %f_addr ; <%struct.FILE*> [#uses=1]
+ %tmp34 = load %struct.FILE*, %struct.FILE** %f_addr ; <%struct.FILE*> [#uses=1]
%tmp35 = call i32 @feof( %struct.FILE* %tmp34 ) ; <i32> [#uses=1]
%tmp36 = icmp eq i32 %tmp35, 0 ; <i1> [#uses=1]
br i1 %tmp36, label %bb, label %bb37
diff --git a/llvm/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll b/llvm/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll
index 8e156373998..7625d935ab3 100644
--- a/llvm/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll
+++ b/llvm/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll
@@ -8,7 +8,7 @@ target triple = "i686-apple-darwin8"
define void @test(i32 %X, i32 %Y, i32 %Z) {
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = load i32* @G, align 8 ; <i32> [#uses=2]
+ %tmp = load i32, i32* @G, align 8 ; <i32> [#uses=2]
%tmp3 = icmp eq i32 %X, %Y ; <i1> [#uses=1]
%tmp34 = zext i1 %tmp3 to i8 ; <i8> [#uses=1]
%toBool = icmp ne i8 %tmp34, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/SimplifyCFG/2008-07-13-InfLoopMiscompile.ll b/llvm/test/Transforms/SimplifyCFG/2008-07-13-InfLoopMiscompile.ll
index 9b6084f0e1a..3127931c56b 100644
--- a/llvm/test/Transforms/SimplifyCFG/2008-07-13-InfLoopMiscompile.ll
+++ b/llvm/test/Transforms/SimplifyCFG/2008-07-13-InfLoopMiscompile.ll
@@ -9,7 +9,7 @@ target triple = "i386-pc-linux-gnu"
define i32 @main() nounwind {
entry:
- %l = load i32* @g_37, align 4 ; <i32> [#uses=1]
+ %l = load i32, i32* @g_37, align 4 ; <i32> [#uses=1]
%cmpa = icmp ne i32 %l, 0 ; <i1> [#uses=3]
br i1 %cmpa, label %func_1.exit, label %mooseblock
diff --git a/llvm/test/Transforms/SimplifyCFG/2008-09-08-MultiplePred.ll b/llvm/test/Transforms/SimplifyCFG/2008-09-08-MultiplePred.ll
index ac9622d43c3..6b216f598ef 100644
--- a/llvm/test/Transforms/SimplifyCFG/2008-09-08-MultiplePred.ll
+++ b/llvm/test/Transforms/SimplifyCFG/2008-09-08-MultiplePred.ll
@@ -4,7 +4,7 @@
define i32 @func_127(i32 %p_129) nounwind {
entry:
- load i32* @g_103, align 4 ; <i32>:0 [#uses=1]
+ load i32, i32* @g_103, align 4 ; <i32>:0 [#uses=1]
icmp eq i32 %0, 0 ; <i1>:1 [#uses=2]
br i1 %1, label %bb6.preheader, label %entry.return_crit_edge
diff --git a/llvm/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll b/llvm/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll
index 419feb6b56a..faf3f5f8b2a 100644
--- a/llvm/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll
+++ b/llvm/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll
@@ -18,7 +18,7 @@ bb: ; preds = %bb4
br i1 icmp ne (i32* @i, i32* null), label %bb1, label %bb2
bb1: ; preds = %bb
- %0 = load i32* @i, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @i, align 4 ; <i32> [#uses=1]
br label %bb3
bb2: ; preds = %bb
diff --git a/llvm/test/Transforms/SimplifyCFG/AArch64/prefer-fma.ll b/llvm/test/Transforms/SimplifyCFG/AArch64/prefer-fma.ll
index 076cb5867af..cfbe219c4c2 100644
--- a/llvm/test/Transforms/SimplifyCFG/AArch64/prefer-fma.ll
+++ b/llvm/test/Transforms/SimplifyCFG/AArch64/prefer-fma.ll
@@ -5,9 +5,9 @@
; Function Attrs: nounwind
define double @_Z3fooRdS_S_S_(double* dereferenceable(8) %x, double* dereferenceable(8) %y, double* dereferenceable(8) %a) #0 {
entry:
- %0 = load double* %y, align 8
+ %0 = load double, double* %y, align 8
%cmp = fcmp oeq double %0, 0.000000e+00
- %1 = load double* %x, align 8
+ %1 = load double, double* %x, align 8
br i1 %cmp, label %if.then, label %if.else
; fadd (const, (fmul x, y))
@@ -15,7 +15,7 @@ if.then: ; preds = %entry
; CHECK-LABEL: if.then:
; CHECK: %3 = fmul fast double %1, %2
; CHECK-NEXT: %mul = fadd fast double 1.000000e+00, %3
- %2 = load double* %a, align 8
+ %2 = load double, double* %a, align 8
%3 = fmul fast double %1, %2
%mul = fadd fast double 1.000000e+00, %3
store double %mul, double* %y, align 8
@@ -26,16 +26,16 @@ if.else: ; preds = %entry
; CHECK-LABEL: if.else:
; CHECK: %mul1 = fmul fast double %1, %2
; CHECK-NEXT: %sub1 = fsub fast double %mul1, %0
- %4 = load double* %a, align 8
+ %4 = load double, double* %a, align 8
%mul1 = fmul fast double %1, %4
%sub1 = fsub fast double %mul1, %0
store double %sub1, double* %y, align 8
br label %if.end
if.end: ; preds = %if.else, %if.then
- %5 = load double* %y, align 8
+ %5 = load double, double* %y, align 8
%cmp2 = fcmp oeq double %5, 2.000000e+00
- %6 = load double* %x, align 8
+ %6 = load double, double* %x, align 8
br i1 %cmp2, label %if.then2, label %if.else2
; fsub (x, (fmul y, z))
@@ -43,7 +43,7 @@ if.then2: ; preds = %entry
; CHECK-LABEL: if.then2:
; CHECK: %7 = fmul fast double %5, 3.000000e+00
; CHECK-NEXT: %mul2 = fsub fast double %6, %7
- %7 = load double* %a, align 8
+ %7 = load double, double* %a, align 8
%8 = fmul fast double %6, 3.0000000e+00
%mul2 = fsub fast double %7, %8
store double %mul2, double* %y, align 8
@@ -62,10 +62,10 @@ if.else2: ; preds = %entry
br label %if.end2
if.end2: ; preds = %if.else, %if.then
- %9 = load double* %x, align 8
- %10 = load double* %y, align 8
+ %9 = load double, double* %x, align 8
+ %10 = load double, double* %y, align 8
%add = fadd fast double %9, %10
- %11 = load double* %a, align 8
+ %11 = load double, double* %a, align 8
%add2 = fadd fast double %add, %11
ret double %add2
}
diff --git a/llvm/test/Transforms/SimplifyCFG/EmptyBlockMerge.ll b/llvm/test/Transforms/SimplifyCFG/EmptyBlockMerge.ll
index aba08dc073a..32a0202364d 100644
--- a/llvm/test/Transforms/SimplifyCFG/EmptyBlockMerge.ll
+++ b/llvm/test/Transforms/SimplifyCFG/EmptyBlockMerge.ll
@@ -6,7 +6,7 @@ declare void @foo()
define void @cprop_test12(i32* %data) {
bb0:
- %reg108 = load i32* %data ; <i32> [#uses=2]
+ %reg108 = load i32, i32* %data ; <i32> [#uses=2]
%cond218 = icmp ne i32 %reg108, 5 ; <i1> [#uses=1]
br i1 %cond218, label %bb3, label %bb2
bb2: ; preds = %bb0
diff --git a/llvm/test/Transforms/SimplifyCFG/PR17073.ll b/llvm/test/Transforms/SimplifyCFG/PR17073.ll
index 8dc9fb28d61..e6e98b26687 100644
--- a/llvm/test/Transforms/SimplifyCFG/PR17073.ll
+++ b/llvm/test/Transforms/SimplifyCFG/PR17073.ll
@@ -18,7 +18,7 @@ target triple = "i386-apple-macosx10.9.0"
; CHECK-NOT: select i1 %tobool, i32* null, i32* select (i1 icmp eq (i64 urem (i64 2, i64 zext (i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a) to i64)), i64 0), i32* null, i32* @a)
define i32* @can_trap1() {
entry:
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %exit, label %block1
@@ -38,7 +38,7 @@ exit:
; CHECK-NOT: select i1 %tobool, i32* select (i1 icmp eq (i64 urem (i64 2, i64 zext (i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a) to i64)), i64 0), i32* null, i32* @a), i32* null
define i32* @can_trap2() {
entry:
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %exit, label %block1
@@ -57,7 +57,7 @@ exit:
; CHECK: select i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a), i32* select (i1 icmp eq (i64 add (i64 zext (i1 icmp eq (i32* bitcast (i8* @b to i32*), i32* @a) to i64), i64 2), i64 0), i32* null, i32* @a), i32* null
define i32* @cannot_trap() {
entry:
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %exit, label %block1
diff --git a/llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll b/llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll
index 31de3c8b2f1..c23a96df52e 100644
--- a/llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll
+++ b/llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll
@@ -34,11 +34,11 @@ define i8* @test4(i1* %dummy, i8* %a, i8* %b) {
; CHECK-LABEL: @test4(
entry:
- %cond1 = load volatile i1* %dummy
+ %cond1 = load volatile i1, i1* %dummy
br i1 %cond1, label %if, label %end
if:
- %cond2 = load volatile i1* %dummy
+ %cond2 = load volatile i1, i1* %dummy
br i1 %cond2, label %then, label %end
then:
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll b/llvm/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
index 0df7963b182..f3e5506ad93 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-apple-darwin12.0.0"
; CHECK-NEXT: sub i3 %arg, -4
; CHECK-NEXT: zext i3 %switch.tableidx to i4
; CHECK-NEXT: getelementptr inbounds [8 x i64], [8 x i64]* @switch.table, i32 0, i4 %switch.tableidx.zext
-; CHECK-NEXT: load i64* %switch.gep
+; CHECK-NEXT: load i64, i64* %switch.gep
; CHECK-NEXT: add i64
; CHECK-NEXT: ret i64
define i64 @test(i3 %arg) {
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll b/llvm/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
index 18e04f60b97..26008700f5b 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-darwin12.0.0"
; CHECK-NEXT: sub i2 %0, -2
; CHECK-NEXT: zext i2 %switch.tableidx to i3
; CHECK-NEXT: getelementptr inbounds [4 x i64], [4 x i64]* @switch.table, i32 0, i3 %switch.tableidx.zext
-; CHECK-NEXT: load i64* %switch.gep
+; CHECK-NEXT: load i64, i64* %switch.gep
; CHECK-NEXT: ret i64 %switch.load
define i64 @_TFO6reduce1E5toRawfS0_FT_Si(i2) {
entry:
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll b/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
index 85a3680387b..6bdd649a905 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
@@ -59,7 +59,7 @@ return:
; CHECK-NEXT: br i1 %0, label %switch.lookup, label %return
; CHECK: switch.lookup:
; CHECK-NEXT: %switch.gep = getelementptr inbounds [7 x i32], [7 x i32]* @switch.table, i32 0, i32 %switch.tableidx
-; CHECK-NEXT: %switch.load = load i32* %switch.gep
+; CHECK-NEXT: %switch.load = load i32, i32* %switch.gep
; CHECK-NEXT: ret i32 %switch.load
; CHECK: return:
; CHECK-NEXT: ret i32 15
@@ -98,7 +98,7 @@ sw.epilog:
; CHECK-NEXT: %switch.downshift = lshr i32 89655594, %switch.shiftamt
; CHECK-NEXT: %switch.masked = trunc i32 %switch.downshift to i8
; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x float], [4 x float]* @switch.table1, i32 0, i32 %switch.tableidx
-; CHECK-NEXT: %switch.load = load float* %switch.gep
+; CHECK-NEXT: %switch.load = load float, float* %switch.gep
; CHECK-NEXT: br label %sw.epilog
; CHECK: sw.epilog:
; CHECK-NEXT: %a.0 = phi i8 [ %switch.masked, %switch.lookup ], [ 7, %entry ]
@@ -145,7 +145,7 @@ return:
; CHECK-NEXT: br i1 %0, label %switch.lookup, label %return
; CHECK: switch.lookup:
; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i8*], [4 x i8*]* @switch.table2, i32 0, i32 %switch.tableidx
-; CHECK-NEXT: %switch.load = load i8** %switch.gep
+; CHECK-NEXT: %switch.load = load i8*, i8** %switch.gep
; CHECK-NEXT: ret i8* %switch.load
}
@@ -174,7 +174,7 @@ sw.epilog:
; CHECK-LABEL: @earlyreturncrash(
; CHECK: switch.lookup:
; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table3, i32 0, i32 %switch.tableidx
-; CHECK-NEXT: %switch.load = load i32* %switch.gep
+; CHECK-NEXT: %switch.load = load i32, i32* %switch.gep
; CHECK-NEXT: ret i32 %switch.load
; CHECK: sw.epilog:
; CHECK-NEXT: ret i32 7
@@ -806,7 +806,7 @@ return:
; CHECK-NOT: icmp
; CHECK-NOT: br 1i
; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table7, i32 0, i32 %switch.tableidx
-; CHECK-NEXT: %switch.load = load i32* %switch.gep
+; CHECK-NEXT: %switch.load = load i32, i32* %switch.gep
; CHECK-NEXT: ret i32 %switch.load
}
diff --git a/llvm/test/Transforms/SimplifyCFG/basictest.ll b/llvm/test/Transforms/SimplifyCFG/basictest.ll
index 5d9dad4cf7e..d228499b2ec 100644
--- a/llvm/test/Transforms/SimplifyCFG/basictest.ll
+++ b/llvm/test/Transforms/SimplifyCFG/basictest.ll
@@ -50,7 +50,7 @@ define i8 @test6f() {
; CHECK: alloca i8, align 1
; CHECK-NEXT: call i8 @test6g
; CHECK-NEXT: icmp eq i8 %tmp, 0
-; CHECK-NEXT: load i8* %r, align 1{{$}}
+; CHECK-NEXT: load i8, i8* %r, align 1{{$}}
bb0:
%r = alloca i8, align 1
@@ -58,7 +58,7 @@ bb0:
%tmp1 = icmp eq i8 %tmp, 0
br i1 %tmp1, label %bb2, label %bb1
bb1:
- %tmp3 = load i8* %r, align 1, !range !2, !tbaa !1
+ %tmp3 = load i8, i8* %r, align 1, !range !2, !tbaa !1
%tmp4 = icmp eq i8 %tmp3, 1
br i1 %tmp4, label %bb2, label %bb3
bb2:
diff --git a/llvm/test/Transforms/SimplifyCFG/branch-fold-threshold.ll b/llvm/test/Transforms/SimplifyCFG/branch-fold-threshold.ll
index 878c0a4837a..fad5fce1d4b 100644
--- a/llvm/test/Transforms/SimplifyCFG/branch-fold-threshold.ll
+++ b/llvm/test/Transforms/SimplifyCFG/branch-fold-threshold.ll
@@ -19,7 +19,7 @@ lor.lhs.false:
; AGGRESSIVE-NOT: br i1
cond.false:
- %0 = load i32* %input, align 4
+ %0 = load i32, i32* %input, align 4
br label %cond.end
cond.end:
diff --git a/llvm/test/Transforms/SimplifyCFG/branch-phi-thread.ll b/llvm/test/Transforms/SimplifyCFG/branch-phi-thread.ll
index c19ba69866b..4c1b7e68e25 100644
--- a/llvm/test/Transforms/SimplifyCFG/branch-phi-thread.ll
+++ b/llvm/test/Transforms/SimplifyCFG/branch-phi-thread.ll
@@ -51,7 +51,7 @@ E:
br i1 %C, label %T, label %F
T: ; preds = %A, %E
call void @f3( )
- %XX = load i32* %AP ; <i32> [#uses=1]
+ %XX = load i32, i32* %AP ; <i32> [#uses=1]
store i32 %XX, i32* %BP
br i1 %C, label %B, label %A
A: ; preds = %T
diff --git a/llvm/test/Transforms/SimplifyCFG/dbginfo.ll b/llvm/test/Transforms/SimplifyCFG/dbginfo.ll
index 1a9f20ac871..12aec91c6c5 100644
--- a/llvm/test/Transforms/SimplifyCFG/dbginfo.ll
+++ b/llvm/test/Transforms/SimplifyCFG/dbginfo.ll
@@ -58,7 +58,7 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
call void @llvm.dbg.func.start({ }* bitcast (%llvm.dbg.subprogram.type* @llvm.dbg.subprogram947 to { }*))
store %struct.__false_type* %this, %struct.__false_type** %this_addr
- %0 = load %struct.__false_type** %this_addr, align 4 ; <%struct.__false_type*> [#uses=1]
+ %0 = load %struct.__false_type*, %struct.__false_type** %this_addr, align 4 ; <%struct.__false_type*> [#uses=1]
call void @_ZN9__gnu_cxx13new_allocatorIP5SceneED2Ev(%struct.__false_type* %0) nounwind
br label %bb
diff --git a/llvm/test/Transforms/SimplifyCFG/hoist-common-code.ll b/llvm/test/Transforms/SimplifyCFG/hoist-common-code.ll
index 5c83e2a3aa4..c1ca605cc20 100644
--- a/llvm/test/Transforms/SimplifyCFG/hoist-common-code.ll
+++ b/llvm/test/Transforms/SimplifyCFG/hoist-common-code.ll
@@ -6,12 +6,12 @@ define void @test(i1 %P, i32* %Q) {
br i1 %P, label %T, label %F
T: ; preds = %0
store i32 1, i32* %Q
- %A = load i32* %Q ; <i32> [#uses=1]
+ %A = load i32, i32* %Q ; <i32> [#uses=1]
call void @bar( i32 %A )
ret void
F: ; preds = %0
store i32 1, i32* %Q
- %B = load i32* %Q ; <i32> [#uses=1]
+ %B = load i32, i32* %Q ; <i32> [#uses=1]
call void @bar( i32 %B )
ret void
}
diff --git a/llvm/test/Transforms/SimplifyCFG/hoist-with-range.ll b/llvm/test/Transforms/SimplifyCFG/hoist-with-range.ll
index 7ca3ff247dc..0a2b28267ef 100644
--- a/llvm/test/Transforms/SimplifyCFG/hoist-with-range.ll
+++ b/llvm/test/Transforms/SimplifyCFG/hoist-with-range.ll
@@ -2,15 +2,15 @@
define void @foo(i1 %c, i8* %p) {
; CHECK: if:
-; CHECK-NEXT: load i8* %p, !range !0
+; CHECK-NEXT: load i8, i8* %p, !range !0
; CHECK: !0 = !{i8 0, i8 1, i8 3, i8 5}
if:
br i1 %c, label %then, label %else
then:
- %t = load i8* %p, !range !0
+ %t = load i8, i8* %p, !range !0
br label %out
else:
- %e = load i8* %p, !range !1
+ %e = load i8, i8* %p, !range !1
br label %out
out:
ret void
diff --git a/llvm/test/Transforms/SimplifyCFG/indirectbr.ll b/llvm/test/Transforms/SimplifyCFG/indirectbr.ll
index 7001a4ed40a..67e23d2fe93 100644
--- a/llvm/test/Transforms/SimplifyCFG/indirectbr.ll
+++ b/llvm/test/Transforms/SimplifyCFG/indirectbr.ll
@@ -17,7 +17,7 @@ entry:
store i8* blockaddress(@indbrtest0, %BB1), i8** %P
store i8* blockaddress(@indbrtest0, %BB2), i8** %P
call void @foo()
- %t = load i8** %Q
+ %t = load i8*, i8** %Q
indirectbr i8* %t, [label %BB0, label %BB1, label %BB2, label %BB0, label %BB1, label %BB2]
BB0:
call void @A()
@@ -42,7 +42,7 @@ define void @indbrtest1(i8** %P, i8** %Q) {
entry:
store i8* blockaddress(@indbrtest1, %BB0), i8** %P
call void @foo()
- %t = load i8** %Q
+ %t = load i8*, i8** %Q
indirectbr i8* %t, [label %BB0, label %BB0]
BB0:
call void @A()
@@ -193,7 +193,7 @@ escape-string.top:
xlab8x: ; preds = %xlab5x
%xvaluex = call i32 @xselectorx()
%xblkx.x = getelementptr [9 x i8*], [9 x i8*]* @xblkx.bbs, i32 0, i32 %xvaluex
- %xblkx.load = load i8** %xblkx.x
+ %xblkx.load = load i8*, i8** %xblkx.x
indirectbr i8* %xblkx.load, [label %xblkx.begin, label %xblkx.begin3, label %xblkx.begin4, label %xblkx.begin5, label %xblkx.begin6, label %xblkx.begin7, label %xblkx.begin8, label %xblkx.begin9, label %xblkx.end]
xblkx.begin:
diff --git a/llvm/test/Transforms/SimplifyCFG/iterative-simplify.ll b/llvm/test/Transforms/SimplifyCFG/iterative-simplify.ll
index a3974110576..60728b9a957 100644
--- a/llvm/test/Transforms/SimplifyCFG/iterative-simplify.ll
+++ b/llvm/test/Transforms/SimplifyCFG/iterative-simplify.ll
@@ -17,13 +17,13 @@ cond_true: ; preds = %entry
br label %bb
bb: ; preds = %cond_next, %cond_true
- %tmp = load i32* %z ; <i32> [#uses=1]
+ %tmp = load i32, i32* %z ; <i32> [#uses=1]
%tmp1 = sub i32 %tmp, 16384 ; <i32> [#uses=1]
store i32 %tmp1, i32* %z
- %tmp2 = load i32* %i ; <i32> [#uses=1]
+ %tmp2 = load i32, i32* %i ; <i32> [#uses=1]
%tmp3 = add i32 %tmp2, 1 ; <i32> [#uses=1]
store i32 %tmp3, i32* %i
- %tmp4 = load i32* %i ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* %i ; <i32> [#uses=1]
%tmp5 = icmp sgt i32 %tmp4, 262144 ; <i1> [#uses=1]
%tmp56 = zext i1 %tmp5 to i8 ; <i8> [#uses=1]
%toBool7 = icmp ne i8 %tmp56, 0 ; <i1> [#uses=1]
@@ -34,7 +34,7 @@ cond_true8: ; preds = %bb
unreachable
cond_next: ; preds = %bb
- %tmp9 = load i32* %z ; <i32> [#uses=1]
+ %tmp9 = load i32, i32* %z ; <i32> [#uses=1]
%tmp10 = icmp ne i32 %tmp9, 0 ; <i1> [#uses=1]
%tmp1011 = zext i1 %tmp10 to i8 ; <i8> [#uses=1]
%toBool12 = icmp ne i8 %tmp1011, 0 ; <i1> [#uses=1]
@@ -53,13 +53,13 @@ cond_true15: ; preds = %cond_false
br label %bb17
bb17: ; preds = %cond_next27, %cond_true15
- %tmp18 = load i32* %z16 ; <i32> [#uses=1]
+ %tmp18 = load i32, i32* %z16 ; <i32> [#uses=1]
%tmp19 = sub i32 %tmp18, 16384 ; <i32> [#uses=1]
store i32 %tmp19, i32* %z16
- %tmp20 = load i32* %i ; <i32> [#uses=1]
+ %tmp20 = load i32, i32* %i ; <i32> [#uses=1]
%tmp21 = add i32 %tmp20, 1 ; <i32> [#uses=1]
store i32 %tmp21, i32* %i
- %tmp22 = load i32* %i ; <i32> [#uses=1]
+ %tmp22 = load i32, i32* %i ; <i32> [#uses=1]
%tmp23 = icmp sgt i32 %tmp22, 262144 ; <i1> [#uses=1]
%tmp2324 = zext i1 %tmp23 to i8 ; <i8> [#uses=1]
%toBool25 = icmp ne i8 %tmp2324, 0 ; <i1> [#uses=1]
@@ -70,7 +70,7 @@ cond_true26: ; preds = %bb17
unreachable
cond_next27: ; preds = %bb17
- %tmp28 = load i32* %z16 ; <i32> [#uses=1]
+ %tmp28 = load i32, i32* %z16 ; <i32> [#uses=1]
%tmp29 = icmp ne i32 %tmp28, 0 ; <i1> [#uses=1]
%tmp2930 = zext i1 %tmp29 to i8 ; <i8> [#uses=1]
%toBool31 = icmp ne i8 %tmp2930, 0 ; <i1> [#uses=1]
@@ -91,7 +91,7 @@ cond_next35: ; preds = %cond_next34
br label %return
return: ; preds = %cond_next35
- %retval36 = load i32* %retval ; <i32> [#uses=1]
+ %retval36 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval36
}
diff --git a/llvm/test/Transforms/SimplifyCFG/multiple-phis.ll b/llvm/test/Transforms/SimplifyCFG/multiple-phis.ll
index 3ecce2e16ee..a6eef09ae64 100644
--- a/llvm/test/Transforms/SimplifyCFG/multiple-phis.ll
+++ b/llvm/test/Transforms/SimplifyCFG/multiple-phis.ll
@@ -23,7 +23,7 @@ while.body: ; preds = %while.cond
%div = udiv i32 %add, 2
%idxprom = zext i32 %div to i64
%arrayidx = getelementptr inbounds i32, i32* %r, i64 %idxprom
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%cmp1 = icmp ult i32 %k, %0
br i1 %cmp1, label %if.then, label %if.else
diff --git a/llvm/test/Transforms/SimplifyCFG/no_speculative_loads_with_tsan.ll b/llvm/test/Transforms/SimplifyCFG/no_speculative_loads_with_tsan.ll
index b388cc5830f..4792e9565b2 100644
--- a/llvm/test/Transforms/SimplifyCFG/no_speculative_loads_with_tsan.ll
+++ b/llvm/test/Transforms/SimplifyCFG/no_speculative_loads_with_tsan.ll
@@ -8,7 +8,7 @@ entry:
br i1 %tobool, label %return, label %if.then
if.then: ; preds = %entry
- %0 = load i32* @g, align 4
+ %0 = load i32, i32* @g, align 4
br label %return
return: ; preds = %entry, %if.then
@@ -26,7 +26,7 @@ entry:
br i1 %tobool, label %return, label %if.then
if.then: ; preds = %entry
- %0 = load i32* @g, align 4
+ %0 = load i32, i32* @g, align 4
br label %return
return: ; preds = %entry, %if.then
@@ -34,7 +34,7 @@ return: ; preds = %entry, %if.then
ret i32 %retval
; CHECK-LABEL: @TestTsan
; CHECK: br i1
-; CHECK: load i32* @g
+; CHECK: load i32, i32* @g
; CHECK: br label
; CHECK: ret i32
}
diff --git a/llvm/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll b/llvm/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll
index ad73f148753..c0f0046a1d2 100644
--- a/llvm/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll
+++ b/llvm/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll
@@ -21,7 +21,7 @@ if.then4: ; preds = %if.else
if.end7: ; preds = %if.else, %if.then4, %if.then
%x.0 = phi i32* [ %a, %if.then ], [ %c, %if.then4 ], [ null, %if.else ]
- %tmp9 = load i32* %x.0
+ %tmp9 = load i32, i32* %x.0
ret i32 %tmp9
; CHECK-LABEL: @test1(
@@ -50,7 +50,7 @@ if.then4: ; preds = %if.else
if.end7: ; preds = %if.else, %if.then4, %if.then
%x.0 = phi i32* [ %a, %if.then ], [ null, %if.then4 ], [ null, %if.else ]
- %tmp9 = load i32* %x.0
+ %tmp9 = load i32, i32* %x.0
ret i32 %tmp9
; CHECK-LABEL: @test2(
; CHECK: if.else:
@@ -79,7 +79,7 @@ if.then4: ; preds = %if.else
if.end7: ; preds = %if.else, %if.then4, %if.then
%x.0 = phi i32* [ %a, %if.then ], [ null, %if.then4 ], [ null, %if.else ]
tail call void @bar() nounwind
- %tmp9 = load i32* %x.0
+ %tmp9 = load i32, i32* %x.0
ret i32 %tmp9
; CHECK-LABEL: @test3(
; CHECK: if.end7:
@@ -106,7 +106,7 @@ if.then4: ; preds = %if.else
if.end7: ; preds = %if.else, %if.then4, %if.then
%x.0 = phi i32* [ %a, %if.then ], [ null, %if.then4 ], [ null, %if.else ]
%gep = getelementptr i32, i32* %x.0, i32 10
- %tmp9 = load i32* %gep
+ %tmp9 = load i32, i32* %gep
%tmp10 = or i32 %tmp9, 1
store i32 %tmp10, i32* %gep
ret i32 %tmp9
diff --git a/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll b/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll
index 7802a059357..ae1794b1c61 100644
--- a/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll
+++ b/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll
@@ -353,7 +353,7 @@ for.cond2:
%tobool = icmp eq i32 %bit.0, 0
br i1 %tobool, label %for.exit, label %for.body3, !prof !10
for.body3:
- %v3 = load i32* @max_regno, align 4
+ %v3 = load i32, i32* @max_regno, align 4
%cmp4 = icmp eq i32 %i.1, %v3
br i1 %cmp4, label %for.exit, label %for.inc, !prof !11
for.inc:
diff --git a/llvm/test/Transforms/SimplifyCFG/speculate-store.ll b/llvm/test/Transforms/SimplifyCFG/speculate-store.ll
index c082f2c08b3..c1ac7bcea24 100644
--- a/llvm/test/Transforms/SimplifyCFG/speculate-store.ll
+++ b/llvm/test/Transforms/SimplifyCFG/speculate-store.ll
@@ -3,14 +3,14 @@
define void @ifconvertstore(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
entry:
%arrayidx = getelementptr inbounds i32, i32* %B, i64 0
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %C
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 0
; First store to the location.
store i32 %add, i32* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %B, i64 1
- %1 = load i32* %arrayidx4, align 4
+ %1 = load i32, i32* %arrayidx4, align 4
%add5 = add nsw i32 %1, %D
%cmp6 = icmp sgt i32 %add5, %C
br i1 %cmp6, label %if.then, label %ret.end
@@ -31,14 +31,14 @@ ret.end:
define void @noifconvertstore1(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
entry:
%arrayidx = getelementptr inbounds i32, i32* %B, i64 0
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %C
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 0
; Store to a different location.
store i32 %add, i32* %arrayidx, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %B, i64 1
- %1 = load i32* %arrayidx4, align 4
+ %1 = load i32, i32* %arrayidx4, align 4
%add5 = add nsw i32 %1, %D
%cmp6 = icmp sgt i32 %add5, %C
br i1 %cmp6, label %if.then, label %ret.end
@@ -58,7 +58,7 @@ declare void @unknown_fun()
define void @noifconvertstore2(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
entry:
%arrayidx = getelementptr inbounds i32, i32* %B, i64 0
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %C
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 0
@@ -66,7 +66,7 @@ entry:
store i32 %add, i32* %arrayidx2, align 4
call void @unknown_fun()
%arrayidx4 = getelementptr inbounds i32, i32* %B, i64 1
- %1 = load i32* %arrayidx4, align 4
+ %1 = load i32, i32* %arrayidx4, align 4
%add5 = add nsw i32 %1, %D
%cmp6 = icmp sgt i32 %add5, %C
br i1 %cmp6, label %if.then, label %ret.end
@@ -84,14 +84,14 @@ ret.end:
define void @noifconvertstore_volatile(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
entry:
%arrayidx = getelementptr inbounds i32, i32* %B, i64 0
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %C
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 0
; First store to the location.
store i32 %add, i32* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %B, i64 1
- %1 = load i32* %arrayidx4, align 4
+ %1 = load i32, i32* %arrayidx4, align 4
%add5 = add nsw i32 %1, %D
%cmp6 = icmp sgt i32 %add5, %C
br i1 %cmp6, label %if.then, label %ret.end
diff --git a/llvm/test/Transforms/SimplifyCFG/speculate-with-offset.ll b/llvm/test/Transforms/SimplifyCFG/speculate-with-offset.ll
index f92e316a9dc..65ebb5c2c3f 100644
--- a/llvm/test/Transforms/SimplifyCFG/speculate-with-offset.ll
+++ b/llvm/test/Transforms/SimplifyCFG/speculate-with-offset.ll
@@ -17,7 +17,7 @@ if.then: ; preds = %entry
br label %return
if.end: ; preds = %entry
- %tmp5 = load i64** %__a.addr, align 8
+ %tmp5 = load i64*, i64** %__a.addr, align 8
br label %return
return: ; preds = %if.end, %if.then
@@ -39,7 +39,7 @@ if.then: ; preds = %entry
br label %return
if.end: ; preds = %entry
- %tmp5 = load i64** %__a.addr, align 8
+ %tmp5 = load i64*, i64** %__a.addr, align 8
br label %return
return: ; preds = %if.end, %if.then
@@ -61,7 +61,7 @@ if.then: ; preds = %entry
br label %return
if.end: ; preds = %entry
- %tmp5 = load i64** %__a.addr, align 8
+ %tmp5 = load i64*, i64** %__a.addr, align 8
br label %return
return: ; preds = %if.end, %if.then
@@ -83,7 +83,7 @@ if.then: ; preds = %entry
br label %return
if.end: ; preds = %entry
- %tmp5 = load i64** %__a.addr, align 8
+ %tmp5 = load i64*, i64** %__a.addr, align 8
br label %return
return: ; preds = %if.end, %if.then
diff --git a/llvm/test/Transforms/SimplifyCFG/switch-to-select-multiple-edge-per-block-phi.ll b/llvm/test/Transforms/SimplifyCFG/switch-to-select-multiple-edge-per-block-phi.ll
index ddf5d1f06ce..f5359b5b2b3 100644
--- a/llvm/test/Transforms/SimplifyCFG/switch-to-select-multiple-edge-per-block-phi.ll
+++ b/llvm/test/Transforms/SimplifyCFG/switch-to-select-multiple-edge-per-block-phi.ll
@@ -20,12 +20,12 @@ define i32 @fn1() {
; CHECK: %switch.selectcmp1 = icmp eq i32 %1, 5
; CHECK: %switch.select2 = select i1 %switch.selectcmp1, i32 5, i32 %switch.select
entry:
- %0 = load i32* @b, align 4
+ %0 = load i32, i32* @b, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end3, label %if.then
if.then:
- %1 = load i32* @a, align 4
+ %1 = load i32, i32* @a, align 4
switch i32 %1, label %if.end3 [
i32 5, label %return
i32 0, label %return
diff --git a/llvm/test/Transforms/SimplifyCFG/switch_create.ll b/llvm/test/Transforms/SimplifyCFG/switch_create.ll
index f14193176a0..490b7513a94 100644
--- a/llvm/test/Transforms/SimplifyCFG/switch_create.ll
+++ b/llvm/test/Transforms/SimplifyCFG/switch_create.ll
@@ -155,7 +155,7 @@ lor.end: ; preds = %entry, %entry, %ent
define i1 @test6({ i32, i32 }* %I) {
entry:
%tmp.1.i = getelementptr { i32, i32 }, { i32, i32 }* %I, i64 0, i32 1 ; <i32*> [#uses=1]
- %tmp.2.i = load i32* %tmp.1.i ; <i32> [#uses=6]
+ %tmp.2.i = load i32, i32* %tmp.1.i ; <i32> [#uses=6]
%tmp.2 = icmp eq i32 %tmp.2.i, 14 ; <i1> [#uses=1]
br i1 %tmp.2, label %shortcirc_done.4, label %shortcirc_next.0
shortcirc_next.0: ; preds = %entry
diff --git a/llvm/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll b/llvm/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
index 5ae62af5458..5881367d961 100644
--- a/llvm/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
+++ b/llvm/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
@@ -11,7 +11,7 @@ entry:
br i1 %0, label %bb, label %return
bb: ; preds = %entry
- %1 = load volatile i32* null
+ %1 = load volatile i32, i32* null
unreachable
br label %return
diff --git a/llvm/test/Transforms/SimplifyCFG/unreachable-blocks.ll b/llvm/test/Transforms/SimplifyCFG/unreachable-blocks.ll
index 0aaafec604a..87a64ad47c6 100644
--- a/llvm/test/Transforms/SimplifyCFG/unreachable-blocks.ll
+++ b/llvm/test/Transforms/SimplifyCFG/unreachable-blocks.ll
@@ -15,7 +15,7 @@ while_block: ; preds = %and_if_cont2, %and_
br i1 %test, label %and_if1, label %and_if_cont2
and_if1: ; preds = %while_block
- %char = load i8* %newptr
+ %char = load i8, i8* %newptr
%test2 = icmp ule i8 %char, 32
br label %and_if_cont2
diff --git a/llvm/test/Transforms/Sink/basic.ll b/llvm/test/Transforms/Sink/basic.ll
index c56922a859b..1bbf161921d 100644
--- a/llvm/test/Transforms/Sink/basic.ll
+++ b/llvm/test/Transforms/Sink/basic.ll
@@ -8,11 +8,11 @@
; CHECK-LABEL: @foo(
; CHECK: true:
-; CHECK-NEXT: %l = load i32* @A
+; CHECK-NEXT: %l = load i32, i32* @A
; CHECK-NEXT: ret i32 %l
define i32 @foo(i1 %z) {
- %l = load i32* @A
+ %l = load i32, i32* @A
store i32 0, i32* @B
br i1 %z, label %true, label %false
true:
@@ -28,7 +28,7 @@ false:
; CHECK-NEXT: store i32
define i32 @foo2(i1 %z) {
- %l = load volatile i32* @A
+ %l = load volatile i32, i32* @A
store i32 0, i32* @B
br i1 %z, label %true, label %false
true:
@@ -79,7 +79,7 @@ if:
store i32 0, i32* %0
store i32 1, i32* %2
%3 = getelementptr i32, i32* %0, i32 %b
- %4 = load i32* %3
+ %4 = load i32, i32* %3
ret i32 %4
endif:
@@ -104,7 +104,7 @@ if:
store i32 0, i32* %0
store i32 1, i32* %2
%3 = getelementptr i32, i32* %0, i32 %b
- %4 = load i32* %3
+ %4 = load i32, i32* %3
ret i32 %4
endif:
@@ -135,7 +135,7 @@ if:
store i32 0, i32* %0
store i32 1, i32* %2
%3 = getelementptr i32, i32* %0, i32 %b
- %4 = load i32* %3
+ %4 = load i32, i32* %3
ret i32 %4
endif:
diff --git a/llvm/test/Transforms/StripSymbols/strip-dead-debug-info.ll b/llvm/test/Transforms/StripSymbols/strip-dead-debug-info.ll
index aca7cd60847..722d2b7cd78 100644
--- a/llvm/test/Transforms/StripSymbols/strip-dead-debug-info.ll
+++ b/llvm/test/Transforms/StripSymbols/strip-dead-debug-info.ll
@@ -19,7 +19,7 @@ entry:
define i32 @foo(i32 %i) #2 {
entry:
tail call void @llvm.dbg.value(metadata i32 %i, i64 0, metadata !15, metadata !{}), !dbg !20
- %.0 = load i32* @xyz, align 4
+ %.0 = load i32, i32* @xyz, align 4
ret i32 %.0, !dbg !21
}
diff --git a/llvm/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll b/llvm/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll
index 8aed08b6e8d..ba9aa291306 100644
--- a/llvm/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll
+++ b/llvm/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll
@@ -29,10 +29,10 @@ for.body: ; preds = %for.body.backedge,
; CHECK: br label %Flow
lor.lhs.false: ; preds = %for.body
%arrayidx = getelementptr inbounds float, float* %nr, i64 %indvars.iv
- %tmp1 = load float* %arrayidx, align 4
+ %tmp1 = load float, float* %arrayidx, align 4
%tmp2 = add nsw i64 %indvars.iv, -1
%arrayidx2 = getelementptr inbounds float, float* %nr, i64 %tmp2
- %tmp3 = load float* %arrayidx2, align 4
+ %tmp3 = load float, float* %arrayidx2, align 4
%cmp3 = fcmp une float %tmp1, %tmp3
br i1 %cmp3, label %if.then, label %for.body.1
@@ -45,7 +45,7 @@ if.then: ; preds = %lor.lhs.false, %for
%sub4 = sub nsw i32 %tmp0, %prev_start.026
%tmp4 = add nsw i64 %indvars.iv, -1
%arrayidx8 = getelementptr inbounds float, float* %nr, i64 %tmp4
- %tmp5 = load float* %arrayidx8, align 4
+ %tmp5 = load float, float* %arrayidx8, align 4
br i1 %cmp1, label %for.end, label %for.body.1
; CHECK: for.end:
@@ -84,7 +84,7 @@ for.body.6: ; preds = %for.body.1
; CHECK: br label %for.body.backedge
if.then6.6: ; preds = %for.body.6
%arrayidx8.6 = getelementptr inbounds float, float* %nr, i64 %indvars.iv.next.454
- %tmp29 = load float* %arrayidx8.6, align 4
+ %tmp29 = load float, float* %arrayidx8.6, align 4
br label %for.body.backedge
; CHECK: Flow3:
diff --git a/llvm/test/Transforms/TailCallElim/basic.ll b/llvm/test/Transforms/TailCallElim/basic.ll
index 8e9814b52bb..2488b552d8f 100644
--- a/llvm/test/Transforms/TailCallElim/basic.ll
+++ b/llvm/test/Transforms/TailCallElim/basic.ll
@@ -183,7 +183,7 @@ define void @test11() {
%a = alloca i8*
%b = alloca i8
call void @test11_helper1(i8** %a, i8* %b) ; a = &b
- %c = load i8** %a
+ %c = load i8*, i8** %a
call void @test11_helper2(i8* %c)
; CHECK: call void @test11_helper2
ret void
diff --git a/llvm/test/Transforms/TailCallElim/dont_reorder_load.ll b/llvm/test/Transforms/TailCallElim/dont_reorder_load.ll
index a29b72e9423..ac399a1bf5a 100644
--- a/llvm/test/Transforms/TailCallElim/dont_reorder_load.ll
+++ b/llvm/test/Transforms/TailCallElim/dont_reorder_load.ll
@@ -21,7 +21,7 @@ if: ; preds = %entry
else: ; preds = %entry
%tmp7 = add i32 %start_arg, 1 ; <i32> [#uses=1]
%tmp8 = call fastcc i32 @no_tailrecelim_1(i32* %a_arg, i32 %a_len_arg, i32 %tmp7) ; <i32> [#uses=1]
- %tmp9 = load i32* @extern_weak_global ; <i32> [#uses=1]
+ %tmp9 = load i32, i32* @extern_weak_global ; <i32> [#uses=1]
%tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
ret i32 %tmp10
}
@@ -40,7 +40,7 @@ if: ; preds = %entry
else: ; preds = %entry
%tmp7 = add i32 %start_arg, 1 ; <i32> [#uses=1]
%tmp8 = call fastcc i32 @no_tailrecelim_2(i32* %a_arg, i32 %a_len_arg, i32 %tmp7) ; <i32> [#uses=1]
- %tmp9 = load i32* %a_arg ; <i32> [#uses=1]
+ %tmp9 = load i32, i32* %a_arg ; <i32> [#uses=1]
%tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
ret i32 %tmp10
}
@@ -58,7 +58,7 @@ if: ; preds = %entry
else: ; preds = %entry
%tmp7 = add i32 %start_arg, 1 ; <i32> [#uses=1]
%tmp8 = call fastcc i32 @no_tailrecelim_3(i32* %a_arg, i32 %a_len_arg, i32 %tmp7) ; <i32> [#uses=1]
- %tmp9 = load volatile i32* %a_arg ; <i32> [#uses=1]
+ %tmp9 = load volatile i32, i32* %a_arg ; <i32> [#uses=1]
%tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
ret i32 %tmp10
}
diff --git a/llvm/test/Transforms/TailCallElim/reorder_load.ll b/llvm/test/Transforms/TailCallElim/reorder_load.ll
index 2e350d662a3..b989bbf9547 100644
--- a/llvm/test/Transforms/TailCallElim/reorder_load.ll
+++ b/llvm/test/Transforms/TailCallElim/reorder_load.ll
@@ -16,7 +16,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define fastcc i32 @raise_load_1(i32* %a_arg, i32 %a_len_arg, i32 %start_arg) nounwind readonly {
; CHECK-LABEL: @raise_load_1(
; CHECK-NOT: call
-; CHECK: load i32*
+; CHECK: load i32, i32*
; CHECK-NOT: call
; CHECK: }
entry:
@@ -29,7 +29,7 @@ if: ; preds = %entry
else: ; preds = %entry
%tmp7 = add i32 %start_arg, 1 ; <i32> [#uses=1]
%tmp8 = call fastcc i32 @raise_load_1(i32* %a_arg, i32 %a_len_arg, i32 %tmp7) ; <i32> [#uses=1]
- %tmp9 = load i32* %a_arg ; <i32> [#uses=1]
+ %tmp9 = load i32, i32* %a_arg ; <i32> [#uses=1]
%tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
ret i32 %tmp10
}
@@ -40,7 +40,7 @@ else: ; preds = %entry
define fastcc i32 @raise_load_2(i32* %a_arg, i32 %a_len_arg, i32 %start_arg) readonly {
; CHECK-LABEL: @raise_load_2(
; CHECK-NOT: call
-; CHECK: load i32*
+; CHECK: load i32, i32*
; CHECK-NOT: call
; CHECK: }
entry:
@@ -60,7 +60,7 @@ unwind: ; preds = %else
recurse: ; preds = %else
%tmp7 = add i32 %start_arg, 1 ; <i32> [#uses=1]
%tmp8 = call fastcc i32 @raise_load_2(i32* %a_arg, i32 %a_len_arg, i32 %tmp7) ; <i32> [#uses=1]
- %tmp9 = load i32* @global ; <i32> [#uses=1]
+ %tmp9 = load i32, i32* @global ; <i32> [#uses=1]
%tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
ret i32 %tmp10
}
@@ -71,7 +71,7 @@ recurse: ; preds = %else
define fastcc i32 @raise_load_3(i32* %a_arg, i32 %a_len_arg, i32 %start_arg) nounwind readonly {
; CHECK-LABEL: @raise_load_3(
; CHECK-NOT: call
-; CHECK: load i32*
+; CHECK: load i32, i32*
; CHECK-NOT: call
; CHECK: }
entry:
@@ -84,7 +84,7 @@ if: ; preds = %entry
else: ; preds = %entry
%tmp7 = add i32 %start_arg, 1 ; <i32> [#uses=1]
%tmp8 = call fastcc i32 @raise_load_3(i32* %a_arg, i32 %a_len_arg, i32 %tmp7) ; <i32> [#uses=1]
- %tmp9 = load i32* @extern_weak_global ; <i32> [#uses=1]
+ %tmp9 = load i32, i32* @extern_weak_global ; <i32> [#uses=1]
%tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
ret i32 %tmp10
}
@@ -96,8 +96,8 @@ else: ; preds = %entry
define fastcc i32 @raise_load_4(i32* %a_arg, i32 %a_len_arg, i32 %start_arg) readonly {
; CHECK-LABEL: @raise_load_4(
; CHECK-NOT: call
-; CHECK: load i32*
-; CHECK-NEXT: load i32*
+; CHECK: load i32, i32*
+; CHECK-NEXT: load i32, i32*
; CHECK-NOT: call
; CHECK: }
entry:
@@ -116,9 +116,9 @@ unwind: ; preds = %else
recurse: ; preds = %else
%tmp7 = add i32 %start_arg, 1 ; <i32> [#uses=1]
- %first = load i32* %a_arg ; <i32> [#uses=1]
+ %first = load i32, i32* %a_arg ; <i32> [#uses=1]
%tmp8 = call fastcc i32 @raise_load_4(i32* %a_arg, i32 %first, i32 %tmp7) ; <i32> [#uses=1]
- %second = load i32* %a_arg ; <i32> [#uses=1]
+ %second = load i32, i32* %a_arg ; <i32> [#uses=1]
%tmp10 = add i32 %second, %tmp8 ; <i32> [#uses=1]
ret i32 %tmp10
}
OpenPOWER on IntegriCloud