Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r--llvm/test/CodeGen/X86/2005-01-17-CycleInDAG.ll4
-rw-r--r--llvm/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll6
-rw-r--r--llvm/test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll8
-rw-r--r--llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll8
-rw-r--r--llvm/test/CodeGen/X86/2006-05-02-InstrSched2.ll4
-rw-r--r--llvm/test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll4
-rw-r--r--llvm/test/CodeGen/X86/2006-05-08-InstrSched.ll8
-rw-r--r--llvm/test/CodeGen/X86/2006-05-11-InstrSched.ll8
-rw-r--r--llvm/test/CodeGen/X86/2006-05-25-CycleInDAG.ll2
-rw-r--r--llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll4
-rw-r--r--llvm/test/CodeGen/X86/2006-08-07-CycleInDAG.ll4
-rw-r--r--llvm/test/CodeGen/X86/2006-08-16-CycleInDAG.ll2
-rw-r--r--llvm/test/CodeGen/X86/2006-09-01-CycleInDAG.ll10
-rw-r--r--llvm/test/CodeGen/X86/2006-10-09-CycleInDAG.ll4
-rw-r--r--llvm/test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll6
-rw-r--r--llvm/test/CodeGen/X86/2006-10-12-CycleInDAG.ll4
-rw-r--r--llvm/test/CodeGen/X86/2006-10-13-CycleInDAG.ll4
-rw-r--r--llvm/test/CodeGen/X86/2006-11-12-CSRetCC.ll20
-rw-r--r--llvm/test/CodeGen/X86/2006-11-17-IllegalMove.ll6
-rw-r--r--llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll60
-rw-r--r--llvm/test/CodeGen/X86/2007-02-04-OrAddrMode.ll2
-rw-r--r--llvm/test/CodeGen/X86/2007-02-16-BranchFold.ll8
-rw-r--r--llvm/test/CodeGen/X86/2007-02-19-LiveIntervalAssert.ll4
-rw-r--r--llvm/test/CodeGen/X86/2007-03-01-SpillerCrash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll20
-rw-r--r--llvm/test/CodeGen/X86/2007-03-16-InlineAsm.ll8
-rw-r--r--llvm/test/CodeGen/X86/2007-03-26-CoalescerBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll2
-rw-r--r--llvm/test/CodeGen/X86/2007-05-05-VecCastExpand.ll4
-rw-r--r--llvm/test/CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2007-07-10-StackerAssert.ll4
-rw-r--r--llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll4
-rw-r--r--llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll68
-rw-r--r--llvm/test/CodeGen/X86/2007-09-05-InvalidAsm.ll10
-rw-r--r--llvm/test/CodeGen/X86/2007-10-04-AvoidEFLAGSCopy.ll2
-rw-r--r--llvm/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll8
-rw-r--r--llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll8
-rw-r--r--llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll8
-rw-r--r--llvm/test/CodeGen/X86/2007-10-14-CoalescerCrash.ll4
-rw-r--r--llvm/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll8
-rw-r--r--llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll2
-rw-r--r--llvm/test/CodeGen/X86/2007-10-31-extractelement-i64.ll24
-rw-r--r--llvm/test/CodeGen/X86/2007-11-04-LiveIntervalCrash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll4
-rw-r--r--llvm/test/CodeGen/X86/2007-11-07-MulBy4.ll4
-rw-r--r--llvm/test/CodeGen/X86/2007-12-16-BURRSchedCrash.ll4
-rw-r--r--llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll4
-rw-r--r--llvm/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll10
-rw-r--r--llvm/test/CodeGen/X86/2008-01-16-InvalidDAGCombineXform.ll22
-rw-r--r--llvm/test/CodeGen/X86/2008-02-05-ISelCrash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll4
-rw-r--r--llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll24
-rw-r--r--llvm/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll28
-rw-r--r--llvm/test/CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll8
-rw-r--r--llvm/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll6
-rw-r--r--llvm/test/CodeGen/X86/2008-03-07-APIntBug.ll20
-rw-r--r--llvm/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll8
-rw-r--r--llvm/test/CodeGen/X86/2008-03-14-SpillerCrash.ll4
-rw-r--r--llvm/test/CodeGen/X86/2008-03-23-DarwinAsmComments.ll10
-rw-r--r--llvm/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-04-09-BranchFolding.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll4
-rw-r--r--llvm/test/CodeGen/X86/2008-04-16-CoalescerBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-04-17-CoalescerBug.ll4
-rw-r--r--llvm/test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-05-12-tailmerge-5.ll20
-rw-r--r--llvm/test/CodeGen/X86/2008-05-21-CoalescerBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll4
-rw-r--r--llvm/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll4
-rw-r--r--llvm/test/CodeGen/X86/2008-06-16-SubregsBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-07-07-DanglingDeadInsts.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-07-19-movups-spills.ll128
-rw-r--r--llvm/test/CodeGen/X86/2008-07-22-CombinerCrash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-08-06-RewriterBug.ll10
-rw-r--r--llvm/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-09-09-LinearScanBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-09-11-CoalescerBug.ll4
-rw-r--r--llvm/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll4
-rw-r--r--llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll6
-rw-r--r--llvm/test/CodeGen/X86/2008-09-19-RegAllocBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-09-29-ReMatBug.ll8
-rw-r--r--llvm/test/CodeGen/X86/2008-09-29-VolatileBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-10-07-SSEISelBug.ll6
-rw-r--r--llvm/test/CodeGen/X86/2008-10-11-CallCrash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-10-27-CoalescerBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-11-06-testb.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll2
-rw-r--r--llvm/test/CodeGen/X86/2008-12-02-IllegalResultType.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-01-16-SchedulerBug.ll4
-rw-r--r--llvm/test/CodeGen/X86/2009-01-18-ConstantExprCrash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-01-31-BigShift2.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-02-01-LargeMask.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll6
-rw-r--r--llvm/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll26
-rw-r--r--llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll6
-rw-r--r--llvm/test/CodeGen/X86/2009-03-03-BTHang.ll4
-rw-r--r--llvm/test/CodeGen/X86/2009-03-05-burr-list-crash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-03-09-APIntCrash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-03-10-CoalescerBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-03-23-LinearScanBug.ll6
-rw-r--r--llvm/test/CodeGen/X86/2009-03-23-MultiUseSched.ll48
-rw-r--r--llvm/test/CodeGen/X86/2009-03-25-TestBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-04-14-IllegalRegs.ll6
-rw-r--r--llvm/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll16
-rw-r--r--llvm/test/CodeGen/X86/2009-04-24.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-04-27-CoalescerAssert.ll170
-rw-r--r--llvm/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll8
-rw-r--r--llvm/test/CodeGen/X86/2009-04-29-LinearScanBug.ll26
-rw-r--r--llvm/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-04-scale.ll4
-rw-r--r--llvm/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-05-30-ISelBug.ll4
-rw-r--r--llvm/test/CodeGen/X86/2009-06-02-RewriterBug.ll40
-rw-r--r--llvm/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-06-05-VZextByteShort.ll8
-rw-r--r--llvm/test/CodeGen/X86/2009-07-15-CoalescerBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-08-06-branchfolder-crash.ll8
-rw-r--r--llvm/test/CodeGen/X86/2009-08-14-Win64MemoryIndirectArg.ll6
-rw-r--r--llvm/test/CodeGen/X86/2009-08-19-LoadNarrowingMiscompile.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll6
-rw-r--r--llvm/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-09-10-SpillComments.ll28
-rw-r--r--llvm/test/CodeGen/X86/2009-09-16-CoalescerBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll4
-rw-r--r--llvm/test/CodeGen/X86/2009-09-22-CoalescerBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-10-19-EmergencySpill.ll10
-rw-r--r--llvm/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll4
-rw-r--r--llvm/test/CodeGen/X86/2009-10-25-RewriterBug.ll8
-rw-r--r--llvm/test/CodeGen/X86/2009-11-16-MachineLICM.ll8
-rw-r--r--llvm/test/CodeGen/X86/2009-11-25-ImpDefBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll8
-rw-r--r--llvm/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll10
-rw-r--r--llvm/test/CodeGen/X86/20090313-signext.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-01-13-OptExtBug.ll10
-rw-r--r--llvm/test/CodeGen/X86/2010-01-15-SelectionDAGCycle.ll4
-rw-r--r--llvm/test/CodeGen/X86/2010-01-18-DbgValue.ll6
-rw-r--r--llvm/test/CodeGen/X86/2010-01-19-OptExtBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-02-04-SchedulerBug.ll8
-rw-r--r--llvm/test/CodeGen/X86/2010-02-11-NonTemporal.ll4
-rw-r--r--llvm/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll18
-rw-r--r--llvm/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll4
-rw-r--r--llvm/test/CodeGen/X86/2010-03-17-ISelBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-04-13-AnalyzeBranchCrash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll12
-rw-r--r--llvm/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll4
-rw-r--r--llvm/test/CodeGen/X86/2010-05-07-ldconvert.ll4
-rw-r--r--llvm/test/CodeGen/X86/2010-05-10-DAGCombinerBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-05-16-nosseconversion.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-05-26-FP_TO_INT-crash.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll6
-rw-r--r--llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll4
-rw-r--r--llvm/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-07-02-UnfoldBug.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-07-11-FPStackLoneUse.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll4
-rw-r--r--llvm/test/CodeGen/X86/2010-08-04-StackVariable.ll8
-rw-r--r--llvm/test/CodeGen/X86/2010-09-01-RemoveCopyByCommutingDef.ll2
-rw-r--r--llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll4
-rw-r--r--llvm/test/CodeGen/X86/2010-11-09-MOVLPS.ll14
-rw-r--r--llvm/test/CodeGen/X86/2010-11-18-SelectOfExtload.ll4
-rw-r--r--llvm/test/CodeGen/X86/2011-02-12-shuffle.ll2
-rw-r--r--llvm/test/CodeGen/X86/2011-03-02-DAGCombiner.ll14
-rw-r--r--llvm/test/CodeGen/X86/2011-03-09-Physreg-Coalescing.ll2
-rw-r--r--llvm/test/CodeGen/X86/2011-04-13-SchedCmpJmp.ll6
-rw-r--r--llvm/test/CodeGen/X86/2011-05-09-loaduse.ll2
-rw-r--r--llvm/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll2
-rw-r--r--llvm/test/CodeGen/X86/2011-05-27-CrossClassCoalescing.ll4
-rw-r--r--llvm/test/CodeGen/X86/2011-06-01-fildll.ll2
-rw-r--r--llvm/test/CodeGen/X86/2011-06-03-x87chain.ll6
-rw-r--r--llvm/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll6
-rw-r--r--llvm/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll2
-rw-r--r--llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll2
-rw-r--r--llvm/test/CodeGen/X86/2011-09-21-setcc-bug.ll16
-rw-r--r--llvm/test/CodeGen/X86/2011-10-11-srl.ll2
-rw-r--r--llvm/test/CodeGen/X86/2011-10-12-MachineCSE.ll32
-rw-r--r--llvm/test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll10
-rw-r--r--llvm/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll4
-rw-r--r--llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll4
-rw-r--r--llvm/test/CodeGen/X86/2011-10-27-tstore.ll2
-rw-r--r--llvm/test/CodeGen/X86/2011-11-22-AVX2-Domains.ll18
-rw-r--r--llvm/test/CodeGen/X86/2011-12-08-AVXISelBugs.ll8
-rw-r--r--llvm/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll2
-rw-r--r--llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll2
-rw-r--r--llvm/test/CodeGen/X86/2012-01-11-split-cv.ll2
-rw-r--r--llvm/test/CodeGen/X86/2012-01-12-extract-sv.ll2
-rw-r--r--llvm/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll2
-rw-r--r--llvm/test/CodeGen/X86/2012-02-12-dagco.ll4
-rw-r--r--llvm/test/CodeGen/X86/2012-02-29-CoalescerBug.ll4
-rw-r--r--llvm/test/CodeGen/X86/2012-03-26-PostRALICMBug.ll8
-rw-r--r--llvm/test/CodeGen/X86/2012-04-26-sdglue.ll4
-rw-r--r--llvm/test/CodeGen/X86/2012-07-10-extload64.ll4
-rw-r--r--llvm/test/CodeGen/X86/2012-07-15-broadcastfold.ll2
-rw-r--r--llvm/test/CodeGen/X86/2012-08-17-legalizer-crash.ll4
-rw-r--r--llvm/test/CodeGen/X86/2012-09-28-CGPBug.ll6
-rw-r--r--llvm/test/CodeGen/X86/2012-10-02-DAGCycle.ll8
-rw-r--r--llvm/test/CodeGen/X86/2012-10-03-DAGCycle.ll4
-rw-r--r--llvm/test/CodeGen/X86/2012-10-18-crash-dagco.ll14
-rw-r--r--llvm/test/CodeGen/X86/2012-11-28-merge-store-alias.ll4
-rw-r--r--llvm/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll2
-rw-r--r--llvm/test/CodeGen/X86/2012-11-30-misched-dbg.ll2
-rw-r--r--llvm/test/CodeGen/X86/2012-12-06-python27-miscompile.ll2
-rw-r--r--llvm/test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll2
-rw-r--r--llvm/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll2
-rw-r--r--llvm/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll6
-rw-r--r--llvm/test/CodeGen/X86/Atomics-64.ll200
-rw-r--r--llvm/test/CodeGen/X86/GC/alloc_loop.ll4
-rw-r--r--llvm/test/CodeGen/X86/GC/argpromotion.ll2
-rw-r--r--llvm/test/CodeGen/X86/GC/inline.ll2
-rw-r--r--llvm/test/CodeGen/X86/GC/inline2.ll2
-rw-r--r--llvm/test/CodeGen/X86/MachineBranchProb.ll2
-rw-r--r--llvm/test/CodeGen/X86/MachineSink-DbgValue.ll2
-rw-r--r--llvm/test/CodeGen/X86/MachineSink-eflags.ll12
-rw-r--r--llvm/test/CodeGen/X86/MergeConsecutiveStores.ll50
-rw-r--r--llvm/test/CodeGen/X86/StackColoring.ll2
-rw-r--r--llvm/test/CodeGen/X86/SwitchLowering.ll2
-rw-r--r--llvm/test/CodeGen/X86/SwizzleShuff.ll20
-rw-r--r--llvm/test/CodeGen/X86/abi-isel.ll182
-rw-r--r--llvm/test/CodeGen/X86/addr-mode-matcher.ll4
-rw-r--r--llvm/test/CodeGen/X86/address-type-promotion-constantexpr.ll2
-rw-r--r--llvm/test/CodeGen/X86/aliases.ll6
-rw-r--r--llvm/test/CodeGen/X86/aligned-variadic.ll2
-rw-r--r--llvm/test/CodeGen/X86/and-su.ll2
-rw-r--r--llvm/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll28
-rw-r--r--llvm/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll42
-rw-r--r--llvm/test/CodeGen/X86/atom-call-reg-indirect.ll8
-rw-r--r--llvm/test/CodeGen/X86/atom-cmpb.ll4
-rw-r--r--llvm/test/CodeGen/X86/atom-fixup-lea1.ll2
-rw-r--r--llvm/test/CodeGen/X86/atom-fixup-lea2.ll12
-rw-r--r--llvm/test/CodeGen/X86/atom-fixup-lea3.ll6
-rw-r--r--llvm/test/CodeGen/X86/atom-fixup-lea4.ll2
-rw-r--r--llvm/test/CodeGen/X86/atom-lea-addw-bug.ll6
-rw-r--r--llvm/test/CodeGen/X86/atom-sched.ll8
-rw-r--r--llvm/test/CodeGen/X86/atomic-dagsched.ll18
-rw-r--r--llvm/test/CodeGen/X86/atomic-load-store-wide.ll2
-rw-r--r--llvm/test/CodeGen/X86/atomic-load-store.ll2
-rw-r--r--llvm/test/CodeGen/X86/atomic-or.ll4
-rw-r--r--llvm/test/CodeGen/X86/atomic-pointer.ll2
-rw-r--r--llvm/test/CodeGen/X86/atomic128.ll4
-rw-r--r--llvm/test/CodeGen/X86/atomic_mi.ll60
-rw-r--r--llvm/test/CodeGen/X86/atomic_op.ll2
-rw-r--r--llvm/test/CodeGen/X86/avoid-loop-align-2.ll4
-rw-r--r--llvm/test/CodeGen/X86/avoid-loop-align.ll2
-rw-r--r--llvm/test/CodeGen/X86/avoid_complex_am.ll4
-rw-r--r--llvm/test/CodeGen/X86/avx-arith.ll6
-rw-r--r--llvm/test/CodeGen/X86/avx-basic.ll4
-rw-r--r--llvm/test/CodeGen/X86/avx-bitcast.ll2
-rw-r--r--llvm/test/CodeGen/X86/avx-cvt.ll10
-rw-r--r--llvm/test/CodeGen/X86/avx-intel-ocl.ll4
-rw-r--r--llvm/test/CodeGen/X86/avx-intrinsics-x86.ll14
-rw-r--r--llvm/test/CodeGen/X86/avx-load-store.ll16
-rw-r--r--llvm/test/CodeGen/X86/avx-logic.ll4
-rw-r--r--llvm/test/CodeGen/X86/avx-splat.ll2
-rw-r--r--llvm/test/CodeGen/X86/avx-unpack.ll16
-rw-r--r--llvm/test/CodeGen/X86/avx-varargs-x86_64.ll2
-rw-r--r--llvm/test/CodeGen/X86/avx-vbroadcast.ll30
-rw-r--r--llvm/test/CodeGen/X86/avx-vinsertf128.ll4
-rw-r--r--llvm/test/CodeGen/X86/avx-vperm2x128.ll4
-rw-r--r--llvm/test/CodeGen/X86/avx-vzeroupper.ll4
-rw-r--r--llvm/test/CodeGen/X86/avx.ll12
-rw-r--r--llvm/test/CodeGen/X86/avx1-logical-load-folding.ll8
-rw-r--r--llvm/test/CodeGen/X86/avx2-conversions.ll10
-rw-r--r--llvm/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll24
-rw-r--r--llvm/test/CodeGen/X86/avx2-shift.ll20
-rw-r--r--llvm/test/CodeGen/X86/avx2-vbroadcast.ll50
-rw-r--r--llvm/test/CodeGen/X86/avx512-arith.ll28
-rw-r--r--llvm/test/CodeGen/X86/avx512-build-vector.ll2
-rw-r--r--llvm/test/CodeGen/X86/avx512-cvt.ll12
-rw-r--r--llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll8
-rw-r--r--llvm/test/CodeGen/X86/avx512-i1test.ll2
-rw-r--r--llvm/test/CodeGen/X86/avx512-insert-extract.ll10
-rw-r--r--llvm/test/CodeGen/X86/avx512-intel-ocl.ll4
-rw-r--r--llvm/test/CodeGen/X86/avx512-intrinsics.ll4
-rw-r--r--llvm/test/CodeGen/X86/avx512-logic.ll4
-rw-r--r--llvm/test/CodeGen/X86/avx512-mask-op.ll6
-rw-r--r--llvm/test/CodeGen/X86/avx512-mov.ll60
-rw-r--r--llvm/test/CodeGen/X86/avx512-round.ll2
-rw-r--r--llvm/test/CodeGen/X86/avx512-shift.ll8
-rw-r--r--llvm/test/CodeGen/X86/avx512-vbroadcast.ll12
-rw-r--r--llvm/test/CodeGen/X86/avx512-vec-cmp.ll20
-rw-r--r--llvm/test/CodeGen/X86/avx512bw-arith.ll8
-rw-r--r--llvm/test/CodeGen/X86/avx512bw-mask-op.ll4
-rw-r--r--llvm/test/CodeGen/X86/avx512bw-mov.ll12
-rw-r--r--llvm/test/CodeGen/X86/avx512bw-vec-cmp.ll12
-rw-r--r--llvm/test/CodeGen/X86/avx512bwvl-arith.ll16
-rw-r--r--llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll28
-rw-r--r--llvm/test/CodeGen/X86/avx512bwvl-mov.ll24
-rw-r--r--llvm/test/CodeGen/X86/avx512bwvl-vec-cmp.ll24
-rw-r--r--llvm/test/CodeGen/X86/avx512dq-mask-op.ll2
-rw-r--r--llvm/test/CodeGen/X86/avx512er-intrinsics.ll4
-rw-r--r--llvm/test/CodeGen/X86/avx512vl-arith.ll40
-rw-r--r--llvm/test/CodeGen/X86/avx512vl-intrinsics.ll4
-rw-r--r--llvm/test/CodeGen/X86/avx512vl-mov.ll96
-rw-r--r--llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll40
-rw-r--r--llvm/test/CodeGen/X86/bitcast-mmx.ll4
-rw-r--r--llvm/test/CodeGen/X86/block-placement.ll186
-rw-r--r--llvm/test/CodeGen/X86/bmi.ll22
-rw-r--r--llvm/test/CodeGen/X86/break-anti-dependencies.ll4
-rw-r--r--llvm/test/CodeGen/X86/break-false-dep.ll24
-rw-r--r--llvm/test/CodeGen/X86/bswap.ll6
-rw-r--r--llvm/test/CodeGen/X86/byval-align.ll8
-rw-r--r--llvm/test/CodeGen/X86/byval.ll2
-rw-r--r--llvm/test/CodeGen/X86/call-push.ll2
-rw-r--r--llvm/test/CodeGen/X86/cas.ll24
-rw-r--r--llvm/test/CodeGen/X86/chain_order.ll8
-rw-r--r--llvm/test/CodeGen/X86/change-compare-stride-1.ll18
-rw-r--r--llvm/test/CodeGen/X86/clobber-fi0.ll6
-rw-r--r--llvm/test/CodeGen/X86/cmov-into-branch.ll8
-rw-r--r--llvm/test/CodeGen/X86/cmov.ll16
-rw-r--r--llvm/test/CodeGen/X86/cmp.ll8
-rw-r--r--llvm/test/CodeGen/X86/cmpxchg-clobber-flags.ll2
-rw-r--r--llvm/test/CodeGen/X86/cmpxchg-i1.ll2
-rw-r--r--llvm/test/CodeGen/X86/cmpxchg-i128-i1.ll2
-rw-r--r--llvm/test/CodeGen/X86/coalesce-esp.ll2
-rw-r--r--llvm/test/CodeGen/X86/coalesce-implicitdef.ll12
-rw-r--r--llvm/test/CodeGen/X86/coalescer-commute1.ll4
-rw-r--r--llvm/test/CodeGen/X86/coalescer-commute4.ll4
-rw-r--r--llvm/test/CodeGen/X86/coalescer-cross.ll4
-rw-r--r--llvm/test/CodeGen/X86/coalescer-dce2.ll16
-rw-r--r--llvm/test/CodeGen/X86/coalescer-identity.ll6
-rw-r--r--llvm/test/CodeGen/X86/code_placement.ll38
-rw-r--r--llvm/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll72
-rw-r--r--llvm/test/CodeGen/X86/codegen-prepare-cast.ll4
-rw-r--r--llvm/test/CodeGen/X86/codegen-prepare-extload.ll44
-rw-r--r--llvm/test/CodeGen/X86/codegen-prepare.ll4
-rw-r--r--llvm/test/CodeGen/X86/codemodel.ll12
-rw-r--r--llvm/test/CodeGen/X86/combiner-aa-0.ll6
-rw-r--r--llvm/test/CodeGen/X86/combiner-aa-1.ll4
-rw-r--r--llvm/test/CodeGen/X86/commute-blend-avx2.ll16
-rw-r--r--llvm/test/CodeGen/X86/commute-blend-sse41.ll6
-rw-r--r--llvm/test/CodeGen/X86/commute-clmul.ll8
-rw-r--r--llvm/test/CodeGen/X86/commute-fcmp.ll48
-rw-r--r--llvm/test/CodeGen/X86/commute-intrinsic.ll2
-rw-r--r--llvm/test/CodeGen/X86/commute-xop.ll40
-rw-r--r--llvm/test/CodeGen/X86/compact-unwind.ll10
-rw-r--r--llvm/test/CodeGen/X86/complex-asm.ll4
-rw-r--r--llvm/test/CodeGen/X86/computeKnownBits_urem.ll2
-rw-r--r--llvm/test/CodeGen/X86/const-base-addr.ll6
-rw-r--r--llvm/test/CodeGen/X86/constant-combines.ll2
-rw-r--r--llvm/test/CodeGen/X86/constant-hoisting-optnone.ll4
-rw-r--r--llvm/test/CodeGen/X86/constant-hoisting-shift-immediate.ll4
-rw-r--r--llvm/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll2
-rw-r--r--llvm/test/CodeGen/X86/cppeh-catch-all.ll4
-rw-r--r--llvm/test/CodeGen/X86/cppeh-catch-scalar.ll18
-rw-r--r--llvm/test/CodeGen/X86/cppeh-frame-vars.ll62
-rw-r--r--llvm/test/CodeGen/X86/crash-O0.ll2
-rw-r--r--llvm/test/CodeGen/X86/crash-nosse.ll2
-rw-r--r--llvm/test/CodeGen/X86/crash.ll44
-rw-r--r--llvm/test/CodeGen/X86/critical-anti-dep-breaker.ll4
-rw-r--r--llvm/test/CodeGen/X86/cse-add-with-overflow.ll4
-rw-r--r--llvm/test/CodeGen/X86/cvt16.ll4
-rw-r--r--llvm/test/CodeGen/X86/dagcombine-buildvector.ll2
-rw-r--r--llvm/test/CodeGen/X86/dagcombine-cse.ll4
-rw-r--r--llvm/test/CodeGen/X86/darwin-quote.ll2
-rw-r--r--llvm/test/CodeGen/X86/dbg-changes-codegen.ll6
-rw-r--r--llvm/test/CodeGen/X86/dbg-combine.ll8
-rw-r--r--llvm/test/CodeGen/X86/discontiguous-loops.ll2
-rw-r--r--llvm/test/CodeGen/X86/div8.ll6
-rw-r--r--llvm/test/CodeGen/X86/dllimport-x86_64.ll6
-rw-r--r--llvm/test/CodeGen/X86/dllimport.ll6
-rw-r--r--llvm/test/CodeGen/X86/dollar-name.ll4
-rw-r--r--llvm/test/CodeGen/X86/dont-trunc-store-double-to-float.ll2
-rw-r--r--llvm/test/CodeGen/X86/dynamic-allocas-VLAs.ll22
-rw-r--r--llvm/test/CodeGen/X86/early-ifcvt.ll2
-rw-r--r--llvm/test/CodeGen/X86/emit-big-cst.ll2
-rw-r--r--llvm/test/CodeGen/X86/expand-opaque-const.ll6
-rw-r--r--llvm/test/CodeGen/X86/extend.ll4
-rw-r--r--llvm/test/CodeGen/X86/extract-extract.ll4
-rw-r--r--llvm/test/CodeGen/X86/extractelement-load.ll8
-rw-r--r--llvm/test/CodeGen/X86/extractps.ll4
-rw-r--r--llvm/test/CodeGen/X86/f16c-intrinsics.ll4
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-args-fail.ll2
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll6
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-call-bool.ll2
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-fold-mem.ll2
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll4
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-gep.ll18
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-gv.ll6
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-i1.ll2
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-int-float-conversion.ll4
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-mem.ll4
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-tailcall.ll2
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-tls.ll4
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-x86-64.ll6
-rw-r--r--llvm/test/CodeGen/X86/fast-isel-x86.ll4
-rw-r--r--llvm/test/CodeGen/X86/fast-isel.ll16
-rw-r--r--llvm/test/CodeGen/X86/fastcc-byval.ll2
-rw-r--r--llvm/test/CodeGen/X86/fastcc-sret.ll2
-rw-r--r--llvm/test/CodeGen/X86/fastcc.ll8
-rw-r--r--llvm/test/CodeGen/X86/fastisel-gep-promote-before-add.ll10
-rw-r--r--llvm/test/CodeGen/X86/fma-do-not-commute.ll4
-rw-r--r--llvm/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll24
-rw-r--r--llvm/test/CodeGen/X86/fma_patterns.ll4
-rw-r--r--llvm/test/CodeGen/X86/fmul-zero.ll2
-rw-r--r--llvm/test/CodeGen/X86/fold-add.ll4
-rw-r--r--llvm/test/CodeGen/X86/fold-and-shift.ll12
-rw-r--r--llvm/test/CodeGen/X86/fold-call-2.ll2
-rw-r--r--llvm/test/CodeGen/X86/fold-call-3.ll10
-rw-r--r--llvm/test/CodeGen/X86/fold-call-oper.ll6
-rw-r--r--llvm/test/CodeGen/X86/fold-call.ll2
-rw-r--r--llvm/test/CodeGen/X86/fold-load-unops.ll8
-rw-r--r--llvm/test/CodeGen/X86/fold-load-vec.ll18
-rw-r--r--llvm/test/CodeGen/X86/fold-load.ll8
-rw-r--r--llvm/test/CodeGen/X86/fold-mul-lohi.ll2
-rw-r--r--llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll4
-rw-r--r--llvm/test/CodeGen/X86/fold-sext-trunc.ll4
-rw-r--r--llvm/test/CodeGen/X86/fold-tied-op.ll12
-rw-r--r--llvm/test/CodeGen/X86/fold-vex.ll2
-rw-r--r--llvm/test/CodeGen/X86/fold-zext-trunc.ll4
-rw-r--r--llvm/test/CodeGen/X86/force-align-stack-alloca.ll2
-rw-r--r--llvm/test/CodeGen/X86/fp-double-rounding.ll2
-rw-r--r--llvm/test/CodeGen/X86/fp-load-trunc.ll8
-rw-r--r--llvm/test/CodeGen/X86/fp-stack-O0-crash.ll8
-rw-r--r--llvm/test/CodeGen/X86/fp-stack-compare-cmov.ll2
-rw-r--r--llvm/test/CodeGen/X86/fp-stack-compare.ll2
-rw-r--r--llvm/test/CodeGen/X86/fp-stack-ret.ll2
-rw-r--r--llvm/test/CodeGen/X86/fp-stack.ll6
-rw-r--r--llvm/test/CodeGen/X86/fp2sint.ll4
-rw-r--r--llvm/test/CodeGen/X86/fp_load_cast_fold.ll6
-rw-r--r--llvm/test/CodeGen/X86/fp_load_fold.ll12
-rw-r--r--llvm/test/CodeGen/X86/frameallocate.ll2
-rw-r--r--llvm/test/CodeGen/X86/full-lsr.ll8
-rw-r--r--llvm/test/CodeGen/X86/gather-addresses.ll16
-rw-r--r--llvm/test/CodeGen/X86/ghc-cc.ll8
-rw-r--r--llvm/test/CodeGen/X86/ghc-cc64.ll32
-rw-r--r--llvm/test/CodeGen/X86/gs-fold.ll4
-rw-r--r--llvm/test/CodeGen/X86/h-register-addressing-32.ll14
-rw-r--r--llvm/test/CodeGen/X86/h-register-addressing-64.ll14
-rw-r--r--llvm/test/CodeGen/X86/half.ll8
-rw-r--r--llvm/test/CodeGen/X86/hidden-vis-2.ll2
-rw-r--r--llvm/test/CodeGen/X86/hidden-vis-3.ll4
-rw-r--r--llvm/test/CodeGen/X86/hidden-vis-4.ll2
-rw-r--r--llvm/test/CodeGen/X86/hidden-vis-pic.ll2
-rw-r--r--llvm/test/CodeGen/X86/hipe-cc.ll12
-rw-r--r--llvm/test/CodeGen/X86/hipe-cc64.ll14
-rw-r--r--llvm/test/CodeGen/X86/hoist-invariant-load.ll2
-rw-r--r--llvm/test/CodeGen/X86/i128-mul.ll2
-rw-r--r--llvm/test/CodeGen/X86/i128-ret.ll2
-rw-r--r--llvm/test/CodeGen/X86/i1narrowfail.ll2
-rw-r--r--llvm/test/CodeGen/X86/i256-add.ll8
-rw-r--r--llvm/test/CodeGen/X86/i2k.ll4
-rw-r--r--llvm/test/CodeGen/X86/i486-fence-loop.ll4
-rw-r--r--llvm/test/CodeGen/X86/i64-mem-copy.ll2
-rw-r--r--llvm/test/CodeGen/X86/inline-asm-fpstack.ll6
-rw-r--r--llvm/test/CodeGen/X86/inline-asm-out-regs.ll4
-rw-r--r--llvm/test/CodeGen/X86/inline-asm-ptr-cast.ll6
-rw-r--r--llvm/test/CodeGen/X86/inline-asm-stack-realign.ll2
-rw-r--r--llvm/test/CodeGen/X86/inline-asm-stack-realign2.ll2
-rw-r--r--llvm/test/CodeGen/X86/inline-asm-stack-realign3.ll2
-rw-r--r--llvm/test/CodeGen/X86/inline-asm-tied.ll6
-rw-r--r--llvm/test/CodeGen/X86/ins_split_regalloc.ll2
-rw-r--r--llvm/test/CodeGen/X86/ins_subreg_coalesce-1.ll2
-rw-r--r--llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll16
-rw-r--r--llvm/test/CodeGen/X86/insertps-O0-bug.ll4
-rw-r--r--llvm/test/CodeGen/X86/invalid-shift-immediate.ll2
-rw-r--r--llvm/test/CodeGen/X86/isel-optnone.ll12
-rw-r--r--llvm/test/CodeGen/X86/isel-sink.ll2
-rw-r--r--llvm/test/CodeGen/X86/isel-sink2.ll4
-rw-r--r--llvm/test/CodeGen/X86/isel-sink3.ll4
-rw-r--r--llvm/test/CodeGen/X86/jump_sign.ll8
-rw-r--r--llvm/test/CodeGen/X86/large-constants.ll16
-rw-r--r--llvm/test/CodeGen/X86/ldzero.ll12
-rw-r--r--llvm/test/CodeGen/X86/lea-5.ll4
-rw-r--r--llvm/test/CodeGen/X86/lea-recursion.ll16
-rw-r--r--llvm/test/CodeGen/X86/legalize-shift-64.ll2
-rw-r--r--llvm/test/CodeGen/X86/licm-nested.ll4
-rw-r--r--llvm/test/CodeGen/X86/liveness-local-regalloc.ll2
-rw-r--r--llvm/test/CodeGen/X86/load-slice.ll10
-rw-r--r--llvm/test/CodeGen/X86/longlong-deadload.ll2
-rw-r--r--llvm/test/CodeGen/X86/loop-strength-reduce4.ll16
-rw-r--r--llvm/test/CodeGen/X86/loop-strength-reduce7.ll2
-rw-r--r--llvm/test/CodeGen/X86/loop-strength-reduce8.ll8
-rw-r--r--llvm/test/CodeGen/X86/lsr-delayed-fold.ll4
-rw-r--r--llvm/test/CodeGen/X86/lsr-i386.ll2
-rw-r--r--llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll40
-rw-r--r--llvm/test/CodeGen/X86/lsr-normalization.ll10
-rw-r--r--llvm/test/CodeGen/X86/lsr-redundant-addressing.ll8
-rw-r--r--llvm/test/CodeGen/X86/lsr-reuse-trunc.ll6
-rw-r--r--llvm/test/CodeGen/X86/lsr-reuse.ll86
-rw-r--r--llvm/test/CodeGen/X86/lsr-static-addr.ll2
-rw-r--r--llvm/test/CodeGen/X86/lsr-wrap.ll2
-rw-r--r--llvm/test/CodeGen/X86/lzcnt-tzcnt.ll36
-rw-r--r--llvm/test/CodeGen/X86/machine-cse.ll2
-rw-r--r--llvm/test/CodeGen/X86/masked-iv-safe.ll48
-rw-r--r--llvm/test/CodeGen/X86/masked-iv-unsafe.ll78
-rw-r--r--llvm/test/CodeGen/X86/mcinst-lowering.ll2
-rw-r--r--llvm/test/CodeGen/X86/mem-intrin-base-reg.ll12
-rw-r--r--llvm/test/CodeGen/X86/mem-promote-integers.ll70
-rw-r--r--llvm/test/CodeGen/X86/misaligned-memset.ll2
-rw-r--r--llvm/test/CodeGen/X86/misched-aa-colored.ll4
-rw-r--r--llvm/test/CodeGen/X86/misched-aa-mmos.ll4
-rw-r--r--llvm/test/CodeGen/X86/misched-balance.ll90
-rw-r--r--llvm/test/CodeGen/X86/misched-code-difference-with-debug.ll8
-rw-r--r--llvm/test/CodeGen/X86/misched-crash.ll4
-rw-r--r--llvm/test/CodeGen/X86/misched-fusion.ll14
-rw-r--r--llvm/test/CodeGen/X86/misched-matmul.ll64
-rw-r--r--llvm/test/CodeGen/X86/misched-matrix.ll64
-rw-r--r--llvm/test/CodeGen/X86/misched-new.ll6
-rw-r--r--llvm/test/CodeGen/X86/mmx-arg-passing-x86-64.ll2
-rw-r--r--llvm/test/CodeGen/X86/mmx-arith.ll66
-rw-r--r--llvm/test/CodeGen/X86/mmx-bitcast.ll8
-rw-r--r--llvm/test/CodeGen/X86/mmx-copy-gprs.ll2
-rw-r--r--llvm/test/CodeGen/X86/mmx-fold-load.ll50
-rw-r--r--llvm/test/CodeGen/X86/movbe.ll6
-rw-r--r--llvm/test/CodeGen/X86/movfs.ll4
-rw-r--r--llvm/test/CodeGen/X86/movgs.ll16
-rw-r--r--llvm/test/CodeGen/X86/movmsk.ll4
-rw-r--r--llvm/test/CodeGen/X86/movtopush.ll6
-rw-r--r--llvm/test/CodeGen/X86/ms-inline-asm.ll6
-rw-r--r--llvm/test/CodeGen/X86/mul128_sext_loop.ll2
-rw-r--r--llvm/test/CodeGen/X86/muloti.ll10
-rw-r--r--llvm/test/CodeGen/X86/mult-alt-generic-i686.ll38
-rw-r--r--llvm/test/CodeGen/X86/mult-alt-generic-x86_64.ll38
-rw-r--r--llvm/test/CodeGen/X86/mult-alt-x86.ll48
-rw-r--r--llvm/test/CodeGen/X86/multiple-loop-post-inc.ll28
-rw-r--r--llvm/test/CodeGen/X86/mulx32.ll2
-rw-r--r--llvm/test/CodeGen/X86/mulx64.ll2
-rw-r--r--llvm/test/CodeGen/X86/musttail-indirect.ll30
-rw-r--r--llvm/test/CodeGen/X86/musttail-varargs.ll6
-rw-r--r--llvm/test/CodeGen/X86/nancvt.ll54
-rw-r--r--llvm/test/CodeGen/X86/narrow-shl-load.ll6
-rw-r--r--llvm/test/CodeGen/X86/narrow_op-1.ll4
-rw-r--r--llvm/test/CodeGen/X86/negate-add-zero.ll16
-rw-r--r--llvm/test/CodeGen/X86/no-cmov.ll2
-rw-r--r--llvm/test/CodeGen/X86/norex-subreg.ll6
-rw-r--r--llvm/test/CodeGen/X86/nosse-error1.ll8
-rw-r--r--llvm/test/CodeGen/X86/nosse-error2.ll8
-rw-r--r--llvm/test/CodeGen/X86/nosse-varargs.ll8
-rw-r--r--llvm/test/CodeGen/X86/object-size.ll16
-rw-r--r--llvm/test/CodeGen/X86/opt-ext-uses.ll2
-rw-r--r--llvm/test/CodeGen/X86/optimize-max-0.ll12
-rw-r--r--llvm/test/CodeGen/X86/optimize-max-2.ll2
-rw-r--r--llvm/test/CodeGen/X86/optimize-max-3.ll2
-rw-r--r--llvm/test/CodeGen/X86/packed_struct.ll10
-rw-r--r--llvm/test/CodeGen/X86/palignr-2.ll4
-rw-r--r--llvm/test/CodeGen/X86/patchpoint.ll6
-rw-r--r--llvm/test/CodeGen/X86/peep-test-0.ll2
-rw-r--r--llvm/test/CodeGen/X86/peep-test-1.ll2
-rw-r--r--llvm/test/CodeGen/X86/peephole-fold-movsd.ll4
-rw-r--r--llvm/test/CodeGen/X86/peephole-multiple-folds.ll4
-rw-r--r--llvm/test/CodeGen/X86/phi-bit-propagation.ll4
-rw-r--r--llvm/test/CodeGen/X86/phielim-split.ll2
-rw-r--r--llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll6
-rw-r--r--llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll2
-rw-r--r--llvm/test/CodeGen/X86/pic.ll8
-rw-r--r--llvm/test/CodeGen/X86/pic_jumptable.ll2
-rw-r--r--llvm/test/CodeGen/X86/pmovext.ll2
-rw-r--r--llvm/test/CodeGen/X86/pmovsx-inreg.ll24
-rw-r--r--llvm/test/CodeGen/X86/pmulld.ll2
-rw-r--r--llvm/test/CodeGen/X86/pointer-vector.ll24
-rw-r--r--llvm/test/CodeGen/X86/postra-licm.ll8
-rw-r--r--llvm/test/CodeGen/X86/pr10475.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr10525.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr11334.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr12360.ll4
-rw-r--r--llvm/test/CodeGen/X86/pr12889.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr13209.ll30
-rw-r--r--llvm/test/CodeGen/X86/pr13859.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr13899.ll20
-rw-r--r--llvm/test/CodeGen/X86/pr14161.ll4
-rw-r--r--llvm/test/CodeGen/X86/pr14562.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr1505b.ll4
-rw-r--r--llvm/test/CodeGen/X86/pr15267.ll8
-rw-r--r--llvm/test/CodeGen/X86/pr15309.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr18023.ll8
-rw-r--r--llvm/test/CodeGen/X86/pr18162.ll6
-rw-r--r--llvm/test/CodeGen/X86/pr18846.ll24
-rw-r--r--llvm/test/CodeGen/X86/pr20020.ll8
-rw-r--r--llvm/test/CodeGen/X86/pr2177.ll4
-rw-r--r--llvm/test/CodeGen/X86/pr2182.ll8
-rw-r--r--llvm/test/CodeGen/X86/pr2326.ll8
-rw-r--r--llvm/test/CodeGen/X86/pr2656.ll4
-rw-r--r--llvm/test/CodeGen/X86/pr2849.ll8
-rw-r--r--llvm/test/CodeGen/X86/pr2924.ll8
-rw-r--r--llvm/test/CodeGen/X86/pr2982.ll6
-rw-r--r--llvm/test/CodeGen/X86/pr3216.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr3241.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr3244.ll4
-rw-r--r--llvm/test/CodeGen/X86/pr3317.ll8
-rw-r--r--llvm/test/CodeGen/X86/pr3366.ll2
-rw-r--r--llvm/test/CodeGen/X86/pr9127.ll2
-rw-r--r--llvm/test/CodeGen/X86/pre-ra-sched.ll14
-rw-r--r--llvm/test/CodeGen/X86/private-2.ll2
-rw-r--r--llvm/test/CodeGen/X86/private.ll2
-rw-r--r--llvm/test/CodeGen/X86/promote-assert-zext.ll2
-rw-r--r--llvm/test/CodeGen/X86/promote-trunc.ll4
-rw-r--r--llvm/test/CodeGen/X86/promote.ll4
-rw-r--r--llvm/test/CodeGen/X86/pshufb-mask-comments.ll4
-rw-r--r--llvm/test/CodeGen/X86/psubus.ll24
-rw-r--r--llvm/test/CodeGen/X86/ragreedy-bug.ll50
-rw-r--r--llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll4
-rw-r--r--llvm/test/CodeGen/X86/ragreedy-last-chance-recoloring.ll34
-rw-r--r--llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll30
-rw-r--r--llvm/test/CodeGen/X86/regalloc-reconcile-broken-hints.ll22
-rw-r--r--llvm/test/CodeGen/X86/regpressure.ll60
-rw-r--r--llvm/test/CodeGen/X86/remat-constant.ll2
-rw-r--r--llvm/test/CodeGen/X86/remat-fold-load.ll20
-rw-r--r--llvm/test/CodeGen/X86/remat-invalid-liveness.ll6
-rw-r--r--llvm/test/CodeGen/X86/remat-scalar-zero.ll34
-rw-r--r--llvm/test/CodeGen/X86/reverse_branches.ll2
-rw-r--r--llvm/test/CodeGen/X86/rip-rel-address.ll2
-rw-r--r--llvm/test/CodeGen/X86/rot32.ll4
-rw-r--r--llvm/test/CodeGen/X86/rot64.ll4
-rw-r--r--llvm/test/CodeGen/X86/rotate4.ll8
-rw-r--r--llvm/test/CodeGen/X86/sandybridge-loads.ll10
-rw-r--r--llvm/test/CodeGen/X86/scalar-extract.ll2
-rw-r--r--llvm/test/CodeGen/X86/scalar_widen_div.ll20
-rw-r--r--llvm/test/CodeGen/X86/scalarize-bitcast.ll2
-rw-r--r--llvm/test/CodeGen/X86/scev-interchange.ll2
-rw-r--r--llvm/test/CodeGen/X86/segmented-stacks.ll2
-rw-r--r--llvm/test/CodeGen/X86/seh-safe-div.ll14
-rw-r--r--llvm/test/CodeGen/X86/select-with-and-or.ll2
-rw-r--r--llvm/test/CodeGen/X86/select.ll10
-rw-r--r--llvm/test/CodeGen/X86/setcc-narrowing.ll2
-rw-r--r--llvm/test/CodeGen/X86/sext-load.ll2
-rw-r--r--llvm/test/CodeGen/X86/sha.ll14
-rw-r--r--llvm/test/CodeGen/X86/shift-and.ll4
-rw-r--r--llvm/test/CodeGen/X86/shift-bmi2.ll16
-rw-r--r--llvm/test/CodeGen/X86/shift-coalesce.ll2
-rw-r--r--llvm/test/CodeGen/X86/shift-codegen.ll4
-rw-r--r--llvm/test/CodeGen/X86/shift-combine.ll2
-rw-r--r--llvm/test/CodeGen/X86/shift-folding.ll4
-rw-r--r--llvm/test/CodeGen/X86/shift-one.ll2
-rw-r--r--llvm/test/CodeGen/X86/shift-parts.ll2
-rw-r--r--llvm/test/CodeGen/X86/shl-i64.ll4
-rw-r--r--llvm/test/CodeGen/X86/shl_undef.ll4
-rw-r--r--llvm/test/CodeGen/X86/shrink-compare.ll4
-rw-r--r--llvm/test/CodeGen/X86/shuffle-combine-crash.ll2
-rw-r--r--llvm/test/CodeGen/X86/sibcall-4.ll2
-rw-r--r--llvm/test/CodeGen/X86/sibcall-5.ll2
-rw-r--r--llvm/test/CodeGen/X86/sibcall.ll4
-rw-r--r--llvm/test/CodeGen/X86/simple-zext.ll2
-rw-r--r--llvm/test/CodeGen/X86/sink-hoist.ll10
-rw-r--r--llvm/test/CodeGen/X86/slow-incdec.ll4
-rw-r--r--llvm/test/CodeGen/X86/split-vector-bitcast.ll2
-rw-r--r--llvm/test/CodeGen/X86/sse-align-0.ll4
-rw-r--r--llvm/test/CodeGen/X86/sse-align-1.ll4
-rw-r--r--llvm/test/CodeGen/X86/sse-align-10.ll2
-rw-r--r--llvm/test/CodeGen/X86/sse-align-12.ll8
-rw-r--r--llvm/test/CodeGen/X86/sse-align-2.ll4
-rw-r--r--llvm/test/CodeGen/X86/sse-align-5.ll2
-rw-r--r--llvm/test/CodeGen/X86/sse-align-6.ll2
-rw-r--r--llvm/test/CodeGen/X86/sse-align-9.ll4
-rw-r--r--llvm/test/CodeGen/X86/sse-domains.ll2
-rw-r--r--llvm/test/CodeGen/X86/sse-intel-ocl.ll4
-rw-r--r--llvm/test/CodeGen/X86/sse-load-ret.ll2
-rw-r--r--llvm/test/CodeGen/X86/sse-unaligned-mem-feature.ll2
-rw-r--r--llvm/test/CodeGen/X86/sse2.ll38
-rw-r--r--llvm/test/CodeGen/X86/sse3-avx-addsub.ll8
-rw-r--r--llvm/test/CodeGen/X86/sse3.ll18
-rw-r--r--llvm/test/CodeGen/X86/sse41-pmovxrm-intrinsics.ll24
-rw-r--r--llvm/test/CodeGen/X86/sse41.ll30
-rw-r--r--llvm/test/CodeGen/X86/sse42-intrinsics-x86.ll12
-rw-r--r--llvm/test/CodeGen/X86/ssp-data-layout.ll38
-rw-r--r--llvm/test/CodeGen/X86/stack-align.ll4
-rw-r--r--llvm/test/CodeGen/X86/stack-protector-dbginfo.ll2
-rw-r--r--llvm/test/CodeGen/X86/stack-protector-vreg-to-vreg-copy.ll2
-rw-r--r--llvm/test/CodeGen/X86/stack-protector-weight.ll2
-rw-r--r--llvm/test/CodeGen/X86/stack-protector.ll132
-rw-r--r--llvm/test/CodeGen/X86/stackmap.ll2
-rw-r--r--llvm/test/CodeGen/X86/statepoint-forward.ll12
-rw-r--r--llvm/test/CodeGen/X86/store-narrow.ll22
-rw-r--r--llvm/test/CodeGen/X86/store_op_load_fold.ll4
-rw-r--r--llvm/test/CodeGen/X86/store_op_load_fold2.ll4
-rw-r--r--llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll2
-rw-r--r--llvm/test/CodeGen/X86/stride-reuse.ll2
-rw-r--r--llvm/test/CodeGen/X86/subreg-to-reg-0.ll2
-rw-r--r--llvm/test/CodeGen/X86/subreg-to-reg-2.ll6
-rw-r--r--llvm/test/CodeGen/X86/subreg-to-reg-4.ll16
-rw-r--r--llvm/test/CodeGen/X86/subreg-to-reg-6.ll2
-rw-r--r--llvm/test/CodeGen/X86/switch-bt.ll4
-rw-r--r--llvm/test/CodeGen/X86/switch-zextload.ll2
-rw-r--r--llvm/test/CodeGen/X86/tail-call-win64.ll2
-rw-r--r--llvm/test/CodeGen/X86/tail-dup-addr.ll2
-rw-r--r--llvm/test/CodeGen/X86/tail-opts.ll24
-rw-r--r--llvm/test/CodeGen/X86/tailcall-64.ll4
-rw-r--r--llvm/test/CodeGen/X86/tailcall-returndup-void.ll4
-rw-r--r--llvm/test/CodeGen/X86/tailcall-ri64.ll4
-rw-r--r--llvm/test/CodeGen/X86/tailcallbyval.ll2
-rw-r--r--llvm/test/CodeGen/X86/tailcallbyval64.ll2
-rw-r--r--llvm/test/CodeGen/X86/tbm-intrinsics-x86_64.ll4
-rw-r--r--llvm/test/CodeGen/X86/tbm_patterns.ll4
-rw-r--r--llvm/test/CodeGen/X86/test-shrink-bug.ll2
-rw-r--r--llvm/test/CodeGen/X86/testl-commute.ll12
-rw-r--r--llvm/test/CodeGen/X86/tls-addr-non-leaf-function.ll2
-rw-r--r--llvm/test/CodeGen/X86/tls-local-dynamic.ll4
-rw-r--r--llvm/test/CodeGen/X86/tls-pic.ll8
-rw-r--r--llvm/test/CodeGen/X86/tls-pie.ll4
-rw-r--r--llvm/test/CodeGen/X86/tls.ll18
-rw-r--r--llvm/test/CodeGen/X86/tlv-1.ll4
-rw-r--r--llvm/test/CodeGen/X86/trunc-ext-ld-st.ll12
-rw-r--r--llvm/test/CodeGen/X86/trunc-to-bool.ll2
-rw-r--r--llvm/test/CodeGen/X86/twoaddr-pass-sink.ll6
-rw-r--r--llvm/test/CodeGen/X86/unaligned-32-byte-memops.ll38
-rw-r--r--llvm/test/CodeGen/X86/unaligned-spill-folding.ll2
-rw-r--r--llvm/test/CodeGen/X86/unwindraise.ll40
-rw-r--r--llvm/test/CodeGen/X86/use-add-flags.ll2
-rw-r--r--llvm/test/CodeGen/X86/v4i32load-crash.ll8
-rw-r--r--llvm/test/CodeGen/X86/v8i1-masks.ll10
-rw-r--r--llvm/test/CodeGen/X86/vaargs.ll4
-rw-r--r--llvm/test/CodeGen/X86/vararg_tailcall.ll28
-rw-r--r--llvm/test/CodeGen/X86/vec-loadsingles-alignment.ll16
-rw-r--r--llvm/test/CodeGen/X86/vec-trunc-store.ll4
-rw-r--r--llvm/test/CodeGen/X86/vec_align.ll8
-rw-r--r--llvm/test/CodeGen/X86/vec_anyext.ll24
-rw-r--r--llvm/test/CodeGen/X86/vec_extract-mmx.ll6
-rw-r--r--llvm/test/CodeGen/X86/vec_extract-sse4.ll8
-rw-r--r--llvm/test/CodeGen/X86/vec_extract.ll6
-rw-r--r--llvm/test/CodeGen/X86/vec_fpext.ll6
-rw-r--r--llvm/test/CodeGen/X86/vec_i64.ll4
-rw-r--r--llvm/test/CodeGen/X86/vec_ins_extract.ll12
-rw-r--r--llvm/test/CodeGen/X86/vec_insert-5.ll6
-rw-r--r--llvm/test/CodeGen/X86/vec_insert-mmx.ll4
-rw-r--r--llvm/test/CodeGen/X86/vec_loadsingles.ll52
-rw-r--r--llvm/test/CodeGen/X86/vec_logical.ll2
-rw-r--r--llvm/test/CodeGen/X86/vec_set-7.ll2
-rw-r--r--llvm/test/CodeGen/X86/vec_set-F.ll2
-rw-r--r--llvm/test/CodeGen/X86/vec_setcc-2.ll4
-rw-r--r--llvm/test/CodeGen/X86/vec_ss_load_fold.ll4
-rw-r--r--llvm/test/CodeGen/X86/vec_trunc_sext.ll2
-rw-r--r--llvm/test/CodeGen/X86/vec_zero.ll4
-rw-r--r--llvm/test/CodeGen/X86/vector-gep.ll4
-rw-r--r--llvm/test/CodeGen/X86/vector-intrinsics.ll8
-rw-r--r--llvm/test/CodeGen/X86/vector-sext.ll18
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll16
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll14
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll10
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll14
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-combining.ll20
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-mmx.ll4
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-sse1.ll8
-rw-r--r--llvm/test/CodeGen/X86/vector-variable-idx2.ll8
-rw-r--r--llvm/test/CodeGen/X86/vector-zext.ll6
-rw-r--r--llvm/test/CodeGen/X86/vector-zmov.ll4
-rw-r--r--llvm/test/CodeGen/X86/vector.ll42
-rw-r--r--llvm/test/CodeGen/X86/viabs.ll2
-rw-r--r--llvm/test/CodeGen/X86/visibility2.ll2
-rw-r--r--llvm/test/CodeGen/X86/volatile.ll6
-rw-r--r--llvm/test/CodeGen/X86/vselect-avx.ll2
-rw-r--r--llvm/test/CodeGen/X86/vselect-minmax.ll768
-rw-r--r--llvm/test/CodeGen/X86/vshift-5.ll4
-rw-r--r--llvm/test/CodeGen/X86/vshift-6.ll2
-rw-r--r--llvm/test/CodeGen/X86/weak_def_can_be_hidden.ll4
-rw-r--r--llvm/test/CodeGen/X86/widen_arith-1.ll16
-rw-r--r--llvm/test/CodeGen/X86/widen_arith-2.ll24
-rw-r--r--llvm/test/CodeGen/X86/widen_arith-3.ll16
-rw-r--r--llvm/test/CodeGen/X86/widen_arith-4.ll16
-rw-r--r--llvm/test/CodeGen/X86/widen_arith-5.ll16
-rw-r--r--llvm/test/CodeGen/X86/widen_arith-6.ll18
-rw-r--r--llvm/test/CodeGen/X86/widen_cast-1.ll14
-rw-r--r--llvm/test/CodeGen/X86/widen_cast-2.ll14
-rw-r--r--llvm/test/CodeGen/X86/widen_cast-4.ll24
-rw-r--r--llvm/test/CodeGen/X86/widen_conversions.ll2
-rw-r--r--llvm/test/CodeGen/X86/widen_load-0.ll4
-rw-r--r--llvm/test/CodeGen/X86/widen_load-1.ll4
-rw-r--r--llvm/test/CodeGen/X86/widen_load-2.ll44
-rw-r--r--llvm/test/CodeGen/X86/win32_sret.ll4
-rw-r--r--llvm/test/CodeGen/X86/win64_eh.ll16
-rw-r--r--llvm/test/CodeGen/X86/win_eh_prepare.ll2
-rw-r--r--llvm/test/CodeGen/X86/x32-function_pointer-1.ll4
-rw-r--r--llvm/test/CodeGen/X86/x86-64-gv-offset.ll4
-rw-r--r--llvm/test/CodeGen/X86/x86-64-jumps.ll4
-rw-r--r--llvm/test/CodeGen/X86/x86-64-mem.ll2
-rw-r--r--llvm/test/CodeGen/X86/x86-64-pic-4.ll2
-rw-r--r--llvm/test/CodeGen/X86/x86-64-pic-5.ll2
-rw-r--r--llvm/test/CodeGen/X86/x86-64-pic-6.ll2
-rw-r--r--llvm/test/CodeGen/X86/x86-64-ptr-arg-simple.ll2
-rw-r--r--llvm/test/CodeGen/X86/x86-64-sret-return.ll18
-rw-r--r--llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll2
-rw-r--r--llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll8
-rw-r--r--llvm/test/CodeGen/X86/xop-intrinsics-x86_64.ll40
-rw-r--r--llvm/test/CodeGen/X86/zext-extract_subreg.ll2
-rw-r--r--llvm/test/CodeGen/X86/zext-sext.ll10
-rw-r--r--llvm/test/CodeGen/X86/zlib-longest-match.ll74
794 files changed, 4518 insertions, 4518 deletions
diff --git a/llvm/test/CodeGen/X86/2005-01-17-CycleInDAG.ll b/llvm/test/CodeGen/X86/2005-01-17-CycleInDAG.ll
index fe6674da041..48236cd0c8f 100644
--- a/llvm/test/CodeGen/X86/2005-01-17-CycleInDAG.ll
+++ b/llvm/test/CodeGen/X86/2005-01-17-CycleInDAG.ll
@@ -8,9 +8,9 @@
@GLOBAL = external global i32 ; <i32*> [#uses=1]
define i32 @test(i32* %P1, i32* %P2, i32* %P3) nounwind {
- %L = load i32* @GLOBAL ; <i32> [#uses=1]
+ %L = load i32, i32* @GLOBAL ; <i32> [#uses=1]
store i32 12, i32* %P2
- %Y = load i32* %P3 ; <i32> [#uses=1]
+ %Y = load i32, i32* %P3 ; <i32> [#uses=1]
%Z = sub i32 %Y, %L ; <i32> [#uses=1]
ret i32 %Z
}
diff --git a/llvm/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll b/llvm/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
index 1b3fc382e89..f6b5b2c103f 100644
--- a/llvm/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
+++ b/llvm/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
@@ -7,7 +7,7 @@
@A = external global i32 ; <i32*> [#uses=2]
define i32 @test5(i32 %B, i8 %C) {
- %tmp.1 = load i32* @A ; <i32> [#uses=1]
+ %tmp.1 = load i32, i32* @A ; <i32> [#uses=1]
%shift.upgrd.1 = zext i8 %C to i32 ; <i32> [#uses=1]
%tmp.2 = shl i32 %tmp.1, %shift.upgrd.1 ; <i32> [#uses=1]
%tmp.3 = sub i8 32, %C ; <i8> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll b/llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll
index 6df040d1e24..9f44bc348e3 100644
--- a/llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll
+++ b/llvm/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll
@@ -13,14 +13,14 @@ label.0.no_exit.1_crit_edge.exitStub: ; preds = %label.0
codeRepl5.exitStub: ; preds = %label.0
ret i1 false
label.0: ; preds = %newFuncRoot
- %tmp.35 = load i32* @last ; <i32> [#uses=1]
+ %tmp.35 = load i32, i32* @last ; <i32> [#uses=1]
%inc.1 = add i32 %tmp.35, 1 ; <i32> [#uses=2]
store i32 %inc.1, i32* @last
- %tmp.36 = load i8** @block ; <i8*> [#uses=1]
+ %tmp.36 = load i8*, i8** @block ; <i8*> [#uses=1]
%tmp.38 = getelementptr i8, i8* %tmp.36, i32 %inc.1 ; <i8*> [#uses=1]
%tmp.40 = trunc i32 %tmp.21.reload to i8 ; <i8> [#uses=1]
store i8 %tmp.40, i8* %tmp.38
- %tmp.910 = load i32* @last ; <i32> [#uses=1]
+ %tmp.910 = load i32, i32* @last ; <i32> [#uses=1]
%tmp.1111 = icmp slt i32 %tmp.910, %tmp.8 ; <i1> [#uses=1]
%tmp.1412 = icmp ne i32 %tmp.21.reload, 257 ; <i1> [#uses=1]
%tmp.1613 = and i1 %tmp.1111, %tmp.1412 ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll b/llvm/test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll
index faa3e21a934..583877e6658 100644
--- a/llvm/test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll
+++ b/llvm/test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll
@@ -5,10 +5,10 @@
define i32 @foo(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c, <4 x float>* %d) {
- %tmp44 = load <4 x float>* %a ; <<4 x float>> [#uses=9]
- %tmp46 = load <4 x float>* %b ; <<4 x float>> [#uses=1]
- %tmp48 = load <4 x float>* %c ; <<4 x float>> [#uses=1]
- %tmp50 = load <4 x float>* %d ; <<4 x float>> [#uses=1]
+ %tmp44 = load <4 x float>, <4 x float>* %a ; <<4 x float>> [#uses=9]
+ %tmp46 = load <4 x float>, <4 x float>* %b ; <<4 x float>> [#uses=1]
+ %tmp48 = load <4 x float>, <4 x float>* %c ; <<4 x float>> [#uses=1]
+ %tmp50 = load <4 x float>, <4 x float>* %d ; <<4 x float>> [#uses=1]
%tmp51 = bitcast <4 x float> %tmp44 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp = shufflevector <4 x i32> %tmp51, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>> [#uses=2]
%tmp52 = bitcast <4 x i32> %tmp to <4 x float> ; <<4 x float>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll b/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll
index 92c271b6aa4..46c5e88955f 100644
--- a/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll
+++ b/llvm/test/CodeGen/X86/2006-05-02-InstrSched1.ll
@@ -11,12 +11,12 @@
define i32 @compare(i8* %a, i8* %b) nounwind {
%tmp = bitcast i8* %a to i32* ; <i32*> [#uses=1]
%tmp1 = bitcast i8* %b to i32* ; <i32*> [#uses=1]
- %tmp.upgrd.1 = load i32* @size20 ; <i32> [#uses=1]
- %tmp.upgrd.2 = load i8** @in5 ; <i8*> [#uses=2]
- %tmp3 = load i32* %tmp1 ; <i32> [#uses=1]
+ %tmp.upgrd.1 = load i32, i32* @size20 ; <i32> [#uses=1]
+ %tmp.upgrd.2 = load i8*, i8** @in5 ; <i8*> [#uses=2]
+ %tmp3 = load i32, i32* %tmp1 ; <i32> [#uses=1]
%gep.upgrd.3 = zext i32 %tmp3 to i64 ; <i64> [#uses=1]
%tmp4 = getelementptr i8, i8* %tmp.upgrd.2, i64 %gep.upgrd.3 ; <i8*> [#uses=2]
- %tmp7 = load i32* %tmp ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* %tmp ; <i32> [#uses=1]
%gep.upgrd.4 = zext i32 %tmp7 to i64 ; <i64> [#uses=1]
%tmp8 = getelementptr i8, i8* %tmp.upgrd.2, i64 %gep.upgrd.4 ; <i8*> [#uses=2]
%tmp.upgrd.5 = tail call i32 @memcmp( i8* %tmp8, i8* %tmp4, i32 %tmp.upgrd.1 ) ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2006-05-02-InstrSched2.ll b/llvm/test/CodeGen/X86/2006-05-02-InstrSched2.ll
index 222b7a0b41f..3281c68e933 100644
--- a/llvm/test/CodeGen/X86/2006-05-02-InstrSched2.ll
+++ b/llvm/test/CodeGen/X86/2006-05-02-InstrSched2.ll
@@ -12,13 +12,13 @@ cond_true456.i: ; preds = %cond_true456.i, %newFuncRoot
%__s441.2.4.i = phi i8* [ %tmp451.i.upgrd.1, %cond_true456.i ], [ %tmp435.i, %newFuncRoot ] ; <i8*> [#uses=2]
%__h.2.4.i = phi i32 [ %tmp449.i, %cond_true456.i ], [ 0, %newFuncRoot ] ; <i32> [#uses=1]
%tmp446.i = mul i32 %__h.2.4.i, 5 ; <i32> [#uses=1]
- %tmp.i = load i8* %__s441.2.4.i ; <i8> [#uses=1]
+ %tmp.i = load i8, i8* %__s441.2.4.i ; <i8> [#uses=1]
%tmp448.i = sext i8 %tmp.i to i32 ; <i32> [#uses=1]
%tmp449.i = add i32 %tmp448.i, %tmp446.i ; <i32> [#uses=2]
%tmp450.i = ptrtoint i8* %__s441.2.4.i to i32 ; <i32> [#uses=1]
%tmp451.i = add i32 %tmp450.i, 1 ; <i32> [#uses=1]
%tmp451.i.upgrd.1 = inttoptr i32 %tmp451.i to i8* ; <i8*> [#uses=2]
- %tmp45435.i = load i8* %tmp451.i.upgrd.1 ; <i8> [#uses=1]
+ %tmp45435.i = load i8, i8* %tmp451.i.upgrd.1 ; <i8> [#uses=1]
%tmp45536.i = icmp eq i8 %tmp45435.i, 0 ; <i1> [#uses=1]
br i1 %tmp45536.i, label %bb459.i.exitStub, label %cond_true456.i
}
diff --git a/llvm/test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll b/llvm/test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll
index 8421483ecb5..b70d375bf51 100644
--- a/llvm/test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll
+++ b/llvm/test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll
@@ -9,13 +9,13 @@
define void @test(i32 %A) {
%A.upgrd.1 = trunc i32 %A to i8 ; <i8> [#uses=1]
- %tmp2 = load i32* @B ; <i32> [#uses=1]
+ %tmp2 = load i32, i32* @B ; <i32> [#uses=1]
%tmp3 = and i8 %A.upgrd.1, 16 ; <i8> [#uses=1]
%shift.upgrd.2 = zext i8 %tmp3 to i32 ; <i32> [#uses=1]
%tmp4 = shl i32 %tmp2, %shift.upgrd.2 ; <i32> [#uses=1]
store i32 %tmp4, i32* @B
%tmp6 = lshr i32 %A, 3 ; <i32> [#uses=1]
- %tmp = load i16** @C ; <i16*> [#uses=1]
+ %tmp = load i16*, i16** @C ; <i16*> [#uses=1]
%tmp8 = ptrtoint i16* %tmp to i32 ; <i32> [#uses=1]
%tmp9 = add i32 %tmp8, %tmp6 ; <i32> [#uses=1]
%tmp9.upgrd.3 = inttoptr i32 %tmp9 to i16* ; <i16*> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2006-05-08-InstrSched.ll b/llvm/test/CodeGen/X86/2006-05-08-InstrSched.ll
index d55cead4f16..cd46ecfef52 100644
--- a/llvm/test/CodeGen/X86/2006-05-08-InstrSched.ll
+++ b/llvm/test/CodeGen/X86/2006-05-08-InstrSched.ll
@@ -5,13 +5,13 @@
@C = external global i32 ; <i32*> [#uses=2]
define void @test() {
- %tmp = load i16** @A ; <i16*> [#uses=1]
+ %tmp = load i16*, i16** @A ; <i16*> [#uses=1]
%tmp1 = getelementptr i16, i16* %tmp, i32 1 ; <i16*> [#uses=1]
- %tmp.upgrd.1 = load i16* %tmp1 ; <i16> [#uses=1]
+ %tmp.upgrd.1 = load i16, i16* %tmp1 ; <i16> [#uses=1]
%tmp3 = zext i16 %tmp.upgrd.1 to i32 ; <i32> [#uses=1]
- %tmp.upgrd.2 = load i32* @B ; <i32> [#uses=1]
+ %tmp.upgrd.2 = load i32, i32* @B ; <i32> [#uses=1]
%tmp4 = and i32 %tmp.upgrd.2, 16 ; <i32> [#uses=1]
- %tmp5 = load i32* @C ; <i32> [#uses=1]
+ %tmp5 = load i32, i32* @C ; <i32> [#uses=1]
%tmp6 = trunc i32 %tmp4 to i8 ; <i8> [#uses=2]
%shift.upgrd.3 = zext i8 %tmp6 to i32 ; <i32> [#uses=1]
%tmp7 = shl i32 %tmp5, %shift.upgrd.3 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2006-05-11-InstrSched.ll b/llvm/test/CodeGen/X86/2006-05-11-InstrSched.ll
index 6be044bc616..b1deb2c5f56 100644
--- a/llvm/test/CodeGen/X86/2006-05-11-InstrSched.ll
+++ b/llvm/test/CodeGen/X86/2006-05-11-InstrSched.ll
@@ -15,19 +15,19 @@ cond_true: ; preds = %cond_true, %entry
%tmp31 = add nsw i32 %tmp.10, -1 ; <i32> [#uses=4]
%tmp32 = getelementptr i32, i32* %mpp, i32 %tmp31 ; <i32*> [#uses=1]
%tmp34 = bitcast i32* %tmp32 to <16 x i8>* ; <i8*> [#uses=1]
- %tmp = load <16 x i8>* %tmp34, align 1
+ %tmp = load <16 x i8>, <16 x i8>* %tmp34, align 1
%tmp42 = getelementptr i32, i32* %tpmm, i32 %tmp31 ; <i32*> [#uses=1]
%tmp42.upgrd.1 = bitcast i32* %tmp42 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %tmp46 = load <4 x i32>* %tmp42.upgrd.1 ; <<4 x i32>> [#uses=1]
+ %tmp46 = load <4 x i32>, <4 x i32>* %tmp42.upgrd.1 ; <<4 x i32>> [#uses=1]
%tmp54 = bitcast <16 x i8> %tmp to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp55 = add <4 x i32> %tmp54, %tmp46 ; <<4 x i32>> [#uses=2]
%tmp55.upgrd.2 = bitcast <4 x i32> %tmp55 to <2 x i64> ; <<2 x i64>> [#uses=1]
%tmp62 = getelementptr i32, i32* %ip, i32 %tmp31 ; <i32*> [#uses=1]
%tmp65 = bitcast i32* %tmp62 to <16 x i8>* ; <i8*> [#uses=1]
- %tmp66 = load <16 x i8>* %tmp65, align 1
+ %tmp66 = load <16 x i8>, <16 x i8>* %tmp65, align 1
%tmp73 = getelementptr i32, i32* %tpim, i32 %tmp31 ; <i32*> [#uses=1]
%tmp73.upgrd.3 = bitcast i32* %tmp73 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %tmp77 = load <4 x i32>* %tmp73.upgrd.3 ; <<4 x i32>> [#uses=1]
+ %tmp77 = load <4 x i32>, <4 x i32>* %tmp73.upgrd.3 ; <<4 x i32>> [#uses=1]
%tmp87 = bitcast <16 x i8> %tmp66 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp88 = add <4 x i32> %tmp87, %tmp77 ; <<4 x i32>> [#uses=2]
%tmp88.upgrd.4 = bitcast <4 x i32> %tmp88 to <2 x i64> ; <<2 x i64>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2006-05-25-CycleInDAG.ll b/llvm/test/CodeGen/X86/2006-05-25-CycleInDAG.ll
index 0288278d626..6ff879760ea 100644
--- a/llvm/test/CodeGen/X86/2006-05-25-CycleInDAG.ll
+++ b/llvm/test/CodeGen/X86/2006-05-25-CycleInDAG.ll
@@ -6,7 +6,7 @@ cond_true12: ; preds = %0
ret i32 0
cond_next33: ; preds = %0
%tmp44.i = call double @foo( double 0.000000e+00, i32 32 ) ; <double> [#uses=1]
- %tmp61.i = load i8* null ; <i8> [#uses=1]
+ %tmp61.i = load i8, i8* null ; <i8> [#uses=1]
%tmp61.i.upgrd.1 = zext i8 %tmp61.i to i32 ; <i32> [#uses=1]
%tmp58.i = or i32 0, %tmp61.i.upgrd.1 ; <i32> [#uses=1]
%tmp62.i = or i32 %tmp58.i, 0 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll b/llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll
index 1facf15b9f4..795e898df34 100644
--- a/llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll
+++ b/llvm/test/CodeGen/X86/2006-07-20-InlineAsm.ll
@@ -8,7 +8,7 @@ entry:
%X_addr = alloca i32 ; <i32*> [#uses=3]
store i32 %X, i32* %X_addr
call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,m,1,~{dirflag},~{fpsr},~{flags}"( i32* @G, i32* %X_addr, i32* @G, i32 %X )
- %tmp1 = load i32* %X_addr ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %X_addr ; <i32> [#uses=1]
ret i32 %tmp1
}
@@ -17,7 +17,7 @@ entry:
%X_addr = alloca i32 ; <i32*> [#uses=3]
store i32 %X, i32* %X_addr
call void asm sideeffect "xchg{l} {$0,$1|$1,$0}", "=*m,=*r,1,~{dirflag},~{fpsr},~{flags}"( i32* @G, i32* %X_addr, i32 %X )
- %tmp1 = load i32* %X_addr ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %X_addr ; <i32> [#uses=1]
ret i32 %tmp1
}
diff --git a/llvm/test/CodeGen/X86/2006-08-07-CycleInDAG.ll b/llvm/test/CodeGen/X86/2006-08-07-CycleInDAG.ll
index aea707ee8fe..397bc26dbec 100644
--- a/llvm/test/CodeGen/X86/2006-08-07-CycleInDAG.ll
+++ b/llvm/test/CodeGen/X86/2006-08-07-CycleInDAG.ll
@@ -8,10 +8,10 @@ cond_true.i: ; preds = %0
ret i32 0
ilog2.exit: ; preds = %0
- %tmp24.i = load i32* null ; <i32> [#uses=1]
+ %tmp24.i = load i32, i32* null ; <i32> [#uses=1]
%tmp13.i12.i = tail call double @ldexp( double 0.000000e+00, i32 0 ) ; <double> [#uses=1]
%tmp13.i13.i = fptrunc double %tmp13.i12.i to float ; <float> [#uses=1]
- %tmp11.s = load i32* null ; <i32> [#uses=1]
+ %tmp11.s = load i32, i32* null ; <i32> [#uses=1]
%tmp11.i = bitcast i32 %tmp11.s to i32 ; <i32> [#uses=1]
%n.i = bitcast i32 %tmp24.i to i32 ; <i32> [#uses=1]
%tmp13.i7 = mul i32 %tmp11.i, %n.i ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2006-08-16-CycleInDAG.ll b/llvm/test/CodeGen/X86/2006-08-16-CycleInDAG.ll
index 8dbabc7f0d6..2c44adf6829 100644
--- a/llvm/test/CodeGen/X86/2006-08-16-CycleInDAG.ll
+++ b/llvm/test/CodeGen/X86/2006-08-16-CycleInDAG.ll
@@ -6,7 +6,7 @@
%struct.u = type { [1 x i64] }
define void @test() {
- %tmp = load i32* null ; <i32> [#uses=1]
+ %tmp = load i32, i32* null ; <i32> [#uses=1]
%tmp8 = call i32 @hash_rtx( ) ; <i32> [#uses=1]
%tmp11 = urem i32 %tmp8, %tmp ; <i32> [#uses=1]
br i1 false, label %cond_next, label %return
diff --git a/llvm/test/CodeGen/X86/2006-09-01-CycleInDAG.ll b/llvm/test/CodeGen/X86/2006-09-01-CycleInDAG.ll
index d7e727cbe49..a7a10afaae1 100644
--- a/llvm/test/CodeGen/X86/2006-09-01-CycleInDAG.ll
+++ b/llvm/test/CodeGen/X86/2006-09-01-CycleInDAG.ll
@@ -111,21 +111,21 @@ bb3502.exitStub: ; preds = %cond_true3632
ret void
cond_true3632: ; preds = %newFuncRoot
- %tmp3378 = load i32* %tmp3629 ; <i32> [#uses=1]
+ %tmp3378 = load i32, i32* %tmp3629 ; <i32> [#uses=1]
%tmp3379 = add i32 %tmp3378, -1 ; <i32> [#uses=1]
%tmp3381 = getelementptr %struct.varray_head_tag, %struct.varray_head_tag* %stack3023.6, i32 0, i32 4 ; <%struct.varray_data*> [#uses=1]
%tmp3382 = bitcast %struct.varray_data* %tmp3381 to [1 x i32]* ; <[1 x i32]*> [#uses=1]
%gep.upgrd.1 = zext i32 %tmp3379 to i64 ; <i64> [#uses=1]
%tmp3383 = getelementptr [1 x i32], [1 x i32]* %tmp3382, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
- %tmp3384 = load i32* %tmp3383 ; <i32> [#uses=1]
- %tmp3387 = load i32* %tmp3629 ; <i32> [#uses=1]
+ %tmp3384 = load i32, i32* %tmp3383 ; <i32> [#uses=1]
+ %tmp3387 = load i32, i32* %tmp3629 ; <i32> [#uses=1]
%tmp3388 = add i32 %tmp3387, -1 ; <i32> [#uses=1]
store i32 %tmp3388, i32* %tmp3629
- %tmp3391 = load %struct.varray_head_tag** @basic_block_info ; <%struct.varray_head_tag*> [#uses=1]
+ %tmp3391 = load %struct.varray_head_tag*, %struct.varray_head_tag** @basic_block_info ; <%struct.varray_head_tag*> [#uses=1]
%tmp3393 = getelementptr %struct.varray_head_tag, %struct.varray_head_tag* %tmp3391, i32 0, i32 4 ; <%struct.varray_data*> [#uses=1]
%tmp3394 = bitcast %struct.varray_data* %tmp3393 to [1 x %struct.basic_block_def*]* ; <[1 x %struct.basic_block_def*]*> [#uses=1]
%tmp3395 = getelementptr [1 x %struct.basic_block_def*], [1 x %struct.basic_block_def*]* %tmp3394, i32 0, i32 %tmp3384 ; <%struct.basic_block_def**> [#uses=1]
- %tmp3396 = load %struct.basic_block_def** %tmp3395 ; <%struct.basic_block_def*> [#uses=1]
+ %tmp3396 = load %struct.basic_block_def*, %struct.basic_block_def** %tmp3395 ; <%struct.basic_block_def*> [#uses=1]
%tmp3397 = getelementptr %struct.basic_block_def, %struct.basic_block_def* %tmp3396, i32 0, i32 3 ; <%struct.VEC_edge**> [#uses=1]
br label %bb3502.exitStub
}
diff --git a/llvm/test/CodeGen/X86/2006-10-09-CycleInDAG.ll b/llvm/test/CodeGen/X86/2006-10-09-CycleInDAG.ll
index fbb14ee1615..e2c84ea569e 100644
--- a/llvm/test/CodeGen/X86/2006-10-09-CycleInDAG.ll
+++ b/llvm/test/CodeGen/X86/2006-10-09-CycleInDAG.ll
@@ -1,9 +1,9 @@
; RUN: llc < %s -march=x86
define void @_ZN13QFSFileEngine4readEPcx() {
- %tmp201 = load i32* null ; <i32> [#uses=1]
+ %tmp201 = load i32, i32* null ; <i32> [#uses=1]
%tmp201.upgrd.1 = sext i32 %tmp201 to i64 ; <i64> [#uses=1]
- %tmp202 = load i64* null ; <i64> [#uses=1]
+ %tmp202 = load i64, i64* null ; <i64> [#uses=1]
%tmp203 = add i64 %tmp201.upgrd.1, %tmp202 ; <i64> [#uses=1]
store i64 %tmp203, i64* null
ret void
diff --git a/llvm/test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll b/llvm/test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll
index 63b63188502..43558258785 100644
--- a/llvm/test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll
+++ b/llvm/test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll
@@ -4,14 +4,14 @@
@tree_code_type = external global [0 x i32] ; <[0 x i32]*> [#uses=1]
define void @copy_if_shared_r() {
- %tmp = load i32* null ; <i32> [#uses=1]
+ %tmp = load i32, i32* null ; <i32> [#uses=1]
%tmp56 = and i32 %tmp, 255 ; <i32> [#uses=1]
%gep.upgrd.1 = zext i32 %tmp56 to i64 ; <i64> [#uses=1]
%tmp8 = getelementptr [0 x i32], [0 x i32]* @tree_code_type, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
- %tmp9 = load i32* %tmp8 ; <i32> [#uses=1]
+ %tmp9 = load i32, i32* %tmp8 ; <i32> [#uses=1]
%tmp10 = add i32 %tmp9, -1 ; <i32> [#uses=1]
%tmp.upgrd.2 = icmp ugt i32 %tmp10, 2 ; <i1> [#uses=1]
- %tmp14 = load i32* null ; <i32> [#uses=1]
+ %tmp14 = load i32, i32* null ; <i32> [#uses=1]
%tmp15 = lshr i32 %tmp14, 31 ; <i32> [#uses=1]
%tmp15.upgrd.3 = trunc i32 %tmp15 to i8 ; <i8> [#uses=1]
%tmp16 = icmp ne i8 %tmp15.upgrd.3, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2006-10-12-CycleInDAG.ll b/llvm/test/CodeGen/X86/2006-10-12-CycleInDAG.ll
index 17eb41d4cd3..7a32ef7801d 100644
--- a/llvm/test/CodeGen/X86/2006-10-12-CycleInDAG.ll
+++ b/llvm/test/CodeGen/X86/2006-10-12-CycleInDAG.ll
@@ -29,10 +29,10 @@ bb441: ; preds = %cond_next330
ret void
cond_next472: ; preds = %cond_next330
- %tmp490 = load %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
+ %tmp490 = load %struct.tree_node*, %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
%tmp492 = getelementptr %struct.tree_node, %struct.tree_node* %tmp490, i32 0, i32 0, i32 0, i32 3 ; <i8*> [#uses=1]
%tmp492.upgrd.1 = bitcast i8* %tmp492 to i32* ; <i32*> [#uses=1]
- %tmp493 = load i32* %tmp492.upgrd.1 ; <i32> [#uses=1]
+ %tmp493 = load i32, i32* %tmp492.upgrd.1 ; <i32> [#uses=1]
%tmp495 = trunc i32 %tmp493 to i8 ; <i8> [#uses=1]
%tmp496 = icmp eq i8 %tmp495, 11 ; <i1> [#uses=1]
%tmp496.upgrd.2 = zext i1 %tmp496 to i8 ; <i8> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2006-10-13-CycleInDAG.ll b/llvm/test/CodeGen/X86/2006-10-13-CycleInDAG.ll
index 6ed2e7bb575..7804e74696e 100644
--- a/llvm/test/CodeGen/X86/2006-10-13-CycleInDAG.ll
+++ b/llvm/test/CodeGen/X86/2006-10-13-CycleInDAG.ll
@@ -3,9 +3,9 @@
define void @test() {
bb.i:
- %tmp.i660 = load <4 x float>* null ; <<4 x float>> [#uses=1]
+ %tmp.i660 = load <4 x float>, <4 x float>* null ; <<4 x float>> [#uses=1]
call void (i32, ...)* @printf( i32 0, i8* getelementptr ([18 x i8]* @str, i32 0, i64 0), double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00 )
- %tmp152.i = load <4 x i32>* null ; <<4 x i32>> [#uses=1]
+ %tmp152.i = load <4 x i32>, <4 x i32>* null ; <<4 x i32>> [#uses=1]
%tmp156.i = bitcast <4 x i32> %tmp152.i to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp175.i = bitcast <4 x float> %tmp.i660 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp176.i = xor <4 x i32> %tmp156.i, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2006-11-12-CSRetCC.ll b/llvm/test/CodeGen/X86/2006-11-12-CSRetCC.ll
index 40128f3a199..9adfff33e11 100644
--- a/llvm/test/CodeGen/X86/2006-11-12-CSRetCC.ll
+++ b/llvm/test/CodeGen/X86/2006-11-12-CSRetCC.ll
@@ -18,43 +18,43 @@ entry:
%z = alloca { double, double }, align 16 ; <{ double, double }*> [#uses=4]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store double 0x400921FB54442D18, double* %pi
- %tmp.upgrd.1 = load double* %pi ; <double> [#uses=1]
+ %tmp.upgrd.1 = load double, double* %pi ; <double> [#uses=1]
%real = getelementptr { double, double }, { double, double }* %tmp1, i64 0, i32 0 ; <double*> [#uses=1]
store double 0.000000e+00, double* %real
%real3 = getelementptr { double, double }, { double, double }* %tmp1, i64 0, i32 1 ; <double*> [#uses=1]
store double %tmp.upgrd.1, double* %real3
%tmp.upgrd.2 = getelementptr { double, double }, { double, double }* %tmp, i64 0, i32 0 ; <double*> [#uses=1]
%tmp4 = getelementptr { double, double }, { double, double }* %tmp1, i64 0, i32 0 ; <double*> [#uses=1]
- %tmp5 = load double* %tmp4 ; <double> [#uses=1]
+ %tmp5 = load double, double* %tmp4 ; <double> [#uses=1]
store double %tmp5, double* %tmp.upgrd.2
%tmp6 = getelementptr { double, double }, { double, double }* %tmp, i64 0, i32 1 ; <double*> [#uses=1]
%tmp7 = getelementptr { double, double }, { double, double }* %tmp1, i64 0, i32 1 ; <double*> [#uses=1]
- %tmp8 = load double* %tmp7 ; <double> [#uses=1]
+ %tmp8 = load double, double* %tmp7 ; <double> [#uses=1]
store double %tmp8, double* %tmp6
%tmp.upgrd.3 = bitcast { double, double }* %tmp to { i64, i64 }* ; <{ i64, i64 }*> [#uses=1]
%tmp.upgrd.4 = getelementptr { i64, i64 }, { i64, i64 }* %tmp.upgrd.3, i64 0, i32 0 ; <i64*> [#uses=1]
- %tmp.upgrd.5 = load i64* %tmp.upgrd.4 ; <i64> [#uses=1]
+ %tmp.upgrd.5 = load i64, i64* %tmp.upgrd.4 ; <i64> [#uses=1]
%tmp9 = bitcast { double, double }* %tmp to { i64, i64 }* ; <{ i64, i64 }*> [#uses=1]
%tmp10 = getelementptr { i64, i64 }, { i64, i64 }* %tmp9, i64 0, i32 1 ; <i64*> [#uses=1]
- %tmp11 = load i64* %tmp10 ; <i64> [#uses=1]
+ %tmp11 = load i64, i64* %tmp10 ; <i64> [#uses=1]
call void @cexp( { double, double }* sret %tmp2, i64 %tmp.upgrd.5, i64 %tmp11 )
%tmp12 = getelementptr { double, double }, { double, double }* %z, i64 0, i32 0 ; <double*> [#uses=1]
%tmp13 = getelementptr { double, double }, { double, double }* %tmp2, i64 0, i32 0 ; <double*> [#uses=1]
- %tmp14 = load double* %tmp13 ; <double> [#uses=1]
+ %tmp14 = load double, double* %tmp13 ; <double> [#uses=1]
store double %tmp14, double* %tmp12
%tmp15 = getelementptr { double, double }, { double, double }* %z, i64 0, i32 1 ; <double*> [#uses=1]
%tmp16 = getelementptr { double, double }, { double, double }* %tmp2, i64 0, i32 1 ; <double*> [#uses=1]
- %tmp17 = load double* %tmp16 ; <double> [#uses=1]
+ %tmp17 = load double, double* %tmp16 ; <double> [#uses=1]
store double %tmp17, double* %tmp15
%tmp18 = getelementptr { double, double }, { double, double }* %z, i64 0, i32 1 ; <double*> [#uses=1]
- %tmp19 = load double* %tmp18 ; <double> [#uses=1]
+ %tmp19 = load double, double* %tmp18 ; <double> [#uses=1]
%tmp20 = getelementptr { double, double }, { double, double }* %z, i64 0, i32 0 ; <double*> [#uses=1]
- %tmp21 = load double* %tmp20 ; <double> [#uses=1]
+ %tmp21 = load double, double* %tmp20 ; <double> [#uses=1]
%tmp.upgrd.6 = getelementptr [9 x i8], [9 x i8]* @str, i32 0, i64 0 ; <i8*> [#uses=1]
%tmp.upgrd.7 = call i32 (i8*, ...)* @printf( i8* %tmp.upgrd.6, double %tmp21, double %tmp19 ) ; <i32> [#uses=0]
br label %finish
finish:
- %retval.upgrd.8 = load i32* %retval ; <i32> [#uses=1]
+ %retval.upgrd.8 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval.upgrd.8
}
diff --git a/llvm/test/CodeGen/X86/2006-11-17-IllegalMove.ll b/llvm/test/CodeGen/X86/2006-11-17-IllegalMove.ll
index 783d9f94cae..c0bd6f72842 100644
--- a/llvm/test/CodeGen/X86/2006-11-17-IllegalMove.ll
+++ b/llvm/test/CodeGen/X86/2006-11-17-IllegalMove.ll
@@ -5,7 +5,7 @@
define void @handle_vector_size_attribute() nounwind {
entry:
- %tmp69 = load i32* null ; <i32> [#uses=1]
+ %tmp69 = load i32, i32* null ; <i32> [#uses=1]
switch i32 %tmp69, label %bb84 [
i32 2, label %bb77
i32 1, label %bb77
@@ -13,7 +13,7 @@ entry:
bb77: ; preds = %entry, %entry
%tmp99 = udiv i64 0, 0 ; <i64> [#uses=1]
- %tmp = load i8* null ; <i8> [#uses=1]
+ %tmp = load i8, i8* null ; <i8> [#uses=1]
%tmp114 = icmp eq i64 0, 0 ; <i1> [#uses=1]
br label %cond_true115
@@ -21,7 +21,7 @@ bb84: ; preds = %entry
ret void
cond_true115: ; preds = %bb77
- %tmp118 = load i8* null ; <i8> [#uses=1]
+ %tmp118 = load i8, i8* null ; <i8> [#uses=1]
br label %cond_true120
cond_true120: ; preds = %cond_true115
diff --git a/llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll b/llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll
index 3e581051ee4..080de1fb553 100644
--- a/llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll
+++ b/llvm/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll
@@ -20,7 +20,7 @@ target triple = "i686-pc-linux-gnu"
%"struct.QString::Data" = type { %struct.QBasicAtomic, i32, i32, i16*, i8, i8, [1 x i16] }
define i1 @_ZNK12QImageWriter8canWriteEv() {
- %tmp62 = load %struct.QImageWriterPrivate** null ; <%struct.QImageWriterPrivate*> [#uses=1]
+ %tmp62 = load %struct.QImageWriterPrivate*, %struct.QImageWriterPrivate** null ; <%struct.QImageWriterPrivate*> [#uses=1]
%tmp = getelementptr %struct.QImageWriterPrivate, %struct.QImageWriterPrivate* %tmp62, i32 0, i32 9 ; <%struct.QString*> [#uses=1]
%tmp75 = call %struct.QString* @_ZN7QStringaSERKS_( %struct.QString* %tmp, %struct.QString* null ) ; <%struct.QString*> [#uses=0]
call void asm sideeffect "lock\0Adecl $0\0Asetne 1", "=*m"( i32* null )
diff --git a/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll b/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll
index 8f569e2179d..f83eea179d6 100644
--- a/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll
+++ b/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll
@@ -8,12 +8,12 @@ target datalayout = "e-p:64:64"
define void @foo(i32* %a0, i32* %a1, i32* %a2, i32* %a3, i32* %a4, i32* %a5) {
b:
- %r = load i32* %a0
- %r2 = load i32* %a1
- %r4 = load i32* %a2
- %r6 = load i32* %a3
- %r8 = load i32* %a4
- %r14 = load i32* %a5
+ %r = load i32, i32* %a0
+ %r2 = load i32, i32* %a1
+ %r4 = load i32, i32* %a2
+ %r6 = load i32, i32* %a3
+ %r8 = load i32, i32* %a4
+ %r14 = load i32, i32* %a5
%rx = sext i32 %r2 to i64
%r9 = sext i32 %r to i64
%r11 = add i64 %rx, 0
@@ -133,13 +133,13 @@ b341:
%r343 = add i64 %s661, 0
%r346 = add i64 %r343, 0
%r347 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r346
- %r348 = load float* %r347
+ %r348 = load float, float* %r347
%r352 = add i64 %r343, 0
%r353 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r352
- %r354 = load float* %r353
- %r362 = load float* bitcast ([128 x i64]* @i6000 to float*)
+ %r354 = load float, float* %r353
+ %r362 = load float, float* bitcast ([128 x i64]* @i6000 to float*)
%r363 = fadd float 0.000000e+00, %r362
- %r370 = load float* bitcast ([128 x i64]* @i6000 to float*)
+ %r370 = load float, float* bitcast ([128 x i64]* @i6000 to float*)
%r376 = icmp slt i64 %r16, 0
br i1 %r376, label %b377, label %a35b
b377:
@@ -184,8 +184,8 @@ b535:
%s933 = phi i64 [ %r533, %b514 ], [ %r795, %b712 ]
%r538 = add i64 %w1855, 0
%r539 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r538
- %r540 = load float* %r539
- %r551 = load float* bitcast ([128 x i64]* @i6000 to float*)
+ %r540 = load float, float* %r539
+ %r551 = load float, float* bitcast ([128 x i64]* @i6000 to float*)
%r562 = sub i64 %s933, 0
%r564 = icmp slt i64 %r512, 0
br i1 %r564, label %b565, label %a45b
@@ -213,22 +213,22 @@ a45b714:
%r717 = add i64 %e944, 0
%r720 = add i64 %r717, 0
%r721 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r720
- %r722 = load float* %r721
+ %r722 = load float, float* %r721
%r726 = add i64 %r717, 0
%r727 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r726
- %r728 = load float* %r727
+ %r728 = load float, float* %r727
%r732 = add i64 %r717, 0
%r733 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r732
- %r734 = load float* %r733
+ %r734 = load float, float* %r733
%r738 = add i64 %r717, 0
%r739 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r738
- %r740 = load float* %r739
+ %r740 = load float, float* %r739
%r744 = add i64 %r717, 0
%r745 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r744
- %r746 = load float* %r745
+ %r746 = load float, float* %r745
%r750 = add i64 %r717, 0
%r751 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r750
- %r752 = load float* %r751
+ %r752 = load float, float* %r751
%r753 = fadd float %r752, %r746
%r754 = fadd float %r728, %r722
%r755 = fadd float %r734, %r754
@@ -237,10 +237,10 @@ a45b714:
%r759 = fadd float %r757, %r540
%r770 = add i64 %r717, 0
%r771 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r770
- %r772 = load float* %r771
+ %r772 = load float, float* %r771
%r776 = add i64 %r717, 0
%r777 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r776
- %r778 = load float* %r777
+ %r778 = load float, float* %r777
%r781 = fadd float %r363, %r772
%r782 = fadd float %r781, %r778
%r783 = fadd float %r551, %r782
@@ -253,7 +253,7 @@ b820:
%r844 = add i64 %r16, 0
%r846 = sext i32 %r60 to i64
%r847 = add i64 %r846, 0
- %r851 = load float* bitcast ([128 x i64]* @i6000 to float*)
+ %r851 = load float, float* bitcast ([128 x i64]* @i6000 to float*)
%r856 = sub i64 %rx, 0
br label %b858
b858:
@@ -266,10 +266,10 @@ b858:
%r859 = add i64 %r856, 0
%r862 = add i64 %w1891, 0
%r863 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r862
- %r864 = load float* %r863
+ %r864 = load float, float* %r863
%r868 = add i64 %w1891, 0
%r869 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r868
- %r870 = load float* %r869
+ %r870 = load float, float* %r869
%r873 = sub i64 %r859, 0
%r876 = sub i64 %s1173, 0
%r878 = icmp slt i64 %r847, 0
@@ -302,11 +302,11 @@ a53b1019:
%r1024 = bitcast i8* %c2 to float*
%r1025 = add i64 %r1022, 0
%r1026 = getelementptr float, float* %r1024, i64 %r1025
- %r1027 = load float* %r1026
+ %r1027 = load float, float* %r1026
%r1032 = add i64 %r873, 0
%r1033 = add i64 %r1032, 0
%r1034 = getelementptr float, float* %r1024, i64 %r1033
- %r1035 = load float* %r1034
+ %r1035 = load float, float* %r1034
%r1037 = bitcast i8* %c22010 to float*
%r1040 = getelementptr float, float* %r1037, i64 %r1025
%r1044 = fadd float %r864, %r1035
@@ -336,10 +336,10 @@ b1117:
%r1120 = add i64 %s661, 0
%r1121 = add i64 %r1120, 0
%r1122 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1121
- %r1123 = load float* %r1122
+ %r1123 = load float, float* %r1122
%r1132 = bitcast i8* %c22012 to float*
%r1134 = getelementptr float, float* %r1132, i64 %w1915
- %r1135 = load float* %r1134
+ %r1135 = load float, float* %r1134
%r1136 = fadd float %r1123, %r1135
%r1138 = icmp slt i64 %r1114, 0
br i1 %r1138, label %b1139, label %a63b
@@ -410,7 +410,7 @@ b1342:
%r1355 = sub i64 %r1352, 0
%r1370 = add i64 %d1533, 0
%r1371 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1370
- %r1372 = load float* %r1371
+ %r1372 = load float, float* %r1371
br label %a74b
a74b:
%w1958 = phi i64 [ 0, %b1342 ], [ %v1959, %a74b ]
@@ -441,10 +441,10 @@ a97b:
%r1622 = add i64 %r1614, 0
%r1754 = bitcast i8* %r28 to float*
%r1756 = getelementptr float, float* %r1754, i64 %w1970
- %r1757 = load float* %r1756
+ %r1757 = load float, float* %r1756
%r1761 = add i64 %r1622, 0
%r1762 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1761
- %r1763 = load float* %r1762
+ %r1763 = load float, float* %r1762
%r1767 = add i64 %r1622, 0
%r1768 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1767
%r1772 = fadd float %r1763, 0.000000e+00
diff --git a/llvm/test/CodeGen/X86/2007-02-04-OrAddrMode.ll b/llvm/test/CodeGen/X86/2007-02-04-OrAddrMode.ll
index ee6e562a7f4..f05175259c8 100644
--- a/llvm/test/CodeGen/X86/2007-02-04-OrAddrMode.ll
+++ b/llvm/test/CodeGen/X86/2007-02-04-OrAddrMode.ll
@@ -7,7 +7,7 @@ define i32 @test(float ** %tmp2, i32 %tmp12) nounwind {
; CHECK: orl $1, %{{.*}}
; CHECK: ret
- %tmp3 = load float** %tmp2
+ %tmp3 = load float*, float** %tmp2
%tmp132 = shl i32 %tmp12, 2 ; <i32> [#uses=1]
%tmp4 = bitcast float* %tmp3 to i8* ; <i8*> [#uses=1]
%ctg2 = getelementptr i8, i8* %tmp4, i32 %tmp132 ; <i8*> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2007-02-16-BranchFold.ll b/llvm/test/CodeGen/X86/2007-02-16-BranchFold.ll
index 2e40e246036..f4754a53531 100644
--- a/llvm/test/CodeGen/X86/2007-02-16-BranchFold.ll
+++ b/llvm/test/CodeGen/X86/2007-02-16-BranchFold.ll
@@ -55,14 +55,14 @@ bb28.i.i938.exitStub: ; preds = %LeafBlock3
bb.i9.i.i932.ce: ; preds = %newFuncRoot
%tmp1.i3.i.i930 = getelementptr %struct.list, %struct.list* %l_addr.01.0.i2.i.i929, i32 0, i32 0 ; <i8**> [#uses=1]
- %tmp2.i4.i.i931 = load i8** %tmp1.i3.i.i930 ; <i8*> [#uses=1]
+ %tmp2.i4.i.i931 = load i8*, i8** %tmp1.i3.i.i930 ; <i8*> [#uses=1]
%tmp66.i62.i = bitcast i8* %tmp2.i4.i.i931 to %struct.operator* ; <%struct.operator*> [#uses=7]
%tmp1.i6.i = getelementptr %struct.operator, %struct.operator* %tmp66.i62.i, i32 0, i32 2 ; <i32*> [#uses=1]
- %tmp2.i7.i = load i32* %tmp1.i6.i ; <i32> [#uses=1]
- %tmp3.i8.i = load %struct.FILE** @outfile ; <%struct.FILE*> [#uses=1]
+ %tmp2.i7.i = load i32, i32* %tmp1.i6.i ; <i32> [#uses=1]
+ %tmp3.i8.i = load %struct.FILE*, %struct.FILE** @outfile ; <%struct.FILE*> [#uses=1]
%tmp5.i9.i = call i32 (%struct.FILE*, i8*, ...)* @fprintf( %struct.FILE* %tmp3.i8.i, i8* getelementptr ([11 x i8]* @str1, i32 0, i32 0), i32 %tmp2.i7.i ) ; <i32> [#uses=0]
%tmp7.i10.i = getelementptr %struct.operator, %struct.operator* %tmp66.i62.i, i32 0, i32 5 ; <i32*> [#uses=1]
- %tmp8.i11.i = load i32* %tmp7.i10.i ; <i32> [#uses=7]
+ %tmp8.i11.i = load i32, i32* %tmp7.i10.i ; <i32> [#uses=7]
br label %NodeBlock5
NodeBlock5: ; preds = %bb.i9.i.i932.ce
diff --git a/llvm/test/CodeGen/X86/2007-02-19-LiveIntervalAssert.ll b/llvm/test/CodeGen/X86/2007-02-19-LiveIntervalAssert.ll
index 954c95d6961..5d2c01aeef6 100644
--- a/llvm/test/CodeGen/X86/2007-02-19-LiveIntervalAssert.ll
+++ b/llvm/test/CodeGen/X86/2007-02-19-LiveIntervalAssert.ll
@@ -6,9 +6,9 @@
@stderr = external global %struct._IO_FILE*
define void @__eprintf(i8* %string, i8* %expression, i32 %line, i8* %filename) {
- %tmp = load %struct._IO_FILE** @stderr
+ %tmp = load %struct._IO_FILE*, %struct._IO_FILE** @stderr
%tmp5 = tail call i32 (%struct._IO_FILE*, i8*, ...)* @fprintf( %struct._IO_FILE* %tmp, i8* %string, i8* %expression, i32 %line, i8* %filename )
- %tmp6 = load %struct._IO_FILE** @stderr
+ %tmp6 = load %struct._IO_FILE*, %struct._IO_FILE** @stderr
%tmp7 = tail call i32 @fflush( %struct._IO_FILE* %tmp6 )
tail call void @abort( )
unreachable
diff --git a/llvm/test/CodeGen/X86/2007-03-01-SpillerCrash.ll b/llvm/test/CodeGen/X86/2007-03-01-SpillerCrash.ll
index 112d1ab65e7..dbbb611dc75 100644
--- a/llvm/test/CodeGen/X86/2007-03-01-SpillerCrash.ll
+++ b/llvm/test/CodeGen/X86/2007-03-01-SpillerCrash.ll
@@ -4,7 +4,7 @@
define void @test() nounwind {
test.exit:
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:0 [#uses=4]
- load <4 x float>* null ; <<4 x float>>:1 [#uses=1]
+ load <4 x float>, <4 x float>* null ; <<4 x float>>:1 [#uses=1]
shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:2 [#uses=1]
fmul <4 x float> %0, %2 ; <<4 x float>>:3 [#uses=1]
fsub <4 x float> zeroinitializer, %3 ; <<4 x float>>:4 [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll b/llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
index f2cbbf8f1e9..f159bcdee13 100644
--- a/llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
+++ b/llvm/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
@@ -27,47 +27,47 @@ bb: ; preds = %bb, %bb.preheader
%p_addr.076.0.rec = mul i32 %i.073.0, 9 ; <i32> [#uses=9]
%p_addr.076.0 = getelementptr i8, i8* %p, i32 %p_addr.076.0.rec ; <i8*> [#uses=1]
%tmp2 = getelementptr i8*, i8** %buf, i32 %i.073.0 ; <i8**> [#uses=1]
- %tmp3 = load i8** %tmp2 ; <i8*> [#uses=8]
+ %tmp3 = load i8*, i8** %tmp2 ; <i8*> [#uses=8]
%tmp5 = getelementptr i8, i8* %tmp3, i32 %col ; <i8*> [#uses=1]
- %tmp7 = load i8* %p_addr.076.0 ; <i8> [#uses=1]
+ %tmp7 = load i8, i8* %p_addr.076.0 ; <i8> [#uses=1]
store i8 %tmp7, i8* %tmp5
%p_addr.076.0.sum93 = add i32 %p_addr.076.0.rec, 1 ; <i32> [#uses=1]
%tmp11 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum93 ; <i8*> [#uses=1]
- %tmp13 = load i8* %tmp11 ; <i8> [#uses=1]
+ %tmp13 = load i8, i8* %tmp11 ; <i8> [#uses=1]
%tmp15 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum72 ; <i8*> [#uses=1]
store i8 %tmp13, i8* %tmp15
%p_addr.076.0.sum92 = add i32 %p_addr.076.0.rec, 2 ; <i32> [#uses=1]
%tmp17 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum92 ; <i8*> [#uses=1]
- %tmp19 = load i8* %tmp17 ; <i8> [#uses=1]
+ %tmp19 = load i8, i8* %tmp17 ; <i8> [#uses=1]
%tmp21 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum71 ; <i8*> [#uses=1]
store i8 %tmp19, i8* %tmp21
%p_addr.076.0.sum91 = add i32 %p_addr.076.0.rec, 3 ; <i32> [#uses=1]
%tmp23 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum91 ; <i8*> [#uses=1]
- %tmp25 = load i8* %tmp23 ; <i8> [#uses=1]
+ %tmp25 = load i8, i8* %tmp23 ; <i8> [#uses=1]
%tmp27 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum70 ; <i8*> [#uses=1]
store i8 %tmp25, i8* %tmp27
%p_addr.076.0.sum90 = add i32 %p_addr.076.0.rec, 4 ; <i32> [#uses=1]
%tmp29 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum90 ; <i8*> [#uses=1]
- %tmp31 = load i8* %tmp29 ; <i8> [#uses=1]
+ %tmp31 = load i8, i8* %tmp29 ; <i8> [#uses=1]
%tmp33 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum69 ; <i8*> [#uses=2]
store i8 %tmp31, i8* %tmp33
%p_addr.076.0.sum89 = add i32 %p_addr.076.0.rec, 5 ; <i32> [#uses=1]
%tmp35 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum89 ; <i8*> [#uses=1]
- %tmp37 = load i8* %tmp35 ; <i8> [#uses=1]
+ %tmp37 = load i8, i8* %tmp35 ; <i8> [#uses=1]
%tmp39 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum68 ; <i8*> [#uses=1]
store i8 %tmp37, i8* %tmp39
%p_addr.076.0.sum88 = add i32 %p_addr.076.0.rec, 6 ; <i32> [#uses=1]
%tmp41 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum88 ; <i8*> [#uses=1]
- %tmp43 = load i8* %tmp41 ; <i8> [#uses=1]
+ %tmp43 = load i8, i8* %tmp41 ; <i8> [#uses=1]
store i8 %tmp43, i8* %tmp33
%p_addr.076.0.sum87 = add i32 %p_addr.076.0.rec, 7 ; <i32> [#uses=1]
%tmp47 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum87 ; <i8*> [#uses=1]
- %tmp49 = load i8* %tmp47 ; <i8> [#uses=1]
+ %tmp49 = load i8, i8* %tmp47 ; <i8> [#uses=1]
%tmp51 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum66 ; <i8*> [#uses=1]
store i8 %tmp49, i8* %tmp51
%p_addr.076.0.sum = add i32 %p_addr.076.0.rec, 8 ; <i32> [#uses=1]
%tmp53 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum ; <i8*> [#uses=1]
- %tmp55 = load i8* %tmp53 ; <i8> [#uses=1]
+ %tmp55 = load i8, i8* %tmp53 ; <i8> [#uses=1]
%tmp57 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum ; <i8*> [#uses=1]
store i8 %tmp55, i8* %tmp57
%indvar.next = add i32 %i.073.0, 1 ; <i32> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/2007-03-16-InlineAsm.ll b/llvm/test/CodeGen/X86/2007-03-16-InlineAsm.ll
index 3bd6d590efc..61746814f9a 100644
--- a/llvm/test/CodeGen/X86/2007-03-16-InlineAsm.ll
+++ b/llvm/test/CodeGen/X86/2007-03-16-InlineAsm.ll
@@ -11,16 +11,16 @@ entry:
%ret = alloca i32, align 4 ; <i32*> [#uses=2]
store i32 %A, i32* %A_addr
store i32 %B, i32* %B_addr
- %tmp1 = load i32* %A_addr ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %A_addr ; <i32> [#uses=1]
%tmp2 = call i32 asm "roll $1,$0", "=r,I,0,~{dirflag},~{fpsr},~{flags},~{cc}"( i32 7, i32 %tmp1 ) ; <i32> [#uses=1]
store i32 %tmp2, i32* %ret
- %tmp3 = load i32* %ret ; <i32> [#uses=1]
+ %tmp3 = load i32, i32* %ret ; <i32> [#uses=1]
store i32 %tmp3, i32* %tmp
- %tmp4 = load i32* %tmp ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* %tmp ; <i32> [#uses=1]
store i32 %tmp4, i32* %retval
br label %return
return: ; preds = %entry
- %retval5 = load i32* %retval ; <i32> [#uses=1]
+ %retval5 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval5
}
diff --git a/llvm/test/CodeGen/X86/2007-03-26-CoalescerBug.ll b/llvm/test/CodeGen/X86/2007-03-26-CoalescerBug.ll
index 9676f143bca..b9689e9330c 100644
--- a/llvm/test/CodeGen/X86/2007-03-26-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2007-03-26-CoalescerBug.ll
@@ -4,7 +4,7 @@
define void @foo(...) {
bb1:
- %t43 = load i64* getelementptr ([339 x i64]* @data, i32 0, i64 212), align 4
+ %t43 = load i64, i64* getelementptr ([339 x i64]* @data, i32 0, i64 212), align 4
br i1 false, label %bb80, label %bb6
bb6:
br i1 false, label %bb38, label %bb265
diff --git a/llvm/test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll b/llvm/test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll
index f916753c819..31c6b532d8c 100644
--- a/llvm/test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll
+++ b/llvm/test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll
@@ -20,7 +20,7 @@ bb32: ; preds = %bb32, %cond_true
%i.2115.0 = phi i32 [ 0, %cond_true ], [ %indvar.next127, %bb32 ] ; <i32> [#uses=1]
%c.2112.0 = phi i32 [ 0, %cond_true ], [ %tmp49, %bb32 ] ; <i32> [#uses=1]
%tmp43 = getelementptr %struct.partition_def, %struct.partition_def* %part, i32 0, i32 1, i32 %c.2112.0, i32 1 ; <%struct.partition_elem**> [#uses=1]
- %tmp44 = load %struct.partition_elem** %tmp43 ; <%struct.partition_elem*> [#uses=1]
+ %tmp44 = load %struct.partition_elem*, %struct.partition_elem** %tmp43 ; <%struct.partition_elem*> [#uses=1]
%tmp4445 = ptrtoint %struct.partition_elem* %tmp44 to i32 ; <i32> [#uses=1]
%tmp48 = sub i32 %tmp4445, 0 ; <i32> [#uses=1]
%tmp49 = sdiv i32 %tmp48, 12 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2007-05-05-VecCastExpand.ll b/llvm/test/CodeGen/X86/2007-05-05-VecCastExpand.ll
index e58b1932197..1274398ac3f 100644
--- a/llvm/test/CodeGen/X86/2007-05-05-VecCastExpand.ll
+++ b/llvm/test/CodeGen/X86/2007-05-05-VecCastExpand.ll
@@ -5,9 +5,9 @@
define void @test() {
bb.i:
- %tmp.i660 = load <4 x float>* null ; <<4 x float>> [#uses=1]
+ %tmp.i660 = load <4 x float>, <4 x float>* null ; <<4 x float>> [#uses=1]
call void (i32, ...)* @printf( i32 0, i8* getelementptr ([18 x i8]* @str, i32 0, i64 0), double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00 )
- %tmp152.i = load <4 x i32>* null ; <<4 x i32>> [#uses=1]
+ %tmp152.i = load <4 x i32>, <4 x i32>* null ; <<4 x i32>> [#uses=1]
%tmp156.i = bitcast <4 x i32> %tmp152.i to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp175.i = bitcast <4 x float> %tmp.i660 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp176.i = xor <4 x i32> %tmp156.i, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll b/llvm/test/CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll
index b3df7d46d79..87edab77ac1 100644
--- a/llvm/test/CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll
+++ b/llvm/test/CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll
@@ -4,7 +4,7 @@ define void @test(<4 x float>* %arg) {
%tmp89 = getelementptr <4 x float>, <4 x float>* %arg, i64 3
%tmp1144 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, zeroinitializer
store <4 x float> %tmp1144, <4 x float>* null
- %tmp1149 = load <4 x float>* %tmp89
+ %tmp1149 = load <4 x float>, <4 x float>* %tmp89
%tmp1150 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp1149
store <4 x float> %tmp1150, <4 x float>* %tmp89
ret void
diff --git a/llvm/test/CodeGen/X86/2007-07-10-StackerAssert.ll b/llvm/test/CodeGen/X86/2007-07-10-StackerAssert.ll
index d611677942c..f8b09d0c340 100644
--- a/llvm/test/CodeGen/X86/2007-07-10-StackerAssert.ll
+++ b/llvm/test/CodeGen/X86/2007-07-10-StackerAssert.ll
@@ -22,11 +22,11 @@ cond_true354: ; preds = %bb164
ret i32 0
bb383: ; preds = %bb164
- %tmp408 = load float* null ; <float> [#uses=2]
+ %tmp408 = load float, float* null ; <float> [#uses=2]
br i1 false, label %cond_true425, label %cond_next443
cond_true425: ; preds = %bb383
- %tmp430 = load float* null ; <float> [#uses=1]
+ %tmp430 = load float, float* null ; <float> [#uses=1]
%tmp432 = fsub float %tmp430, %tmp408 ; <float> [#uses=1]
%tmp432433 = fpext float %tmp432 to double ; <double> [#uses=1]
%tmp434435 = fpext float %tmp408 to double ; <double> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll b/llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll
index 41b6e94f993..63ed4601a04 100644
--- a/llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll
+++ b/llvm/test/CodeGen/X86/2007-07-18-Vector-Extract.ll
@@ -5,13 +5,13 @@
define i64 @foo_0(<2 x i64>* %val) {
entry:
%val12 = getelementptr <2 x i64>, <2 x i64>* %val, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp7 = load i64* %val12 ; <i64> [#uses=1]
+ %tmp7 = load i64, i64* %val12 ; <i64> [#uses=1]
ret i64 %tmp7
}
define i64 @foo_1(<2 x i64>* %val) {
entry:
%tmp2.gep = getelementptr <2 x i64>, <2 x i64>* %val, i32 0, i32 1 ; <i64*> [#uses=1]
- %tmp4 = load i64* %tmp2.gep ; <i64> [#uses=1]
+ %tmp4 = load i64, i64* %tmp2.gep ; <i64> [#uses=1]
ret i64 %tmp4
}
diff --git a/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll b/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll
index 9bf098f2dac..2b3ac5c36d7 100644
--- a/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll
+++ b/llvm/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll
@@ -46,14 +46,14 @@ _ubyte_convert2_to_ctypes.exit: ; preds = %entry
]
bb4: ; preds = %_ubyte_convert2_to_ctypes.exit, %cond_next.i
- %tmp5 = load i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
+ %tmp5 = load i8**, i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
%tmp6 = getelementptr i8*, i8** %tmp5, i64 2 ; <i8**> [#uses=1]
- %tmp7 = load i8** %tmp6 ; <i8*> [#uses=1]
+ %tmp7 = load i8*, i8** %tmp6 ; <i8*> [#uses=1]
%tmp78 = bitcast i8* %tmp7 to %struct._typeobject* ; <%struct._typeobject*> [#uses=1]
%tmp9 = getelementptr %struct._typeobject, %struct._typeobject* %tmp78, i32 0, i32 12 ; <%struct.PyNumberMethods**> [#uses=1]
- %tmp10 = load %struct.PyNumberMethods** %tmp9 ; <%struct.PyNumberMethods*> [#uses=1]
+ %tmp10 = load %struct.PyNumberMethods*, %struct.PyNumberMethods** %tmp9 ; <%struct.PyNumberMethods*> [#uses=1]
%tmp11 = getelementptr %struct.PyNumberMethods, %struct.PyNumberMethods* %tmp10, i32 0, i32 5 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)**> [#uses=1]
- %tmp12 = load %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)** %tmp11 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*> [#uses=1]
+ %tmp12 = load %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)** %tmp11 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*> [#uses=1]
%tmp15 = call %struct.PyObject* %tmp12( %struct.PyObject* %a, %struct.PyObject* %b ) ; <%struct.PyObject*> [#uses=1]
ret %struct.PyObject* %tmp15
@@ -63,38 +63,38 @@ bb17: ; preds = %_ubyte_convert2_to_ctypes.exit, %cond_next.i
br i1 %tmp19, label %cond_next, label %UnifiedReturnBlock
cond_next: ; preds = %bb17
- %tmp22 = load i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
+ %tmp22 = load i8**, i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
%tmp23 = getelementptr i8*, i8** %tmp22, i64 10 ; <i8**> [#uses=1]
- %tmp24 = load i8** %tmp23 ; <i8*> [#uses=1]
+ %tmp24 = load i8*, i8** %tmp23 ; <i8*> [#uses=1]
%tmp2425 = bitcast i8* %tmp24 to %struct._typeobject* ; <%struct._typeobject*> [#uses=1]
%tmp26 = getelementptr %struct._typeobject, %struct._typeobject* %tmp2425, i32 0, i32 12 ; <%struct.PyNumberMethods**> [#uses=1]
- %tmp27 = load %struct.PyNumberMethods** %tmp26 ; <%struct.PyNumberMethods*> [#uses=1]
+ %tmp27 = load %struct.PyNumberMethods*, %struct.PyNumberMethods** %tmp26 ; <%struct.PyNumberMethods*> [#uses=1]
%tmp28 = getelementptr %struct.PyNumberMethods, %struct.PyNumberMethods* %tmp27, i32 0, i32 5 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)**> [#uses=1]
- %tmp29 = load %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)** %tmp28 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*> [#uses=1]
+ %tmp29 = load %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*, %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)** %tmp28 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*> [#uses=1]
%tmp32 = call %struct.PyObject* %tmp29( %struct.PyObject* %a, %struct.PyObject* %b ) ; <%struct.PyObject*> [#uses=1]
ret %struct.PyObject* %tmp32
bb35: ; preds = %_ubyte_convert2_to_ctypes.exit, %cond_next.i
- %tmp36 = load i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
+ %tmp36 = load i8**, i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
%tmp37 = getelementptr i8*, i8** %tmp36, i64 27 ; <i8**> [#uses=1]
- %tmp38 = load i8** %tmp37 ; <i8*> [#uses=1]
+ %tmp38 = load i8*, i8** %tmp37 ; <i8*> [#uses=1]
%tmp3839 = bitcast i8* %tmp38 to void ()* ; <void ()*> [#uses=1]
call void %tmp3839( )
- %tmp40 = load i8* %arg2, align 1 ; <i8> [#uses=4]
+ %tmp40 = load i8, i8* %arg2, align 1 ; <i8> [#uses=4]
%tmp1.i = icmp eq i8 %tmp40, 0 ; <i1> [#uses=2]
br i1 %tmp1.i, label %cond_true.i, label %cond_false.i
cond_true.i: ; preds = %bb35
%tmp3.i196 = call i32 @feraiseexcept( i32 4 ) ; <i32> [#uses=0]
- %tmp46207 = load i8* %arg2, align 1 ; <i8> [#uses=3]
- %tmp48208 = load i8* %arg1, align 1 ; <i8> [#uses=2]
+ %tmp46207 = load i8, i8* %arg2, align 1 ; <i8> [#uses=3]
+ %tmp48208 = load i8, i8* %arg1, align 1 ; <i8> [#uses=2]
%tmp1.i197210 = icmp eq i8 %tmp48208, 0 ; <i1> [#uses=1]
%tmp4.i212 = icmp eq i8 %tmp46207, 0 ; <i1> [#uses=1]
%tmp7.i198213 = or i1 %tmp1.i197210, %tmp4.i212 ; <i1> [#uses=1]
br i1 %tmp7.i198213, label %cond_true.i200, label %cond_next17.i
cond_false.i: ; preds = %bb35
- %tmp42 = load i8* %arg1, align 1 ; <i8> [#uses=3]
+ %tmp42 = load i8, i8* %arg1, align 1 ; <i8> [#uses=3]
%tmp7.i = udiv i8 %tmp42, %tmp40 ; <i8> [#uses=2]
%tmp1.i197 = icmp eq i8 %tmp42, 0 ; <i1> [#uses=1]
%tmp7.i198 = or i1 %tmp1.i197, %tmp1.i ; <i1> [#uses=1]
@@ -120,18 +120,18 @@ cond_next17.i: ; preds = %cond_false.i, %cond_true.i
ubyte_ctype_remainder.exit: ; preds = %cond_next17.i, %cond_true14.i, %cond_true.i200
%out2.0 = phi i8 [ %tmp20.i, %cond_next17.i ], [ 0, %cond_true14.i ], [ 0, %cond_true.i200 ] ; <i8> [#uses=1]
%out.2 = phi i8 [ %out.1, %cond_next17.i ], [ %out.0, %cond_true14.i ], [ %out.0, %cond_true.i200 ] ; <i8> [#uses=1]
- %tmp52 = load i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
+ %tmp52 = load i8**, i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
%tmp53 = getelementptr i8*, i8** %tmp52, i64 28 ; <i8**> [#uses=1]
- %tmp54 = load i8** %tmp53 ; <i8*> [#uses=1]
+ %tmp54 = load i8*, i8** %tmp53 ; <i8*> [#uses=1]
%tmp5455 = bitcast i8* %tmp54 to i32 ()* ; <i32 ()*> [#uses=1]
%tmp56 = call i32 %tmp5455( ) ; <i32> [#uses=2]
%tmp58 = icmp eq i32 %tmp56, 0 ; <i1> [#uses=1]
br i1 %tmp58, label %cond_next89, label %cond_true61
cond_true61: ; preds = %ubyte_ctype_remainder.exit
- %tmp62 = load i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
+ %tmp62 = load i8**, i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
%tmp63 = getelementptr i8*, i8** %tmp62, i64 25 ; <i8**> [#uses=1]
- %tmp64 = load i8** %tmp63 ; <i8*> [#uses=1]
+ %tmp64 = load i8*, i8** %tmp63 ; <i8*> [#uses=1]
%tmp6465 = bitcast i8* %tmp64 to i32 (i8*, i32*, i32*, %struct.PyObject**)* ; <i32 (i8*, i32*, i32*, %struct.PyObject**)*> [#uses=1]
%tmp67 = call i32 %tmp6465( i8* getelementptr ([14 x i8]* @.str5, i32 0, i64 0), i32* %bufsize, i32* %errmask, %struct.PyObject** %errobj ) ; <i32> [#uses=1]
%tmp68 = icmp slt i32 %tmp67, 0 ; <i1> [#uses=1]
@@ -139,12 +139,12 @@ cond_true61: ; preds = %ubyte_ctype_remainder.exit
cond_next73: ; preds = %cond_true61
store i32 1, i32* %first, align 4
- %tmp74 = load i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
+ %tmp74 = load i8**, i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
%tmp75 = getelementptr i8*, i8** %tmp74, i64 29 ; <i8**> [#uses=1]
- %tmp76 = load i8** %tmp75 ; <i8*> [#uses=1]
+ %tmp76 = load i8*, i8** %tmp75 ; <i8*> [#uses=1]
%tmp7677 = bitcast i8* %tmp76 to i32 (i32, %struct.PyObject*, i32, i32*)* ; <i32 (i32, %struct.PyObject*, i32, i32*)*> [#uses=1]
- %tmp79 = load %struct.PyObject** %errobj, align 8 ; <%struct.PyObject*> [#uses=1]
- %tmp80 = load i32* %errmask, align 4 ; <i32> [#uses=1]
+ %tmp79 = load %struct.PyObject*, %struct.PyObject** %errobj, align 8 ; <%struct.PyObject*> [#uses=1]
+ %tmp80 = load i32, i32* %errmask, align 4 ; <i32> [#uses=1]
%tmp82 = call i32 %tmp7677( i32 %tmp80, %struct.PyObject* %tmp79, i32 %tmp56, i32* %first ) ; <i32> [#uses=1]
%tmp83 = icmp eq i32 %tmp82, 0 ; <i1> [#uses=1]
br i1 %tmp83, label %cond_next89, label %UnifiedReturnBlock
@@ -155,19 +155,19 @@ cond_next89: ; preds = %cond_next73, %ubyte_ctype_remainder.exit
br i1 %tmp92, label %UnifiedReturnBlock, label %cond_next97
cond_next97: ; preds = %cond_next89
- %tmp98 = load i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
+ %tmp98 = load i8**, i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
%tmp99 = getelementptr i8*, i8** %tmp98, i64 25 ; <i8**> [#uses=1]
- %tmp100 = load i8** %tmp99 ; <i8*> [#uses=1]
+ %tmp100 = load i8*, i8** %tmp99 ; <i8*> [#uses=1]
%tmp100101 = bitcast i8* %tmp100 to %struct._typeobject* ; <%struct._typeobject*> [#uses=2]
%tmp102 = getelementptr %struct._typeobject, %struct._typeobject* %tmp100101, i32 0, i32 38 ; <%struct.PyObject* (%struct._typeobject*, i64)**> [#uses=1]
- %tmp103 = load %struct.PyObject* (%struct._typeobject*, i64)** %tmp102 ; <%struct.PyObject* (%struct._typeobject*, i64)*> [#uses=1]
+ %tmp103 = load %struct.PyObject* (%struct._typeobject*, i64)*, %struct.PyObject* (%struct._typeobject*, i64)** %tmp102 ; <%struct.PyObject* (%struct._typeobject*, i64)*> [#uses=1]
%tmp108 = call %struct.PyObject* %tmp103( %struct._typeobject* %tmp100101, i64 0 ) ; <%struct.PyObject*> [#uses=3]
%tmp110 = icmp eq %struct.PyObject* %tmp108, null ; <i1> [#uses=1]
br i1 %tmp110, label %cond_true113, label %cond_next135
cond_true113: ; preds = %cond_next97
%tmp115 = getelementptr %struct.PyObject, %struct.PyObject* %tmp90, i32 0, i32 0 ; <i64*> [#uses=2]
- %tmp116 = load i64* %tmp115 ; <i64> [#uses=1]
+ %tmp116 = load i64, i64* %tmp115 ; <i64> [#uses=1]
%tmp117 = add i64 %tmp116, -1 ; <i64> [#uses=2]
store i64 %tmp117, i64* %tmp115
%tmp123 = icmp eq i64 %tmp117, 0 ; <i1> [#uses=1]
@@ -175,9 +175,9 @@ cond_true113: ; preds = %cond_next97
cond_true126: ; preds = %cond_true113
%tmp128 = getelementptr %struct.PyObject, %struct.PyObject* %tmp90, i32 0, i32 1 ; <%struct._typeobject**> [#uses=1]
- %tmp129 = load %struct._typeobject** %tmp128 ; <%struct._typeobject*> [#uses=1]
+ %tmp129 = load %struct._typeobject*, %struct._typeobject** %tmp128 ; <%struct._typeobject*> [#uses=1]
%tmp130 = getelementptr %struct._typeobject, %struct._typeobject* %tmp129, i32 0, i32 6 ; <void (%struct.PyObject*)**> [#uses=1]
- %tmp131 = load void (%struct.PyObject*)** %tmp130 ; <void (%struct.PyObject*)*> [#uses=1]
+ %tmp131 = load void (%struct.PyObject*)*, void (%struct.PyObject*)** %tmp130 ; <void (%struct.PyObject*)*> [#uses=1]
call void %tmp131( %struct.PyObject* %tmp90 )
ret %struct.PyObject* null
@@ -188,19 +188,19 @@ cond_next135: ; preds = %cond_next97
%tmp140141 = bitcast %struct.PyObject* %tmp90 to %struct.PyTupleObject* ; <%struct.PyTupleObject*> [#uses=2]
%tmp143 = getelementptr %struct.PyTupleObject, %struct.PyTupleObject* %tmp140141, i32 0, i32 3, i64 0 ; <%struct.PyObject**> [#uses=1]
store %struct.PyObject* %tmp108, %struct.PyObject** %tmp143
- %tmp145 = load i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
+ %tmp145 = load i8**, i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
%tmp146 = getelementptr i8*, i8** %tmp145, i64 25 ; <i8**> [#uses=1]
- %tmp147 = load i8** %tmp146 ; <i8*> [#uses=1]
+ %tmp147 = load i8*, i8** %tmp146 ; <i8*> [#uses=1]
%tmp147148 = bitcast i8* %tmp147 to %struct._typeobject* ; <%struct._typeobject*> [#uses=2]
%tmp149 = getelementptr %struct._typeobject, %struct._typeobject* %tmp147148, i32 0, i32 38 ; <%struct.PyObject* (%struct._typeobject*, i64)**> [#uses=1]
- %tmp150 = load %struct.PyObject* (%struct._typeobject*, i64)** %tmp149 ; <%struct.PyObject* (%struct._typeobject*, i64)*> [#uses=1]
+ %tmp150 = load %struct.PyObject* (%struct._typeobject*, i64)*, %struct.PyObject* (%struct._typeobject*, i64)** %tmp149 ; <%struct.PyObject* (%struct._typeobject*, i64)*> [#uses=1]
%tmp155 = call %struct.PyObject* %tmp150( %struct._typeobject* %tmp147148, i64 0 ) ; <%struct.PyObject*> [#uses=3]
%tmp157 = icmp eq %struct.PyObject* %tmp155, null ; <i1> [#uses=1]
br i1 %tmp157, label %cond_true160, label %cond_next182
cond_true160: ; preds = %cond_next135
%tmp162 = getelementptr %struct.PyObject, %struct.PyObject* %tmp90, i32 0, i32 0 ; <i64*> [#uses=2]
- %tmp163 = load i64* %tmp162 ; <i64> [#uses=1]
+ %tmp163 = load i64, i64* %tmp162 ; <i64> [#uses=1]
%tmp164 = add i64 %tmp163, -1 ; <i64> [#uses=2]
store i64 %tmp164, i64* %tmp162
%tmp170 = icmp eq i64 %tmp164, 0 ; <i1> [#uses=1]
@@ -208,9 +208,9 @@ cond_true160: ; preds = %cond_next135
cond_true173: ; preds = %cond_true160
%tmp175 = getelementptr %struct.PyObject, %struct.PyObject* %tmp90, i32 0, i32 1 ; <%struct._typeobject**> [#uses=1]
- %tmp176 = load %struct._typeobject** %tmp175 ; <%struct._typeobject*> [#uses=1]
+ %tmp176 = load %struct._typeobject*, %struct._typeobject** %tmp175 ; <%struct._typeobject*> [#uses=1]
%tmp177 = getelementptr %struct._typeobject, %struct._typeobject* %tmp176, i32 0, i32 6 ; <void (%struct.PyObject*)**> [#uses=1]
- %tmp178 = load void (%struct.PyObject*)** %tmp177 ; <void (%struct.PyObject*)*> [#uses=1]
+ %tmp178 = load void (%struct.PyObject*)*, void (%struct.PyObject*)** %tmp177 ; <void (%struct.PyObject*)*> [#uses=1]
call void %tmp178( %struct.PyObject* %tmp90 )
ret %struct.PyObject* null
diff --git a/llvm/test/CodeGen/X86/2007-09-05-InvalidAsm.ll b/llvm/test/CodeGen/X86/2007-09-05-InvalidAsm.ll
index 93bd51a04fd..eb715125b17 100644
--- a/llvm/test/CodeGen/X86/2007-09-05-InvalidAsm.ll
+++ b/llvm/test/CodeGen/X86/2007-09-05-InvalidAsm.ll
@@ -14,12 +14,12 @@ entry:
cond_true: ; preds = %entry
%tmp1415 = shl i16 %param, 3 ; <i16> [#uses=1]
%tmp17 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
- %tmp18 = load %struct.ComponentParameters** %tmp17, align 8 ; <%struct.ComponentParameters*> [#uses=1]
+ %tmp18 = load %struct.ComponentParameters*, %struct.ComponentParameters** %tmp17, align 8 ; <%struct.ComponentParameters*> [#uses=1]
%tmp1920 = bitcast %struct.ComponentParameters* %tmp18 to i8* ; <i8*> [#uses=1]
%tmp212223 = sext i16 %tmp1415 to i64 ; <i64> [#uses=1]
%tmp24 = getelementptr i8, i8* %tmp1920, i64 %tmp212223 ; <i8*> [#uses=1]
%tmp2425 = bitcast i8* %tmp24 to i64* ; <i64*> [#uses=1]
- %tmp28 = load i64* %tmp2425, align 8 ; <i64> [#uses=1]
+ %tmp28 = load i64, i64* %tmp2425, align 8 ; <i64> [#uses=1]
%tmp2829 = inttoptr i64 %tmp28 to i32* ; <i32*> [#uses=1]
%tmp31 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 2 ; <i32**> [#uses=1]
store i32* %tmp2829, i32** %tmp31, align 8
@@ -28,18 +28,18 @@ cond_true: ; preds = %entry
cond_next: ; preds = %cond_true, %entry
%tmp4243 = shl i16 %param, 3 ; <i16> [#uses=1]
%tmp46 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
- %tmp47 = load %struct.ComponentParameters** %tmp46, align 8 ; <%struct.ComponentParameters*> [#uses=1]
+ %tmp47 = load %struct.ComponentParameters*, %struct.ComponentParameters** %tmp46, align 8 ; <%struct.ComponentParameters*> [#uses=1]
%tmp4849 = bitcast %struct.ComponentParameters* %tmp47 to i8* ; <i8*> [#uses=1]
%tmp505152 = sext i16 %tmp4243 to i64 ; <i64> [#uses=1]
%tmp53 = getelementptr i8, i8* %tmp4849, i64 %tmp505152 ; <i8*> [#uses=1]
%tmp5354 = bitcast i8* %tmp53 to i64* ; <i64*> [#uses=1]
- %tmp58 = load i64* %tmp5354, align 8 ; <i64> [#uses=1]
+ %tmp58 = load i64, i64* %tmp5354, align 8 ; <i64> [#uses=1]
%tmp59 = icmp eq i64 %tmp58, 0 ; <i1> [#uses=1]
br i1 %tmp59, label %UnifiedReturnBlock, label %cond_true63
cond_true63: ; preds = %cond_next
%tmp65 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 0 ; <%struct.AGenericManager**> [#uses=1]
- %tmp66 = load %struct.AGenericManager** %tmp65, align 8 ; <%struct.AGenericManager*> [#uses=1]
+ %tmp66 = load %struct.AGenericManager*, %struct.AGenericManager** %tmp65, align 8 ; <%struct.AGenericManager*> [#uses=1]
%tmp69 = tail call i32 @_ZN15AGenericManager24DefaultComponentInstanceERP23ComponentInstanceRecord( %struct.AGenericManager* %tmp66, %struct.ComponentInstanceRecord** %instance ) ; <i32> [#uses=1]
ret i32 %tmp69
diff --git a/llvm/test/CodeGen/X86/2007-10-04-AvoidEFLAGSCopy.ll b/llvm/test/CodeGen/X86/2007-10-04-AvoidEFLAGSCopy.ll
index 6fc8ec907ea..7eb018ce525 100644
--- a/llvm/test/CodeGen/X86/2007-10-04-AvoidEFLAGSCopy.ll
+++ b/llvm/test/CodeGen/X86/2007-10-04-AvoidEFLAGSCopy.ll
@@ -5,7 +5,7 @@
define fastcc void @sample_3d_linear(%struct.gl_texture_object* %tObj, %struct.gl_texture_image* %img, float %s, float %t, float %r, i8* %red, i8* %green, i8* %blue, i8* %alpha) {
entry:
- %tmp15 = load i32* null, align 4 ; <i32> [#uses=1]
+ %tmp15 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%tmp16 = icmp eq i32 %tmp15, 10497 ; <i1> [#uses=1]
%tmp2152 = call float @floorf( float 0.000000e+00 ) ; <float> [#uses=0]
br i1 %tmp16, label %cond_true, label %cond_false
diff --git a/llvm/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll b/llvm/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll
index d3a47aefb7d..c535392ffdf 100644
--- a/llvm/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll
+++ b/llvm/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll
@@ -7,15 +7,15 @@ entry:
cond_next127: ; preds = %cond_next391, %entry
%v.1 = phi i32 [ undef, %entry ], [ %tmp411, %cond_next391 ] ; <i32> [#uses=1]
%tmp149 = mul i32 0, %v.1 ; <i32> [#uses=0]
- %tmpss = load i32* %ss, align 4 ; <i32> [#uses=1]
- %tmpbp = load i32* %bp, align 4 ; <i32> [#uses=2]
+ %tmpss = load i32, i32* %ss, align 4 ; <i32> [#uses=1]
+ %tmpbp = load i32, i32* %bp, align 4 ; <i32> [#uses=2]
%tmp254 = and i32 %tmpss, 15 ; <i32> [#uses=1]
%tmp256 = and i32 %tmpbp, 15 ; <i32> [#uses=2]
br label %cond_next391
cond_next391: ; preds = %cond_next127
- %tmp393 = load i32* %ss, align 4 ; <i32> [#uses=1]
- %tmp395 = load i32* %bp, align 4 ; <i32> [#uses=2]
+ %tmp393 = load i32, i32* %ss, align 4 ; <i32> [#uses=1]
+ %tmp395 = load i32, i32* %bp, align 4 ; <i32> [#uses=2]
%tmp396 = shl i32 %tmp393, %tmp395 ; <i32> [#uses=2]
%tmp398 = sub i32 32, %tmp256 ; <i32> [#uses=2]
%tmp399 = lshr i32 %tmp396, %tmp398 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll b/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll
index 1e3f2b9b9d3..c4d5cb970c3 100644
--- a/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll
+++ b/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll
@@ -7,19 +7,19 @@ entry:
bb171.preheader: ; preds = %entry
%tmp176 = fadd float 0.000000e+00, 1.000000e+00 ; <float> [#uses=2]
%gi.1 = getelementptr float, float* %fz, i32 0 ; <float*> [#uses=2]
- %tmp240 = load float* %gi.1, align 4 ; <float> [#uses=1]
+ %tmp240 = load float, float* %gi.1, align 4 ; <float> [#uses=1]
%tmp242 = fsub float %tmp240, 0.000000e+00 ; <float> [#uses=2]
%tmp251 = getelementptr float, float* %fz, i32 0 ; <float*> [#uses=1]
- %tmp252 = load float* %tmp251, align 4 ; <float> [#uses=1]
+ %tmp252 = load float, float* %tmp251, align 4 ; <float> [#uses=1]
%tmp258 = getelementptr float, float* %fz, i32 0 ; <float*> [#uses=2]
- %tmp259 = load float* %tmp258, align 4 ; <float> [#uses=2]
+ %tmp259 = load float, float* %tmp258, align 4 ; <float> [#uses=2]
%tmp261 = fmul float %tmp259, %tmp176 ; <float> [#uses=1]
%tmp262 = fsub float 0.000000e+00, %tmp261 ; <float> [#uses=2]
%tmp269 = fmul float %tmp252, %tmp176 ; <float> [#uses=1]
%tmp276 = fmul float %tmp259, 0.000000e+00 ; <float> [#uses=1]
%tmp277 = fadd float %tmp269, %tmp276 ; <float> [#uses=2]
%tmp281 = getelementptr float, float* %fz, i32 0 ; <float*> [#uses=1]
- %tmp282 = load float* %tmp281, align 4 ; <float> [#uses=2]
+ %tmp282 = load float, float* %tmp281, align 4 ; <float> [#uses=2]
%tmp284 = fsub float %tmp282, %tmp277 ; <float> [#uses=1]
%tmp291 = fadd float %tmp282, %tmp277 ; <float> [#uses=1]
%tmp298 = fsub float 0.000000e+00, %tmp262 ; <float> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll b/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll
index f6ee02f5437..4b1c1d77f7a 100644
--- a/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll
+++ b/llvm/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll
@@ -22,12 +22,12 @@ cond_next245: ; preds = %cond_next127
br i1 false, label %cond_true267, label %cond_next391
cond_true267: ; preds = %cond_next245
- %tmp269 = load i8** %byteptr, align 4 ; <i8*> [#uses=3]
- %tmp270 = load i8* %tmp269, align 1 ; <i8> [#uses=1]
+ %tmp269 = load i8*, i8** %byteptr, align 4 ; <i8*> [#uses=3]
+ %tmp270 = load i8, i8* %tmp269, align 1 ; <i8> [#uses=1]
%tmp270271 = zext i8 %tmp270 to i32 ; <i32> [#uses=1]
%tmp272 = getelementptr i8, i8* %tmp269, i32 1 ; <i8*> [#uses=2]
store i8* %tmp272, i8** %byteptr, align 4
- %tmp276 = load i8* %tmp272, align 1 ; <i8> [#uses=1]
+ %tmp276 = load i8, i8* %tmp272, align 1 ; <i8> [#uses=1]
%tmp278 = getelementptr i8, i8* %tmp269, i32 2 ; <i8*> [#uses=1]
store i8* %tmp278, i8** %byteptr, align 4
%tmp286 = icmp eq i32 %tmp270271, %markerPrefix ; <i1> [#uses=1]
@@ -42,7 +42,7 @@ cond_next327: ; preds = %cond_true267
br i1 false, label %cond_true343, label %cond_next391
cond_true343: ; preds = %cond_next327
- %tmp345 = load i8** %byteptr, align 4 ; <i8*> [#uses=1]
+ %tmp345 = load i8*, i8** %byteptr, align 4 ; <i8*> [#uses=1]
store i8* null, i8** %byteptr, align 4
store i8* %tmp345, i8** %byteptr, align 4
br label %cond_next391
diff --git a/llvm/test/CodeGen/X86/2007-10-14-CoalescerCrash.ll b/llvm/test/CodeGen/X86/2007-10-14-CoalescerCrash.ll
index 8a55935cc1f..941925987fd 100644
--- a/llvm/test/CodeGen/X86/2007-10-14-CoalescerCrash.ll
+++ b/llvm/test/CodeGen/X86/2007-10-14-CoalescerCrash.ll
@@ -10,8 +10,8 @@ bb: ; preds = %bb31, %entry
br i1 false, label %bb6, label %bb31
bb6: ; preds = %bb
- %tmp10 = load i64* null, align 8 ; <i64> [#uses=1]
- %tmp16 = load i64* null, align 8 ; <i64> [#uses=1]
+ %tmp10 = load i64, i64* null, align 8 ; <i64> [#uses=1]
+ %tmp16 = load i64, i64* null, align 8 ; <i64> [#uses=1]
br i1 false, label %bb23, label %bb31
bb23: ; preds = %bb6
diff --git a/llvm/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll b/llvm/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll
index 82052b13e18..30e1f575caf 100644
--- a/llvm/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll
+++ b/llvm/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll
@@ -25,7 +25,7 @@ cond_next127: ; preds = %cond_true, %bb
%tmp154155156 = sext i16 %tmp154155 to i32 ; <i32> [#uses=1]
%tmp158 = xor i32 %tmp154155156, %tmp153 ; <i32> [#uses=1]
%tmp160 = or i32 %tmp158, %cnt.0 ; <i32> [#uses=1]
- %tmp171 = load i32* %bitptr, align 4 ; <i32> [#uses=1]
+ %tmp171 = load i32, i32* %bitptr, align 4 ; <i32> [#uses=1]
%tmp180181 = sext i16 0 to i32 ; <i32> [#uses=3]
%tmp183 = add i32 %tmp160, 1 ; <i32> [#uses=1]
br i1 false, label %cond_true188, label %cond_next245
@@ -54,7 +54,7 @@ cond_next327: ; preds = %cond_true267
br i1 false, label %cond_true343, label %cond_next385
cond_true343: ; preds = %cond_next327
- %tmp345 = load i8** %byteptr, align 4 ; <i8*> [#uses=1]
+ %tmp345 = load i8*, i8** %byteptr, align 4 ; <i8*> [#uses=1]
store i8* null, i8** %byteptr, align 4
br i1 false, label %cond_next385, label %cond_true352
@@ -69,8 +69,8 @@ cond_next385: ; preds = %cond_true352, %cond_true343, %cond_next327
br label %cond_next391
cond_next391: ; preds = %cond_next385, %cond_next245
- %tmp393 = load i32* %source, align 4 ; <i32> [#uses=1]
- %tmp395 = load i32* %bitptr, align 4 ; <i32> [#uses=2]
+ %tmp393 = load i32, i32* %source, align 4 ; <i32> [#uses=1]
+ %tmp395 = load i32, i32* %bitptr, align 4 ; <i32> [#uses=2]
%tmp396 = shl i32 %tmp393, %tmp395 ; <i32> [#uses=1]
%tmp398 = sub i32 32, %tmp256 ; <i32> [#uses=1]
%tmp405 = lshr i32 %tmp396, 31 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll b/llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll
index 573a2177b74..9f57df87fe4 100644
--- a/llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll
+++ b/llvm/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll
@@ -2,7 +2,7 @@
define signext i16 @t() {
entry:
- %tmp180 = load i16* null, align 2 ; <i16> [#uses=3]
+ %tmp180 = load i16, i16* null, align 2 ; <i16> [#uses=3]
%tmp180181 = sext i16 %tmp180 to i32 ; <i32> [#uses=1]
%tmp185 = icmp slt i16 %tmp180, 0 ; <i1> [#uses=1]
br i1 %tmp185, label %cond_true188, label %cond_next245
diff --git a/llvm/test/CodeGen/X86/2007-10-31-extractelement-i64.ll b/llvm/test/CodeGen/X86/2007-10-31-extractelement-i64.ll
index 1b8e67dcc9b..3d52b6cf7b3 100644
--- a/llvm/test/CodeGen/X86/2007-10-31-extractelement-i64.ll
+++ b/llvm/test/CodeGen/X86/2007-10-31-extractelement-i64.ll
@@ -9,16 +9,16 @@ entry:
%retval = alloca <1 x i64>, align 8 ; <<1 x i64>*> [#uses=3]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store <2 x i64> %__A, <2 x i64>* %__A_addr
- %tmp = load <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
+ %tmp = load <2 x i64>, <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
%tmp1 = bitcast <2 x i64> %tmp to <2 x i64> ; <<2 x i64>> [#uses=1]
%tmp2 = extractelement <2 x i64> %tmp1, i32 0 ; <i64> [#uses=1]
%tmp3 = bitcast i64 %tmp2 to <1 x i64> ; <<1 x i64>> [#uses=1]
store <1 x i64> %tmp3, <1 x i64>* %retval, align 8
- %tmp4 = load <1 x i64>* %retval, align 8 ; <<1 x i64>> [#uses=0]
+ %tmp4 = load <1 x i64>, <1 x i64>* %retval, align 8 ; <<1 x i64>> [#uses=0]
br label %return
return: ; preds = %entry
- %retval5 = load <1 x i64>* %retval ; <<1 x i64>> [#uses=1]
+ %retval5 = load <1 x i64>, <1 x i64>* %retval ; <<1 x i64>> [#uses=1]
ret <1 x i64> %retval5
}
@@ -28,16 +28,16 @@ entry:
%retval = alloca <1 x i64>, align 8 ; <<1 x i64>*> [#uses=3]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store <2 x i64> %__A, <2 x i64>* %__A_addr
- %tmp = load <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
+ %tmp = load <2 x i64>, <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
%tmp1 = bitcast <2 x i64> %tmp to <2 x i64> ; <<2 x i64>> [#uses=1]
%tmp2 = extractelement <2 x i64> %tmp1, i32 1 ; <i64> [#uses=1]
%tmp3 = bitcast i64 %tmp2 to <1 x i64> ; <<1 x i64>> [#uses=1]
store <1 x i64> %tmp3, <1 x i64>* %retval, align 8
- %tmp4 = load <1 x i64>* %retval, align 8 ; <<1 x i64>> [#uses=0]
+ %tmp4 = load <1 x i64>, <1 x i64>* %retval, align 8 ; <<1 x i64>> [#uses=0]
br label %return
return: ; preds = %entry
- %retval5 = load <1 x i64>* %retval ; <<1 x i64>> [#uses=1]
+ %retval5 = load <1 x i64>, <1 x i64>* %retval ; <<1 x i64>> [#uses=1]
ret <1 x i64> %retval5
}
@@ -48,16 +48,16 @@ entry:
%tmp = alloca i64, align 8 ; <i64*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store <2 x i64> %__A, <2 x i64>* %__A_addr
- %tmp1 = load <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
+ %tmp1 = load <2 x i64>, <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
%tmp2 = bitcast <2 x i64> %tmp1 to <2 x i64> ; <<2 x i64>> [#uses=1]
%tmp3 = extractelement <2 x i64> %tmp2, i32 0 ; <i64> [#uses=1]
store i64 %tmp3, i64* %tmp, align 8
- %tmp4 = load i64* %tmp, align 8 ; <i64> [#uses=1]
+ %tmp4 = load i64, i64* %tmp, align 8 ; <i64> [#uses=1]
store i64 %tmp4, i64* %retval, align 8
br label %return
return: ; preds = %entry
- %retval5 = load i64* %retval ; <i64> [#uses=1]
+ %retval5 = load i64, i64* %retval ; <i64> [#uses=1]
ret i64 %retval5
}
@@ -68,15 +68,15 @@ entry:
%tmp = alloca i64, align 8 ; <i64*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store <2 x i64> %__A, <2 x i64>* %__A_addr
- %tmp1 = load <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
+ %tmp1 = load <2 x i64>, <2 x i64>* %__A_addr, align 16 ; <<2 x i64>> [#uses=1]
%tmp2 = bitcast <2 x i64> %tmp1 to <2 x i64> ; <<2 x i64>> [#uses=1]
%tmp3 = extractelement <2 x i64> %tmp2, i32 1 ; <i64> [#uses=1]
store i64 %tmp3, i64* %tmp, align 8
- %tmp4 = load i64* %tmp, align 8 ; <i64> [#uses=1]
+ %tmp4 = load i64, i64* %tmp, align 8 ; <i64> [#uses=1]
store i64 %tmp4, i64* %retval, align 8
br label %return
return: ; preds = %entry
- %retval5 = load i64* %retval ; <i64> [#uses=1]
+ %retval5 = load i64, i64* %retval ; <i64> [#uses=1]
ret i64 %retval5
}
diff --git a/llvm/test/CodeGen/X86/2007-11-04-LiveIntervalCrash.ll b/llvm/test/CodeGen/X86/2007-11-04-LiveIntervalCrash.ll
index 404561848b7..019c4428dc6 100644
--- a/llvm/test/CodeGen/X86/2007-11-04-LiveIntervalCrash.ll
+++ b/llvm/test/CodeGen/X86/2007-11-04-LiveIntervalCrash.ll
@@ -12,7 +12,7 @@ entry:
bb: ; preds = %bb, %entry
%name8.0.reg2mem.0.rec = phi i64 [ %indvar.next, %bb ], [ 0, %entry ] ; <i64> [#uses=1]
%hash.0.reg2mem.0 = phi i64 [ %tmp27, %bb ], [ 0, %entry ] ; <i64> [#uses=1]
- %tmp13 = load i8* null, align 1 ; <i8> [#uses=1]
+ %tmp13 = load i8, i8* null, align 1 ; <i8> [#uses=1]
%tmp1314 = zext i8 %tmp13 to i64 ; <i64> [#uses=1]
%tmp25 = lshr i64 %tmp1314, 4 ; <i64> [#uses=1]
%tmp22 = add i64 %tmp25, %hash.0.reg2mem.0 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll b/llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll
index 63e2ad2c469..d88b45f6839 100644
--- a/llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll
+++ b/llvm/test/CodeGen/X86/2007-11-06-InstrSched.ll
@@ -9,10 +9,10 @@ bb18: ; preds = %bb18, %entry
%i.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %tmp17, %bb18 ] ; <i32> [#uses=3]
%res.0.reg2mem.0 = phi float [ 0.000000e+00, %entry ], [ %tmp14, %bb18 ] ; <float> [#uses=1]
%tmp3 = getelementptr i32, i32* %x, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
- %tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* %tmp3, align 4 ; <i32> [#uses=1]
%tmp45 = sitofp i32 %tmp4 to float ; <float> [#uses=1]
%tmp8 = getelementptr float, float* %y, i32 %i.0.reg2mem.0 ; <float*> [#uses=1]
- %tmp9 = load float* %tmp8, align 4 ; <float> [#uses=1]
+ %tmp9 = load float, float* %tmp8, align 4 ; <float> [#uses=1]
%tmp11 = fmul float %tmp9, %tmp45 ; <float> [#uses=1]
%tmp14 = fadd float %tmp11, %res.0.reg2mem.0 ; <float> [#uses=2]
%tmp17 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/2007-11-07-MulBy4.ll b/llvm/test/CodeGen/X86/2007-11-07-MulBy4.ll
index d5b630b59d9..06e0a779be1 100644
--- a/llvm/test/CodeGen/X86/2007-11-07-MulBy4.ll
+++ b/llvm/test/CodeGen/X86/2007-11-07-MulBy4.ll
@@ -7,7 +7,7 @@
define fastcc i32 @foo(i16* %eptr, i8* %ecode, %struct.foo_data* %md, i32 %ims) {
entry:
- %tmp36 = load i32* null, align 4 ; <i32> [#uses=1]
+ %tmp36 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%tmp37 = icmp ult i32 0, %tmp36 ; <i1> [#uses=1]
br i1 %tmp37, label %cond_next79, label %cond_true
@@ -15,7 +15,7 @@ cond_true: ; preds = %entry
ret i32 0
cond_next79: ; preds = %entry
- %tmp85 = load i32* null, align 4 ; <i32> [#uses=1]
+ %tmp85 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%tmp86 = icmp ult i32 0, %tmp85 ; <i1> [#uses=1]
br i1 %tmp86, label %cond_next130, label %cond_true89
diff --git a/llvm/test/CodeGen/X86/2007-12-16-BURRSchedCrash.ll b/llvm/test/CodeGen/X86/2007-12-16-BURRSchedCrash.ll
index f9ea2441c72..3404fe61fc1 100644
--- a/llvm/test/CodeGen/X86/2007-12-16-BURRSchedCrash.ll
+++ b/llvm/test/CodeGen/X86/2007-12-16-BURRSchedCrash.ll
@@ -18,9 +18,9 @@ bb1271: ; preds = %bb898
br i1 true, label %bb4668, label %bb848
bb4668: ; preds = %bb4648
- %tmp5464 = load i64* %x82167, align 8 ; <i64> [#uses=1]
+ %tmp5464 = load i64, i64* %x82167, align 8 ; <i64> [#uses=1]
%tmp5467 = icmp ne i64 0, %tmp5464 ; <i1> [#uses=1]
- %tmp5470 = load i32** %tmp1272, align 8 ; <i32*> [#uses=1]
+ %tmp5470 = load i32*, i32** %tmp1272, align 8 ; <i32*> [#uses=1]
%tmp5471 = icmp eq i32* %tmp5470, null ; <i1> [#uses=1]
call fastcc void @c34007g__pkg__create.311( %struct.c34007g__pkg__parent* null, i32 7, i32 9, i32 2, i32 4, i32 1 )
%tmp5475 = or i1 %tmp5471, %tmp5467 ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll b/llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll
index deb42474dfb..26d18273dd4 100644
--- a/llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll
+++ b/llvm/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll
@@ -12,9 +12,9 @@ entry:
br i1 true, label %bb4668, label %bb848
bb4668: ; preds = %bb4648
- %tmp5464 = load i64* %x82167, align 8 ; <i64> [#uses=1]
+ %tmp5464 = load i64, i64* %x82167, align 8 ; <i64> [#uses=1]
%tmp5467 = icmp ne i64 0, %tmp5464 ; <i1> [#uses=1]
- %tmp5470 = load i32** %tmp1272, align 8 ; <i32*> [#uses=1]
+ %tmp5470 = load i32*, i32** %tmp1272, align 8 ; <i32*> [#uses=1]
%tmp5471 = icmp eq i32* %tmp5470, null ; <i1> [#uses=1]
%tmp5475 = or i1 %tmp5471, %tmp5467 ; <i1> [#uses=1]
%tmp5497 = or i1 %tmp5475, false ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll b/llvm/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
index 0af2eb365b1..7da85d3a9a1 100644
--- a/llvm/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
+++ b/llvm/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
@@ -27,7 +27,7 @@ bb967: ; preds = %bb951
ret i32 0
bb986: ; preds = %bb951
- %tmp993 = load i32* %tmp961, align 4 ; <i32> [#uses=1]
+ %tmp993 = load i32, i32* %tmp961, align 4 ; <i32> [#uses=1]
%tmp995 = icmp ugt i32 %tmp993, %tmp910 ; <i1> [#uses=2]
%tmp1002 = add i32 %tmp955, 1 ; <i32> [#uses=1]
%low.0 = select i1 %tmp995, i32 0, i32 %tmp1002 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll b/llvm/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll
index 9584b718fea..6e98f9cb219 100644
--- a/llvm/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll
+++ b/llvm/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll
@@ -2,29 +2,29 @@
define void @SolveCubic(double %a, double %b, double %c, double %d, i32* %solutions, double* %x) {
entry:
- %tmp71 = load x86_fp80* null, align 16 ; <x86_fp80> [#uses=1]
+ %tmp71 = load x86_fp80, x86_fp80* null, align 16 ; <x86_fp80> [#uses=1]
%tmp72 = fdiv x86_fp80 %tmp71, 0xKC000C000000000000000 ; <x86_fp80> [#uses=1]
%tmp73 = fadd x86_fp80 0xK00000000000000000000, %tmp72 ; <x86_fp80> [#uses=1]
%tmp7374 = fptrunc x86_fp80 %tmp73 to double ; <double> [#uses=1]
store double %tmp7374, double* null, align 8
- %tmp81 = load double* null, align 8 ; <double> [#uses=1]
+ %tmp81 = load double, double* null, align 8 ; <double> [#uses=1]
%tmp82 = fadd double %tmp81, 0x401921FB54442D18 ; <double> [#uses=1]
%tmp83 = fdiv double %tmp82, 3.000000e+00 ; <double> [#uses=1]
%tmp84 = call double @cos( double %tmp83 ) ; <double> [#uses=1]
%tmp85 = fmul double 0.000000e+00, %tmp84 ; <double> [#uses=1]
%tmp8586 = fpext double %tmp85 to x86_fp80 ; <x86_fp80> [#uses=1]
- %tmp87 = load x86_fp80* null, align 16 ; <x86_fp80> [#uses=1]
+ %tmp87 = load x86_fp80, x86_fp80* null, align 16 ; <x86_fp80> [#uses=1]
%tmp88 = fdiv x86_fp80 %tmp87, 0xKC000C000000000000000 ; <x86_fp80> [#uses=1]
%tmp89 = fadd x86_fp80 %tmp8586, %tmp88 ; <x86_fp80> [#uses=1]
%tmp8990 = fptrunc x86_fp80 %tmp89 to double ; <double> [#uses=1]
store double %tmp8990, double* null, align 8
- %tmp97 = load double* null, align 8 ; <double> [#uses=1]
+ %tmp97 = load double, double* null, align 8 ; <double> [#uses=1]
%tmp98 = fadd double %tmp97, 0x402921FB54442D18 ; <double> [#uses=1]
%tmp99 = fdiv double %tmp98, 3.000000e+00 ; <double> [#uses=1]
%tmp100 = call double @cos( double %tmp99 ) ; <double> [#uses=1]
%tmp101 = fmul double 0.000000e+00, %tmp100 ; <double> [#uses=1]
%tmp101102 = fpext double %tmp101 to x86_fp80 ; <x86_fp80> [#uses=1]
- %tmp103 = load x86_fp80* null, align 16 ; <x86_fp80> [#uses=1]
+ %tmp103 = load x86_fp80, x86_fp80* null, align 16 ; <x86_fp80> [#uses=1]
%tmp104 = fdiv x86_fp80 %tmp103, 0xKC000C000000000000000 ; <x86_fp80> [#uses=1]
%tmp105 = fadd x86_fp80 %tmp101102, %tmp104 ; <x86_fp80> [#uses=1]
%tmp105106 = fptrunc x86_fp80 %tmp105 to double ; <double> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-01-16-InvalidDAGCombineXform.ll b/llvm/test/CodeGen/X86/2008-01-16-InvalidDAGCombineXform.ll
index a5a693c6e7d..ffc5a1fb6d4 100644
--- a/llvm/test/CodeGen/X86/2008-01-16-InvalidDAGCombineXform.ll
+++ b/llvm/test/CodeGen/X86/2008-01-16-InvalidDAGCombineXform.ll
@@ -4,27 +4,27 @@
define void @localize_local_bb19_bb(%struct.node_t** %cur_node) {
newFuncRoot:
- %tmp1 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
+ %tmp1 = load %struct.node_t*, %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
%tmp2 = getelementptr %struct.node_t, %struct.node_t* %tmp1, i32 0, i32 4 ; <double**> [#uses=1]
- %tmp3 = load double** %tmp2, align 4 ; <double*> [#uses=1]
- %tmp4 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
+ %tmp3 = load double*, double** %tmp2, align 4 ; <double*> [#uses=1]
+ %tmp4 = load %struct.node_t*, %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
%tmp5 = getelementptr %struct.node_t, %struct.node_t* %tmp4, i32 0, i32 4 ; <double**> [#uses=1]
store double* %tmp3, double** %tmp5, align 4
- %tmp6 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
+ %tmp6 = load %struct.node_t*, %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
%tmp7 = getelementptr %struct.node_t, %struct.node_t* %tmp6, i32 0, i32 3 ; <double***> [#uses=1]
- %tmp8 = load double*** %tmp7, align 4 ; <double**> [#uses=1]
- %tmp9 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
+ %tmp8 = load double**, double*** %tmp7, align 4 ; <double**> [#uses=1]
+ %tmp9 = load %struct.node_t*, %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
%tmp10 = getelementptr %struct.node_t, %struct.node_t* %tmp9, i32 0, i32 3 ; <double***> [#uses=1]
store double** %tmp8, double*** %tmp10, align 4
- %tmp11 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
+ %tmp11 = load %struct.node_t*, %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
%tmp12 = getelementptr %struct.node_t, %struct.node_t* %tmp11, i32 0, i32 0 ; <double**> [#uses=1]
- %tmp13 = load double** %tmp12, align 4 ; <double*> [#uses=1]
- %tmp14 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
+ %tmp13 = load double*, double** %tmp12, align 4 ; <double*> [#uses=1]
+ %tmp14 = load %struct.node_t*, %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
%tmp15 = getelementptr %struct.node_t, %struct.node_t* %tmp14, i32 0, i32 0 ; <double**> [#uses=1]
store double* %tmp13, double** %tmp15, align 4
- %tmp16 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
+ %tmp16 = load %struct.node_t*, %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
%tmp17 = getelementptr %struct.node_t, %struct.node_t* %tmp16, i32 0, i32 1 ; <%struct.node_t**> [#uses=1]
- %tmp18 = load %struct.node_t** %tmp17, align 4 ; <%struct.node_t*> [#uses=1]
+ %tmp18 = load %struct.node_t*, %struct.node_t** %tmp17, align 4 ; <%struct.node_t*> [#uses=1]
store %struct.node_t* %tmp18, %struct.node_t** %cur_node, align 4
ret void
}
diff --git a/llvm/test/CodeGen/X86/2008-02-05-ISelCrash.ll b/llvm/test/CodeGen/X86/2008-02-05-ISelCrash.ll
index 443a32de3b4..ce233a9a554 100644
--- a/llvm/test/CodeGen/X86/2008-02-05-ISelCrash.ll
+++ b/llvm/test/CodeGen/X86/2008-02-05-ISelCrash.ll
@@ -5,7 +5,7 @@
define fastcc i32 @ab(i32 %alpha, i32 %beta) nounwind {
entry:
- %tmp1 = load i64* @nodes, align 8 ; <i64> [#uses=1]
+ %tmp1 = load i64, i64* @nodes, align 8 ; <i64> [#uses=1]
%tmp2 = add i64 %tmp1, 1 ; <i64> [#uses=1]
store i64 %tmp2, i64* @nodes, align 8
ret i32 0
diff --git a/llvm/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll b/llvm/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll
index 6e065d158b0..56b1c7836e1 100644
--- a/llvm/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll
+++ b/llvm/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll
@@ -7,9 +7,9 @@ entry:
%tmp4 = fsub double -0.000000e+00, %z.1 ; <double> [#uses=1]
call void @casinh( { double, double }* sret %memtmp, double %tmp4, double %z.0 ) nounwind
%tmp19 = getelementptr { double, double }, { double, double }* %memtmp, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp20 = load double* %tmp19, align 8 ; <double> [#uses=1]
+ %tmp20 = load double, double* %tmp19, align 8 ; <double> [#uses=1]
%tmp22 = getelementptr { double, double }, { double, double }* %memtmp, i32 0, i32 1 ; <double*> [#uses=1]
- %tmp23 = load double* %tmp22, align 8 ; <double> [#uses=1]
+ %tmp23 = load double, double* %tmp22, align 8 ; <double> [#uses=1]
%tmp32 = fsub double -0.000000e+00, %tmp20 ; <double> [#uses=1]
%tmp37 = getelementptr { double, double }, { double, double }* %agg.result, i32 0, i32 0 ; <double*> [#uses=1]
store double %tmp23, double* %tmp37, align 8
diff --git a/llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll b/llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll
index 6ab89843f62..4ada77025f0 100644
--- a/llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll
+++ b/llvm/test/CodeGen/X86/2008-02-18-TailMergingBug.ll
@@ -6,13 +6,13 @@
define void @minmax(float* %result) nounwind optsize {
entry:
- %tmp2 = load float* %result, align 4 ; <float> [#uses=6]
+ %tmp2 = load float, float* %result, align 4 ; <float> [#uses=6]
%tmp4 = getelementptr float, float* %result, i32 2 ; <float*> [#uses=5]
- %tmp5 = load float* %tmp4, align 4 ; <float> [#uses=10]
+ %tmp5 = load float, float* %tmp4, align 4 ; <float> [#uses=10]
%tmp7 = getelementptr float, float* %result, i32 4 ; <float*> [#uses=5]
- %tmp8 = load float* %tmp7, align 4 ; <float> [#uses=8]
+ %tmp8 = load float, float* %tmp7, align 4 ; <float> [#uses=8]
%tmp10 = getelementptr float, float* %result, i32 6 ; <float*> [#uses=3]
- %tmp11 = load float* %tmp10, align 4 ; <float> [#uses=8]
+ %tmp11 = load float, float* %tmp10, align 4 ; <float> [#uses=8]
%tmp12 = fcmp olt float %tmp8, %tmp11 ; <i1> [#uses=5]
br i1 %tmp12, label %bb, label %bb21
@@ -59,7 +59,7 @@ bb103: ; preds = %bb80, %bb72
bb111: ; preds = %bb103, %bb80, %bb72, %bb50, %bb40, %bb26
%iftmp.0.0.in = phi float* [ %tmp10, %bb103 ], [ %result, %bb26 ], [ %result, %bb40 ], [ %result, %bb50 ], [ %tmp4.mux, %bb80 ], [ %tmp4.mux787, %bb72 ] ; <float*> [#uses=1]
- %iftmp.0.0 = load float* %iftmp.0.0.in ; <float> [#uses=1]
+ %iftmp.0.0 = load float, float* %iftmp.0.0.in ; <float> [#uses=1]
%tmp125 = fcmp ogt float %tmp8, %tmp11 ; <i1> [#uses=5]
br i1 %tmp125, label %bb128, label %bb136
@@ -106,15 +106,15 @@ bb218: ; preds = %bb195, %bb187
bb226: ; preds = %bb218, %bb195, %bb187, %bb165, %bb155, %bb141
%iftmp.7.0.in = phi float* [ %tmp10, %bb218 ], [ %result, %bb141 ], [ %result, %bb155 ], [ %result, %bb165 ], [ %tmp4.mux789, %bb195 ], [ %tmp4.mux791, %bb187 ] ; <float*> [#uses=1]
- %iftmp.7.0 = load float* %iftmp.7.0.in ; <float> [#uses=1]
+ %iftmp.7.0 = load float, float* %iftmp.7.0.in ; <float> [#uses=1]
%tmp229 = getelementptr float, float* %result, i32 1 ; <float*> [#uses=7]
- %tmp230 = load float* %tmp229, align 4 ; <float> [#uses=6]
+ %tmp230 = load float, float* %tmp229, align 4 ; <float> [#uses=6]
%tmp232 = getelementptr float, float* %result, i32 3 ; <float*> [#uses=5]
- %tmp233 = load float* %tmp232, align 4 ; <float> [#uses=10]
+ %tmp233 = load float, float* %tmp232, align 4 ; <float> [#uses=10]
%tmp235 = getelementptr float, float* %result, i32 5 ; <float*> [#uses=5]
- %tmp236 = load float* %tmp235, align 4 ; <float> [#uses=8]
+ %tmp236 = load float, float* %tmp235, align 4 ; <float> [#uses=8]
%tmp238 = getelementptr float, float* %result, i32 7 ; <float*> [#uses=3]
- %tmp239 = load float* %tmp238, align 4 ; <float> [#uses=8]
+ %tmp239 = load float, float* %tmp238, align 4 ; <float> [#uses=8]
%tmp240 = fcmp olt float %tmp236, %tmp239 ; <i1> [#uses=5]
br i1 %tmp240, label %bb243, label %bb251
@@ -161,7 +161,7 @@ bb333: ; preds = %bb310, %bb302
bb341: ; preds = %bb333, %bb310, %bb302, %bb280, %bb270, %bb256
%iftmp.14.0.in = phi float* [ %tmp238, %bb333 ], [ %tmp229, %bb280 ], [ %tmp229, %bb270 ], [ %tmp229, %bb256 ], [ %tmp232.mux, %bb310 ], [ %tmp232.mux794, %bb302 ] ; <float*> [#uses=1]
- %iftmp.14.0 = load float* %iftmp.14.0.in ; <float> [#uses=1]
+ %iftmp.14.0 = load float, float* %iftmp.14.0.in ; <float> [#uses=1]
%tmp355 = fcmp ogt float %tmp236, %tmp239 ; <i1> [#uses=5]
br i1 %tmp355, label %bb358, label %bb366
@@ -208,7 +208,7 @@ bb448: ; preds = %bb425, %bb417
bb456: ; preds = %bb448, %bb425, %bb417, %bb395, %bb385, %bb371
%iftmp.21.0.in = phi float* [ %tmp238, %bb448 ], [ %tmp229, %bb395 ], [ %tmp229, %bb385 ], [ %tmp229, %bb371 ], [ %tmp232.mux796, %bb425 ], [ %tmp232.mux798, %bb417 ] ; <float*> [#uses=1]
- %iftmp.21.0 = load float* %iftmp.21.0.in ; <float> [#uses=1]
+ %iftmp.21.0 = load float, float* %iftmp.21.0.in ; <float> [#uses=1]
%tmp458459 = fpext float %iftmp.21.0 to double ; <double> [#uses=1]
%tmp460461 = fpext float %iftmp.7.0 to double ; <double> [#uses=1]
%tmp462463 = fpext float %iftmp.14.0 to double ; <double> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll b/llvm/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll
index 0190df50308..b3f303f8fd8 100644
--- a/llvm/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll
+++ b/llvm/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll
@@ -8,7 +8,7 @@ target triple = "i386-apple-darwin8"
define void @test() nounwind {
entry:
- %tmp = load i32* @pixels, align 4 ; <i32> [#uses=1]
+ %tmp = load i32, i32* @pixels, align 4 ; <i32> [#uses=1]
%tmp1 = tail call i32 asm sideeffect "a: $0 $1", "=r,0,~{dirflag},~{fpsr},~{flags},~{ax}"( i32 %tmp ) nounwind ; <i32> [#uses=1]
store i32 %tmp1, i32* @pixels, align 4
ret void
diff --git a/llvm/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll b/llvm/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll
index beaf31f91b4..75f88b0e381 100644
--- a/llvm/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll
+++ b/llvm/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll
@@ -13,38 +13,38 @@ entry:
store i8* %src, i8** %src_addr
store i32 %dst_stride, i32* %dst_stride_addr
store i32 %src_stride, i32* %src_stride_addr
- %tmp = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
+ %tmp = load i8*, i8** %dst_addr, align 4 ; <i8*> [#uses=1]
%tmp1 = getelementptr i8, i8* %tmp, i32 0 ; <i8*> [#uses=1]
%tmp12 = bitcast i8* %tmp1 to i32* ; <i32*> [#uses=1]
- %tmp3 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
- %tmp4 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
+ %tmp3 = load i8*, i8** %dst_addr, align 4 ; <i8*> [#uses=1]
+ %tmp4 = load i32, i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
%tmp5 = getelementptr i8, i8* %tmp3, i32 %tmp4 ; <i8*> [#uses=1]
%tmp56 = bitcast i8* %tmp5 to i32* ; <i32*> [#uses=1]
- %tmp7 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
%tmp8 = mul i32 %tmp7, 2 ; <i32> [#uses=1]
- %tmp9 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
+ %tmp9 = load i8*, i8** %dst_addr, align 4 ; <i8*> [#uses=1]
%tmp10 = getelementptr i8, i8* %tmp9, i32 %tmp8 ; <i8*> [#uses=1]
%tmp1011 = bitcast i8* %tmp10 to i32* ; <i32*> [#uses=1]
- %tmp13 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
+ %tmp13 = load i32, i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
%tmp14 = mul i32 %tmp13, 3 ; <i32> [#uses=1]
- %tmp15 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
+ %tmp15 = load i8*, i8** %dst_addr, align 4 ; <i8*> [#uses=1]
%tmp16 = getelementptr i8, i8* %tmp15, i32 %tmp14 ; <i8*> [#uses=1]
%tmp1617 = bitcast i8* %tmp16 to i32* ; <i32*> [#uses=1]
- %tmp18 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
+ %tmp18 = load i8*, i8** %src_addr, align 4 ; <i8*> [#uses=1]
%tmp19 = getelementptr i8, i8* %tmp18, i32 0 ; <i8*> [#uses=1]
%tmp1920 = bitcast i8* %tmp19 to i32* ; <i32*> [#uses=1]
- %tmp21 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
- %tmp22 = load i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
+ %tmp21 = load i8*, i8** %src_addr, align 4 ; <i8*> [#uses=1]
+ %tmp22 = load i32, i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
%tmp23 = getelementptr i8, i8* %tmp21, i32 %tmp22 ; <i8*> [#uses=1]
%tmp2324 = bitcast i8* %tmp23 to i32* ; <i32*> [#uses=1]
- %tmp25 = load i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
+ %tmp25 = load i32, i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
%tmp26 = mul i32 %tmp25, 2 ; <i32> [#uses=1]
- %tmp27 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
+ %tmp27 = load i8*, i8** %src_addr, align 4 ; <i8*> [#uses=1]
%tmp28 = getelementptr i8, i8* %tmp27, i32 %tmp26 ; <i8*> [#uses=1]
%tmp2829 = bitcast i8* %tmp28 to i32* ; <i32*> [#uses=1]
- %tmp30 = load i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
+ %tmp30 = load i32, i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
%tmp31 = mul i32 %tmp30, 3 ; <i32> [#uses=1]
- %tmp32 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
+ %tmp32 = load i8*, i8** %src_addr, align 4 ; <i8*> [#uses=1]
%tmp33 = getelementptr i8, i8* %tmp32, i32 %tmp31 ; <i8*> [#uses=1]
%tmp3334 = bitcast i8* %tmp33 to i32* ; <i32*> [#uses=1]
call void asm sideeffect "movd $4, %mm0 \0A\09movd $5, %mm1 \0A\09movd $6, %mm2 \0A\09movd $7, %mm3 \0A\09punpcklbw %mm1, %mm0 \0A\09punpcklbw %mm3, %mm2 \0A\09movq %mm0, %mm1 \0A\09punpcklwd %mm2, %mm0 \0A\09punpckhwd %mm2, %mm1 \0A\09movd %mm0, $0 \0A\09punpckhdq %mm0, %mm0 \0A\09movd %mm0, $1 \0A\09movd %mm1, $2 \0A\09punpckhdq %mm1, %mm1 \0A\09movd %mm1, $3 \0A\09", "=*m,=*m,=*m,=*m,*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"( i32* %tmp12, i32* %tmp56, i32* %tmp1011, i32* %tmp1617, i32* %tmp1920, i32* %tmp2324, i32* %tmp2829, i32* %tmp3334 ) nounwind
diff --git a/llvm/test/CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll b/llvm/test/CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll
index 15be50d6eb6..382fbed9b88 100644
--- a/llvm/test/CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll
@@ -11,10 +11,10 @@ entry:
bb53: ; preds = %entry
%tmp55 = call %struct.YY** @AA( i64 1, %struct.XX* %uen ) ; <%struct.YY**> [#uses=3]
- %tmp2728128 = load %struct.XX** null ; <%struct.XX*> [#uses=1]
- %tmp61 = load %struct.YY** %tmp55, align 8 ; <%struct.YY*> [#uses=1]
+ %tmp2728128 = load %struct.XX*, %struct.XX** null ; <%struct.XX*> [#uses=1]
+ %tmp61 = load %struct.YY*, %struct.YY** %tmp55, align 8 ; <%struct.YY*> [#uses=1]
%tmp62 = getelementptr %struct.YY, %struct.YY* %tmp61, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp63 = load i64* %tmp62, align 8 ; <i64> [#uses=1]
+ %tmp63 = load i64, i64* %tmp62, align 8 ; <i64> [#uses=1]
%tmp6566 = zext i16 %tmp45 to i64 ; <i64> [#uses=1]
%tmp67 = shl i64 %tmp6566, 1 ; <i64> [#uses=1]
call void @BB( %struct.YY** %tmp55, i64 %tmp67, i8 signext 0, %struct.XX* %uen )
@@ -30,7 +30,7 @@ bb70: ; preds = %bb119, %bb70.preheader
%tmp.135 = trunc i64 %tmp63 to i32 ; <i32> [#uses=1]
%tmp136 = shl i32 %indvar133, 1 ; <i32> [#uses=1]
%DD = add i32 %tmp136, %tmp.135 ; <i32> [#uses=1]
- %tmp73 = load %struct.ZZ*** %tmp72, align 8 ; <%struct.ZZ**> [#uses=0]
+ %tmp73 = load %struct.ZZ**, %struct.ZZ*** %tmp72, align 8 ; <%struct.ZZ**> [#uses=0]
br i1 false, label %bb119, label %bb77
bb77: ; preds = %bb70
diff --git a/llvm/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll b/llvm/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll
index 3c6ba68b3b4..857e6237d14 100644
--- a/llvm/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll
+++ b/llvm/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll
@@ -34,7 +34,7 @@ bb35: ; preds = %bb24, %entry
%tmp56 = add i32 %tmp55, -1 ; <i32> [#uses=1]
%tmp5657 = sitofp i32 %tmp56 to double ; <double> [#uses=1]
%tmp15.i49 = getelementptr %struct.Lattice, %struct.Lattice* %this, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp16.i50 = load double* %tmp15.i49, align 4 ; <double> [#uses=1]
+ %tmp16.i50 = load double, double* %tmp15.i49, align 4 ; <double> [#uses=1]
%tmp17.i = fmul double %tmp5657, %tmp16.i50 ; <double> [#uses=1]
%tmp20.i39 = fadd double %tmp17.i, %tmp17.i63 ; <double> [#uses=1]
%tmp20.i23 = fadd double %tmp20.i39, %tmp17.i76 ; <double> [#uses=1]
@@ -47,11 +47,11 @@ bb58.preheader: ; preds = %bb35
bb58: ; preds = %bb58, %bb58.preheader
%tmp20.i7 = getelementptr %struct.CompAtom, %struct.CompAtom* %d, i32 0, i32 2 ; <i32*> [#uses=2]
%tmp25.i = getelementptr %struct.CompAtom, %struct.CompAtom* %tmp1819, i32 0, i32 2 ; <i32*> [#uses=2]
- %tmp74.i = load i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
+ %tmp74.i = load i32, i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
%tmp82.i = and i32 %tmp74.i, 134217728 ; <i32> [#uses=1]
%tmp85.i = or i32 0, %tmp82.i ; <i32> [#uses=1]
store i32 %tmp85.i, i32* %tmp25.i, align 1
- %tmp88.i = load i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
+ %tmp88.i = load i32, i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
%tmp95.i = and i32 %tmp88.i, -268435456 ; <i32> [#uses=1]
%tmp97.i = or i32 0, %tmp95.i ; <i32> [#uses=1]
store i32 %tmp97.i, i32* %tmp25.i, align 1
diff --git a/llvm/test/CodeGen/X86/2008-03-07-APIntBug.ll b/llvm/test/CodeGen/X86/2008-03-07-APIntBug.ll
index fb4f97af586..409bcd51a13 100644
--- a/llvm/test/CodeGen/X86/2008-03-07-APIntBug.ll
+++ b/llvm/test/CodeGen/X86/2008-03-07-APIntBug.ll
@@ -18,16 +18,16 @@ newFuncRoot:
bb1233.exitStub: ; preds = %bb1163
ret void
bb1163: ; preds = %newFuncRoot
- %tmp1164 = load %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
+ %tmp1164 = load %struct.rec*, %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
%tmp1165 = getelementptr %struct.rec, %struct.rec* %tmp1164, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp11651166 = bitcast %struct.head_type* %tmp1165 to %struct.symbol_type* ; <%struct.symbol_type*> [#uses=1]
%tmp1167 = getelementptr %struct.symbol_type, %struct.symbol_type* %tmp11651166, i32 0, i32 3 ; <%struct.rec**> [#uses=1]
- %tmp1168 = load %struct.rec** %tmp1167, align 1 ; <%struct.rec*> [#uses=2]
- %tmp1169 = load %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
+ %tmp1168 = load %struct.rec*, %struct.rec** %tmp1167, align 1 ; <%struct.rec*> [#uses=2]
+ %tmp1169 = load %struct.rec*, %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
%tmp1170 = getelementptr %struct.rec, %struct.rec* %tmp1169, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp11701171 = bitcast %struct.head_type* %tmp1170 to %struct.symbol_type* ; <%struct.symbol_type*> [#uses=1]
%tmp1172 = getelementptr %struct.symbol_type, %struct.symbol_type* %tmp11701171, i32 0, i32 3 ; <%struct.rec**> [#uses=1]
- %tmp1173 = load %struct.rec** %tmp1172, align 1 ; <%struct.rec*> [#uses=2]
+ %tmp1173 = load %struct.rec*, %struct.rec** %tmp1172, align 1 ; <%struct.rec*> [#uses=2]
%tmp1174 = getelementptr %struct.rec, %struct.rec* %tmp1173, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp11741175 = bitcast %struct.head_type* %tmp1174 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
%tmp1176 = getelementptr %struct.word_type, %struct.word_type* %tmp11741175, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
@@ -35,7 +35,7 @@ bb1163: ; preds = %newFuncRoot
%tmp11771178 = bitcast { i16, i8, i8 }* %tmp1177 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
%tmp1179 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp11771178, i32 0, i32 2 ; <i8*> [#uses=2]
%mask1180 = and i8 1, 1 ; <i8> [#uses=2]
- %tmp1181 = load i8* %tmp1179, align 1 ; <i8> [#uses=1]
+ %tmp1181 = load i8, i8* %tmp1179, align 1 ; <i8> [#uses=1]
%tmp1182 = shl i8 %mask1180, 7 ; <i8> [#uses=1]
%tmp1183 = and i8 %tmp1181, 127 ; <i8> [#uses=1]
%tmp1184 = or i8 %tmp1183, %tmp1182 ; <i8> [#uses=1]
@@ -47,7 +47,7 @@ bb1163: ; preds = %newFuncRoot
%tmp1189 = getelementptr %struct.SECOND_UNION, %struct.SECOND_UNION* %tmp1188, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
%tmp11891190 = bitcast { i16, i8, i8 }* %tmp1189 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
%tmp1191 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp11891190, i32 0, i32 2 ; <i8*> [#uses=1]
- %tmp1192 = load i8* %tmp1191, align 1 ; <i8> [#uses=1]
+ %tmp1192 = load i8, i8* %tmp1191, align 1 ; <i8> [#uses=1]
%tmp1193 = lshr i8 %tmp1192, 7 ; <i8> [#uses=1]
%mask1194 = and i8 %tmp1193, 1 ; <i8> [#uses=2]
%mask1195 = and i8 %mask1194, 1 ; <i8> [#uses=0]
@@ -58,7 +58,7 @@ bb1163: ; preds = %newFuncRoot
%tmp11991200 = bitcast { i16, i8, i8 }* %tmp1199 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
%tmp1201 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp11991200, i32 0, i32 1 ; <i8*> [#uses=2]
%mask1202 = and i8 %mask1194, 1 ; <i8> [#uses=2]
- %tmp1203 = load i8* %tmp1201, align 1 ; <i8> [#uses=1]
+ %tmp1203 = load i8, i8* %tmp1201, align 1 ; <i8> [#uses=1]
%tmp1204 = shl i8 %mask1202, 1 ; <i8> [#uses=1]
%tmp1205 = and i8 %tmp1204, 2 ; <i8> [#uses=1]
%tmp1206 = and i8 %tmp1203, -3 ; <i8> [#uses=1]
@@ -71,12 +71,12 @@ bb1163: ; preds = %newFuncRoot
%tmp1212 = getelementptr %struct.SECOND_UNION, %struct.SECOND_UNION* %tmp1211, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
%tmp12121213 = bitcast { i16, i8, i8 }* %tmp1212 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
%tmp1214 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp12121213, i32 0, i32 1 ; <i8*> [#uses=1]
- %tmp1215 = load i8* %tmp1214, align 1 ; <i8> [#uses=1]
+ %tmp1215 = load i8, i8* %tmp1214, align 1 ; <i8> [#uses=1]
%tmp1216 = shl i8 %tmp1215, 6 ; <i8> [#uses=1]
%tmp1217 = lshr i8 %tmp1216, 7 ; <i8> [#uses=1]
%mask1218 = and i8 %tmp1217, 1 ; <i8> [#uses=2]
%mask1219 = and i8 %mask1218, 1 ; <i8> [#uses=0]
- %tmp1220 = load %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
+ %tmp1220 = load %struct.rec*, %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
%tmp1221 = getelementptr %struct.rec, %struct.rec* %tmp1220, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp12211222 = bitcast %struct.head_type* %tmp1221 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
%tmp1223 = getelementptr %struct.word_type, %struct.word_type* %tmp12211222, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
@@ -84,7 +84,7 @@ bb1163: ; preds = %newFuncRoot
%tmp12241225 = bitcast { i16, i8, i8 }* %tmp1224 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
%tmp1226 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp12241225, i32 0, i32 1 ; <i8*> [#uses=2]
%mask1227 = and i8 %mask1218, 1 ; <i8> [#uses=2]
- %tmp1228 = load i8* %tmp1226, align 1 ; <i8> [#uses=1]
+ %tmp1228 = load i8, i8* %tmp1226, align 1 ; <i8> [#uses=1]
%tmp1229 = and i8 %mask1227, 1 ; <i8> [#uses=1]
%tmp1230 = and i8 %tmp1228, -2 ; <i8> [#uses=1]
%tmp1231 = or i8 %tmp1230, %tmp1229 ; <i8> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll b/llvm/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll
index 40aafb4c54d..9fb325c1223 100644
--- a/llvm/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll
+++ b/llvm/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll
@@ -5,7 +5,7 @@ declare fastcc i8* @w_addchar(i8*, i32*, i32*, i8 signext ) nounwind
define x86_stdcallcc i32 @parse_backslash(i8** inreg %word, i32* inreg %word_length, i32* inreg %max_length) nounwind {
entry:
- %tmp6 = load i8* null, align 1 ; <i8> [#uses=1]
+ %tmp6 = load i8, i8* null, align 1 ; <i8> [#uses=1]
br label %bb13
bb13: ; preds = %entry
%tmp26 = call fastcc i8* @w_addchar( i8* null, i32* %word_length, i32* %max_length, i8 signext %tmp6 ) nounwind ; <i8*> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll b/llvm/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
index fab9b770045..a9e3f33ec61 100644
--- a/llvm/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
+++ b/llvm/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
@@ -16,12 +16,12 @@ define i32 @foo() {
entry:
%retval = alloca i32 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = load %struct.__res_state** @__libc_resp, align 4 ; <%struct.__res_state*> [#uses=1]
+ %tmp = load %struct.__res_state*, %struct.__res_state** @__libc_resp, align 4 ; <%struct.__res_state*> [#uses=1]
%tmp1 = getelementptr %struct.__res_state, %struct.__res_state* %tmp, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 0, i32* %tmp1, align 4
br label %return
return: ; preds = %entry
- %retval2 = load i32* %retval ; <i32> [#uses=1]
+ %retval2 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval2
}
@@ -31,11 +31,11 @@ define i32 @bar() {
entry:
%retval = alloca i32 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = load %struct.__res_state** @__libc_resp, align 4 ; <%struct.__res_state*> [#uses=1]
+ %tmp = load %struct.__res_state*, %struct.__res_state** @__libc_resp, align 4 ; <%struct.__res_state*> [#uses=1]
%tmp1 = getelementptr %struct.__res_state, %struct.__res_state* %tmp, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %tmp1, align 4
br label %return
return: ; preds = %entry
- %retval2 = load i32* %retval ; <i32> [#uses=1]
+ %retval2 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval2
}
diff --git a/llvm/test/CodeGen/X86/2008-03-14-SpillerCrash.ll b/llvm/test/CodeGen/X86/2008-03-14-SpillerCrash.ll
index 4c19834963b..d60d0c2fb0b 100644
--- a/llvm/test/CodeGen/X86/2008-03-14-SpillerCrash.ll
+++ b/llvm/test/CodeGen/X86/2008-03-14-SpillerCrash.ll
@@ -10,7 +10,7 @@
define i64 @____wcstoll_l_internal(i32* %nptr, i32** %endptr, i32 %base, i32 %group, %struct.__locale_struct* %loc) nounwind {
entry:
- %tmp27 = load i32* null, align 4 ; <i32> [#uses=1]
+ %tmp27 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%tmp83 = getelementptr i32, i32* %nptr, i32 1 ; <i32*> [#uses=1]
%tmp233 = add i32 0, -48 ; <i32> [#uses=1]
br label %bb271.us
@@ -32,7 +32,7 @@ bb374.outer: ; preds = %bb311.split, %bb271.us
br label %bb374.us
bb374.us: ; preds = %bb314.us, %bb374.outer
%tmp376.us = getelementptr i32, i32* %s.5.ph, i32 0 ; <i32*> [#uses=3]
- %tmp378.us = load i32* %tmp376.us, align 4 ; <i32> [#uses=2]
+ %tmp378.us = load i32, i32* %tmp376.us, align 4 ; <i32> [#uses=2]
%tmp302.us = icmp eq i32* %tmp376.us, %tmp83 ; <i1> [#uses=1]
%bothcond484.us = or i1 false, %tmp302.us ; <i1> [#uses=1]
br i1 %bothcond484.us, label %bb383, label %bb305.us
diff --git a/llvm/test/CodeGen/X86/2008-03-23-DarwinAsmComments.ll b/llvm/test/CodeGen/X86/2008-03-23-DarwinAsmComments.ll
index 451624d3583..3e55390de9f 100644
--- a/llvm/test/CodeGen/X86/2008-03-23-DarwinAsmComments.ll
+++ b/llvm/test/CodeGen/X86/2008-03-23-DarwinAsmComments.ll
@@ -13,12 +13,12 @@ entry:
cond_true: ; preds = %entry
%tmp1415 = shl i16 %param, 3 ; <i16> [#uses=1]
%tmp17 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
- %tmp18 = load %struct.ComponentParameters** %tmp17, align 8 ; <%struct.ComponentParameters*> [#uses=1]
+ %tmp18 = load %struct.ComponentParameters*, %struct.ComponentParameters** %tmp17, align 8 ; <%struct.ComponentParameters*> [#uses=1]
%tmp1920 = bitcast %struct.ComponentParameters* %tmp18 to i8* ; <i8*> [#uses=1]
%tmp212223 = sext i16 %tmp1415 to i64 ; <i64> [#uses=1]
%tmp24 = getelementptr i8, i8* %tmp1920, i64 %tmp212223 ; <i8*> [#uses=1]
%tmp2425 = bitcast i8* %tmp24 to i64* ; <i64*> [#uses=1]
- %tmp28 = load i64* %tmp2425, align 8 ; <i64> [#uses=1]
+ %tmp28 = load i64, i64* %tmp2425, align 8 ; <i64> [#uses=1]
%tmp2829 = inttoptr i64 %tmp28 to i32* ; <i32*> [#uses=1]
%tmp31 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 2 ; <i32**> [#uses=1]
store i32* %tmp2829, i32** %tmp31, align 8
@@ -27,18 +27,18 @@ cond_true: ; preds = %entry
cond_next: ; preds = %cond_true, %entry
%tmp4243 = shl i16 %param, 3 ; <i16> [#uses=1]
%tmp46 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
- %tmp47 = load %struct.ComponentParameters** %tmp46, align 8 ; <%struct.ComponentParameters*> [#uses=1]
+ %tmp47 = load %struct.ComponentParameters*, %struct.ComponentParameters** %tmp46, align 8 ; <%struct.ComponentParameters*> [#uses=1]
%tmp4849 = bitcast %struct.ComponentParameters* %tmp47 to i8* ; <i8*> [#uses=1]
%tmp505152 = sext i16 %tmp4243 to i64 ; <i64> [#uses=1]
%tmp53 = getelementptr i8, i8* %tmp4849, i64 %tmp505152 ; <i8*> [#uses=1]
%tmp5354 = bitcast i8* %tmp53 to i64* ; <i64*> [#uses=1]
- %tmp58 = load i64* %tmp5354, align 8 ; <i64> [#uses=1]
+ %tmp58 = load i64, i64* %tmp5354, align 8 ; <i64> [#uses=1]
%tmp59 = icmp eq i64 %tmp58, 0 ; <i1> [#uses=1]
br i1 %tmp59, label %UnifiedReturnBlock, label %cond_true63
cond_true63: ; preds = %cond_next
%tmp65 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 0 ; <%struct.AGenericManager**> [#uses=1]
- %tmp66 = load %struct.AGenericManager** %tmp65, align 8 ; <%struct.AGenericManager*> [#uses=1]
+ %tmp66 = load %struct.AGenericManager*, %struct.AGenericManager** %tmp65, align 8 ; <%struct.AGenericManager*> [#uses=1]
%tmp69 = tail call i32 @_ZN15AGenericManager24DefaultComponentInstanceERP23ComponentInstanceRecord( %struct.AGenericManager* %tmp66, %struct.ComponentInstanceRecord** %instance ) ; <i32> [#uses=1]
ret i32 %tmp69
diff --git a/llvm/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll b/llvm/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll
index 7f27bfcc0a9..681a984a07a 100644
--- a/llvm/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll
+++ b/llvm/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll
@@ -21,7 +21,7 @@ entry:
store i32 (...)** getelementptr ([4 x i32 (...)*]* @_ZTVSt9basic_iosIcSt11char_traitsIcEE, i32 0, i32 2), i32 (...)*** null, align 4
store i32 (...)** null, i32 (...)*** null, align 4
%ctg2242.i.i163.i = getelementptr i8, i8* %tmp96.i.i142.i, i32 0 ; <i8*> [#uses=1]
- %tmp150.i.i164.i = load i8** getelementptr ([4 x i8*]* @_ZTTSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE, i32 0, i64 2), align 4 ; <i8*> [#uses=1]
+ %tmp150.i.i164.i = load i8*, i8** getelementptr ([4 x i8*]* @_ZTTSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE, i32 0, i64 2), align 4 ; <i8*> [#uses=1]
%tmp150151.i.i165.i = bitcast i8* %tmp150.i.i164.i to i32 (...)** ; <i32 (...)**> [#uses=1]
%tmp153.i.i166.i = bitcast i8* %ctg2242.i.i163.i to i32 (...)*** ; <i32 (...)***> [#uses=1]
store i32 (...)** %tmp150151.i.i165.i, i32 (...)*** %tmp153.i.i166.i, align 4
diff --git a/llvm/test/CodeGen/X86/2008-04-09-BranchFolding.ll b/llvm/test/CodeGen/X86/2008-04-09-BranchFolding.ll
index f4b2d719ae1..a758fed02ae 100644
--- a/llvm/test/CodeGen/X86/2008-04-09-BranchFolding.ll
+++ b/llvm/test/CodeGen/X86/2008-04-09-BranchFolding.ll
@@ -16,7 +16,7 @@ bb140: ; preds = %entry
bb17.i: ; preds = %bb140
ret %struct.tree_node* null
bb143: ; preds = %entry
- %tmp8.i43 = load %struct.tree_node** null, align 4 ; <%struct.tree_node*> [#uses=1]
+ %tmp8.i43 = load %struct.tree_node*, %struct.tree_node** null, align 4 ; <%struct.tree_node*> [#uses=1]
br i1 %tmp3.i40, label %bb160, label %bb9.i48
bb9.i48: ; preds = %bb143
ret %struct.tree_node* null
diff --git a/llvm/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll b/llvm/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll
index 0742371dc9b..f83c990451b 100644
--- a/llvm/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll
+++ b/llvm/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll
@@ -40,8 +40,8 @@
define void @"-[AA BB:optionIndex:delegate:CC:contextInfo:]"(%struct.AA* %self, %struct._message_ref_t* %_cmd, %struct.NSError* %inError, i64 %inOptionIndex, %struct.NSObject* %inDelegate, %struct.objc_selector* %inDidRecoverSelector, i8* %inContextInfo) {
entry:
- %tmp105 = load %struct.NSArray** null, align 8 ; <%struct.NSArray*> [#uses=1]
- %tmp107 = load %struct.NSObject** null, align 8 ; <%struct.NSObject*> [#uses=1]
+ %tmp105 = load %struct.NSArray*, %struct.NSArray** null, align 8 ; <%struct.NSArray*> [#uses=1]
+ %tmp107 = load %struct.NSObject*, %struct.NSObject** null, align 8 ; <%struct.NSObject*> [#uses=1]
call void null( %struct.NSObject* %tmp107, %struct._message_ref_t* @"\01L_OBJC_MESSAGE_REF_228", %struct.NSArray* %tmp105, i8 signext 0 )
%tmp111 = call %struct.NSObject* (%struct.NSObject*, %struct.objc_selector*, ...)* @objc_msgSend( %struct.NSObject* null, %struct.objc_selector* null, i32 0, i8* null ) ; <%struct.NSObject*> [#uses=0]
ret void
diff --git a/llvm/test/CodeGen/X86/2008-04-16-CoalescerBug.ll b/llvm/test/CodeGen/X86/2008-04-16-CoalescerBug.ll
index 3ccc0fe1634..1488034f2eb 100644
--- a/llvm/test/CodeGen/X86/2008-04-16-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2008-04-16-CoalescerBug.ll
@@ -22,7 +22,7 @@ bb94.us: ; preds = %bb71.us, %bb53.us
store i16 %tmp113.us, i16* null, align 2
br label %bb53.us
bb71.us: ; preds = %bb53.us
- %tmp80.us = load i8* null, align 1 ; <i8> [#uses=1]
+ %tmp80.us = load i8, i8* null, align 1 ; <i8> [#uses=1]
%tmp8081.us = zext i8 %tmp80.us to i32 ; <i32> [#uses=1]
%tmp87.us = mul i32 %tmp8081.us, 0 ; <i32> [#uses=1]
%tmp92.us = add i32 0, %tmp87.us ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-04-17-CoalescerBug.ll b/llvm/test/CodeGen/X86/2008-04-17-CoalescerBug.ll
index f244793e7a9..ff704216394 100644
--- a/llvm/test/CodeGen/X86/2008-04-17-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2008-04-17-CoalescerBug.ll
@@ -33,7 +33,7 @@ bb161.i: ; preds = %bb142.i
bb182.i: ; preds = %bb142.i
ret void
bb3261: ; preds = %bb7834, %bb161.i
- %tmp3263 = load i32* null, align 4 ; <i32> [#uses=1]
+ %tmp3263 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%tmp3264 = icmp eq i32 %tmp3263, 37 ; <i1> [#uses=1]
br i1 %tmp3264, label %bb3306, label %bb3267
bb3267: ; preds = %bb3261
@@ -42,7 +42,7 @@ bb3306: ; preds = %bb3261
%tmp3310 = invoke %struct.wxStringBase* @_ZN12wxStringBaseaSEPKw( %struct.wxStringBase* null, i32* getelementptr ([5 x i32]* @.str89, i32 0, i32 0) )
to label %bb3314 unwind label %lpad ; <%struct.wxStringBase*> [#uses=0]
bb3314: ; preds = %bb3306
- %tmp3316 = load i32* null, align 4 ; <i32> [#uses=1]
+ %tmp3316 = load i32, i32* null, align 4 ; <i32> [#uses=1]
switch i32 %tmp3316, label %bb7595 [
i32 0, label %bb7819
i32 37, label %bb7806
diff --git a/llvm/test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll b/llvm/test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll
index 86bce8e977a..06f7907fec9 100644
--- a/llvm/test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll
+++ b/llvm/test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll
@@ -6,7 +6,7 @@ target triple = "i386-apple-darwin8"
define i32 @main() nounwind {
entry:
- %tmp122 = load <2 x i64>* null, align 16 ; <<2 x i64>> [#uses=1]
+ %tmp122 = load <2 x i64>, <2 x i64>* null, align 16 ; <<2 x i64>> [#uses=1]
%tmp126 = bitcast <2 x i64> %tmp122 to <8 x i16> ; <<8 x i16>> [#uses=1]
%tmp129 = call <8 x i16> @llvm.x86.sse41.pblendw( <8 x i16> zeroinitializer, <8 x i16> %tmp126, i32 2 ) nounwind ; <<8 x i16>> [#uses=0]
ret i32 0
diff --git a/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll b/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
index 7c04206de72..06bbd74e8bd 100644
--- a/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
@@ -33,7 +33,7 @@ bb13101: ; preds = %bb13088
bb13107: ; preds = %bb13101, %bb13088
%iftmp.684.0 = phi i32 [ 0, %bb13101 ], [ 65535, %bb13088 ] ; <i32> [#uses=2]
- %tmp13111 = load i64* null, align 8 ; <i64> [#uses=3]
+ %tmp13111 = load i64, i64* null, align 8 ; <i64> [#uses=3]
%tmp13116 = lshr i64 %tmp13111, 16 ; <i64> [#uses=1]
%tmp1311613117 = trunc i64 %tmp13116 to i32 ; <i32> [#uses=1]
%tmp13118 = and i32 %tmp1311613117, 65535 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll b/llvm/test/CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll
index 5ceb5464d2b..0e4ef1c3260 100644
--- a/llvm/test/CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll
+++ b/llvm/test/CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=x86 -mattr=+sse2
define fastcc void @glgVectorFloatConversion() nounwind {
- %tmp12745 = load <4 x float>* null, align 16 ; <<4 x float>> [#uses=1]
+ %tmp12745 = load <4 x float>, <4 x float>* null, align 16 ; <<4 x float>> [#uses=1]
%tmp12773 = insertelement <4 x float> %tmp12745, float 1.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
%tmp12774 = insertelement <4 x float> %tmp12773, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
%tmp12775 = insertelement <4 x float> %tmp12774, float 1.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-05-12-tailmerge-5.ll b/llvm/test/CodeGen/X86/2008-05-12-tailmerge-5.ll
index 1a70b4a0cad..df5ceb0f0d9 100644
--- a/llvm/test/CodeGen/X86/2008-05-12-tailmerge-5.ll
+++ b/llvm/test/CodeGen/X86/2008-05-12-tailmerge-5.ll
@@ -46,7 +46,7 @@ entry:
store i8 %d, i8* %d_addr
%tmp13 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 0 ; <[3 x i8]*> [#uses=1]
%tmp1314 = bitcast [3 x i8]* %tmp13 to i32* ; <i32*> [#uses=1]
- %tmp15 = load i32* %tmp1314, align 4 ; <i32> [#uses=1]
+ %tmp15 = load i32, i32* %tmp1314, align 4 ; <i32> [#uses=1]
%tmp16 = shl i32 %tmp15, 14 ; <i32> [#uses=1]
%tmp17 = ashr i32 %tmp16, 23 ; <i32> [#uses=1]
%tmp1718 = trunc i32 %tmp17 to i16 ; <i16> [#uses=1]
@@ -57,7 +57,7 @@ entry:
%sextl21 = shl i16 %sextr, 7 ; <i16> [#uses=1]
%sextr22 = ashr i16 %sextl21, 7 ; <i16> [#uses=1]
%sextr2223 = sext i16 %sextr22 to i32 ; <i32> [#uses=1]
- %tmp24 = load i32* %j_addr, align 4 ; <i32> [#uses=1]
+ %tmp24 = load i32, i32* %j_addr, align 4 ; <i32> [#uses=1]
%tmp25 = icmp ne i32 %sextr2223, %tmp24 ; <i1> [#uses=1]
%tmp2526 = zext i1 %tmp25 to i8 ; <i8> [#uses=1]
%toBool = icmp ne i8 %tmp2526, 0 ; <i1> [#uses=1]
@@ -69,8 +69,8 @@ bb: ; preds = %entry
bb27: ; preds = %entry
%tmp28 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 1 ; <i8*> [#uses=1]
- %tmp29 = load i8* %tmp28, align 4 ; <i8> [#uses=1]
- %tmp30 = load i8* %c_addr, align 1 ; <i8> [#uses=1]
+ %tmp29 = load i8, i8* %tmp28, align 4 ; <i8> [#uses=1]
+ %tmp30 = load i8, i8* %c_addr, align 1 ; <i8> [#uses=1]
%tmp31 = icmp ne i8 %tmp29, %tmp30 ; <i1> [#uses=1]
%tmp3132 = zext i1 %tmp31 to i8 ; <i8> [#uses=1]
%toBool33 = icmp ne i8 %tmp3132, 0 ; <i1> [#uses=1]
@@ -82,7 +82,7 @@ bb34: ; preds = %bb27
bb35: ; preds = %bb27
%tmp36 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 2 ; <i16*> [#uses=1]
- %tmp37 = load i16* %tmp36, align 4 ; <i16> [#uses=1]
+ %tmp37 = load i16, i16* %tmp36, align 4 ; <i16> [#uses=1]
%tmp38 = shl i16 %tmp37, 7 ; <i16> [#uses=1]
%tmp39 = ashr i16 %tmp38, 7 ; <i16> [#uses=1]
%sextl40 = shl i16 %tmp39, 7 ; <i16> [#uses=1]
@@ -91,7 +91,7 @@ bb35: ; preds = %bb27
%sextr43 = ashr i16 %sextl42, 7 ; <i16> [#uses=0]
%sextl44 = shl i16 %sextr41, 7 ; <i16> [#uses=1]
%sextr45 = ashr i16 %sextl44, 7 ; <i16> [#uses=1]
- %tmp46 = load i16* %t_addr, align 2 ; <i16> [#uses=1]
+ %tmp46 = load i16, i16* %t_addr, align 2 ; <i16> [#uses=1]
%tmp47 = icmp ne i16 %sextr45, %tmp46 ; <i1> [#uses=1]
%tmp4748 = zext i1 %tmp47 to i8 ; <i8> [#uses=1]
%toBool49 = icmp ne i8 %tmp4748, 0 ; <i1> [#uses=1]
@@ -103,7 +103,7 @@ bb50: ; preds = %bb35
bb51: ; preds = %bb35
%tmp52 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 3 ; <i16*> [#uses=1]
- %tmp53 = load i16* %tmp52, align 4 ; <i16> [#uses=1]
+ %tmp53 = load i16, i16* %tmp52, align 4 ; <i16> [#uses=1]
%tmp54 = shl i16 %tmp53, 7 ; <i16> [#uses=1]
%tmp55 = ashr i16 %tmp54, 7 ; <i16> [#uses=1]
%sextl56 = shl i16 %tmp55, 7 ; <i16> [#uses=1]
@@ -112,7 +112,7 @@ bb51: ; preds = %bb35
%sextr59 = ashr i16 %sextl58, 7 ; <i16> [#uses=0]
%sextl60 = shl i16 %sextr57, 7 ; <i16> [#uses=1]
%sextr61 = ashr i16 %sextl60, 7 ; <i16> [#uses=1]
- %tmp62 = load i16* %u_addr, align 2 ; <i16> [#uses=1]
+ %tmp62 = load i16, i16* %u_addr, align 2 ; <i16> [#uses=1]
%tmp63 = icmp ne i16 %sextr61, %tmp62 ; <i1> [#uses=1]
%tmp6364 = zext i1 %tmp63 to i8 ; <i8> [#uses=1]
%toBool65 = icmp ne i8 %tmp6364, 0 ; <i1> [#uses=1]
@@ -124,8 +124,8 @@ bb66: ; preds = %bb51
bb67: ; preds = %bb51
%tmp68 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 4 ; <i8*> [#uses=1]
- %tmp69 = load i8* %tmp68, align 4 ; <i8> [#uses=1]
- %tmp70 = load i8* %d_addr, align 1 ; <i8> [#uses=1]
+ %tmp69 = load i8, i8* %tmp68, align 4 ; <i8> [#uses=1]
+ %tmp70 = load i8, i8* %d_addr, align 1 ; <i8> [#uses=1]
%tmp71 = icmp ne i8 %tmp69, %tmp70 ; <i1> [#uses=1]
%tmp7172 = zext i1 %tmp71 to i8 ; <i8> [#uses=1]
%toBool73 = icmp ne i8 %tmp7172, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-05-21-CoalescerBug.ll b/llvm/test/CodeGen/X86/2008-05-21-CoalescerBug.ll
index 4b32f57fa47..c6709a86d85 100644
--- a/llvm/test/CodeGen/X86/2008-05-21-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2008-05-21-CoalescerBug.ll
@@ -74,7 +74,7 @@ entry:
br label %bb497
bb483: ; preds = %bb497
- %tmp496 = load %struct.tree_node** null, align 4 ; <%struct.tree_node*> [#uses=1]
+ %tmp496 = load %struct.tree_node*, %struct.tree_node** null, align 4 ; <%struct.tree_node*> [#uses=1]
br label %bb497
bb497: ; preds = %bb483, %entry
diff --git a/llvm/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll b/llvm/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll
index da56ce7ab58..a91a422f55d 100644
--- a/llvm/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll
+++ b/llvm/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll
@@ -2,7 +2,7 @@
define void @a(<4 x float>* %x) nounwind {
entry:
- %tmp2 = load <4 x float>* %x, align 1
+ %tmp2 = load <4 x float>, <4 x float>* %x, align 1
%inv = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %tmp2)
store <4 x float> %inv, <4 x float>* %x, align 1
ret void
diff --git a/llvm/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll b/llvm/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll
index a6234d377df..422d68e7ff4 100644
--- a/llvm/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll
+++ b/llvm/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll
@@ -13,9 +13,9 @@ define i16 @f(i64 %x) {
%b = bitcast i64 %x to double ; <double> [#uses=1]
store double %b, double* @atomic
store double 0.000000e+00, double* @atomic2
- %l = load i32* @ioport ; <i32> [#uses=1]
+ %l = load i32, i32* @ioport ; <i32> [#uses=1]
%t = trunc i32 %l to i16 ; <i16> [#uses=1]
- %l2 = load i32* @ioport2 ; <i32> [#uses=1]
+ %l2 = load i32, i32* @ioport2 ; <i32> [#uses=1]
%tmp = lshr i32 %l2, 16 ; <i32> [#uses=1]
%t2 = trunc i32 %tmp to i16 ; <i16> [#uses=1]
%f = add i16 %t, %t2 ; <i16> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll b/llvm/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
index 037559edaf5..5a05ec13f35 100644
--- a/llvm/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
+++ b/llvm/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
@@ -12,9 +12,9 @@ define i16 @f(i64 %x, double %y) {
store volatile double 0.000000e+00, double* @atomic2 ; one processor operation only
%b2 = bitcast double %y to i64 ; <i64> [#uses=1]
store volatile i64 %b2, i64* @anything ; may transform to store of double
- %l = load volatile i32* @ioport ; must not narrow
+ %l = load volatile i32, i32* @ioport ; must not narrow
%t = trunc i32 %l to i16 ; <i16> [#uses=1]
- %l2 = load volatile i32* @ioport ; must not narrow
+ %l2 = load volatile i32, i32* @ioport ; must not narrow
%tmp = lshr i32 %l2, 16 ; <i32> [#uses=1]
%t2 = trunc i32 %tmp to i16 ; <i16> [#uses=1]
%f = add i16 %t, %t2 ; <i16> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-06-16-SubregsBug.ll b/llvm/test/CodeGen/X86/2008-06-16-SubregsBug.ll
index 4d4819ab05d..cdd1b0bfe60 100644
--- a/llvm/test/CodeGen/X86/2008-06-16-SubregsBug.ll
+++ b/llvm/test/CodeGen/X86/2008-06-16-SubregsBug.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=i386-apple-darwin | grep mov | count 4
define i16 @test(i16* %tmp179) nounwind {
- %tmp180 = load i16* %tmp179, align 2 ; <i16> [#uses=2]
+ %tmp180 = load i16, i16* %tmp179, align 2 ; <i16> [#uses=2]
%tmp184 = and i16 %tmp180, -1024 ; <i16> [#uses=1]
%tmp186 = icmp eq i16 %tmp184, -32768 ; <i1> [#uses=1]
br i1 %tmp186, label %bb189, label %bb288
diff --git a/llvm/test/CodeGen/X86/2008-07-07-DanglingDeadInsts.ll b/llvm/test/CodeGen/X86/2008-07-07-DanglingDeadInsts.ll
index 23d91eb2b79..c92768c91d7 100644
--- a/llvm/test/CodeGen/X86/2008-07-07-DanglingDeadInsts.ll
+++ b/llvm/test/CodeGen/X86/2008-07-07-DanglingDeadInsts.ll
@@ -63,7 +63,7 @@ entry:
br i1 false, label %bb17.preheader, label %bb30
bb17.preheader: ; preds = %entry
- load i32* null, align 4 ; <i32>:0 [#uses=0]
+ load i32, i32* null, align 4 ; <i32>:0 [#uses=0]
br label %bb16
bb16: ; preds = %bb16, %bb17.preheader
diff --git a/llvm/test/CodeGen/X86/2008-07-19-movups-spills.ll b/llvm/test/CodeGen/X86/2008-07-19-movups-spills.ll
index cd86ee18894..45ea69943e8 100644
--- a/llvm/test/CodeGen/X86/2008-07-19-movups-spills.ll
+++ b/llvm/test/CodeGen/X86/2008-07-19-movups-spills.ll
@@ -75,38 +75,38 @@ define void @test1() {
; CHECK: movups
; CHECK: movups
; CHECK-NOT: movups
- load <4 x float>* @0, align 1 ; <<4 x float>>:1 [#uses=2]
- load <4 x float>* @1, align 1 ; <<4 x float>>:2 [#uses=3]
- load <4 x float>* @2, align 1 ; <<4 x float>>:3 [#uses=4]
- load <4 x float>* @3, align 1 ; <<4 x float>>:4 [#uses=5]
- load <4 x float>* @4, align 1 ; <<4 x float>>:5 [#uses=6]
- load <4 x float>* @5, align 1 ; <<4 x float>>:6 [#uses=7]
- load <4 x float>* @6, align 1 ; <<4 x float>>:7 [#uses=8]
- load <4 x float>* @7, align 1 ; <<4 x float>>:8 [#uses=9]
- load <4 x float>* @8, align 1 ; <<4 x float>>:9 [#uses=10]
- load <4 x float>* @9, align 1 ; <<4 x float>>:10 [#uses=11]
- load <4 x float>* @10, align 1 ; <<4 x float>>:11 [#uses=12]
- load <4 x float>* @11, align 1 ; <<4 x float>>:12 [#uses=13]
- load <4 x float>* @12, align 1 ; <<4 x float>>:13 [#uses=14]
- load <4 x float>* @13, align 1 ; <<4 x float>>:14 [#uses=15]
- load <4 x float>* @14, align 1 ; <<4 x float>>:15 [#uses=16]
- load <4 x float>* @15, align 1 ; <<4 x float>>:16 [#uses=17]
- load <4 x float>* @16, align 1 ; <<4 x float>>:17 [#uses=18]
- load <4 x float>* @17, align 1 ; <<4 x float>>:18 [#uses=19]
- load <4 x float>* @18, align 1 ; <<4 x float>>:19 [#uses=20]
- load <4 x float>* @19, align 1 ; <<4 x float>>:20 [#uses=21]
- load <4 x float>* @20, align 1 ; <<4 x float>>:21 [#uses=22]
- load <4 x float>* @21, align 1 ; <<4 x float>>:22 [#uses=23]
- load <4 x float>* @22, align 1 ; <<4 x float>>:23 [#uses=24]
- load <4 x float>* @23, align 1 ; <<4 x float>>:24 [#uses=25]
- load <4 x float>* @24, align 1 ; <<4 x float>>:25 [#uses=26]
- load <4 x float>* @25, align 1 ; <<4 x float>>:26 [#uses=27]
- load <4 x float>* @26, align 1 ; <<4 x float>>:27 [#uses=28]
- load <4 x float>* @27, align 1 ; <<4 x float>>:28 [#uses=29]
- load <4 x float>* @28, align 1 ; <<4 x float>>:29 [#uses=30]
- load <4 x float>* @29, align 1 ; <<4 x float>>:30 [#uses=31]
- load <4 x float>* @30, align 1 ; <<4 x float>>:31 [#uses=32]
- load <4 x float>* @31, align 1 ; <<4 x float>>:32 [#uses=33]
+ load <4 x float>, <4 x float>* @0, align 1 ; <<4 x float>>:1 [#uses=2]
+ load <4 x float>, <4 x float>* @1, align 1 ; <<4 x float>>:2 [#uses=3]
+ load <4 x float>, <4 x float>* @2, align 1 ; <<4 x float>>:3 [#uses=4]
+ load <4 x float>, <4 x float>* @3, align 1 ; <<4 x float>>:4 [#uses=5]
+ load <4 x float>, <4 x float>* @4, align 1 ; <<4 x float>>:5 [#uses=6]
+ load <4 x float>, <4 x float>* @5, align 1 ; <<4 x float>>:6 [#uses=7]
+ load <4 x float>, <4 x float>* @6, align 1 ; <<4 x float>>:7 [#uses=8]
+ load <4 x float>, <4 x float>* @7, align 1 ; <<4 x float>>:8 [#uses=9]
+ load <4 x float>, <4 x float>* @8, align 1 ; <<4 x float>>:9 [#uses=10]
+ load <4 x float>, <4 x float>* @9, align 1 ; <<4 x float>>:10 [#uses=11]
+ load <4 x float>, <4 x float>* @10, align 1 ; <<4 x float>>:11 [#uses=12]
+ load <4 x float>, <4 x float>* @11, align 1 ; <<4 x float>>:12 [#uses=13]
+ load <4 x float>, <4 x float>* @12, align 1 ; <<4 x float>>:13 [#uses=14]
+ load <4 x float>, <4 x float>* @13, align 1 ; <<4 x float>>:14 [#uses=15]
+ load <4 x float>, <4 x float>* @14, align 1 ; <<4 x float>>:15 [#uses=16]
+ load <4 x float>, <4 x float>* @15, align 1 ; <<4 x float>>:16 [#uses=17]
+ load <4 x float>, <4 x float>* @16, align 1 ; <<4 x float>>:17 [#uses=18]
+ load <4 x float>, <4 x float>* @17, align 1 ; <<4 x float>>:18 [#uses=19]
+ load <4 x float>, <4 x float>* @18, align 1 ; <<4 x float>>:19 [#uses=20]
+ load <4 x float>, <4 x float>* @19, align 1 ; <<4 x float>>:20 [#uses=21]
+ load <4 x float>, <4 x float>* @20, align 1 ; <<4 x float>>:21 [#uses=22]
+ load <4 x float>, <4 x float>* @21, align 1 ; <<4 x float>>:22 [#uses=23]
+ load <4 x float>, <4 x float>* @22, align 1 ; <<4 x float>>:23 [#uses=24]
+ load <4 x float>, <4 x float>* @23, align 1 ; <<4 x float>>:24 [#uses=25]
+ load <4 x float>, <4 x float>* @24, align 1 ; <<4 x float>>:25 [#uses=26]
+ load <4 x float>, <4 x float>* @25, align 1 ; <<4 x float>>:26 [#uses=27]
+ load <4 x float>, <4 x float>* @26, align 1 ; <<4 x float>>:27 [#uses=28]
+ load <4 x float>, <4 x float>* @27, align 1 ; <<4 x float>>:28 [#uses=29]
+ load <4 x float>, <4 x float>* @28, align 1 ; <<4 x float>>:29 [#uses=30]
+ load <4 x float>, <4 x float>* @29, align 1 ; <<4 x float>>:30 [#uses=31]
+ load <4 x float>, <4 x float>* @30, align 1 ; <<4 x float>>:31 [#uses=32]
+ load <4 x float>, <4 x float>* @31, align 1 ; <<4 x float>>:32 [#uses=33]
fmul <4 x float> %1, %1 ; <<4 x float>>:33 [#uses=1]
fmul <4 x float> %33, %2 ; <<4 x float>>:34 [#uses=1]
fmul <4 x float> %34, %3 ; <<4 x float>>:35 [#uses=1]
@@ -708,38 +708,38 @@ define void @test2() "no-realign-stack" {
; CHECK: movups
; CHECK: movups
; CHECK-NOT: movups
- load <4 x float>* @0, align 1
- load <4 x float>* @1, align 1
- load <4 x float>* @2, align 1
- load <4 x float>* @3, align 1
- load <4 x float>* @4, align 1
- load <4 x float>* @5, align 1
- load <4 x float>* @6, align 1
- load <4 x float>* @7, align 1
- load <4 x float>* @8, align 1
- load <4 x float>* @9, align 1
- load <4 x float>* @10, align 1
- load <4 x float>* @11, align 1
- load <4 x float>* @12, align 1
- load <4 x float>* @13, align 1
- load <4 x float>* @14, align 1
- load <4 x float>* @15, align 1
- load <4 x float>* @16, align 1
- load <4 x float>* @17, align 1
- load <4 x float>* @18, align 1
- load <4 x float>* @19, align 1
- load <4 x float>* @20, align 1
- load <4 x float>* @21, align 1
- load <4 x float>* @22, align 1
- load <4 x float>* @23, align 1
- load <4 x float>* @24, align 1
- load <4 x float>* @25, align 1
- load <4 x float>* @26, align 1
- load <4 x float>* @27, align 1
- load <4 x float>* @28, align 1
- load <4 x float>* @29, align 1
- load <4 x float>* @30, align 1
- load <4 x float>* @31, align 1
+ load <4 x float>, <4 x float>* @0, align 1
+ load <4 x float>, <4 x float>* @1, align 1
+ load <4 x float>, <4 x float>* @2, align 1
+ load <4 x float>, <4 x float>* @3, align 1
+ load <4 x float>, <4 x float>* @4, align 1
+ load <4 x float>, <4 x float>* @5, align 1
+ load <4 x float>, <4 x float>* @6, align 1
+ load <4 x float>, <4 x float>* @7, align 1
+ load <4 x float>, <4 x float>* @8, align 1
+ load <4 x float>, <4 x float>* @9, align 1
+ load <4 x float>, <4 x float>* @10, align 1
+ load <4 x float>, <4 x float>* @11, align 1
+ load <4 x float>, <4 x float>* @12, align 1
+ load <4 x float>, <4 x float>* @13, align 1
+ load <4 x float>, <4 x float>* @14, align 1
+ load <4 x float>, <4 x float>* @15, align 1
+ load <4 x float>, <4 x float>* @16, align 1
+ load <4 x float>, <4 x float>* @17, align 1
+ load <4 x float>, <4 x float>* @18, align 1
+ load <4 x float>, <4 x float>* @19, align 1
+ load <4 x float>, <4 x float>* @20, align 1
+ load <4 x float>, <4 x float>* @21, align 1
+ load <4 x float>, <4 x float>* @22, align 1
+ load <4 x float>, <4 x float>* @23, align 1
+ load <4 x float>, <4 x float>* @24, align 1
+ load <4 x float>, <4 x float>* @25, align 1
+ load <4 x float>, <4 x float>* @26, align 1
+ load <4 x float>, <4 x float>* @27, align 1
+ load <4 x float>, <4 x float>* @28, align 1
+ load <4 x float>, <4 x float>* @29, align 1
+ load <4 x float>, <4 x float>* @30, align 1
+ load <4 x float>, <4 x float>* @31, align 1
fmul <4 x float> %1, %1
fmul <4 x float> %33, %2
fmul <4 x float> %34, %3
diff --git a/llvm/test/CodeGen/X86/2008-07-22-CombinerCrash.ll b/llvm/test/CodeGen/X86/2008-07-22-CombinerCrash.ll
index 0f6714579bc..35bb5f05428 100644
--- a/llvm/test/CodeGen/X86/2008-07-22-CombinerCrash.ll
+++ b/llvm/test/CodeGen/X86/2008-07-22-CombinerCrash.ll
@@ -7,7 +7,7 @@ external global <4 x i16> ; <<4 x i16>*>:1 [#uses=1]
declare void @abort()
define void @t() nounwind {
- load i16* @0 ; <i16>:1 [#uses=1]
+ load i16, i16* @0 ; <i16>:1 [#uses=1]
zext i16 %1 to i64 ; <i64>:2 [#uses=1]
bitcast i64 %2 to <4 x i16> ; <<4 x i16>>:3 [#uses=1]
shufflevector <4 x i16> %3, <4 x i16> undef, <4 x i32> zeroinitializer ; <<4 x i16>>:4 [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-08-06-RewriterBug.ll b/llvm/test/CodeGen/X86/2008-08-06-RewriterBug.ll
index 4428035cc82..08172fa4bae 100644
--- a/llvm/test/CodeGen/X86/2008-08-06-RewriterBug.ll
+++ b/llvm/test/CodeGen/X86/2008-08-06-RewriterBug.ll
@@ -4,14 +4,14 @@
@data = external global [400 x i64] ; <[400 x i64]*> [#uses=5]
define void @foo(double* noalias, double* noalias) {
- load i64* getelementptr ([400 x i64]* @data, i32 0, i64 200), align 4 ; <i64>:3 [#uses=1]
- load i64* getelementptr ([400 x i64]* @data, i32 0, i64 199), align 4 ; <i64>:4 [#uses=1]
- load i64* getelementptr ([400 x i64]* @data, i32 0, i64 198), align 4 ; <i64>:5 [#uses=2]
- load i64* getelementptr ([400 x i64]* @data, i32 0, i64 197), align 4 ; <i64>:6 [#uses=1]
+ load i64, i64* getelementptr ([400 x i64]* @data, i32 0, i64 200), align 4 ; <i64>:3 [#uses=1]
+ load i64, i64* getelementptr ([400 x i64]* @data, i32 0, i64 199), align 4 ; <i64>:4 [#uses=1]
+ load i64, i64* getelementptr ([400 x i64]* @data, i32 0, i64 198), align 4 ; <i64>:5 [#uses=2]
+ load i64, i64* getelementptr ([400 x i64]* @data, i32 0, i64 197), align 4 ; <i64>:6 [#uses=1]
br i1 false, label %28, label %7
; <label>:7 ; preds = %2
- load double** getelementptr (double** bitcast ([400 x i64]* @data to double**), i64 180), align 8 ; <double*>:8 [#uses=1]
+ load double*, double** getelementptr (double** bitcast ([400 x i64]* @data to double**), i64 180), align 8 ; <double*>:8 [#uses=1]
bitcast double* %8 to double* ; <double*>:9 [#uses=1]
ptrtoint double* %9 to i64 ; <i64>:10 [#uses=1]
mul i64 %4, %3 ; <i64>:11 [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll b/llvm/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll
index 51064f1d217..d939207d196 100644
--- a/llvm/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll
+++ b/llvm/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll
@@ -29,7 +29,7 @@ declare void @llvm.eh.unwind.init()
; CHECK: _Unwind_Resume_or_Rethrow
define i32 @_Unwind_Resume_or_Rethrow() nounwind uwtable ssp {
entry:
- %0 = load i32* @b, align 4
+ %0 = load i32, i32* @b, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %if.then
diff --git a/llvm/test/CodeGen/X86/2008-09-09-LinearScanBug.ll b/llvm/test/CodeGen/X86/2008-09-09-LinearScanBug.ll
index b3312d9464d..c80fbddd6aa 100644
--- a/llvm/test/CodeGen/X86/2008-09-09-LinearScanBug.ll
+++ b/llvm/test/CodeGen/X86/2008-09-09-LinearScanBug.ll
@@ -5,7 +5,7 @@
define i32 @func_125(i32 %p_126, i32 %p_128, i32 %p_129) nounwind {
entry:
- %tmp2.i = load i32* @g_3 ; <i32> [#uses=2]
+ %tmp2.i = load i32, i32* @g_3 ; <i32> [#uses=2]
%conv = trunc i32 %tmp2.i to i16 ; <i16> [#uses=3]
br label %forcond1.preheader.i.i7
diff --git a/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug.ll b/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug.ll
index 108f24307ea..635194f7624 100644
--- a/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug.ll
@@ -6,7 +6,7 @@
define i32 @func_3(i32 %p_5) nounwind {
entry:
%0 = srem i32 1, 0 ; <i32> [#uses=2]
- %1 = load i16* @g_15, align 2 ; <i16> [#uses=1]
+ %1 = load i16, i16* @g_15, align 2 ; <i16> [#uses=1]
%2 = zext i16 %1 to i32 ; <i32> [#uses=1]
%3 = and i32 %2, 1 ; <i32> [#uses=1]
%4 = tail call i32 (...)* @rshift_u_s( i32 1 ) nounwind ; <i32> [#uses=1]
@@ -14,7 +14,7 @@ entry:
%6 = zext i1 %5 to i32 ; <i32> [#uses=1]
%7 = icmp sge i32 %3, %6 ; <i1> [#uses=1]
%8 = zext i1 %7 to i32 ; <i32> [#uses=1]
- %9 = load i16* @g_15, align 2 ; <i16> [#uses=1]
+ %9 = load i16, i16* @g_15, align 2 ; <i16> [#uses=1]
%10 = icmp eq i16 %9, 0 ; <i1> [#uses=1]
%11 = zext i1 %10 to i32 ; <i32> [#uses=1]
%12 = tail call i32 (...)* @func_20( i32 1 ) nounwind ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll b/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll
index 59d1c7f77ab..92eb1c8497d 100644
--- a/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll
+++ b/llvm/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll
@@ -18,11 +18,11 @@ entry:
; SOURCE-SCHED: subl
; SOURCE-SCHED: testb
; SOURCE-SCHED: jne
- %0 = load i32* @g_5, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @g_5, align 4 ; <i32> [#uses=1]
%1 = ashr i32 %0, 1 ; <i32> [#uses=1]
%2 = icmp sgt i32 %1, 1 ; <i1> [#uses=1]
%3 = zext i1 %2 to i32 ; <i32> [#uses=1]
- %4 = load i32* @g_73, align 4 ; <i32> [#uses=1]
+ %4 = load i32, i32* @g_73, align 4 ; <i32> [#uses=1]
%5 = zext i16 %p_46 to i64 ; <i64> [#uses=1]
%6 = sub i64 0, %5 ; <i64> [#uses=1]
%7 = trunc i64 %6 to i8 ; <i8> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll b/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
index 4b2774b64b7..3edd72bdba9 100644
--- a/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
+++ b/llvm/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
@@ -19,7 +19,7 @@ target triple = "i386-apple-darwin8"
define i32 @aci(i32* %pw) nounwind {
entry:
- %0 = load i32* @x, align 4
+ %0 = load i32, i32* @x, align 4
%asmtmp = tail call { i32, i32 } asm "movl $0, %eax\0A\090:\0A\09test %eax, %eax\0A\09je 1f\0A\09movl %eax, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{ax},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* %pw, i32* %pw) nounwind
%asmtmp2 = tail call { i32, i32 } asm "movl $0, %edx\0A\090:\0A\09test %edx, %edx\0A\09je 1f\0A\09movl %edx, $2\0A\09incl $2\0A\09lock\0A\09cmpxchgl $2, $0\0A\09jne 0b\0A\091:", "=*m,=&{dx},=&r,*m,~{dirflag},~{fpsr},~{flags},~{memory},~{cc}"(i32* %pw, i32* %pw) nounwind
%asmresult2 = extractvalue { i32, i32 } %asmtmp, 0
diff --git a/llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll b/llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
index 015cbb51e1d..0058d979a2f 100644
--- a/llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
+++ b/llvm/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
@@ -35,9 +35,9 @@ entry:
%0 = getelementptr %struct.foo, %struct.foo* %c, i32 0, i32 0 ; <i32*> [#uses=2]
%1 = getelementptr %struct.foo, %struct.foo* %c, i32 0, i32 1 ; <i32*> [#uses=2]
%2 = getelementptr %struct.foo, %struct.foo* %c, i32 0, i32 2 ; <i8**> [#uses=2]
- %3 = load i32* %0, align 4 ; <i32> [#uses=1]
- %4 = load i32* %1, align 4 ; <i32> [#uses=1]
- %5 = load i8* %state, align 1 ; <i8> [#uses=1]
+ %3 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
+ %4 = load i32, i32* %1, align 4 ; <i32> [#uses=1]
+ %5 = load i8, i8* %state, align 1 ; <i8> [#uses=1]
%asmtmp = tail call { i32, i32, i32, i32 } asm sideeffect "#1st=$0 $1 2nd=$1 $2 3rd=$2 $4 5th=$4 $3=4th 1$0 1%eXx 5$4 5%eXx 6th=$5", "=&r,=r,=r,=*m,=&q,=*imr,1,2,*m,5,~{dirflag},~{fpsr},~{flags},~{cx}"(i8** %2, i8* %state, i32 %3, i32 %4, i8** %2, i8 %5) nounwind ; <{ i32, i32, i32, i32 }> [#uses=3]
%asmresult = extractvalue { i32, i32, i32, i32 } %asmtmp, 0 ; <i32> [#uses=1]
%asmresult1 = extractvalue { i32, i32, i32, i32 } %asmtmp, 1 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-09-19-RegAllocBug.ll b/llvm/test/CodeGen/X86/2008-09-19-RegAllocBug.ll
index a8f2912a70a..83a1fac7f3a 100644
--- a/llvm/test/CodeGen/X86/2008-09-19-RegAllocBug.ll
+++ b/llvm/test/CodeGen/X86/2008-09-19-RegAllocBug.ll
@@ -5,7 +5,7 @@
define i32 @func_4() nounwind {
entry:
- %0 = load i32* @g_3, align 4 ; <i32> [#uses=2]
+ %0 = load i32, i32* @g_3, align 4 ; <i32> [#uses=2]
%1 = trunc i32 %0 to i8 ; <i8> [#uses=1]
%2 = sub i8 1, %1 ; <i8> [#uses=1]
%3 = sext i8 %2 to i32 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-09-29-ReMatBug.ll b/llvm/test/CodeGen/X86/2008-09-29-ReMatBug.ll
index c7b19122622..38a366472d7 100644
--- a/llvm/test/CodeGen/X86/2008-09-29-ReMatBug.ll
+++ b/llvm/test/CodeGen/X86/2008-09-29-ReMatBug.ll
@@ -13,13 +13,13 @@ internal constant %struct.__builtin_CFString { i32* getelementptr ([0 x i32]* @_
define %struct.NSString* @"-[XCStringList stringRepresentation]"(%struct.XCStringList* %self, %struct..0objc_selector* %_cmd) nounwind {
entry:
- %0 = load i32* null, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%1 = and i32 %0, 16777215 ; <i32> [#uses=1]
%2 = icmp eq i32 %1, 0 ; <i1> [#uses=1]
br i1 %2, label %bb44, label %bb4
bb4: ; preds = %entry
- %3 = load %struct._XCStringListNode** null, align 4 ; <%struct._XCStringListNode*> [#uses=2]
+ %3 = load %struct._XCStringListNode*, %struct._XCStringListNode** null, align 4 ; <%struct._XCStringListNode*> [#uses=2]
%4 = icmp eq %struct._XCStringListNode* %3, null ; <i1> [#uses=1]
%5 = bitcast %struct._XCStringListNode* %3 to i32* ; <i32*> [#uses=1]
br label %bb37.outer
@@ -48,7 +48,7 @@ bb35.outer: ; preds = %bb34, %bb25.split
br label %bb35
bb35: ; preds = %bb35, %bb35.outer
- %9 = load i8* null, align 1 ; <i8> [#uses=1]
+ %9 = load i8, i8* null, align 1 ; <i8> [#uses=1]
switch i8 %9, label %bb35 [
i8 0, label %bb37.outer
i8 32, label %bb34
@@ -63,7 +63,7 @@ bb37.outer: ; preds = %bb35, %bb4
br i1 %4, label %bb39.split, label %bb37
bb37: ; preds = %bb37.outer, %bb19
- %10 = load i32* %5, align 4 ; <i32> [#uses=1]
+ %10 = load i32, i32* %5, align 4 ; <i32> [#uses=1]
br i1 false, label %bb6, label %bb19
bb39.split: ; preds = %bb37.outer
diff --git a/llvm/test/CodeGen/X86/2008-09-29-VolatileBug.ll b/llvm/test/CodeGen/X86/2008-09-29-VolatileBug.ll
index f35245bb2af..6ee8cf2f5e3 100644
--- a/llvm/test/CodeGen/X86/2008-09-29-VolatileBug.ll
+++ b/llvm/test/CodeGen/X86/2008-09-29-VolatileBug.ll
@@ -6,7 +6,7 @@
define i32 @main() nounwind {
entry:
- %0 = load volatile i32* @g_407, align 4 ; <i32> [#uses=1]
+ %0 = load volatile i32, i32* @g_407, align 4 ; <i32> [#uses=1]
%1 = trunc i32 %0 to i8 ; <i8> [#uses=1]
%2 = tail call i32 @func_45(i8 zeroext %1) nounwind ; <i32> [#uses=0]
ret i32 0
diff --git a/llvm/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll b/llvm/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll
index bd48105f129..34c9857b00e 100644
--- a/llvm/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll
+++ b/llvm/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll
@@ -10,7 +10,7 @@ declare x86_stdcallcc void @_D3nan5printFeZv(x86_fp80 %f)
define i32 @main() {
entry_nan.main:
- %tmp = load x86_fp80* @_D3nan4rvale ; <x86_fp80> [#uses=1]
+ %tmp = load x86_fp80, x86_fp80* @_D3nan4rvale ; <x86_fp80> [#uses=1]
call x86_stdcallcc void @_D3nan5printFeZv(x86_fp80 %tmp)
call x86_stdcallcc void @_D3nan5printFeZv(x86_fp80 0xK7FFF8001234000000000)
call x86_stdcallcc void @_D3nan5printFeZv(x86_fp80 0xK7FFFC001234000000400)
diff --git a/llvm/test/CodeGen/X86/2008-10-07-SSEISelBug.ll b/llvm/test/CodeGen/X86/2008-10-07-SSEISelBug.ll
index bc5761288c9..26e802ac05f 100644
--- a/llvm/test/CodeGen/X86/2008-10-07-SSEISelBug.ll
+++ b/llvm/test/CodeGen/X86/2008-10-07-SSEISelBug.ll
@@ -6,17 +6,17 @@ entry:
%w.addr = alloca float ; <float*> [#uses=2]
%.compoundliteral = alloca <4 x float> ; <<4 x float>*> [#uses=2]
store float %w, float* %w.addr
- %tmp = load float* %w.addr ; <float> [#uses=1]
+ %tmp = load float, float* %w.addr ; <float> [#uses=1]
%0 = insertelement <4 x float> undef, float %tmp, i32 0 ; <<4 x float>> [#uses=1]
%1 = insertelement <4 x float> %0, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
%2 = insertelement <4 x float> %1, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
%3 = insertelement <4 x float> %2, float 0.000000e+00, i32 3 ; <<4 x float>> [#uses=1]
store <4 x float> %3, <4 x float>* %.compoundliteral
- %tmp1 = load <4 x float>* %.compoundliteral ; <<4 x float>> [#uses=1]
+ %tmp1 = load <4 x float>, <4 x float>* %.compoundliteral ; <<4 x float>> [#uses=1]
store <4 x float> %tmp1, <4 x float>* %retval
br label %return
return: ; preds = %entry
- %4 = load <4 x float>* %retval ; <<4 x float>> [#uses=1]
+ %4 = load <4 x float>, <4 x float>* %retval ; <<4 x float>> [#uses=1]
ret <4 x float> %4
}
diff --git a/llvm/test/CodeGen/X86/2008-10-11-CallCrash.ll b/llvm/test/CodeGen/X86/2008-10-11-CallCrash.ll
index efc6125cfc2..a859bc6d7a9 100644
--- a/llvm/test/CodeGen/X86/2008-10-11-CallCrash.ll
+++ b/llvm/test/CodeGen/X86/2008-10-11-CallCrash.ll
@@ -7,7 +7,7 @@ target triple = "i386-apple-darwin7"
define i32 @func_45(i64 %p_46, i32 %p_48) nounwind {
entry:
%0 = tail call i32 (...)* @lshift_s_u(i64 %p_46, i64 0) nounwind ; <i32> [#uses=0]
- %1 = load i32* @g_385, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* @g_385, align 4 ; <i32> [#uses=1]
%2 = shl i32 %1, 1 ; <i32> [#uses=1]
%3 = and i32 %2, 32 ; <i32> [#uses=1]
%4 = tail call i32 (...)* @func_87(i32 undef, i32 %p_48, i32 1) nounwind ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll b/llvm/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll
index de4c1e70b8d..ac6fa0dc9b2 100644
--- a/llvm/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll
+++ b/llvm/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=x86 -mattr=+sse2
; PR2762
define void @foo(<4 x i32>* %p, <4 x double>* %q) {
- %n = load <4 x i32>* %p
+ %n = load <4 x i32>, <4 x i32>* %p
%z = sitofp <4 x i32> %n to <4 x double>
store <4 x double> %z, <4 x double>* %q
ret void
diff --git a/llvm/test/CodeGen/X86/2008-10-27-CoalescerBug.ll b/llvm/test/CodeGen/X86/2008-10-27-CoalescerBug.ll
index 0310a5dcb56..b1dcd031265 100644
--- a/llvm/test/CodeGen/X86/2008-10-27-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2008-10-27-CoalescerBug.ll
@@ -20,7 +20,7 @@ bb: ; preds = %bb, %entry
; CHECK: movsd %xmm0, 16(%esp)
; CHECK: %bb3
bb3: ; preds = %bb30.loopexit, %bb25, %bb3
- %2 = load i32* null, align 4 ; <i32> [#uses=1]
+ %2 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%3 = mul i32 %2, 0 ; <i32> [#uses=1]
%4 = icmp slt i32 0, %3 ; <i1> [#uses=1]
br i1 %4, label %bb18, label %bb3
diff --git a/llvm/test/CodeGen/X86/2008-11-06-testb.ll b/llvm/test/CodeGen/X86/2008-11-06-testb.ll
index d510e9f6f38..4ee4b4ad069 100644
--- a/llvm/test/CodeGen/X86/2008-11-06-testb.ll
+++ b/llvm/test/CodeGen/X86/2008-11-06-testb.ll
@@ -12,7 +12,7 @@ entry:
%0 = getelementptr %struct.x, %struct.x* %p, i32 0, i32 0 ; <i8*> [#uses=1]
store i8 55, i8* %0, align 1
%1 = bitcast %struct.x* %p to i32* ; <i32*> [#uses=1]
- %2 = load i32* %1, align 1 ; <i32> [#uses=1]
+ %2 = load i32, i32* %1, align 1 ; <i32> [#uses=1]
%3 = and i32 %2, 512 ; <i32> [#uses=1]
%4 = icmp eq i32 %3, 0 ; <i1> [#uses=1]
br i1 %4, label %bb5, label %bb
diff --git a/llvm/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll b/llvm/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll
index f5880b65759..840b8ba0f8c 100644
--- a/llvm/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll
+++ b/llvm/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll
@@ -16,7 +16,7 @@ bb: ; preds = %bb1, %bb1
bb1: ; preds = %bb, %entry
%P.0.rec = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
%P.0 = getelementptr i8, i8* %Q, i32 %P.0.rec ; <i8*> [#uses=2]
- %0 = load i8* %P.0, align 1 ; <i8> [#uses=1]
+ %0 = load i8, i8* %P.0, align 1 ; <i8> [#uses=1]
switch i8 %0, label %bb3 [
i8 12, label %bb
i8 42, label %bb
diff --git a/llvm/test/CodeGen/X86/2008-12-02-IllegalResultType.ll b/llvm/test/CodeGen/X86/2008-12-02-IllegalResultType.ll
index 4b72cb919ff..c828879e6b9 100644
--- a/llvm/test/CodeGen/X86/2008-12-02-IllegalResultType.ll
+++ b/llvm/test/CodeGen/X86/2008-12-02-IllegalResultType.ll
@@ -7,7 +7,7 @@ target triple = "i386-pc-linux-gnu"
define i32 @func_73(i32 %p_74) nounwind {
entry:
- %0 = load i32* @g_7, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @g_7, align 4 ; <i32> [#uses=1]
%1 = or i8 0, 118 ; <i8> [#uses=1]
%2 = zext i8 %1 to i64 ; <i64> [#uses=1]
%3 = icmp ne i32 %0, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-01-16-SchedulerBug.ll b/llvm/test/CodeGen/X86/2009-01-16-SchedulerBug.ll
index 99bef6ce3fc..ac6d0a983e4 100644
--- a/llvm/test/CodeGen/X86/2009-01-16-SchedulerBug.ll
+++ b/llvm/test/CodeGen/X86/2009-01-16-SchedulerBug.ll
@@ -10,12 +10,12 @@ declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32) nounwind
define fastcc %XXV* @bar(%CF* %call_frame, %XXV** %exception) nounwind {
prologue:
- %param_x = load %XXV** null ; <%XXV*> [#uses=1]
+ %param_x = load %XXV*, %XXV** null ; <%XXV*> [#uses=1]
%unique_1.i = ptrtoint %XXV* %param_x to i1 ; <i1> [#uses=1]
br i1 %unique_1.i, label %NextVerify42, label %FailedVerify
NextVerify42: ; preds = %prologue
- %param_y = load %XXV** null ; <%XXV*> [#uses=1]
+ %param_y = load %XXV*, %XXV** null ; <%XXV*> [#uses=1]
%unique_1.i58 = ptrtoint %XXV* %param_y to i1 ; <i1> [#uses=1]
br i1 %unique_1.i58, label %function_setup.cont, label %FailedVerify
diff --git a/llvm/test/CodeGen/X86/2009-01-18-ConstantExprCrash.ll b/llvm/test/CodeGen/X86/2009-01-18-ConstantExprCrash.ll
index f895336491e..e4f78f876d8 100644
--- a/llvm/test/CodeGen/X86/2009-01-18-ConstantExprCrash.ll
+++ b/llvm/test/CodeGen/X86/2009-01-18-ConstantExprCrash.ll
@@ -25,7 +25,7 @@ bb4.i.i70: ; preds = %bb4.i.i70, %bb.i51
br i1 false, label %_ZN11xercesc_2_59XMLString9stringLenEPKt.exit.i73, label %bb4.i.i70
_ZN11xercesc_2_59XMLString9stringLenEPKt.exit.i73: ; preds = %bb4.i.i70
- %0 = load i16* getelementptr ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE, i32 0, i32 add (i32 ashr (i32 sub (i32 ptrtoint (i16* getelementptr ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE, i32 0, i32 4) to i32), i32 ptrtoint ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE to i32)), i32 1), i32 1)), align 4 ; <i16> [#uses=0]
+ %0 = load i16, i16* getelementptr ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE, i32 0, i32 add (i32 ashr (i32 sub (i32 ptrtoint (i16* getelementptr ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE, i32 0, i32 4) to i32), i32 ptrtoint ([7 x i16]* @_ZN11xercesc_2_5L17gIdeographicCharsE to i32)), i32 1), i32 1)), align 4 ; <i16> [#uses=0]
br label %bb4.i5.i141
bb4.i5.i141: ; preds = %bb4.i5.i141, %_ZN11xercesc_2_59XMLString9stringLenEPKt.exit.i73
diff --git a/llvm/test/CodeGen/X86/2009-01-31-BigShift2.ll b/llvm/test/CodeGen/X86/2009-01-31-BigShift2.ll
index b478f27a95b..90d14e7b707 100644
--- a/llvm/test/CodeGen/X86/2009-01-31-BigShift2.ll
+++ b/llvm/test/CodeGen/X86/2009-01-31-BigShift2.ll
@@ -2,7 +2,7 @@
; PR3449
define void @test(<8 x double>* %P, i64* %Q) nounwind {
- %A = load <8 x double>* %P ; <<8 x double>> [#uses=1]
+ %A = load <8 x double>, <8 x double>* %P ; <<8 x double>> [#uses=1]
%B = bitcast <8 x double> %A to i512 ; <i512> [#uses=1]
%C = lshr i512 %B, 448 ; <i512> [#uses=1]
%D = trunc i512 %C to i64 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-02-01-LargeMask.ll b/llvm/test/CodeGen/X86/2009-02-01-LargeMask.ll
index c4042e6c9c6..e91208d5b3a 100644
--- a/llvm/test/CodeGen/X86/2009-02-01-LargeMask.ll
+++ b/llvm/test/CodeGen/X86/2009-02-01-LargeMask.ll
@@ -19,7 +19,7 @@ entry:
bb.i49.i72: ; preds = %bb.i49.i72, %entry
%UNP.i1482.0 = phi i288 [ %.ins659, %bb.i49.i72 ], [ undef, %entry ] ; <i288> [#uses=1]
- %0 = load i32* null, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%1 = xor i32 %0, 17834 ; <i32> [#uses=1]
%2 = zext i32 %1 to i288 ; <i288> [#uses=1]
%3 = shl i288 %2, 160 ; <i288> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll b/llvm/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll
index e75af13a600..592a7e33b19 100644
--- a/llvm/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll
+++ b/llvm/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll
@@ -13,7 +13,7 @@ bb: ; preds = %entry
unreachable
bb1: ; preds = %entry
- %0 = load i32* @g_3, align 4 ; <i32> [#uses=2]
+ %0 = load i32, i32* @g_3, align 4 ; <i32> [#uses=2]
%1 = sext i32 %0 to i64 ; <i64> [#uses=1]
%2 = or i64 %1, %p_66 ; <i64> [#uses=1]
%3 = shl i64 %2, 0 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll b/llvm/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll
index b7a8a1cb7e7..19c2dfd4ea8 100644
--- a/llvm/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll
+++ b/llvm/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll
@@ -6,7 +6,7 @@ target triple = "i386-apple-darwin9.6"
define i32 @alac_decode_frame() nounwind {
entry:
- %tmp2 = load i8** null ; <i8*> [#uses=2]
+ %tmp2 = load i8*, i8** null ; <i8*> [#uses=2]
%tmp34 = getelementptr i8, i8* %tmp2, i32 4 ; <i8*> [#uses=2]
%tmp5.i424 = bitcast i8* %tmp34 to i8** ; <i8**> [#uses=2]
%tmp15.i = getelementptr i8, i8* %tmp2, i32 12 ; <i8*> [#uses=1]
@@ -17,9 +17,9 @@ if.then43: ; preds = %entry
ret i32 0
if.end47: ; preds = %entry
- %tmp5.i590 = load i8** %tmp5.i424 ; <i8*> [#uses=0]
+ %tmp5.i590 = load i8*, i8** %tmp5.i424 ; <i8*> [#uses=0]
store i32 19, i32* %0
- %tmp6.i569 = load i8** %tmp5.i424 ; <i8*> [#uses=0]
+ %tmp6.i569 = load i8*, i8** %tmp5.i424 ; <i8*> [#uses=0]
%1 = call i32 asm "bswap $0", "=r,0,~{dirflag},~{fpsr},~{flags}"(i32 0) nounwind ; <i32> [#uses=0]
br i1 false, label %bb.nph, label %if.then63
diff --git a/llvm/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll b/llvm/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll
index 602febf7ccf..fca03a2a925 100644
--- a/llvm/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll
+++ b/llvm/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll
@@ -29,41 +29,41 @@ entry:
call void @llvm.dbg.declare(metadata [0 x i8]** %str.0, metadata !8, metadata !{!"0x102"}), !dbg !7
%4 = call i8* @llvm.stacksave(), !dbg !7 ; <i8*> [#uses=1]
store i8* %4, i8** %saved_stack.1, align 8, !dbg !7
- %5 = load i8** %s1_addr, align 8, !dbg !13 ; <i8*> [#uses=1]
+ %5 = load i8*, i8** %s1_addr, align 8, !dbg !13 ; <i8*> [#uses=1]
%6 = call i64 @strlen(i8* %5) nounwind readonly, !dbg !13 ; <i64> [#uses=1]
%7 = add i64 %6, 1, !dbg !13 ; <i64> [#uses=1]
store i64 %7, i64* %3, align 8, !dbg !13
- %8 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
+ %8 = load i64, i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
%9 = sub nsw i64 %8, 1, !dbg !13 ; <i64> [#uses=0]
- %10 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
+ %10 = load i64, i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
%11 = mul i64 %10, 8, !dbg !13 ; <i64> [#uses=0]
- %12 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
+ %12 = load i64, i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
store i64 %12, i64* %2, align 8, !dbg !13
- %13 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
+ %13 = load i64, i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
%14 = mul i64 %13, 8, !dbg !13 ; <i64> [#uses=0]
- %15 = load i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
+ %15 = load i64, i64* %3, align 8, !dbg !13 ; <i64> [#uses=1]
store i64 %15, i64* %1, align 8, !dbg !13
- %16 = load i64* %1, align 8, !dbg !13 ; <i64> [#uses=1]
+ %16 = load i64, i64* %1, align 8, !dbg !13 ; <i64> [#uses=1]
%17 = trunc i64 %16 to i32, !dbg !13 ; <i32> [#uses=1]
%18 = alloca i8, i32 %17, !dbg !13 ; <i8*> [#uses=1]
%19 = bitcast i8* %18 to [0 x i8]*, !dbg !13 ; <[0 x i8]*> [#uses=1]
store [0 x i8]* %19, [0 x i8]** %str.0, align 8, !dbg !13
- %20 = load [0 x i8]** %str.0, align 8, !dbg !15 ; <[0 x i8]*> [#uses=1]
+ %20 = load [0 x i8]*, [0 x i8]** %str.0, align 8, !dbg !15 ; <[0 x i8]*> [#uses=1]
%21 = getelementptr inbounds [0 x i8], [0 x i8]* %20, i64 0, i64 0, !dbg !15 ; <i8*> [#uses=1]
store i8 0, i8* %21, align 1, !dbg !15
- %22 = load [0 x i8]** %str.0, align 8, !dbg !16 ; <[0 x i8]*> [#uses=1]
+ %22 = load [0 x i8]*, [0 x i8]** %str.0, align 8, !dbg !16 ; <[0 x i8]*> [#uses=1]
%23 = getelementptr inbounds [0 x i8], [0 x i8]* %22, i64 0, i64 0, !dbg !16 ; <i8*> [#uses=1]
- %24 = load i8* %23, align 1, !dbg !16 ; <i8> [#uses=1]
+ %24 = load i8, i8* %23, align 1, !dbg !16 ; <i8> [#uses=1]
%25 = sext i8 %24 to i32, !dbg !16 ; <i32> [#uses=1]
store i32 %25, i32* %0, align 4, !dbg !16
- %26 = load i8** %saved_stack.1, align 8, !dbg !16 ; <i8*> [#uses=1]
+ %26 = load i8*, i8** %saved_stack.1, align 8, !dbg !16 ; <i8*> [#uses=1]
call void @llvm.stackrestore(i8* %26), !dbg !16
- %27 = load i32* %0, align 4, !dbg !16 ; <i32> [#uses=1]
+ %27 = load i32, i32* %0, align 4, !dbg !16 ; <i32> [#uses=1]
store i32 %27, i32* %retval, align 4, !dbg !16
br label %return, !dbg !16
return: ; preds = %entry
- %retval1 = load i32* %retval, !dbg !16 ; <i32> [#uses=1]
+ %retval1 = load i32, i32* %retval, !dbg !16 ; <i32> [#uses=1]
%retval12 = trunc i32 %retval1 to i8, !dbg !16 ; <i8> [#uses=1]
ret i8 %retval12, !dbg !16
}
diff --git a/llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll b/llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
index 064b575f75f..db3133364e2 100644
--- a/llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
+++ b/llvm/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
@@ -33,17 +33,17 @@ bb26: ; preds = %bb4
br i1 %cond.i, label %bb.i, label %bb4
bb.i: ; preds = %bb26
- %3 = load i32* null, align 4 ; <i32> [#uses=1]
+ %3 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%4 = uitofp i32 %3 to float ; <float> [#uses=1]
%.sum13.i = add i64 0, 4 ; <i64> [#uses=1]
%5 = getelementptr i8, i8* null, i64 %.sum13.i ; <i8*> [#uses=1]
%6 = bitcast i8* %5 to i32* ; <i32*> [#uses=1]
- %7 = load i32* %6, align 4 ; <i32> [#uses=1]
+ %7 = load i32, i32* %6, align 4 ; <i32> [#uses=1]
%8 = uitofp i32 %7 to float ; <float> [#uses=1]
%.sum.i = add i64 0, 8 ; <i64> [#uses=1]
%9 = getelementptr i8, i8* null, i64 %.sum.i ; <i8*> [#uses=1]
%10 = bitcast i8* %9 to i32* ; <i32*> [#uses=1]
- %11 = load i32* %10, align 4 ; <i32> [#uses=1]
+ %11 = load i32, i32* %10, align 4 ; <i32> [#uses=1]
%12 = uitofp i32 %11 to float ; <float> [#uses=1]
%13 = insertelement <4 x float> undef, float %4, i32 0 ; <<4 x float>> [#uses=1]
%14 = insertelement <4 x float> %13, float %8, i32 1 ; <<4 x float>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-03-03-BTHang.ll b/llvm/test/CodeGen/X86/2009-03-03-BTHang.ll
index 626117d7037..d6d24cda295 100644
--- a/llvm/test/CodeGen/X86/2009-03-03-BTHang.ll
+++ b/llvm/test/CodeGen/X86/2009-03-03-BTHang.ll
@@ -10,7 +10,7 @@ entry:
%1 = and i32 %0, -4096 ; <i32> [#uses=1]
%2 = inttoptr i32 %1 to %struct.HandleBlock* ; <%struct.HandleBlock*> [#uses=3]
%3 = getelementptr %struct.HandleBlock, %struct.HandleBlock* %2, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
- %4 = load i32* %3, align 4096 ; <i32> [#uses=1]
+ %4 = load i32, i32* %3, align 4096 ; <i32> [#uses=1]
%5 = icmp eq i32 %4, 1751280747 ; <i1> [#uses=1]
br i1 %5, label %bb, label %bb1
@@ -25,7 +25,7 @@ bb: ; preds = %entry
%not.i = and i32 %9, 31 ; <i32> [#uses=1]
%13 = xor i32 %not.i, 31 ; <i32> [#uses=1]
%14 = shl i32 1, %13 ; <i32> [#uses=1]
- %15 = load i32* %12, align 4 ; <i32> [#uses=1]
+ %15 = load i32, i32* %12, align 4 ; <i32> [#uses=1]
%16 = and i32 %15, %14 ; <i32> [#uses=1]
%17 = icmp eq i32 %16, 0 ; <i1> [#uses=1]
%tmp = zext i1 %17 to i8 ; <i8> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-03-05-burr-list-crash.ll b/llvm/test/CodeGen/X86/2009-03-05-burr-list-crash.ll
index 8eb44b5e065..853bb16aa32 100644
--- a/llvm/test/CodeGen/X86/2009-03-05-burr-list-crash.ll
+++ b/llvm/test/CodeGen/X86/2009-03-05-burr-list-crash.ll
@@ -15,7 +15,7 @@ define fastcc i8* @1(i8*) nounwind {
; <label>:3 ; preds = %1
%4 = call i64 @strlen(i8* %0) nounwind readonly ; <i64> [#uses=1]
%5 = trunc i64 %4 to i32 ; <i32> [#uses=2]
- %6 = load i32* @0, align 4 ; <i32> [#uses=1]
+ %6 = load i32, i32* @0, align 4 ; <i32> [#uses=1]
%7 = sub i32 %5, %6 ; <i32> [#uses=2]
%8 = sext i32 %5 to i64 ; <i64> [#uses=1]
%9 = sext i32 %7 to i64 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-03-09-APIntCrash.ll b/llvm/test/CodeGen/X86/2009-03-09-APIntCrash.ll
index 896c9686cc4..3bff7dc7656 100644
--- a/llvm/test/CodeGen/X86/2009-03-09-APIntCrash.ll
+++ b/llvm/test/CodeGen/X86/2009-03-09-APIntCrash.ll
@@ -7,7 +7,7 @@ entry:
br i1 false, label %if.then, label %return
if.then: ; preds = %entry
- %srcval18 = load i128* null, align 8 ; <i128> [#uses=1]
+ %srcval18 = load i128, i128* null, align 8 ; <i128> [#uses=1]
%tmp15 = lshr i128 %srcval18, 64 ; <i128> [#uses=1]
%tmp9 = mul i128 %tmp15, 18446744073709551616000 ; <i128> [#uses=1]
br label %return
diff --git a/llvm/test/CodeGen/X86/2009-03-10-CoalescerBug.ll b/llvm/test/CodeGen/X86/2009-03-10-CoalescerBug.ll
index 90dff8878a7..38dd2fa0b10 100644
--- a/llvm/test/CodeGen/X86/2009-03-10-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2009-03-10-CoalescerBug.ll
@@ -8,7 +8,7 @@
define i32 @pnoutrefresh(%struct.WINDOW* %win, i32 %pminrow, i32 %pmincol, i32 %sminrow, i32 %smincol, i32 %smaxrow, i32 %smaxcol) nounwind optsize ssp {
entry:
- %0 = load i16* null, align 4 ; <i16> [#uses=2]
+ %0 = load i16, i16* null, align 4 ; <i16> [#uses=2]
%1 = icmp sgt i16 0, %0 ; <i1> [#uses=1]
br i1 %1, label %bb12, label %bb13
diff --git a/llvm/test/CodeGen/X86/2009-03-23-LinearScanBug.ll b/llvm/test/CodeGen/X86/2009-03-23-LinearScanBug.ll
index 06dfdc0c767..ba04364e4a0 100644
--- a/llvm/test/CodeGen/X86/2009-03-23-LinearScanBug.ll
+++ b/llvm/test/CodeGen/X86/2009-03-23-LinearScanBug.ll
@@ -2,9 +2,9 @@
define fastcc void @optimize_bit_field() nounwind {
bb4:
- %a = load i32* null ; <i32> [#uses=1]
- %s = load i32* getelementptr (i32* null, i32 1) ; <i32> [#uses=1]
- %z = load i32* getelementptr (i32* null, i32 2) ; <i32> [#uses=1]
+ %a = load i32, i32* null ; <i32> [#uses=1]
+ %s = load i32, i32* getelementptr (i32* null, i32 1) ; <i32> [#uses=1]
+ %z = load i32, i32* getelementptr (i32* null, i32 2) ; <i32> [#uses=1]
%r = bitcast i32 0 to i32 ; <i32> [#uses=1]
%q = trunc i32 %z to i8 ; <i8> [#uses=1]
%b = icmp eq i8 0, %q ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-03-23-MultiUseSched.ll b/llvm/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
index bbc1d341d4e..276d52366ae 100644
--- a/llvm/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
+++ b/llvm/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
@@ -10,30 +10,30 @@
@X = external global i64 ; <i64*> [#uses=25]
define fastcc i64 @foo() nounwind {
- %tmp = load volatile i64* @X ; <i64> [#uses=7]
- %tmp1 = load volatile i64* @X ; <i64> [#uses=5]
- %tmp2 = load volatile i64* @X ; <i64> [#uses=3]
- %tmp3 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp4 = load volatile i64* @X ; <i64> [#uses=5]
- %tmp5 = load volatile i64* @X ; <i64> [#uses=3]
- %tmp6 = load volatile i64* @X ; <i64> [#uses=2]
- %tmp7 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp8 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp9 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp10 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp11 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp12 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp13 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp14 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp15 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp16 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp17 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp18 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp19 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp20 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp21 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp22 = load volatile i64* @X ; <i64> [#uses=1]
- %tmp23 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp = load volatile i64, i64* @X ; <i64> [#uses=7]
+ %tmp1 = load volatile i64, i64* @X ; <i64> [#uses=5]
+ %tmp2 = load volatile i64, i64* @X ; <i64> [#uses=3]
+ %tmp3 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp4 = load volatile i64, i64* @X ; <i64> [#uses=5]
+ %tmp5 = load volatile i64, i64* @X ; <i64> [#uses=3]
+ %tmp6 = load volatile i64, i64* @X ; <i64> [#uses=2]
+ %tmp7 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp8 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp9 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp10 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp11 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp12 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp13 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp14 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp15 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp16 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp17 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp18 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp19 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp20 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp21 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp22 = load volatile i64, i64* @X ; <i64> [#uses=1]
+ %tmp23 = load volatile i64, i64* @X ; <i64> [#uses=1]
%tmp24 = call i64 @llvm.bswap.i64(i64 %tmp8) ; <i64> [#uses=1]
%tmp25 = add i64 %tmp6, %tmp5 ; <i64> [#uses=1]
%tmp26 = add i64 %tmp25, %tmp4 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-03-25-TestBug.ll b/llvm/test/CodeGen/X86/2009-03-25-TestBug.ll
index cc1d73da05c..b8b6d9211d9 100644
--- a/llvm/test/CodeGen/X86/2009-03-25-TestBug.ll
+++ b/llvm/test/CodeGen/X86/2009-03-25-TestBug.ll
@@ -9,7 +9,7 @@
define void @func(i32* %b) nounwind {
bb1579.i.i: ; preds = %bb1514.i.i, %bb191.i.i
- %tmp176 = load i32* %b, align 4
+ %tmp176 = load i32, i32* %b, align 4
%tmp177 = and i32 %tmp176, 2
%tmp178 = icmp eq i32 %tmp177, 0
br i1 %tmp178, label %hello, label %world
diff --git a/llvm/test/CodeGen/X86/2009-04-14-IllegalRegs.ll b/llvm/test/CodeGen/X86/2009-04-14-IllegalRegs.ll
index 24a15903740..1e5e9339361 100644
--- a/llvm/test/CodeGen/X86/2009-04-14-IllegalRegs.ll
+++ b/llvm/test/CodeGen/X86/2009-04-14-IllegalRegs.ll
@@ -15,7 +15,7 @@ entry:
store i8 48, i8* %2, align 1
%3 = getelementptr %struct.X, %struct.X* %xxx, i32 0, i32 1 ; <[32 x i8]*> [#uses=1]
%4 = getelementptr [32 x i8], [32 x i8]* %3, i32 0, i32 31 ; <i8*> [#uses=1]
- %5 = load i8* %4, align 1 ; <i8> [#uses=1]
+ %5 = load i8, i8* %4, align 1 ; <i8> [#uses=1]
%6 = getelementptr %struct.X, %struct.X* %xxx, i32 0, i32 1 ; <[32 x i8]*> [#uses=1]
%7 = getelementptr [32 x i8], [32 x i8]* %6, i32 0, i32 0 ; <i8*> [#uses=1]
store i8 %5, i8* %7, align 1
@@ -23,12 +23,12 @@ entry:
store i8 15, i8* %8, align 1
%9 = call i32 (...)* bitcast (i32 (%struct.X*, %struct.X*)* @f to i32 (...)*)(%struct.X* byval align 4 %xxx, %struct.X* byval align 4 %xxx) nounwind ; <i32> [#uses=1]
store i32 %9, i32* %0, align 4
- %10 = load i32* %0, align 4 ; <i32> [#uses=1]
+ %10 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
store i32 %10, i32* %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval1
}
diff --git a/llvm/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll b/llvm/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll
index fafab4d4a0f..1d03a1b20a3 100644
--- a/llvm/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll
+++ b/llvm/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll
@@ -72,7 +72,7 @@ bb349: ; preds = %bb349, %entry
%47 = and i64 %20, %not417 ; <i64> [#uses=1]
%48 = xor i64 0, %47 ; <i64> [#uses=1]
%49 = getelementptr [80 x i64], [80 x i64]* @K512, i64 0, i64 0 ; <i64*> [#uses=1]
- %50 = load i64* %49, align 8 ; <i64> [#uses=1]
+ %50 = load i64, i64* %49, align 8 ; <i64> [#uses=1]
%51 = add i64 %48, 0 ; <i64> [#uses=1]
%52 = add i64 %51, 0 ; <i64> [#uses=1]
%53 = add i64 %52, 0 ; <i64> [#uses=1]
@@ -88,12 +88,12 @@ bb349: ; preds = %bb349, %entry
%61 = and i32 %60, 15 ; <i32> [#uses=1]
%62 = zext i32 %61 to i64 ; <i64> [#uses=1]
%63 = getelementptr [16 x i64], [16 x i64]* null, i64 0, i64 %62 ; <i64*> [#uses=2]
- %64 = load i64* null, align 8 ; <i64> [#uses=1]
+ %64 = load i64, i64* null, align 8 ; <i64> [#uses=1]
%65 = lshr i64 %64, 6 ; <i64> [#uses=1]
%66 = xor i64 0, %65 ; <i64> [#uses=1]
%67 = xor i64 %66, 0 ; <i64> [#uses=1]
- %68 = load i64* %46, align 8 ; <i64> [#uses=1]
- %69 = load i64* null, align 8 ; <i64> [#uses=1]
+ %68 = load i64, i64* %46, align 8 ; <i64> [#uses=1]
+ %69 = load i64, i64* null, align 8 ; <i64> [#uses=1]
%70 = add i64 %68, 0 ; <i64> [#uses=1]
%71 = add i64 %70, %67 ; <i64> [#uses=1]
%72 = add i64 %71, %69 ; <i64> [#uses=1]
@@ -106,7 +106,7 @@ bb349: ; preds = %bb349, %entry
%76 = and i64 %33, %not429 ; <i64> [#uses=1]
%77 = xor i64 %75, %76 ; <i64> [#uses=1]
%78 = getelementptr [80 x i64], [80 x i64]* @K512, i64 0, i64 0 ; <i64*> [#uses=1]
- %79 = load i64* %78, align 16 ; <i64> [#uses=1]
+ %79 = load i64, i64* %78, align 16 ; <i64> [#uses=1]
%80 = add i64 %77, %20 ; <i64> [#uses=1]
%81 = add i64 %80, %72 ; <i64> [#uses=1]
%82 = add i64 %81, %74 ; <i64> [#uses=1]
@@ -119,14 +119,14 @@ bb349: ; preds = %bb349, %entry
%87 = add i64 0, %85 ; <i64> [#uses=1]
%asmtmp435 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 8, i64 0) nounwind ; <i64> [#uses=1]
%88 = xor i64 0, %asmtmp435 ; <i64> [#uses=1]
- %89 = load i64* null, align 8 ; <i64> [#uses=3]
+ %89 = load i64, i64* null, align 8 ; <i64> [#uses=3]
%asmtmp436 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 19, i64 %89) nounwind ; <i64> [#uses=1]
%asmtmp437 = call i64 asm "rorq $1,$0", "=r,J,0,~{dirflag},~{fpsr},~{flags},~{cc}"(i32 61, i64 %89) nounwind ; <i64> [#uses=1]
%90 = lshr i64 %89, 6 ; <i64> [#uses=1]
%91 = xor i64 %asmtmp436, %90 ; <i64> [#uses=1]
%92 = xor i64 %91, %asmtmp437 ; <i64> [#uses=1]
- %93 = load i64* %63, align 8 ; <i64> [#uses=1]
- %94 = load i64* null, align 8 ; <i64> [#uses=1]
+ %93 = load i64, i64* %63, align 8 ; <i64> [#uses=1]
+ %94 = load i64, i64* null, align 8 ; <i64> [#uses=1]
%95 = add i64 %93, %88 ; <i64> [#uses=1]
%96 = add i64 %95, %92 ; <i64> [#uses=1]
%97 = add i64 %96, %94 ; <i64> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/2009-04-24.ll b/llvm/test/CodeGen/X86/2009-04-24.ll
index d104c875760..7647dcc7feb 100644
--- a/llvm/test/CodeGen/X86/2009-04-24.ll
+++ b/llvm/test/CodeGen/X86/2009-04-24.ll
@@ -8,6 +8,6 @@
define i32 @f() {
entry:
- %tmp1 = load i32* @i
+ %tmp1 = load i32, i32* @i
ret i32 %tmp1
}
diff --git a/llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll b/llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll
index 94d3eb21cec..c687b6905b7 100644
--- a/llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2009-04-25-CoalescerBug.ll
@@ -6,7 +6,7 @@ entry:
br label %while.cond
while.cond: ; preds = %while.cond, %entry
- %tmp15 = load i32* %tmp13 ; <i32> [#uses=2]
+ %tmp15 = load i32, i32* %tmp13 ; <i32> [#uses=2]
%bf.lo = lshr i32 %tmp15, 1 ; <i32> [#uses=1]
%bf.lo.cleared = and i32 %bf.lo, 2147483647 ; <i32> [#uses=1]
%conv = zext i32 %bf.lo.cleared to i64 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-04-27-CoalescerAssert.ll b/llvm/test/CodeGen/X86/2009-04-27-CoalescerAssert.ll
index 84cf341ef89..a364c89b5f4 100644
--- a/llvm/test/CodeGen/X86/2009-04-27-CoalescerAssert.ll
+++ b/llvm/test/CodeGen/X86/2009-04-27-CoalescerAssert.ll
@@ -22,9 +22,9 @@
define void @getAffNeighbour(i32 %curr_mb_nr, i32 %xN, i32 %yN, i32 %is_chroma, %struct.PixelPos* %pix) nounwind {
entry:
%Opq.sa.calc = add i32 0, 2 ; <i32> [#uses=2]
- %0 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=3]
+ %0 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=3]
%1 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %0, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %2 = load %struct.Macroblock** %1, align 8 ; <%struct.Macroblock*> [#uses=24]
+ %2 = load %struct.Macroblock*, %struct.Macroblock** %1, align 8 ; <%struct.Macroblock*> [#uses=24]
%3 = zext i32 %curr_mb_nr to i64 ; <i64> [#uses=24]
%4 = sext i32 %is_chroma to i64 ; <i64> [#uses=8]
br label %meshBB392
@@ -32,9 +32,9 @@ entry:
entry.fragment: ; preds = %meshBB392
%Opq.sa.calc747 = add i32 %Opq.sa.calc921, 70 ; <i32> [#uses=0]
%5 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %0, i64 0, i32 119, i64 %4, i64 0 ; <i32*> [#uses=1]
- %6 = load i32* %5, align 4 ; <i32> [#uses=2]
+ %6 = load i32, i32* %5, align 4 ; <i32> [#uses=2]
%7 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %0, i64 0, i32 119, i64 %4, i64 1 ; <i32*> [#uses=1]
- %8 = load i32* %7, align 4 ; <i32> [#uses=5]
+ %8 = load i32, i32* %7, align 4 ; <i32> [#uses=5]
br label %entry.fragment181
entry.fragment181: ; preds = %entry.fragment
@@ -75,7 +75,7 @@ bb4: ; preds = %bb3
bb5: ; preds = %meshBB428
%Opq.sa.calc470 = sub i32 %Opq.sa.calc897, -49 ; <i32> [#uses=1]
%17 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
- %18 = load i32* %17, align 4 ; <i32> [#uses=1]
+ %18 = load i32, i32* %17, align 4 ; <i32> [#uses=1]
br label %bb5.fragment
bb5.fragment: ; preds = %bb5
@@ -92,7 +92,7 @@ bb6: ; preds = %bb5.fragment
bb7: ; preds = %bb6
%Opq.sa.calc476 = add i32 %Opq.sa.calc873, -58 ; <i32> [#uses=1]
%22 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 25 ; <i32*> [#uses=1]
- %23 = load i32* %22, align 8 ; <i32> [#uses=1]
+ %23 = load i32, i32* %22, align 8 ; <i32> [#uses=1]
%24 = add i32 %23, 1 ; <i32> [#uses=1]
%25 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
br label %meshBB388
@@ -103,14 +103,14 @@ bb7.fragment: ; preds = %meshBB388
%Opq.sa.calc708 = xor i32 %Opq.sa.calc707, 474 ; <i32> [#uses=0]
store i32 %.SV194.phi, i32* %.SV196.phi, align 4
%26 = getelementptr %struct.Macroblock, %struct.Macroblock* %.load17.SV.phi, i64 %.load36.SV.phi, i32 29 ; <i32*> [#uses=1]
- %27 = load i32* %26, align 8 ; <i32> [#uses=2]
+ %27 = load i32, i32* %26, align 8 ; <i32> [#uses=2]
store i32 %27, i32* %.load67.SV.phi, align 4
br label %bb96
bb8: ; preds = %meshBB348
%Opq.sa.calc479 = sub i32 %Opq.sa.calc805, 141 ; <i32> [#uses=1]
%28 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 22 ; <i32*> [#uses=2]
- %29 = load i32* %28, align 4 ; <i32> [#uses=2]
+ %29 = load i32, i32* %28, align 4 ; <i32> [#uses=2]
%30 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
br label %meshBB368
@@ -118,25 +118,25 @@ bb8.fragment: ; preds = %meshBB368
%Opq.sa.calc765 = sub i32 %Opq.sa.calc768, -115 ; <i32> [#uses=2]
store i32 %.SV198.phi, i32* %.SV200.phi, align 4
%31 = getelementptr %struct.Macroblock, %struct.Macroblock* %.load16.SV.phi, i64 %.load35.SV.phi, i32 26 ; <i32*> [#uses=2]
- %32 = load i32* %31, align 4 ; <i32> [#uses=4]
+ %32 = load i32, i32* %31, align 4 ; <i32> [#uses=4]
store i32 %32, i32* %.load66.SV.phi, align 4
- %33 = load i32* %31, align 4 ; <i32> [#uses=1]
+ %33 = load i32, i32* %31, align 4 ; <i32> [#uses=1]
%34 = icmp eq i32 %33, 0 ; <i1> [#uses=1]
br i1 %34, label %bb96, label %bb9
bb9: ; preds = %bb8.fragment
%Opq.sa.calc482 = xor i32 %Opq.sa.calc765, 163 ; <i32> [#uses=0]
- %35 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %35 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%36 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %35, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %37 = load %struct.Macroblock** %36, align 8 ; <%struct.Macroblock*> [#uses=1]
- %38 = load i32* %.SV76.phi, align 4 ; <i32> [#uses=1]
+ %37 = load %struct.Macroblock*, %struct.Macroblock** %36, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %38 = load i32, i32* %.SV76.phi, align 4 ; <i32> [#uses=1]
br label %bb9.fragment
bb9.fragment: ; preds = %bb9
%Opq.sa.calc999 = add i32 %Opq.sa.calc765, -44 ; <i32> [#uses=1]
%39 = sext i32 %38 to i64 ; <i64> [#uses=1]
%40 = getelementptr %struct.Macroblock, %struct.Macroblock* %37, i64 %39, i32 20 ; <i32*> [#uses=1]
- %41 = load i32* %40, align 4 ; <i32> [#uses=1]
+ %41 = load i32, i32* %40, align 4 ; <i32> [#uses=1]
%42 = icmp eq i32 %41, 0 ; <i1> [#uses=1]
br i1 %42, label %bb96, label %bb11
@@ -161,7 +161,7 @@ bb13: ; preds = %bb5.fragment
bb13.fragment: ; preds = %meshBB360
%Opq.sa.calc870 = add i32 %Opq.sa.calc866, -129 ; <i32> [#uses=3]
- %47 = load i32* %.SV208.phi, align 8 ; <i32> [#uses=3]
+ %47 = load i32, i32* %.SV208.phi, align 8 ; <i32> [#uses=3]
br i1 %.load74.SV.phi, label %bb14, label %meshBB412
bb14: ; preds = %bb13.fragment
@@ -173,25 +173,25 @@ bb14: ; preds = %bb13.fragment
bb14.fragment: ; preds = %bb14
%Opq.sa.calc723 = sub i32 %Opq.sa.calc493, 117 ; <i32> [#uses=4]
- %50 = load i32* %49, align 8 ; <i32> [#uses=4]
+ %50 = load i32, i32* %49, align 8 ; <i32> [#uses=4]
store i32 %50, i32* %.SV52.phi1113, align 4
- %51 = load i32* %49, align 8 ; <i32> [#uses=1]
+ %51 = load i32, i32* %49, align 8 ; <i32> [#uses=1]
%52 = icmp eq i32 %51, 0 ; <i1> [#uses=1]
br i1 %52, label %meshBB, label %bb15
bb15: ; preds = %bb14.fragment
%Opq.sa.calc496 = sub i32 %Opq.sa.calc723, -8 ; <i32> [#uses=1]
- %53 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %53 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%54 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %53, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %55 = load %struct.Macroblock** %54, align 8 ; <%struct.Macroblock*> [#uses=1]
- %56 = load i32* %.SV208.phi, align 8 ; <i32> [#uses=1]
+ %55 = load %struct.Macroblock*, %struct.Macroblock** %54, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %56 = load i32, i32* %.SV208.phi, align 8 ; <i32> [#uses=1]
br label %meshBB324
bb15.fragment: ; preds = %meshBB324
%Opq.sa.calc925 = xor i32 %Opq.sa.calc750, 215 ; <i32> [#uses=2]
%57 = sext i32 %.SV214.phi to i64 ; <i64> [#uses=1]
%58 = getelementptr %struct.Macroblock, %struct.Macroblock* %.SV212.phi, i64 %57, i32 20 ; <i32*> [#uses=1]
- %59 = load i32* %58, align 4 ; <i32> [#uses=1]
+ %59 = load i32, i32* %58, align 4 ; <i32> [#uses=1]
%60 = icmp eq i32 %59, 0 ; <i1> [#uses=1]
br i1 %60, label %bb16, label %bb96
@@ -216,7 +216,7 @@ bb19.fragment: ; preds = %bb19
%Opq.sa.calc880 = xor i32 %Opq.sa.calc932, 246 ; <i32> [#uses=0]
store i32 %63, i32* %64, align 4
%65 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 29 ; <i32*> [#uses=1]
- %66 = load i32* %65, align 8 ; <i32> [#uses=2]
+ %66 = load i32, i32* %65, align 8 ; <i32> [#uses=2]
store i32 %66, i32* %.SV52.phi1186, align 4
br label %bb96
@@ -228,7 +228,7 @@ bb23: ; preds = %meshBB360
%Opq.sa.calc509 = xor i32 %Opq.sa.calc866, 70 ; <i32> [#uses=1]
%Opq.sa.calc508 = sub i32 %Opq.sa.calc509, -19 ; <i32> [#uses=0]
%67 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
- %68 = load i32* %67, align 4 ; <i32> [#uses=1]
+ %68 = load i32, i32* %67, align 4 ; <i32> [#uses=1]
%69 = icmp eq i32 %68, 0 ; <i1> [#uses=1]
%70 = and i32 %curr_mb_nr, 1 ; <i32> [#uses=1]
%71 = icmp eq i32 %70, 0 ; <i1> [#uses=2]
@@ -237,7 +237,7 @@ bb23: ; preds = %meshBB360
bb23.fragment: ; preds = %bb23
%Opq.sa.calc847 = sub i32 %Opq.sa.calc866, -9 ; <i32> [#uses=2]
%72 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 22 ; <i32*> [#uses=3]
- %73 = load i32* %72, align 4 ; <i32> [#uses=3]
+ %73 = load i32, i32* %72, align 4 ; <i32> [#uses=3]
%74 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=3]
store i32 %73, i32* %74, align 4
br label %bb23.fragment182
@@ -247,9 +247,9 @@ bb23.fragment182: ; preds = %bb23.fragment
%Opq.sa.calc742 = add i32 %Opq.sa.calc744, %Opq.sa.calc847 ; <i32> [#uses=1]
%Opq.sa.calc743 = add i32 %Opq.sa.calc742, -149 ; <i32> [#uses=2]
%75 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 26 ; <i32*> [#uses=2]
- %76 = load i32* %75, align 4 ; <i32> [#uses=3]
+ %76 = load i32, i32* %75, align 4 ; <i32> [#uses=3]
store i32 %76, i32* %.SV52.phi1113, align 4
- %77 = load i32* %75, align 4 ; <i32> [#uses=1]
+ %77 = load i32, i32* %75, align 4 ; <i32> [#uses=1]
%78 = icmp ne i32 %77, 0 ; <i1> [#uses=2]
br i1 %69, label %meshBB344, label %meshBB432
@@ -264,10 +264,10 @@ bb25: ; preds = %bb24
bb26: ; preds = %bb25
%Opq.sa.calc519 = xor i32 %Opq.sa.calc515, 23 ; <i32> [#uses=2]
%Opq.sa.calc518 = xor i32 %Opq.sa.calc519, 84 ; <i32> [#uses=1]
- %79 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %79 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%80 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %79, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %81 = load %struct.Macroblock** %80, align 8 ; <%struct.Macroblock*> [#uses=1]
- %82 = load i32* %.SV99.phi, align 4 ; <i32> [#uses=1]
+ %81 = load %struct.Macroblock*, %struct.Macroblock** %80, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %82 = load i32, i32* %.SV99.phi, align 4 ; <i32> [#uses=1]
br label %meshBB340
bb26.fragment: ; preds = %meshBB340
@@ -276,7 +276,7 @@ bb26.fragment: ; preds = %meshBB340
%Opq.sa.calc917 = add i32 %Opq.sa.calc916, -237 ; <i32> [#uses=1]
%83 = sext i32 %.SV230.phi to i64 ; <i64> [#uses=1]
%84 = getelementptr %struct.Macroblock, %struct.Macroblock* %.SV228.phi, i64 %83, i32 20 ; <i32*> [#uses=1]
- %85 = load i32* %84, align 4 ; <i32> [#uses=1]
+ %85 = load i32, i32* %84, align 4 ; <i32> [#uses=1]
%86 = icmp eq i32 %85, 0 ; <i1> [#uses=1]
br i1 %86, label %meshBB420, label %meshBB356
@@ -308,17 +308,17 @@ bb32: ; preds = %bb24
bb33: ; preds = %bb32
%Opq.sa.calc534 = sub i32 %Opq.sa.calc512, -75 ; <i32> [#uses=2]
- %92 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %92 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%93 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %92, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %94 = load %struct.Macroblock** %93, align 8 ; <%struct.Macroblock*> [#uses=1]
- %95 = load i32* %.SV99.phi, align 4 ; <i32> [#uses=1]
+ %94 = load %struct.Macroblock*, %struct.Macroblock** %93, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %95 = load i32, i32* %.SV99.phi, align 4 ; <i32> [#uses=1]
br label %bb33.fragment
bb33.fragment: ; preds = %bb33
%Opq.sa.calc712 = add i32 %Opq.sa.calc534, -109 ; <i32> [#uses=3]
%96 = sext i32 %95 to i64 ; <i64> [#uses=1]
%97 = getelementptr %struct.Macroblock, %struct.Macroblock* %94, i64 %96, i32 20 ; <i32*> [#uses=1]
- %98 = load i32* %97, align 4 ; <i32> [#uses=1]
+ %98 = load i32, i32* %97, align 4 ; <i32> [#uses=1]
%99 = icmp eq i32 %98, 0 ; <i1> [#uses=1]
br i1 %99, label %bb34, label %meshBB
@@ -372,17 +372,17 @@ bb40: ; preds = %bb39
bb41: ; preds = %meshBB336
%Opq.sa.calc557 = sub i32 %Opq.sa.calc979, 143 ; <i32> [#uses=1]
- %108 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %108 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%109 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %108, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %110 = load %struct.Macroblock** %109, align 8 ; <%struct.Macroblock*> [#uses=1]
- %111 = load i32* %.SV99.phi1128, align 4 ; <i32> [#uses=1]
+ %110 = load %struct.Macroblock*, %struct.Macroblock** %109, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %111 = load i32, i32* %.SV99.phi1128, align 4 ; <i32> [#uses=1]
br label %bb41.fragment
bb41.fragment: ; preds = %bb41
%Opq.sa.calc987 = xor i32 %Opq.sa.calc557, 213 ; <i32> [#uses=4]
%112 = sext i32 %111 to i64 ; <i64> [#uses=1]
%113 = getelementptr %struct.Macroblock, %struct.Macroblock* %110, i64 %112, i32 20 ; <i32*> [#uses=1]
- %114 = load i32* %113, align 4 ; <i32> [#uses=1]
+ %114 = load i32, i32* %113, align 4 ; <i32> [#uses=1]
%115 = icmp eq i32 %114, 0 ; <i1> [#uses=1]
br i1 %115, label %bb42, label %bb96
@@ -415,17 +415,17 @@ bb48: ; preds = %bb39
bb49: ; preds = %bb48
%Opq.sa.calc572 = add i32 %Opq.sa.calc798, 84 ; <i32> [#uses=0]
- %122 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %122 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%123 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %122, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %124 = load %struct.Macroblock** %123, align 8 ; <%struct.Macroblock*> [#uses=1]
- %125 = load i32* %.SV99.phi1037, align 4 ; <i32> [#uses=1]
+ %124 = load %struct.Macroblock*, %struct.Macroblock** %123, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %125 = load i32, i32* %.SV99.phi1037, align 4 ; <i32> [#uses=1]
br label %bb49.fragment
bb49.fragment: ; preds = %bb49
%Opq.sa.calc860 = sub i32 %Opq.sa.calc569, 114 ; <i32> [#uses=5]
%126 = sext i32 %125 to i64 ; <i64> [#uses=1]
%127 = getelementptr %struct.Macroblock, %struct.Macroblock* %124, i64 %126, i32 20 ; <i32*> [#uses=1]
- %128 = load i32* %127, align 4 ; <i32> [#uses=1]
+ %128 = load i32, i32* %127, align 4 ; <i32> [#uses=1]
%129 = icmp eq i32 %128, 0 ; <i1> [#uses=1]
br i1 %129, label %bb50, label %meshBB380
@@ -485,7 +485,7 @@ bb58: ; preds = %bb56.fragment
bb59: ; preds = %bb58
%Opq.sa.calc599 = add i32 %Opq.sa.calc1002, 151 ; <i32> [#uses=0]
%141 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
- %142 = load i32* %141, align 4 ; <i32> [#uses=1]
+ %142 = load i32, i32* %141, align 4 ; <i32> [#uses=1]
br label %bb59.fragment
bb59.fragment: ; preds = %bb59
@@ -502,7 +502,7 @@ bb60: ; preds = %bb59.fragment
bb61: ; preds = %bb60
%Opq.sa.calc605 = xor i32 %Opq.sa.calc731, 57 ; <i32> [#uses=1]
%146 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 23 ; <i32*> [#uses=2]
- %147 = load i32* %146, align 8 ; <i32> [#uses=3]
+ %147 = load i32, i32* %146, align 8 ; <i32> [#uses=3]
%148 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=3]
br label %bb61.fragment
@@ -510,23 +510,23 @@ bb61.fragment: ; preds = %bb61
%Opq.sa.calc700 = sub i32 %Opq.sa.calc605, 108 ; <i32> [#uses=3]
store i32 %147, i32* %148, align 4
%149 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 27 ; <i32*> [#uses=4]
- %150 = load i32* %149, align 8 ; <i32> [#uses=1]
+ %150 = load i32, i32* %149, align 8 ; <i32> [#uses=1]
%151 = icmp eq i32 %150, 0 ; <i1> [#uses=1]
br i1 %151, label %bb65, label %bb62
bb62: ; preds = %bb61.fragment
%Opq.sa.calc608 = add i32 %Opq.sa.calc700, -94 ; <i32> [#uses=1]
- %152 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=2]
+ %152 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=2]
%153 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %152, i64 0, i32 45 ; <i32*> [#uses=1]
- %154 = load i32* %153, align 4 ; <i32> [#uses=1]
+ %154 = load i32, i32* %153, align 4 ; <i32> [#uses=1]
%155 = icmp eq i32 %154, 1 ; <i1> [#uses=1]
br i1 %155, label %bb63, label %bb64
bb63: ; preds = %bb62
%Opq.sa.calc611 = add i32 %Opq.sa.calc700, -101 ; <i32> [#uses=2]
%156 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %152, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %157 = load %struct.Macroblock** %156, align 8 ; <%struct.Macroblock*> [#uses=1]
- %158 = load i32* %146, align 8 ; <i32> [#uses=1]
+ %157 = load %struct.Macroblock*, %struct.Macroblock** %156, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %158 = load i32, i32* %146, align 8 ; <i32> [#uses=1]
br label %meshBB452
bb63.fragment: ; preds = %meshBB452
@@ -534,7 +534,7 @@ bb63.fragment: ; preds = %meshBB452
%Opq.sa.calc890 = add i32 %Opq.sa.calc891, -3 ; <i32> [#uses=2]
%159 = sext i32 %.SV266.phi to i64 ; <i64> [#uses=1]
%160 = getelementptr %struct.Macroblock, %struct.Macroblock* %.SV264.phi, i64 %159, i32 20 ; <i32*> [#uses=1]
- %161 = load i32* %160, align 4 ; <i32> [#uses=1]
+ %161 = load i32, i32* %160, align 4 ; <i32> [#uses=1]
%162 = icmp eq i32 %161, 0 ; <i1> [#uses=1]
br i1 %162, label %bb64, label %meshBB456
@@ -562,7 +562,7 @@ bb65: ; preds = %meshBB456, %bb64, %bb61.fragment
%Opq.link.SV618.phi = phi i32 [ %Opq.sa.calc816, %meshBB456 ], [ %Opq.sa.calc700, %bb61.fragment ], [ %Opq.sa.calc614, %bb64 ] ; <i32> [#uses=1]
%Opq.link.mask620 = and i32 %Opq.link.SV618.phi, 40 ; <i32> [#uses=1]
%Opq.sa.calc617 = add i32 %Opq.link.mask620, -35 ; <i32> [#uses=2]
- %164 = load i32* %.SV152.phi1058, align 8 ; <i32> [#uses=1]
+ %164 = load i32, i32* %.SV152.phi1058, align 8 ; <i32> [#uses=1]
br label %meshBB436
bb65.fragment: ; preds = %meshBB436
@@ -590,7 +590,7 @@ bb68: ; preds = %bb59.fragment
bb68.fragment: ; preds = %meshBB344
%Opq.sa.calc784 = sub i32 %Opq.link.mask722, 3 ; <i32> [#uses=5]
- %168 = load i32* %.SV274.phi, align 8 ; <i32> [#uses=3]
+ %168 = load i32, i32* %.SV274.phi, align 8 ; <i32> [#uses=3]
br i1 %.load144.SV.phi, label %bb69, label %meshBB412
bb69: ; preds = %bb68.fragment
@@ -604,18 +604,18 @@ bb69.fragment: ; preds = %bb69
%Opq.sa.calc996 = sub i32 %Opq.sa.calc784, -9 ; <i32> [#uses=3]
%Opq.sa.calc994 = sub i32 %Opq.sa.calc996, %Opq.sa.calc784 ; <i32> [#uses=1]
%Opq.sa.calc995 = sub i32 %Opq.sa.calc994, 3 ; <i32> [#uses=2]
- %171 = load i32* %170, align 8 ; <i32> [#uses=3]
+ %171 = load i32, i32* %170, align 8 ; <i32> [#uses=3]
store i32 %171, i32* %.SV52.phi1170, align 4
- %172 = load i32* %170, align 8 ; <i32> [#uses=1]
+ %172 = load i32, i32* %170, align 8 ; <i32> [#uses=1]
%173 = icmp eq i32 %172, 0 ; <i1> [#uses=1]
br i1 %173, label %meshBB396, label %meshBB400
bb70: ; preds = %meshBB400
%Opq.sa.calc630 = add i32 %Opq.sa.calc824, -203 ; <i32> [#uses=2]
- %174 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %174 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%175 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %174, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %176 = load %struct.Macroblock** %175, align 8 ; <%struct.Macroblock*> [#uses=1]
- %177 = load i32* %.SV156.phi, align 8 ; <i32> [#uses=1]
+ %176 = load %struct.Macroblock*, %struct.Macroblock** %175, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %177 = load i32, i32* %.SV156.phi, align 8 ; <i32> [#uses=1]
br label %meshBB428
bb70.fragment: ; preds = %meshBB428
@@ -623,7 +623,7 @@ bb70.fragment: ; preds = %meshBB428
%Opq.sa.calc738 = sub i32 %Opq.sa.calc739, 1 ; <i32> [#uses=2]
%178 = sext i32 %.SV280.phi to i64 ; <i64> [#uses=1]
%179 = getelementptr %struct.Macroblock, %struct.Macroblock* %.SV278.phi, i64 %178, i32 20 ; <i32*> [#uses=1]
- %180 = load i32* %179, align 4 ; <i32> [#uses=1]
+ %180 = load i32, i32* %179, align 4 ; <i32> [#uses=1]
%181 = icmp eq i32 %180, 0 ; <i1> [#uses=1]
br i1 %181, label %meshBB452, label %meshBB356
@@ -648,7 +648,7 @@ bb74.fragment: ; preds = %bb74
%Opq.sa.calc1011 = sub i32 %Opq.sa.calc636, -19 ; <i32> [#uses=0]
store i32 %184, i32* %185, align 4
%186 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 27 ; <i32*> [#uses=1]
- %187 = load i32* %186, align 8 ; <i32> [#uses=2]
+ %187 = load i32, i32* %186, align 8 ; <i32> [#uses=2]
store i32 %187, i32* %.SV52.phi1186, align 4
br label %bb96
@@ -660,9 +660,9 @@ bb76: ; preds = %bb58
bb77: ; preds = %bb76
%Opq.sa.calc643 = add i32 %Opq.sa.calc640, 2 ; <i32> [#uses=2]
- %189 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %189 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%190 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %189, i64 0, i32 45 ; <i32*> [#uses=1]
- %191 = load i32* %190, align 4 ; <i32> [#uses=1]
+ %191 = load i32, i32* %190, align 4 ; <i32> [#uses=1]
%192 = icmp eq i32 %191, 2 ; <i1> [#uses=1]
br i1 %192, label %meshBB416, label %bb79
@@ -670,7 +670,7 @@ bb78: ; preds = %meshBB416
%Opq.sa.calc647 = xor i32 %Opq.sa.calc971, 25 ; <i32> [#uses=2]
%Opq.sa.calc646 = sub i32 %Opq.sa.calc647, 29 ; <i32> [#uses=0]
%193 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 23 ; <i32*> [#uses=1]
- %194 = load i32* %193, align 8 ; <i32> [#uses=1]
+ %194 = load i32, i32* %193, align 8 ; <i32> [#uses=1]
%195 = add i32 %194, 1 ; <i32> [#uses=1]
br label %bb78.fragment
@@ -703,7 +703,7 @@ bb83: ; preds = %bb56.fragment
bb84: ; preds = %bb83
%Opq.sa.calc661 = xor i32 %Opq.sa.calc658, 22 ; <i32> [#uses=1]
%199 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
- %200 = load i32* %199, align 4 ; <i32> [#uses=1]
+ %200 = load i32, i32* %199, align 4 ; <i32> [#uses=1]
br label %meshBB400
bb84.fragment: ; preds = %meshBB400
@@ -723,7 +723,7 @@ bb85: ; preds = %meshBB372
bb86: ; preds = %meshBB336
%Opq.sa.calc670 = sub i32 %Opq.sa.calc979, 35 ; <i32> [#uses=1]
%204 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 24 ; <i32*> [#uses=1]
- %205 = load i32* %204, align 4 ; <i32> [#uses=1]
+ %205 = load i32, i32* %204, align 4 ; <i32> [#uses=1]
%206 = add i32 %205, 1 ; <i32> [#uses=1]
%207 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
br label %bb86.fragment
@@ -732,7 +732,7 @@ bb86.fragment: ; preds = %bb86
%Opq.sa.calc943 = xor i32 %Opq.sa.calc670, 123 ; <i32> [#uses=2]
store i32 %206, i32* %207, align 4
%208 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 28 ; <i32*> [#uses=1]
- %209 = load i32* %208, align 4 ; <i32> [#uses=2]
+ %209 = load i32, i32* %208, align 4 ; <i32> [#uses=2]
store i32 %209, i32* %.SV52.phi1234, align 4
br label %meshBB424
@@ -749,7 +749,7 @@ bb89: ; preds = %bb84.fragment
bb89.fragment: ; preds = %bb89
%Opq.sa.calc962 = add i32 %Opq.sa.calc677, -188 ; <i32> [#uses=3]
- %211 = load i32* %210, align 4 ; <i32> [#uses=3]
+ %211 = load i32, i32* %210, align 4 ; <i32> [#uses=3]
br i1 %203, label %bb90, label %meshBB408
bb90: ; preds = %bb89.fragment
@@ -762,25 +762,25 @@ bb90: ; preds = %bb89.fragment
bb90.fragment: ; preds = %bb90
%Opq.sa.calc773 = sub i32 %Opq.sa.calc680, 60 ; <i32> [#uses=3]
%Opq.sa.calc772 = add i32 %Opq.sa.calc773, -25 ; <i32> [#uses=2]
- %214 = load i32* %213, align 4 ; <i32> [#uses=3]
+ %214 = load i32, i32* %213, align 4 ; <i32> [#uses=3]
store i32 %214, i32* %.SV52.phi1190, align 4
- %215 = load i32* %213, align 4 ; <i32> [#uses=1]
+ %215 = load i32, i32* %213, align 4 ; <i32> [#uses=1]
%216 = icmp eq i32 %215, 0 ; <i1> [#uses=1]
br i1 %216, label %meshBB416, label %meshBB368
bb91: ; preds = %meshBB368
%Opq.sa.calc683 = sub i32 %Opq.sa.calc768, -7 ; <i32> [#uses=0]
- %217 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %217 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%218 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %217, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
- %219 = load %struct.Macroblock** %218, align 8 ; <%struct.Macroblock*> [#uses=1]
- %220 = load i32* %.SV170.phi, align 4 ; <i32> [#uses=1]
+ %219 = load %struct.Macroblock*, %struct.Macroblock** %218, align 8 ; <%struct.Macroblock*> [#uses=1]
+ %220 = load i32, i32* %.SV170.phi, align 4 ; <i32> [#uses=1]
br label %bb91.fragment
bb91.fragment: ; preds = %bb91
%Opq.sa.calc853 = xor i32 %Opq.sa.calc768, 8 ; <i32> [#uses=1]
%221 = sext i32 %220 to i64 ; <i64> [#uses=1]
%222 = getelementptr %struct.Macroblock, %struct.Macroblock* %219, i64 %221, i32 20 ; <i32*> [#uses=1]
- %223 = load i32* %222, align 4 ; <i32> [#uses=1]
+ %223 = load i32, i32* %222, align 4 ; <i32> [#uses=1]
%224 = icmp eq i32 %223, 0 ; <i1> [#uses=1]
br i1 %224, label %bb92, label %bb96
@@ -805,7 +805,7 @@ bb95.fragment: ; preds = %meshBB384
%Opq.sa.calc841 = sub i32 %Opq.sa.calc901, 76 ; <i32> [#uses=0]
store i32 %.SV306.phi, i32* %.SV308.phi, align 4
%229 = getelementptr %struct.Macroblock, %struct.Macroblock* %.load.SV.phi, i64 %.load20.SV.phi, i32 28 ; <i32*> [#uses=1]
- %230 = load i32* %229, align 4 ; <i32> [#uses=2]
+ %230 = load i32, i32* %229, align 4 ; <i32> [#uses=2]
store i32 %230, i32* %.load53.SV.phi, align 4
br label %bb96
@@ -826,13 +826,13 @@ bb97: ; preds = %meshBB424, %meshBB408, %meshBB352, %bb96, %bb21
%.SV70.phi1148 = phi i32 [ %.SV70.phi1195, %meshBB424 ], [ %.SV70.phi1215, %meshBB408 ], [ %.SV70.phi1138, %meshBB352 ], [ %.SV70.phi1085, %bb96 ], [ %.SV70.phi1027, %bb21 ] ; <i32> [#uses=1]
%yM.0.reg2mem.0.SV.phi = phi i32 [ -1, %meshBB424 ], [ -1, %meshBB408 ], [ -1, %meshBB352 ], [ %yM.0.SV.phi, %bb96 ], [ -1, %bb21 ] ; <i32> [#uses=1]
%Opq.sa.calc694 = xor i32 0, 243 ; <i32> [#uses=1]
- %232 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %232 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%233 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %232, i64 0, i32 45 ; <i32*> [#uses=1]
br label %bb97.fragment
bb97.fragment: ; preds = %bb97
%Opq.sa.calc928 = xor i32 %Opq.sa.calc694, 128 ; <i32> [#uses=1]
- %234 = load i32* %233, align 4 ; <i32> [#uses=1]
+ %234 = load i32, i32* %233, align 4 ; <i32> [#uses=1]
%235 = icmp eq i32 %234, 0 ; <i1> [#uses=1]
br i1 %235, label %return, label %bb98
@@ -855,13 +855,13 @@ bb98.fragment: ; preds = %meshBB376
%Opq.sa.calc1008 = sub i32 %Opq.link.mask911, 13 ; <i32> [#uses=1]
%241 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 4 ; <i32*> [#uses=4]
%242 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
- %243 = load i32* %242, align 4 ; <i32> [#uses=1]
- %244 = load void (i32, i32*, i32*)** @get_mb_block_pos, align 8 ; <void (i32, i32*, i32*)*> [#uses=1]
+ %243 = load i32, i32* %242, align 4 ; <i32> [#uses=1]
+ %244 = load void (i32, i32*, i32*)*, void (i32, i32*, i32*)** @get_mb_block_pos, align 8 ; <void (i32, i32*, i32*)*> [#uses=1]
tail call void %244(i32 %243, i32* %241, i32* %.SV317.phi) nounwind
- %245 = load i32* %241, align 4 ; <i32> [#uses=1]
- %246 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %245 = load i32, i32* %241, align 4 ; <i32> [#uses=1]
+ %246 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%247 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %246, i64 0, i32 119, i64 %.load39.SV.phi, i64 0 ; <i32*> [#uses=1]
- %248 = load i32* %247, align 4 ; <i32> [#uses=1]
+ %248 = load i32, i32* %247, align 4 ; <i32> [#uses=1]
%249 = mul i32 %248, %245 ; <i32> [#uses=2]
store i32 %249, i32* %241, align 4
br label %bb98.fragment183
@@ -869,15 +869,15 @@ bb98.fragment: ; preds = %meshBB376
bb98.fragment183: ; preds = %bb98.fragment
%Opq.sa.calc777 = sub i32 %Opq.sa.calc1008, -158 ; <i32> [#uses=1]
%Opq.sa.calc776 = sub i32 %Opq.sa.calc777, 46 ; <i32> [#uses=0]
- %250 = load i32* %.SV317.phi, align 4 ; <i32> [#uses=1]
- %251 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
+ %250 = load i32, i32* %.SV317.phi, align 4 ; <i32> [#uses=1]
+ %251 = load %struct.ImageParameters*, %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
%252 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %251, i64 0, i32 119, i64 %.load39.SV.phi, i64 1 ; <i32*> [#uses=1]
- %253 = load i32* %252, align 4 ; <i32> [#uses=1]
+ %253 = load i32, i32* %252, align 4 ; <i32> [#uses=1]
%254 = mul i32 %253, %250 ; <i32> [#uses=1]
- %255 = load i32* %.SV313.phi, align 4 ; <i32> [#uses=1]
+ %255 = load i32, i32* %.SV313.phi, align 4 ; <i32> [#uses=1]
%256 = add i32 %255, %249 ; <i32> [#uses=1]
store i32 %256, i32* %241, align 4
- %257 = load i32* %.SV315.phi, align 4 ; <i32> [#uses=1]
+ %257 = load i32, i32* %.SV315.phi, align 4 ; <i32> [#uses=1]
%258 = add i32 %257, %254 ; <i32> [#uses=1]
store i32 %258, i32* %.SV317.phi, align 4
ret void
diff --git a/llvm/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll b/llvm/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll
index 6cebb1e926c..5ddb5cae296 100644
--- a/llvm/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll
+++ b/llvm/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll
@@ -9,13 +9,13 @@ entry:
%arrayidx4 = getelementptr i32, i32* %data, i32 3 ; <i32*> [#uses=1]
%arrayidx6 = getelementptr i32, i32* %data, i32 4 ; <i32*> [#uses=1]
%arrayidx8 = getelementptr i32, i32* %data, i32 5 ; <i32*> [#uses=1]
- %tmp9 = load i32* %arrayidx8 ; <i32> [#uses=1]
+ %tmp9 = load i32, i32* %arrayidx8 ; <i32> [#uses=1]
%arrayidx11 = getelementptr i32, i32* %data, i32 6 ; <i32*> [#uses=1]
- %tmp12 = load i32* %arrayidx11 ; <i32> [#uses=1]
+ %tmp12 = load i32, i32* %arrayidx11 ; <i32> [#uses=1]
%arrayidx14 = getelementptr i32, i32* %data, i32 7 ; <i32*> [#uses=1]
- %tmp15 = load i32* %arrayidx14 ; <i32> [#uses=1]
+ %tmp15 = load i32, i32* %arrayidx14 ; <i32> [#uses=1]
%arrayidx17 = getelementptr i32, i32* %data, i32 8 ; <i32*> [#uses=1]
- %tmp18 = load i32* %arrayidx17 ; <i32> [#uses=1]
+ %tmp18 = load i32, i32* %arrayidx17 ; <i32> [#uses=1]
%0 = call i32 asm "cpuid", "={ax},=*{bx},=*{cx},=*{dx},{ax},{bx},{cx},{dx},~{dirflag},~{fpsr},~{flags}"(i32* %arrayidx2, i32* %arrayidx4, i32* %arrayidx6, i32 %tmp9, i32 %tmp12, i32 %tmp15, i32 %tmp18) nounwind ; <i32> [#uses=1]
store i32 %0, i32* %arrayidx
ret void
diff --git a/llvm/test/CodeGen/X86/2009-04-29-LinearScanBug.ll b/llvm/test/CodeGen/X86/2009-04-29-LinearScanBug.ll
index e7ef9d8486d..b4d202c168d 100644
--- a/llvm/test/CodeGen/X86/2009-04-29-LinearScanBug.ll
+++ b/llvm/test/CodeGen/X86/2009-04-29-LinearScanBug.ll
@@ -105,17 +105,17 @@
define fastcc i32 @pf_state_compare_ext_gwy(%struct.pf_state_key* nocapture %a, %struct.pf_state_key* nocapture %b) nounwind optsize ssp {
entry:
%0 = zext i8 0 to i32 ; <i32> [#uses=2]
- %1 = load i8* null, align 1 ; <i8> [#uses=2]
+ %1 = load i8, i8* null, align 1 ; <i8> [#uses=2]
%2 = zext i8 %1 to i32 ; <i32> [#uses=1]
%3 = sub i32 %0, %2 ; <i32> [#uses=1]
%4 = icmp eq i8 0, %1 ; <i1> [#uses=1]
br i1 %4, label %bb1, label %bb79
bb1: ; preds = %entry
- %5 = load i8* null, align 4 ; <i8> [#uses=2]
+ %5 = load i8, i8* null, align 4 ; <i8> [#uses=2]
%6 = zext i8 %5 to i32 ; <i32> [#uses=2]
%7 = getelementptr %struct.pf_state_key, %struct.pf_state_key* %b, i32 0, i32 3 ; <i8*> [#uses=1]
- %8 = load i8* %7, align 4 ; <i8> [#uses=2]
+ %8 = load i8, i8* %7, align 4 ; <i8> [#uses=2]
%9 = zext i8 %8 to i32 ; <i32> [#uses=1]
%10 = sub i32 %6, %9 ; <i32> [#uses=1]
%11 = icmp eq i8 %5, %8 ; <i1> [#uses=1]
@@ -132,32 +132,32 @@ bb3: ; preds = %bb1
]
bb4: ; preds = %bb3, %bb3
- %12 = load i16* null, align 4 ; <i16> [#uses=1]
+ %12 = load i16, i16* null, align 4 ; <i16> [#uses=1]
%13 = zext i16 %12 to i32 ; <i32> [#uses=1]
%14 = sub i32 0, %13 ; <i32> [#uses=1]
br i1 false, label %bb23, label %bb79
bb6: ; preds = %bb3
- %15 = load i16* null, align 4 ; <i16> [#uses=1]
+ %15 = load i16, i16* null, align 4 ; <i16> [#uses=1]
%16 = zext i16 %15 to i32 ; <i32> [#uses=1]
%17 = sub i32 0, %16 ; <i32> [#uses=1]
ret i32 %17
bb10: ; preds = %bb3
- %18 = load i8* null, align 1 ; <i8> [#uses=2]
+ %18 = load i8, i8* null, align 1 ; <i8> [#uses=2]
%19 = zext i8 %18 to i32 ; <i32> [#uses=1]
%20 = sub i32 0, %19 ; <i32> [#uses=1]
%21 = icmp eq i8 0, %18 ; <i1> [#uses=1]
br i1 %21, label %bb12, label %bb79
bb12: ; preds = %bb10
- %22 = load i16* null, align 4 ; <i16> [#uses=1]
+ %22 = load i16, i16* null, align 4 ; <i16> [#uses=1]
%23 = zext i16 %22 to i32 ; <i32> [#uses=1]
%24 = sub i32 0, %23 ; <i32> [#uses=1]
ret i32 %24
bb17: ; preds = %bb3
- %25 = load i8* null, align 1 ; <i8> [#uses=2]
+ %25 = load i8, i8* null, align 1 ; <i8> [#uses=2]
%26 = icmp eq i8 %25, 1 ; <i1> [#uses=1]
br i1 %26, label %bb18, label %bb23
@@ -166,16 +166,16 @@ bb18: ; preds = %bb17
br i1 %27, label %bb19, label %bb23
bb19: ; preds = %bb18
- %28 = load i16* null, align 4 ; <i16> [#uses=1]
+ %28 = load i16, i16* null, align 4 ; <i16> [#uses=1]
%29 = zext i16 %28 to i32 ; <i32> [#uses=1]
%30 = sub i32 0, %29 ; <i32> [#uses=1]
br i1 false, label %bb23, label %bb79
bb21: ; preds = %bb3
%31 = getelementptr %struct.pf_state_key, %struct.pf_state_key* %a, i32 0, i32 1, i32 1, i32 0 ; <i32*> [#uses=1]
- %32 = load i32* %31, align 4 ; <i32> [#uses=2]
+ %32 = load i32, i32* %31, align 4 ; <i32> [#uses=2]
%33 = getelementptr %struct.pf_state_key, %struct.pf_state_key* %b, i32 0, i32 1, i32 1, i32 0 ; <i32*> [#uses=1]
- %34 = load i32* %33, align 4 ; <i32> [#uses=2]
+ %34 = load i32, i32* %33, align 4 ; <i32> [#uses=2]
%35 = sub i32 %32, %34 ; <i32> [#uses=1]
%36 = icmp eq i32 %32, %34 ; <i1> [#uses=1]
br i1 %36, label %bb23, label %bb79
@@ -188,11 +188,11 @@ bb24: ; preds = %bb23
ret i32 1
bb70: ; preds = %bb23
- %37 = load i32 (%struct.pf_app_state*, %struct.pf_app_state*)** null, align 4 ; <i32 (%struct.pf_app_state*, %struct.pf_app_state*)*> [#uses=3]
+ %37 = load i32 (%struct.pf_app_state*, %struct.pf_app_state*)*, i32 (%struct.pf_app_state*, %struct.pf_app_state*)** null, align 4 ; <i32 (%struct.pf_app_state*, %struct.pf_app_state*)*> [#uses=3]
br i1 false, label %bb78, label %bb73
bb73: ; preds = %bb70
- %38 = load i32 (%struct.pf_app_state*, %struct.pf_app_state*)** null, align 4 ; <i32 (%struct.pf_app_state*, %struct.pf_app_state*)*> [#uses=2]
+ %38 = load i32 (%struct.pf_app_state*, %struct.pf_app_state*)*, i32 (%struct.pf_app_state*, %struct.pf_app_state*)** null, align 4 ; <i32 (%struct.pf_app_state*, %struct.pf_app_state*)*> [#uses=2]
%39 = icmp eq i32 (%struct.pf_app_state*, %struct.pf_app_state*)* %38, null ; <i1> [#uses=1]
br i1 %39, label %bb78, label %bb74
diff --git a/llvm/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll b/llvm/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll
index aa14a54496d..c291fede98e 100644
--- a/llvm/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll
+++ b/llvm/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll
@@ -71,7 +71,7 @@
define fastcc void @dropCell(%struct.MemPage* nocapture %pPage, i32 %idx, i32 %sz) nounwind ssp {
entry:
- %0 = load i8** null, align 8 ; <i8*> [#uses=4]
+ %0 = load i8*, i8** null, align 8 ; <i8*> [#uses=4]
%1 = or i32 0, 0 ; <i32> [#uses=1]
%2 = icmp slt i32 %sz, 4 ; <i1> [#uses=1]
%size_addr.0.i = select i1 %2, i32 4, i32 %sz ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-04-scale.ll b/llvm/test/CodeGen/X86/2009-04-scale.ll
index 7939235809e..1fc5f2b234f 100644
--- a/llvm/test/CodeGen/X86/2009-04-scale.ll
+++ b/llvm/test/CodeGen/X86/2009-04-scale.ll
@@ -8,13 +8,13 @@
define void @test() {
entry:
- %0 = load i32* null, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%1 = lshr i32 %0, 8 ; <i32> [#uses=1]
%2 = and i32 %1, 255 ; <i32> [#uses=1]
%3 = getelementptr %struct.array, %struct.array* null, i32 0, i32 3 ; <[256 x %struct.pair]*> [#uses=1]
%4 = getelementptr [256 x %struct.pair], [256 x %struct.pair]* %3, i32 0, i32 %2 ; <%struct.pair*> [#uses=1]
%5 = getelementptr %struct.pair, %struct.pair* %4, i32 0, i32 1 ; <i64*> [#uses=1]
- %6 = load i64* %5, align 4 ; <i64> [#uses=1]
+ %6 = load i64, i64* %5, align 4 ; <i64> [#uses=1]
%7 = xor i64 0, %6 ; <i64> [#uses=1]
%8 = xor i64 %7, 0 ; <i64> [#uses=1]
%9 = xor i64 %8, 0 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll b/llvm/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll
index c2cd89c33ee..e9d15583e56 100644
--- a/llvm/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll
+++ b/llvm/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll
@@ -12,7 +12,7 @@ entry:
br label %bb
bb: ; preds = %bb.i, %bb, %entry
- %2 = load volatile i32* @g_9, align 4 ; <i32> [#uses=2]
+ %2 = load volatile i32, i32* @g_9, align 4 ; <i32> [#uses=2]
%3 = icmp sgt i32 %2, 1 ; <i1> [#uses=1]
%4 = and i1 %3, %1 ; <i1> [#uses=1]
br i1 %4, label %bb.i, label %bb
diff --git a/llvm/test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll b/llvm/test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll
index 1d146207549..019d5dfb1fe 100644
--- a/llvm/test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll
+++ b/llvm/test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll
@@ -5,7 +5,7 @@ entry:
br label %bb14
bb14: ; preds = %bb
- %srcval16 = load i448* %P, align 8 ; <i448> [#uses=1]
+ %srcval16 = load i448, i448* %P, align 8 ; <i448> [#uses=1]
%tmp = zext i32 undef to i448 ; <i448> [#uses=1]
%tmp15 = shl i448 %tmp, 288 ; <i448> [#uses=1]
%mask = and i448 %srcval16, -2135987035423586845985235064014169866455883682256196619149693890381755748887481053010428711403521 ; <i448> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-05-30-ISelBug.ll b/llvm/test/CodeGen/X86/2009-05-30-ISelBug.ll
index 0ff39a3bde5..e01fe9f8930 100644
--- a/llvm/test/CodeGen/X86/2009-05-30-ISelBug.ll
+++ b/llvm/test/CodeGen/X86/2009-05-30-ISelBug.ll
@@ -14,13 +14,13 @@ bb35.i.backedge.exitStub: ; preds = %bb54.i
bb54.i: ; preds = %newFuncRoot
%1 = zext i32 %.reload51 to i64 ; <i64> [#uses=1]
%2 = getelementptr i32, i32* %0, i64 %1 ; <i32*> [#uses=1]
- %3 = load i32* %2, align 4 ; <i32> [#uses=2]
+ %3 = load i32, i32* %2, align 4 ; <i32> [#uses=2]
%4 = lshr i32 %3, 8 ; <i32> [#uses=1]
%5 = and i32 %3, 255 ; <i32> [#uses=1]
%6 = add i32 %5, 4 ; <i32> [#uses=1]
%7 = zext i32 %4 to i64 ; <i64> [#uses=1]
%8 = getelementptr i32, i32* %0, i64 %7 ; <i32*> [#uses=1]
- %9 = load i32* %8, align 4 ; <i32> [#uses=2]
+ %9 = load i32, i32* %8, align 4 ; <i32> [#uses=2]
%10 = and i32 %9, 255 ; <i32> [#uses=1]
%11 = lshr i32 %9, 8 ; <i32> [#uses=1]
%12 = add i32 %c_nblock_used.2.i, 5 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-06-02-RewriterBug.ll b/llvm/test/CodeGen/X86/2009-06-02-RewriterBug.ll
index 36cb81479db..6ce7af632ee 100644
--- a/llvm/test/CodeGen/X86/2009-06-02-RewriterBug.ll
+++ b/llvm/test/CodeGen/X86/2009-06-02-RewriterBug.ll
@@ -14,11 +14,11 @@ while.body: ; preds = %for.end, %bb.nph
%ctg22996 = getelementptr i8, i8* %in, i64 0 ; <i8*> [#uses=1]
%conv = zext i32 undef to i64 ; <i64> [#uses=1]
%conv11 = zext i32 undef to i64 ; <i64> [#uses=1]
- %tmp18 = load i32* undef ; <i32> [#uses=1]
+ %tmp18 = load i32, i32* undef ; <i32> [#uses=1]
%conv19 = zext i32 %tmp18 to i64 ; <i64> [#uses=1]
- %tmp30 = load i32* undef ; <i32> [#uses=1]
+ %tmp30 = load i32, i32* undef ; <i32> [#uses=1]
%conv31 = zext i32 %tmp30 to i64 ; <i64> [#uses=4]
- %ptrincdec3065 = load i8* null ; <i8> [#uses=1]
+ %ptrincdec3065 = load i8, i8* null ; <i8> [#uses=1]
%conv442709 = zext i8 %ptrincdec3065 to i64 ; <i64> [#uses=1]
%shl45 = shl i64 %conv442709, 16 ; <i64> [#uses=1]
%conv632707 = zext i8 undef to i64 ; <i64> [#uses=1]
@@ -68,10 +68,10 @@ while.body: ; preds = %for.end, %bb.nph
%add479 = add i64 %add473, %add441 ; <i64> [#uses=3]
%conv4932682 = zext i8 undef to i64 ; <i64> [#uses=1]
%shl494 = shl i64 %conv4932682, 16 ; <i64> [#uses=1]
- %ptrincdec4903012 = load i8* null ; <i8> [#uses=1]
+ %ptrincdec4903012 = load i8, i8* null ; <i8> [#uses=1]
%conv5032681 = zext i8 %ptrincdec4903012 to i64 ; <i64> [#uses=1]
%shl504 = shl i64 %conv5032681, 8 ; <i64> [#uses=1]
- %ptrincdec5003009 = load i8* null ; <i8> [#uses=1]
+ %ptrincdec5003009 = load i8, i8* null ; <i8> [#uses=1]
%conv5132680 = zext i8 %ptrincdec5003009 to i64 ; <i64> [#uses=1]
%or495 = or i64 %shl494, 0 ; <i64> [#uses=1]
%or505 = or i64 %or495, %conv5132680 ; <i64> [#uses=1]
@@ -91,10 +91,10 @@ while.body: ; preds = %for.end, %bb.nph
%xor575 = xor i64 %xor568, %or561 ; <i64> [#uses=1]
%add587 = add i64 %xor575, 0 ; <i64> [#uses=1]
%add593 = add i64 %add587, %add555 ; <i64> [#uses=1]
- %ptrincdec6043000 = load i8* null ; <i8> [#uses=1]
+ %ptrincdec6043000 = load i8, i8* null ; <i8> [#uses=1]
%conv6172676 = zext i8 %ptrincdec6043000 to i64 ; <i64> [#uses=1]
%shl618 = shl i64 %conv6172676, 8 ; <i64> [#uses=1]
- %ptrincdec6142997 = load i8* %ctg22996 ; <i8> [#uses=1]
+ %ptrincdec6142997 = load i8, i8* %ctg22996 ; <i8> [#uses=1]
%conv6272675 = zext i8 %ptrincdec6142997 to i64 ; <i64> [#uses=1]
%or619 = or i64 0, %conv6272675 ; <i64> [#uses=1]
%or628 = or i64 %or619, %shl618 ; <i64> [#uses=1]
@@ -106,7 +106,7 @@ while.body: ; preds = %for.end, %bb.nph
%xor700 = xor i64 0, %and699 ; <i64> [#uses=1]
%add701 = add i64 0, %xor700 ; <i64> [#uses=1]
%add707 = add i64 %add701, %add669 ; <i64> [#uses=4]
- %ptrincdec6242994 = load i8* null ; <i8> [#uses=1]
+ %ptrincdec6242994 = load i8, i8* null ; <i8> [#uses=1]
%conv7122673 = zext i8 %ptrincdec6242994 to i64 ; <i64> [#uses=1]
%shl713 = shl i64 %conv7122673, 24 ; <i64> [#uses=1]
%conv7412670 = zext i8 undef to i64 ; <i64> [#uses=1]
@@ -132,7 +132,7 @@ while.body: ; preds = %for.end, %bb.nph
%add821 = add i64 %add815, %add783 ; <i64> [#uses=1]
%add1160 = add i64 0, %add707 ; <i64> [#uses=0]
%add1157 = add i64 undef, undef ; <i64> [#uses=0]
- %ptrincdec11742940 = load i8* null ; <i8> [#uses=1]
+ %ptrincdec11742940 = load i8, i8* null ; <i8> [#uses=1]
%conv11872651 = zext i8 %ptrincdec11742940 to i64 ; <i64> [#uses=1]
%shl1188 = shl i64 %conv11872651, 8 ; <i64> [#uses=1]
%or1198 = or i64 0, %shl1188 ; <i64> [#uses=1]
@@ -172,18 +172,18 @@ bb.nph: ; preds = %entry
br label %while.body
while.body: ; preds = %for.end, %bb.nph
- %tmp3 = load i32* %arr ; <i32> [#uses=2]
+ %tmp3 = load i32, i32* %arr ; <i32> [#uses=2]
%conv = zext i32 %tmp3 to i64 ; <i64> [#uses=1]
- %tmp10 = load i32* %arrayidx9 ; <i32> [#uses=1]
+ %tmp10 = load i32, i32* %arrayidx9 ; <i32> [#uses=1]
%conv11 = zext i32 %tmp10 to i64 ; <i64> [#uses=1]
- %tmp14 = load i32* %arrayidx13 ; <i32> [#uses=3]
+ %tmp14 = load i32, i32* %arrayidx13 ; <i32> [#uses=3]
%conv15 = zext i32 %tmp14 to i64 ; <i64> [#uses=2]
- %tmp18 = load i32* undef ; <i32> [#uses=2]
+ %tmp18 = load i32, i32* undef ; <i32> [#uses=2]
%conv19 = zext i32 %tmp18 to i64 ; <i64> [#uses=1]
%conv23 = zext i32 undef to i64 ; <i64> [#uses=1]
- %tmp26 = load i32* %arrayidx25 ; <i32> [#uses=1]
+ %tmp26 = load i32, i32* %arrayidx25 ; <i32> [#uses=1]
%conv27 = zext i32 %tmp26 to i64 ; <i64> [#uses=1]
- %tmp30 = load i32* %arrayidx29 ; <i32> [#uses=2]
+ %tmp30 = load i32, i32* %arrayidx29 ; <i32> [#uses=2]
%conv31 = zext i32 %tmp30 to i64 ; <i64> [#uses=5]
%shl72 = shl i64 %conv31, 26 ; <i64> [#uses=1]
%shr = lshr i64 %conv31, 6 ; <i64> [#uses=1]
@@ -203,7 +203,7 @@ while.body: ; preds = %for.end, %bb.nph
%add137 = add i64 %add131, %add99 ; <i64> [#uses=5]
%conv1422700 = zext i8 undef to i64 ; <i64> [#uses=1]
%shl143 = shl i64 %conv1422700, 24 ; <i64> [#uses=1]
- %ptrincdec1393051 = load i8* undef ; <i8> [#uses=1]
+ %ptrincdec1393051 = load i8, i8* undef ; <i8> [#uses=1]
%conv1512699 = zext i8 %ptrincdec1393051 to i64 ; <i64> [#uses=1]
%shl152 = shl i64 %conv1512699, 16 ; <i64> [#uses=1]
%conv1712697 = zext i8 undef to i64 ; <i64> [#uses=1]
@@ -283,7 +283,7 @@ for.body: ; preds = %for.cond
%add1427 = add i64 %add1392, %d.0 ; <i64> [#uses=1]
%add1424 = add i64 %xor1412, 0 ; <i64> [#uses=1]
%add1430 = add i64 %add1424, %add1392 ; <i64> [#uses=5]
- %tmp1438 = load i32* undef ; <i32> [#uses=1]
+ %tmp1438 = load i32, i32* undef ; <i32> [#uses=1]
%conv1439 = zext i32 %tmp1438 to i64 ; <i64> [#uses=4]
%shl1441 = shl i64 %conv1439, 25 ; <i64> [#uses=1]
%shr1444 = lshr i64 %conv1439, 7 ; <i64> [#uses=1]
@@ -302,13 +302,13 @@ for.body: ; preds = %for.cond
%shr1479 = lshr i64 %conv1464, 10 ; <i64> [#uses=1]
%xor1477 = xor i64 %or1476, %shr1479 ; <i64> [#uses=1]
%xor1480 = xor i64 %xor1477, %or1470 ; <i64> [#uses=1]
- %tmp1499 = load i32* null ; <i32> [#uses=1]
+ %tmp1499 = load i32, i32* null ; <i32> [#uses=1]
%conv1500 = zext i32 %tmp1499 to i64 ; <i64> [#uses=1]
%add1491 = add i64 %conv1500, 0 ; <i64> [#uses=1]
%add1501 = add i64 %add1491, %xor1455 ; <i64> [#uses=1]
%add1502 = add i64 %add1501, %xor1480 ; <i64> [#uses=1]
%conv1504 = and i64 %add1502, 4294967295 ; <i64> [#uses=1]
- %tmp1541 = load i32* undef ; <i32> [#uses=1]
+ %tmp1541 = load i32, i32* undef ; <i32> [#uses=1]
%conv1542 = zext i32 %tmp1541 to i64 ; <i64> [#uses=1]
%add1527 = add i64 %conv1542, %g.0 ; <i64> [#uses=1]
%add1536 = add i64 %add1527, 0 ; <i64> [#uses=1]
@@ -327,7 +327,7 @@ for.body: ; preds = %for.cond
%add1576 = add i64 %xor1564, 0 ; <i64> [#uses=1]
%add1582 = add i64 %add1576, %add1544 ; <i64> [#uses=3]
store i32 undef, i32* undef
- %tmp1693 = load i32* undef ; <i32> [#uses=1]
+ %tmp1693 = load i32, i32* undef ; <i32> [#uses=1]
%conv1694 = zext i32 %tmp1693 to i64 ; <i64> [#uses=1]
%add1679 = add i64 %conv1694, %f.0 ; <i64> [#uses=1]
%add1688 = add i64 %add1679, 0 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll b/llvm/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll
index 06426a2a772..29795492d89 100644
--- a/llvm/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll
+++ b/llvm/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll
@@ -17,7 +17,7 @@
define fastcc void @MinSize(%struct.rec* %x) nounwind {
entry:
- %tmp13 = load i8* undef, align 4 ; <i8> [#uses=3]
+ %tmp13 = load i8, i8* undef, align 4 ; <i8> [#uses=3]
%tmp14 = zext i8 %tmp13 to i32 ; <i32> [#uses=2]
switch i32 %tmp14, label %bb1109 [
i32 42, label %bb246
diff --git a/llvm/test/CodeGen/X86/2009-06-05-VZextByteShort.ll b/llvm/test/CodeGen/X86/2009-06-05-VZextByteShort.ll
index ffbe02c7135..9c7eb6d633d 100644
--- a/llvm/test/CodeGen/X86/2009-06-05-VZextByteShort.ll
+++ b/llvm/test/CodeGen/X86/2009-06-05-VZextByteShort.ll
@@ -6,7 +6,7 @@ define <4 x i16> @a(i32* %x1) nounwind {
; CHECK-NEXT: movd %[[R]], %xmm0
; CHECK-NEXT: retl
- %x2 = load i32* %x1
+ %x2 = load i32, i32* %x1
%x3 = lshr i32 %x2, 1
%x = trunc i32 %x3 to i16
%r = insertelement <4 x i16> zeroinitializer, i16 %x, i32 0
@@ -20,7 +20,7 @@ define <8 x i16> @b(i32* %x1) nounwind {
; CHECK-NEXT: movd %e[[R]]x, %xmm0
; CHECK-NEXT: retl
- %x2 = load i32* %x1
+ %x2 = load i32, i32* %x1
%x3 = lshr i32 %x2, 1
%x = trunc i32 %x3 to i16
%r = insertelement <8 x i16> zeroinitializer, i16 %x, i32 0
@@ -34,7 +34,7 @@ define <8 x i8> @c(i32* %x1) nounwind {
; CHECK-NEXT: movd %e[[R]]x, %xmm0
; CHECK-NEXT: retl
- %x2 = load i32* %x1
+ %x2 = load i32, i32* %x1
%x3 = lshr i32 %x2, 1
%x = trunc i32 %x3 to i8
%r = insertelement <8 x i8> zeroinitializer, i8 %x, i32 0
@@ -48,7 +48,7 @@ define <16 x i8> @d(i32* %x1) nounwind {
; CHECK-NEXT: movd %e[[R]]x, %xmm0
; CHECK-NEXT: retl
- %x2 = load i32* %x1
+ %x2 = load i32, i32* %x1
%x3 = lshr i32 %x2, 1
%x = trunc i32 %x3 to i8
%r = insertelement <16 x i8> zeroinitializer, i8 %x, i32 0
diff --git a/llvm/test/CodeGen/X86/2009-07-15-CoalescerBug.ll b/llvm/test/CodeGen/X86/2009-07-15-CoalescerBug.ll
index eabaf775ede..beb57054dea 100644
--- a/llvm/test/CodeGen/X86/2009-07-15-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2009-07-15-CoalescerBug.ll
@@ -237,7 +237,7 @@ bb1545: ; preds = %bb1544
br i1 undef, label %bb1563, label %bb1558
bb1558: ; preds = %bb1545
- %0 = load %struct.SV** undef ; <%struct.SV*> [#uses=1]
+ %0 = load %struct.SV*, %struct.SV** undef ; <%struct.SV*> [#uses=1]
%1 = bitcast %struct.SV* %0 to %struct.GV* ; <%struct.GV*> [#uses=5]
br i1 undef, label %bb1563, label %bb1559
diff --git a/llvm/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll b/llvm/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll
index e83b3a7db59..045e89e1585 100644
--- a/llvm/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll
+++ b/llvm/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll
@@ -5,7 +5,7 @@
define fastcc i32 @bsGetUInt32() nounwind ssp {
entry:
- %bsBuff.promoted44 = load i32* @bsBuff ; <i32> [#uses=1]
+ %bsBuff.promoted44 = load i32, i32* @bsBuff ; <i32> [#uses=1]
%0 = add i32 0, -8 ; <i32> [#uses=1]
%1 = lshr i32 %bsBuff.promoted44, %0 ; <i32> [#uses=1]
%2 = shl i32 %1, 8 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-08-06-branchfolder-crash.ll b/llvm/test/CodeGen/X86/2009-08-06-branchfolder-crash.ll
index 2080c0ae2e0..a70861dcf84 100644
--- a/llvm/test/CodeGen/X86/2009-08-06-branchfolder-crash.ll
+++ b/llvm/test/CodeGen/X86/2009-08-06-branchfolder-crash.ll
@@ -43,12 +43,12 @@ entry:
br i1 %tobool, label %lor.lhs.false, label %if.then
lor.lhs.false: ; preds = %entry
- %tmp1 = load i8* @g_3 ; <i8> [#uses=1]
+ %tmp1 = load i8, i8* @g_3 ; <i8> [#uses=1]
%tobool3 = icmp eq i8 %tmp1, 0 ; <i1> [#uses=1]
br i1 %tobool3, label %return, label %if.then
if.then: ; preds = %lor.lhs.false, %entry
- %tmp4 = load i8* @g_3 ; <i8> [#uses=1]
+ %tmp4 = load i8, i8* @g_3 ; <i8> [#uses=1]
%conv5 = sext i8 %tmp4 to i32 ; <i32> [#uses=1]
ret i32 %conv5
@@ -93,12 +93,12 @@ entry:
br i1 %tobool, label %lor.lhs.false, label %if.then
lor.lhs.false: ; preds = %entry
- %tmp1 = load i8* @g_3 ; <i8> [#uses=1]
+ %tmp1 = load i8, i8* @g_3 ; <i8> [#uses=1]
%tobool3 = icmp eq i8 %tmp1, 0 ; <i1> [#uses=1]
br i1 %tobool3, label %return, label %if.then
if.then: ; preds = %lor.lhs.false, %entry
- %tmp4 = load i8* @g_3 ; <i8> [#uses=1]
+ %tmp4 = load i8, i8* @g_3 ; <i8> [#uses=1]
%conv5 = sext i8 %tmp4 to i32 ; <i32> [#uses=1]
ret i32 %conv5
diff --git a/llvm/test/CodeGen/X86/2009-08-14-Win64MemoryIndirectArg.ll b/llvm/test/CodeGen/X86/2009-08-14-Win64MemoryIndirectArg.ll
index c3bad6b3b4b..f24c3f8171b 100644
--- a/llvm/test/CodeGen/X86/2009-08-14-Win64MemoryIndirectArg.ll
+++ b/llvm/test/CodeGen/X86/2009-08-14-Win64MemoryIndirectArg.ll
@@ -14,12 +14,12 @@ primitiveTextureFetchBlock: ; preds = %indexCheckBlock
%pointerArithmeticTmp = bitcast %0* %shaderExecutionStatePtr to i8* ; <i8*> [#uses=1]
%pointerArithmeticTmp1 = getelementptr i8, i8* %pointerArithmeticTmp, i64 1808 ; <i8*> [#uses=1]
%pointerArithmeticTmp2 = bitcast i8* %pointerArithmeticTmp1 to %1** ; <%1**> [#uses=1]
- %primitivePtr = load %1** %pointerArithmeticTmp2 ; <%1*> [#uses=1]
+ %primitivePtr = load %1*, %1** %pointerArithmeticTmp2 ; <%1*> [#uses=1]
%pointerArithmeticTmp3 = bitcast %1* %primitivePtr to i8* ; <i8*> [#uses=1]
%pointerArithmeticTmp4 = getelementptr i8, i8* %pointerArithmeticTmp3, i64 19408 ; <i8*> [#uses=1]
%pointerArithmeticTmp5 = bitcast i8* %pointerArithmeticTmp4 to %1** ; <%1**> [#uses=1]
%primitiveTexturePtr = getelementptr %1*, %1** %pointerArithmeticTmp5, i32 %index ; <%1**> [#uses=1]
- %primitiveTexturePtr6 = load %1** %primitiveTexturePtr ; <%1*> [#uses=2]
+ %primitiveTexturePtr6 = load %1*, %1** %primitiveTexturePtr ; <%1*> [#uses=2]
br label %textureCheckBlock
textureCheckBlock: ; preds = %primitiveTextureFetchBlock
@@ -31,7 +31,7 @@ rhoCalculateBlock: ; preds = %textureCheckBlock
%pointerArithmeticTmp7 = bitcast %1* %primitiveTexturePtr6 to i8* ; <i8*> [#uses=1]
%pointerArithmeticTmp8 = getelementptr i8, i8* %pointerArithmeticTmp7, i64 640 ; <i8*> [#uses=1]
%pointerArithmeticTmp9 = bitcast i8* %pointerArithmeticTmp8 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %dimensionsPtr = load <4 x float>* %pointerArithmeticTmp9, align 1 ; <<4 x float>> [#uses=2]
+ %dimensionsPtr = load <4 x float>, <4 x float>* %pointerArithmeticTmp9, align 1 ; <<4 x float>> [#uses=2]
%texDiffDX = fsub <4 x float> %texCoordDX, %texCoord ; <<4 x float>> [#uses=1]
%texDiffDY = fsub <4 x float> %texCoordDY, %texCoord ; <<4 x float>> [#uses=1]
%ddx = fmul <4 x float> %texDiffDX, %dimensionsPtr ; <<4 x float>> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/2009-08-19-LoadNarrowingMiscompile.ll b/llvm/test/CodeGen/X86/2009-08-19-LoadNarrowingMiscompile.ll
index 5f6cf3b9e0b..5926ab4b5c7 100644
--- a/llvm/test/CodeGen/X86/2009-08-19-LoadNarrowingMiscompile.ll
+++ b/llvm/test/CodeGen/X86/2009-08-19-LoadNarrowingMiscompile.ll
@@ -5,7 +5,7 @@
define void @c() nounwind {
; CHECK: movl a+8, %eax
- %srcval1 = load i96* @a, align 4
+ %srcval1 = load i96, i96* @a, align 4
%sroa.store.elt2 = lshr i96 %srcval1, 64
%tmp = trunc i96 %sroa.store.elt2 to i64
; CHECK: movl %eax, b
diff --git a/llvm/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll b/llvm/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll
index 410a42a4287..fac6a669f6e 100644
--- a/llvm/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll
+++ b/llvm/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll
@@ -41,18 +41,18 @@ bb3: ; preds = %bb2, %bb
br i1 undef, label %bb5, label %bb4
bb4: ; preds = %bb3
- %17 = load volatile i32* @uint8, align 4 ; <i32> [#uses=0]
+ %17 = load volatile i32, i32* @uint8, align 4 ; <i32> [#uses=0]
br label %bb5
bb5: ; preds = %bb4, %bb3
- %18 = load volatile i32* @uint8, align 4 ; <i32> [#uses=0]
+ %18 = load volatile i32, i32* @uint8, align 4 ; <i32> [#uses=0]
%19 = sext i8 undef to i16 ; <i16> [#uses=1]
%20 = tail call i32 @func_24(i16 zeroext %19, i8 signext 1) nounwind; <i32> [#uses=0]
br i1 undef, label %return, label %bb6.preheader
bb6.preheader: ; preds = %bb5
%21 = sext i8 %p_52 to i32 ; <i32> [#uses=1]
- %22 = load volatile i32* @uint8, align 4 ; <i32> [#uses=0]
+ %22 = load volatile i32, i32* @uint8, align 4 ; <i32> [#uses=0]
%23 = tail call i32 (...)* @safefuncts(i32 %21, i32 1) nounwind; <i32> [#uses=0]
unreachable
diff --git a/llvm/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll b/llvm/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll
index 9c545dc2f4c..2ec49f486c9 100644
--- a/llvm/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll
+++ b/llvm/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll
@@ -26,7 +26,7 @@ invcont: ; preds = %entry
invcont1: ; preds = %invcont
%6 = getelementptr inbounds %struct.ComplexType, %struct.ComplexType* %2, i64 0, i32 0 ; <i32*> [#uses=1]
- %7 = load i32* %6, align 4 ; <i32> [#uses=1]
+ %7 = load i32, i32* %6, align 4 ; <i32> [#uses=1]
invoke void @booleanAndDataReply(i32 %7, i32 undef, i32 %requestID, i32 undef, i64 undef, i32 undef)
to label %invcont2 unwind label %lpad
diff --git a/llvm/test/CodeGen/X86/2009-09-10-SpillComments.ll b/llvm/test/CodeGen/X86/2009-09-10-SpillComments.ll
index 4aadd8adb46..78ce1cefcd4 100644
--- a/llvm/test/CodeGen/X86/2009-09-10-SpillComments.ll
+++ b/llvm/test/CodeGen/X86/2009-09-10-SpillComments.ll
@@ -21,7 +21,7 @@ entry:
cond_next: ; preds = %entry
%tmp6 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 0 ; <i16*> [#uses=1]
- %tmp7 = load i16* %tmp6 ; <i16> [#uses=2]
+ %tmp7 = load i16, i16* %tmp6 ; <i16> [#uses=2]
%tmp78 = zext i16 %tmp7 to i32 ; <i32> [#uses=2]
%tmp10 = icmp eq i16 %tmp7, 54 ; <i1> [#uses=1]
br i1 %tmp10, label %cond_true13, label %cond_next32
@@ -29,9 +29,9 @@ cond_next: ; preds = %entry
cond_true13: ; preds = %cond_next
%tmp15 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 3 ; <[1 x %struct..0anon]*> [#uses=1]
%tmp1718 = bitcast [1 x %struct..0anon]* %tmp15 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=1]
- %tmp19 = load %struct.rtx_def** %tmp1718 ; <%struct.rtx_def*> [#uses=1]
+ %tmp19 = load %struct.rtx_def*, %struct.rtx_def** %tmp1718 ; <%struct.rtx_def*> [#uses=1]
%tmp20 = getelementptr %struct.rtx_def, %struct.rtx_def* %tmp19, i32 0, i32 0 ; <i16*> [#uses=1]
- %tmp21 = load i16* %tmp20 ; <i16> [#uses=1]
+ %tmp21 = load i16, i16* %tmp20 ; <i16> [#uses=1]
%tmp22 = icmp eq i16 %tmp21, 57 ; <i1> [#uses=1]
br i1 %tmp22, label %cond_true25, label %cond_next32
@@ -41,9 +41,9 @@ cond_true25: ; preds = %cond_true13
cond_next32: ; preds = %cond_true13, %cond_next
%tmp34 = getelementptr [116 x i8*], [116 x i8*]* @rtx_format, i32 0, i32 %tmp78 ; <i8**> [#uses=1]
- %tmp35 = load i8** %tmp34, align 4 ; <i8*> [#uses=1]
+ %tmp35 = load i8*, i8** %tmp34, align 4 ; <i8*> [#uses=1]
%tmp37 = getelementptr [117 x i32], [117 x i32]* @rtx_length, i32 0, i32 %tmp78 ; <i32*> [#uses=1]
- %tmp38 = load i32* %tmp37, align 4 ; <i32> [#uses=1]
+ %tmp38 = load i32, i32* %tmp37, align 4 ; <i32> [#uses=1]
%i.011 = add i32 %tmp38, -1 ; <i32> [#uses=2]
%tmp12513 = icmp sgt i32 %i.011, -1 ; <i1> [#uses=1]
br i1 %tmp12513, label %bb, label %UnifiedReturnBlock
@@ -52,7 +52,7 @@ bb: ; preds = %bb123, %cond_next32
%indvar = phi i32 [ %indvar.next26, %bb123 ], [ 0, %cond_next32 ] ; <i32> [#uses=2]
%i.01.0 = sub i32 %i.011, %indvar ; <i32> [#uses=5]
%tmp42 = getelementptr i8, i8* %tmp35, i32 %i.01.0 ; <i8*> [#uses=2]
- %tmp43 = load i8* %tmp42 ; <i8> [#uses=1]
+ %tmp43 = load i8, i8* %tmp42 ; <i8> [#uses=1]
switch i8 %tmp43, label %bb123 [
i8 101, label %cond_true47
i8 69, label %bb105.preheader
@@ -61,38 +61,38 @@ bb: ; preds = %bb123, %cond_next32
cond_true47: ; preds = %bb
%tmp52 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0 ; <%struct..0anon*> [#uses=1]
%tmp5354 = bitcast %struct..0anon* %tmp52 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=1]
- %tmp55 = load %struct.rtx_def** %tmp5354 ; <%struct.rtx_def*> [#uses=1]
+ %tmp55 = load %struct.rtx_def*, %struct.rtx_def** %tmp5354 ; <%struct.rtx_def*> [#uses=1]
%tmp58 = tail call %struct.rtx_def* @walk_fixup_memory_subreg( %struct.rtx_def* %tmp55, %struct.rtx_def* %insn ) nounwind ; <%struct.rtx_def*> [#uses=1]
%tmp62 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0, i32 0 ; <i32*> [#uses=1]
%tmp58.c = ptrtoint %struct.rtx_def* %tmp58 to i32 ; <i32> [#uses=1]
store i32 %tmp58.c, i32* %tmp62
- %tmp6816 = load i8* %tmp42 ; <i8> [#uses=1]
+ %tmp6816 = load i8, i8* %tmp42 ; <i8> [#uses=1]
%tmp6917 = icmp eq i8 %tmp6816, 69 ; <i1> [#uses=1]
br i1 %tmp6917, label %bb105.preheader, label %bb123
bb105.preheader: ; preds = %cond_true47, %bb
%tmp11020 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0 ; <%struct..0anon*> [#uses=1]
%tmp11111221 = bitcast %struct..0anon* %tmp11020 to %struct.rtvec_def** ; <%struct.rtvec_def**> [#uses=3]
- %tmp11322 = load %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=1]
+ %tmp11322 = load %struct.rtvec_def*, %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=1]
%tmp11423 = getelementptr %struct.rtvec_def, %struct.rtvec_def* %tmp11322, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp11524 = load i32* %tmp11423 ; <i32> [#uses=1]
+ %tmp11524 = load i32, i32* %tmp11423 ; <i32> [#uses=1]
%tmp11625 = icmp eq i32 %tmp11524, 0 ; <i1> [#uses=1]
br i1 %tmp11625, label %bb123, label %bb73
bb73: ; preds = %bb73, %bb105.preheader
%j.019 = phi i32 [ %tmp104, %bb73 ], [ 0, %bb105.preheader ] ; <i32> [#uses=3]
- %tmp81 = load %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=2]
+ %tmp81 = load %struct.rtvec_def*, %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=2]
%tmp92 = getelementptr %struct.rtvec_def, %struct.rtvec_def* %tmp81, i32 0, i32 1, i32 %j.019 ; <%struct..0anon*> [#uses=1]
%tmp9394 = bitcast %struct..0anon* %tmp92 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=1]
- %tmp95 = load %struct.rtx_def** %tmp9394 ; <%struct.rtx_def*> [#uses=1]
+ %tmp95 = load %struct.rtx_def*, %struct.rtx_def** %tmp9394 ; <%struct.rtx_def*> [#uses=1]
%tmp98 = tail call %struct.rtx_def* @walk_fixup_memory_subreg( %struct.rtx_def* %tmp95, %struct.rtx_def* %insn ) nounwind ; <%struct.rtx_def*> [#uses=1]
%tmp101 = getelementptr %struct.rtvec_def, %struct.rtvec_def* %tmp81, i32 0, i32 1, i32 %j.019, i32 0 ; <i32*> [#uses=1]
%tmp98.c = ptrtoint %struct.rtx_def* %tmp98 to i32 ; <i32> [#uses=1]
store i32 %tmp98.c, i32* %tmp101
%tmp104 = add i32 %j.019, 1 ; <i32> [#uses=2]
- %tmp113 = load %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=1]
+ %tmp113 = load %struct.rtvec_def*, %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=1]
%tmp114 = getelementptr %struct.rtvec_def, %struct.rtvec_def* %tmp113, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp115 = load i32* %tmp114 ; <i32> [#uses=1]
+ %tmp115 = load i32, i32* %tmp114 ; <i32> [#uses=1]
%tmp116 = icmp ult i32 %tmp104, %tmp115 ; <i1> [#uses=1]
br i1 %tmp116, label %bb73, label %bb123
diff --git a/llvm/test/CodeGen/X86/2009-09-16-CoalescerBug.ll b/llvm/test/CodeGen/X86/2009-09-16-CoalescerBug.ll
index 18b5a179c9e..a18a30ad5cd 100644
--- a/llvm/test/CodeGen/X86/2009-09-16-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2009-09-16-CoalescerBug.ll
@@ -32,7 +32,7 @@ lor.lhs.false: ; preds = %for.body
br i1 %cmp16, label %for.end41, label %for.cond17.preheader
for.cond17.preheader: ; preds = %lor.lhs.false
- %tmp24 = load i32* @boot_cpu_id ; <i32> [#uses=1]
+ %tmp24 = load i32, i32* @boot_cpu_id ; <i32> [#uses=1]
%shr26 = ashr i32 %tmp24, %and ; <i32> [#uses=1]
br label %for.body20
diff --git a/llvm/test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll b/llvm/test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll
index 809e9f7fd9b..840b3171f68 100644
--- a/llvm/test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll
+++ b/llvm/test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll
@@ -13,11 +13,11 @@ bb: ; preds = %bb, %entry
%sum.04 = phi i32 [ 0, %entry ], [ %10, %bb ] ; <i32> [#uses=1]
%1 = mul i32 %i.03, %As ; <i32> [#uses=1]
%2 = getelementptr i16, i16* %A, i32 %1 ; <i16*> [#uses=1]
- %3 = load i16* %2, align 2 ; <i16> [#uses=1]
+ %3 = load i16, i16* %2, align 2 ; <i16> [#uses=1]
%4 = sext i16 %3 to i32 ; <i32> [#uses=1]
%5 = mul i32 %i.03, %Bs ; <i32> [#uses=1]
%6 = getelementptr i16, i16* %B, i32 %5 ; <i16*> [#uses=1]
- %7 = load i16* %6, align 2 ; <i16> [#uses=1]
+ %7 = load i16, i16* %6, align 2 ; <i16> [#uses=1]
%8 = sext i16 %7 to i32 ; <i32> [#uses=1]
%9 = mul i32 %8, %4 ; <i32> [#uses=1]
%10 = add i32 %9, %sum.04 ; <i32> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/2009-09-22-CoalescerBug.ll b/llvm/test/CodeGen/X86/2009-09-22-CoalescerBug.ll
index 33f35f881e8..e469a600432 100644
--- a/llvm/test/CodeGen/X86/2009-09-22-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2009-09-22-CoalescerBug.ll
@@ -54,7 +54,7 @@ bb9: ; preds = %quantum_new_qureg.e
unreachable
bb.i37: ; preds = %bb.i37, %bb11.thread
- %0 = load i64* undef, align 8 ; <i64> [#uses=1]
+ %0 = load i64, i64* undef, align 8 ; <i64> [#uses=1]
%1 = shl i64 %0, %.cast.i ; <i64> [#uses=1]
store i64 %1, i64* undef, align 8
br i1 undef, label %bb.i37, label %quantum_addscratch.exit
diff --git a/llvm/test/CodeGen/X86/2009-10-19-EmergencySpill.ll b/llvm/test/CodeGen/X86/2009-10-19-EmergencySpill.ll
index 73133ad60d4..ec73f5a2c82 100644
--- a/llvm/test/CodeGen/X86/2009-10-19-EmergencySpill.ll
+++ b/llvm/test/CodeGen/X86/2009-10-19-EmergencySpill.ll
@@ -8,12 +8,12 @@
define fastcc void @nodeOverwriteCell(%struct.Rtree* nocapture %pRtree, %struct.RtreeNode* nocapture %pNode, %struct.RtreeCell* nocapture %pCell, i32 %iCell) nounwind ssp {
entry:
- %0 = load i8** undef, align 8 ; <i8*> [#uses=2]
- %1 = load i32* undef, align 8 ; <i32> [#uses=1]
+ %0 = load i8*, i8** undef, align 8 ; <i8*> [#uses=2]
+ %1 = load i32, i32* undef, align 8 ; <i32> [#uses=1]
%2 = mul i32 %1, %iCell ; <i32> [#uses=1]
%3 = add nsw i32 %2, 4 ; <i32> [#uses=1]
%4 = sext i32 %3 to i64 ; <i64> [#uses=2]
- %5 = load i64* null, align 8 ; <i64> [#uses=2]
+ %5 = load i64, i64* null, align 8 ; <i64> [#uses=2]
%6 = lshr i64 %5, 48 ; <i64> [#uses=1]
%7 = trunc i64 %6 to i8 ; <i8> [#uses=1]
store i8 %7, i8* undef, align 1
@@ -36,12 +36,12 @@ bb: ; preds = %bb, %bb.nph
%tmp = shl i64 %indvar, 2 ; <i64> [#uses=1]
%tmp26 = add i64 %tmp, %tmp25 ; <i64> [#uses=1]
%scevgep27 = getelementptr i8, i8* %0, i64 %tmp26 ; <i8*> [#uses=1]
- %12 = load i32* %scevgep12, align 4 ; <i32> [#uses=1]
+ %12 = load i32, i32* %scevgep12, align 4 ; <i32> [#uses=1]
%13 = lshr i32 %12, 24 ; <i32> [#uses=1]
%14 = trunc i32 %13 to i8 ; <i8> [#uses=1]
store i8 %14, i8* undef, align 1
store i8 undef, i8* %scevgep27, align 1
- %15 = load i32* %11, align 4 ; <i32> [#uses=1]
+ %15 = load i32, i32* %11, align 4 ; <i32> [#uses=1]
%16 = shl i32 %15, 1 ; <i32> [#uses=1]
%17 = icmp sgt i32 %16, undef ; <i1> [#uses=1]
%indvar.next = add i64 %indvar, 1 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll b/llvm/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll
index 006a02a43b1..2d5052d49de 100644
--- a/llvm/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll
+++ b/llvm/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll
@@ -32,7 +32,7 @@ if.end.i: ; preds = %entry
br label %lt_init.exit
lt_init.exit: ; preds = %if.end.i, %if.then.i
- %3 = load i32* %retval.i ; <i32> [#uses=1]
+ %3 = load i32, i32* %retval.i ; <i32> [#uses=1]
call void asm sideeffect "cpuid", "~{ax},~{bx},~{cx},~{dx},~{memory},~{dirflag},~{fpsr},~{flags}"() nounwind
%4 = call i64 @llvm.readcyclecounter() nounwind ; <i64> [#uses=1]
%5 = sub i64 %4, %2 ; <i64> [#uses=1]
@@ -50,7 +50,7 @@ if.then: ; preds = %lt_init.exit
if.end: ; preds = %if.then, %lt_init.exit
store i32 0, i32* %retval
- %7 = load i32* %retval ; <i32> [#uses=1]
+ %7 = load i32, i32* %retval ; <i32> [#uses=1]
tail call void asm sideeffect "cpuid", "~{ax},~{bx},~{cx},~{dx},~{memory},~{dirflag},~{fpsr},~{flags}"() nounwind
%8 = tail call i64 @llvm.readcyclecounter() nounwind ; <i64> [#uses=1]
%9 = sub i64 %8, %0 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-10-25-RewriterBug.ll b/llvm/test/CodeGen/X86/2009-10-25-RewriterBug.ll
index 4fbecfd7875..be18186463d 100644
--- a/llvm/test/CodeGen/X86/2009-10-25-RewriterBug.ll
+++ b/llvm/test/CodeGen/X86/2009-10-25-RewriterBug.ll
@@ -95,7 +95,7 @@ bb41.i: ; preds = %bb40.i
bb45.i: ; preds = %bb41.i
%33 = getelementptr inbounds %struct.StorablePicture, %struct.StorablePicture* %26, i64 0, i32 5, i64 undef, i64 %32, i64 undef ; <i64*> [#uses=1]
- %34 = load i64* %33, align 8 ; <i64> [#uses=1]
+ %34 = load i64, i64* %33, align 8 ; <i64> [#uses=1]
br label %bb47.i
bb47.i: ; preds = %bb45.i, %bb41.i
@@ -110,9 +110,9 @@ bb58.i: ; preds = %bb57.i
br label %bb60.i
bb60.i: ; preds = %bb58.i, %bb57.i
- %35 = load i64*** undef, align 8 ; <i64**> [#uses=1]
+ %35 = load i64**, i64*** undef, align 8 ; <i64**> [#uses=1]
%scevgep256.i = getelementptr i64*, i64** %35, i64 %indvar248.i ; <i64**> [#uses=1]
- %36 = load i64** %scevgep256.i, align 8 ; <i64*> [#uses=1]
+ %36 = load i64*, i64** %scevgep256.i, align 8 ; <i64*> [#uses=1]
%scevgep243.i = getelementptr i64, i64* %36, i64 undef ; <i64*> [#uses=1]
store i64 -1, i64* %scevgep243.i, align 8
br label %bb64.i
@@ -160,7 +160,7 @@ bb101.i: ; preds = %bb82.i
br label %bb102.i
bb102.i: ; preds = %bb101.i, %bb83.i
- %48 = load %struct.StorablePicture** %0, align 8 ; <%struct.StorablePicture*> [#uses=2]
+ %48 = load %struct.StorablePicture*, %struct.StorablePicture** %0, align 8 ; <%struct.StorablePicture*> [#uses=2]
br i1 undef, label %bb81.i, label %bb104.i
bb104.i: ; preds = %bb102.i, %bb80.i
diff --git a/llvm/test/CodeGen/X86/2009-11-16-MachineLICM.ll b/llvm/test/CodeGen/X86/2009-11-16-MachineLICM.ll
index 987b6e48771..80f4f74abd0 100644
--- a/llvm/test/CodeGen/X86/2009-11-16-MachineLICM.ll
+++ b/llvm/test/CodeGen/X86/2009-11-16-MachineLICM.ll
@@ -25,13 +25,13 @@ bb: ; preds = %bb, %bb.nph
%tmp1318 = or i64 %tmp9, 3 ; <i64> [#uses=1]
%scevgep14 = getelementptr float, float* %x, i64 %tmp1318 ; <float*> [#uses=1]
%x_addr.03 = getelementptr float, float* %x, i64 %tmp9 ; <float*> [#uses=1]
- %1 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 0), align 16 ; <float> [#uses=1]
+ %1 = load float, float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 0), align 16 ; <float> [#uses=1]
store float %1, float* %x_addr.03, align 4
- %2 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 1), align 4 ; <float> [#uses=1]
+ %2 = load float, float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 1), align 4 ; <float> [#uses=1]
store float %2, float* %scevgep, align 4
- %3 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 2), align 8 ; <float> [#uses=1]
+ %3 = load float, float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 2), align 8 ; <float> [#uses=1]
store float %3, float* %scevgep12, align 4
- %4 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 3), align 4 ; <float> [#uses=1]
+ %4 = load float, float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 3), align 4 ; <float> [#uses=1]
store float %4, float* %scevgep14, align 4
%indvar.next = add i64 %indvar, 1 ; <i64> [#uses=2]
%exitcond = icmp eq i64 %indvar.next, %tmp ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2009-11-25-ImpDefBug.ll b/llvm/test/CodeGen/X86/2009-11-25-ImpDefBug.ll
index 396638fb1db..0bf13de6127 100644
--- a/llvm/test/CodeGen/X86/2009-11-25-ImpDefBug.ll
+++ b/llvm/test/CodeGen/X86/2009-11-25-ImpDefBug.ll
@@ -48,7 +48,7 @@ lpad: ; preds = %bb1.i.fragment.cl,
%.SV10.phi807 = phi i8* [ undef, %bb1.i.fragment.cl ], [ undef, %bb1.i.fragment ], [ undef, %bb5 ] ; <i8*> [#uses=1]
%exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
cleanup
- %1 = load i8* %.SV10.phi807, align 8 ; <i8> [#uses=0]
+ %1 = load i8, i8* %.SV10.phi807, align 8 ; <i8> [#uses=0]
br i1 undef, label %meshBB81.bbcl.disp, label %bb13.fragment.bbcl.disp
bb.i1: ; preds = %bb.i.i.bbcl.disp
diff --git a/llvm/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll b/llvm/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll
index 5c10c55ea3e..e191a8a3772 100644
--- a/llvm/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll
+++ b/llvm/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll
@@ -9,8 +9,8 @@ entry:
%b = alloca i32 ; <i32*> [#uses=2]
%a = alloca i32 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = load i32* %b, align 4 ; <i32> [#uses=1]
- %1 = load i32* %b, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %b, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* %b, align 4 ; <i32> [#uses=1]
%asmtmp = call i32 asm "$0 = foo ($1, $2)", "=&{ax},%0,r,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1) nounwind ; <i32> [#uses=1]
store i32 %asmtmp, i32* %a
br label %return
@@ -30,8 +30,8 @@ entry:
%b = alloca i32 ; <i32*> [#uses=2]
%a = alloca i32 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = load i32* %b, align 4 ; <i32> [#uses=1]
- %1 = load i32* %b, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %b, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* %b, align 4 ; <i32> [#uses=1]
%asmtmp = call i32 asm "$0 = foo ($1, $2)", "=&r,%0,r,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1) nounwind ; <i32> [#uses=1]
store i32 %asmtmp, i32* %a
br label %return
diff --git a/llvm/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll b/llvm/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll
index 9a84fe2743b..97d97872aa6 100644
--- a/llvm/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll
+++ b/llvm/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll
@@ -30,13 +30,13 @@ define void @leaf() nounwind {
br label %"@CFE_debug_label_0"
"@CFE_debug_label_0": ; preds = %"file foo2.c, line 14, bb2"
- %r = load %test** bitcast ([1 x i64]* @ptr to %test**), align 8 ; <%test*> [#uses=1]
+ %r = load %test*, %test** bitcast ([1 x i64]* @ptr to %test**), align 8 ; <%test*> [#uses=1]
store %test* %r, %test** %p, align 8
br label %"@CFE_debug_label_2"
"@CFE_debug_label_2": ; preds = %"@CFE_debug_label_0"
- %r1 = load %link** bitcast ([1 x i64]* @link_ptr to %link**), align 8 ; <%link*> [#uses=1]
- %r2 = load %test** %p, align 8 ; <%test*> [#uses=1]
+ %r1 = load %link*, %link** bitcast ([1 x i64]* @link_ptr to %link**), align 8 ; <%link*> [#uses=1]
+ %r2 = load %test*, %test** %p, align 8 ; <%test*> [#uses=1]
%r3 = ptrtoint %test* %r2 to i64 ; <i64> [#uses=1]
%r4 = inttoptr i64 %r3 to %link** ; <%link**> [#uses=1]
%r5 = getelementptr %link*, %link** %r4, i64 1 ; <%link**> [#uses=1]
@@ -44,7 +44,7 @@ define void @leaf() nounwind {
br label %"@CFE_debug_label_3"
"@CFE_debug_label_3": ; preds = %"@CFE_debug_label_2"
- %r6 = load %test** %p, align 8 ; <%test*> [#uses=1]
+ %r6 = load %test*, %test** %p, align 8 ; <%test*> [#uses=1]
%r7 = ptrtoint %test* %r6 to i64 ; <i64> [#uses=1]
%r8 = inttoptr i64 %r7 to %link* ; <%link*> [#uses=1]
%r9 = getelementptr %link, %link* %r8, i64 1 ; <%link*> [#uses=1]
@@ -52,7 +52,7 @@ define void @leaf() nounwind {
br label %"@CFE_debug_label_4"
"@CFE_debug_label_4": ; preds = %"@CFE_debug_label_3"
- %r10 = load %test** %p, align 8 ; <%test*> [#uses=1]
+ %r10 = load %test*, %test** %p, align 8 ; <%test*> [#uses=1]
%r11 = ptrtoint %test* %r10 to i64 ; <i64> [#uses=1]
%r12 = inttoptr i64 %r11 to i32* ; <i32*> [#uses=1]
store i32 1, i32* %r12, align 4
diff --git a/llvm/test/CodeGen/X86/20090313-signext.ll b/llvm/test/CodeGen/X86/20090313-signext.ll
index b8effa67735..3ea13164112 100644
--- a/llvm/test/CodeGen/X86/20090313-signext.ll
+++ b/llvm/test/CodeGen/X86/20090313-signext.ll
@@ -10,7 +10,7 @@ entry:
%0 = tail call signext i16 @h() nounwind
%1 = sext i16 %0 to i32
tail call void @g(i32 %1) nounwind
- %2 = load i16* @x, align 2
+ %2 = load i16, i16* @x, align 2
ret i16 %2
}
diff --git a/llvm/test/CodeGen/X86/2010-01-13-OptExtBug.ll b/llvm/test/CodeGen/X86/2010-01-13-OptExtBug.ll
index 106848184d3..3ecf845f093 100644
--- a/llvm/test/CodeGen/X86/2010-01-13-OptExtBug.ll
+++ b/llvm/test/CodeGen/X86/2010-01-13-OptExtBug.ll
@@ -8,14 +8,14 @@ entry:
%call = tail call i8* @_Z15uprv_malloc_4_2v()
%0 = bitcast i8* %call to double*
%tmp = getelementptr inbounds %class.OlsonTimeZone, %class.OlsonTimeZone* %this, i32 0, i32 3
- %tmp2 = load i16* %tmp
+ %tmp2 = load i16, i16* %tmp
%tmp525 = getelementptr inbounds %class.OlsonTimeZone, %class.OlsonTimeZone* %this, i32 0, i32 0
- %tmp626 = load i16* %tmp525
+ %tmp626 = load i16, i16* %tmp525
%cmp27 = icmp slt i16 %tmp2, %tmp626
br i1 %cmp27, label %bb.nph, label %for.end
for.cond:
- %tmp6 = load i16* %tmp5
+ %tmp6 = load i16, i16* %tmp5
%cmp = icmp slt i16 %inc, %tmp6
%indvar.next = add i32 %indvar, 1
br i1 %cmp, label %for.body, label %for.end
@@ -34,9 +34,9 @@ for.body:
%tmp30 = add i32 %indvar, %tmp29
%tmp33 = add i32 %indvar, %tmp32
%inc = trunc i32 %tmp33 to i16
- %tmp11 = load i8** %tmp10
+ %tmp11 = load i8*, i8** %tmp10
%arrayidx = getelementptr i8, i8* %tmp11, i32 %tmp30
- %tmp12 = load i8* %arrayidx
+ %tmp12 = load i8, i8* %arrayidx
br label %for.cond
for.end:
diff --git a/llvm/test/CodeGen/X86/2010-01-15-SelectionDAGCycle.ll b/llvm/test/CodeGen/X86/2010-01-15-SelectionDAGCycle.ll
index 43787860f90..6aba39e04bc 100644
--- a/llvm/test/CodeGen/X86/2010-01-15-SelectionDAGCycle.ll
+++ b/llvm/test/CodeGen/X86/2010-01-15-SelectionDAGCycle.ll
@@ -11,12 +11,12 @@ define void @numvec_(i32* noalias %ncelet, i32* noalias %ncel, i32* noalias %nfa
"file bug754399.f90, line 184, in inner vector loop at depth 0, bb164": ; preds = %"file bug754399.f90, line 184, in inner vector loop at depth 0, bb164", %"file bug754399.f90, line 1, bb1"
%tmp641 = add i64 0, 48 ; <i64> [#uses=1]
%tmp641642 = inttoptr i64 %tmp641 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
- %r1258 = load <4 x i32>* %tmp641642, align 4 ; <<4 x i32>> [#uses=2]
+ %r1258 = load <4 x i32>, <4 x i32>* %tmp641642, align 4 ; <<4 x i32>> [#uses=2]
%r1295 = extractelement <4 x i32> %r1258, i32 3 ; <i32> [#uses=1]
%r1296 = sext i32 %r1295 to i64 ; <i64> [#uses=1]
%r1297 = add i64 %r1296, -1 ; <i64> [#uses=1]
%r1298183 = getelementptr [0 x i32], [0 x i32]* %ismbs, i64 0, i64 %r1297 ; <i32*> [#uses=1]
- %r1298184 = load i32* %r1298183, align 4 ; <i32> [#uses=1]
+ %r1298184 = load i32, i32* %r1298183, align 4 ; <i32> [#uses=1]
%r1301 = extractelement <4 x i32> %r1037, i32 3 ; <i32> [#uses=1]
%r1302 = mul i32 %r1298184, %r1301 ; <i32> [#uses=1]
%r1306 = insertelement <4 x i32> zeroinitializer, i32 %r1302, i32 3 ; <<4 x i32>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2010-01-18-DbgValue.ll b/llvm/test/CodeGen/X86/2010-01-18-DbgValue.ll
index f95499829eb..b49b7430acf 100644
--- a/llvm/test/CodeGen/X86/2010-01-18-DbgValue.ll
+++ b/llvm/test/CodeGen/X86/2010-01-18-DbgValue.ll
@@ -15,14 +15,14 @@ entry:
call void @llvm.dbg.declare(metadata %struct.Rect* %my_r0, metadata !0, metadata !{!"0x102"}), !dbg !15
%1 = getelementptr inbounds %struct.Rect, %struct.Rect* %my_r0, i32 0, i32 0, !dbg !16 ; <%struct.Pt*> [#uses=1]
%2 = getelementptr inbounds %struct.Pt, %struct.Pt* %1, i32 0, i32 0, !dbg !16 ; <double*> [#uses=1]
- %3 = load double* %2, align 8, !dbg !16 ; <double> [#uses=1]
+ %3 = load double, double* %2, align 8, !dbg !16 ; <double> [#uses=1]
store double %3, double* %0, align 8, !dbg !16
- %4 = load double* %0, align 8, !dbg !16 ; <double> [#uses=1]
+ %4 = load double, double* %0, align 8, !dbg !16 ; <double> [#uses=1]
store double %4, double* %retval, align 8, !dbg !16
br label %return, !dbg !16
return: ; preds = %entry
- %retval1 = load double* %retval, !dbg !16 ; <double> [#uses=1]
+ %retval1 = load double, double* %retval, !dbg !16 ; <double> [#uses=1]
ret double %retval1, !dbg !16
}
diff --git a/llvm/test/CodeGen/X86/2010-01-19-OptExtBug.ll b/llvm/test/CodeGen/X86/2010-01-19-OptExtBug.ll
index ec24e73c34a..def8dd31978 100644
--- a/llvm/test/CodeGen/X86/2010-01-19-OptExtBug.ll
+++ b/llvm/test/CodeGen/X86/2010-01-19-OptExtBug.ll
@@ -21,7 +21,7 @@ bb7: ; preds = %bb6
unreachable
bb9: ; preds = %bb6
- %0 = load i8* undef, align 1 ; <i8> [#uses=3]
+ %0 = load i8, i8* undef, align 1 ; <i8> [#uses=3]
br i1 undef, label %bb12, label %bb10
bb10: ; preds = %bb9
diff --git a/llvm/test/CodeGen/X86/2010-02-04-SchedulerBug.ll b/llvm/test/CodeGen/X86/2010-02-04-SchedulerBug.ll
index 9b1430f2249..51686ea3f77 100644
--- a/llvm/test/CodeGen/X86/2010-02-04-SchedulerBug.ll
+++ b/llvm/test/CodeGen/X86/2010-02-04-SchedulerBug.ll
@@ -6,13 +6,13 @@
define void @t(i32 %cNum, i64 %max) nounwind optsize ssp noimplicitfloat {
entry:
- %0 = load %struct.b_t** null, align 4 ; <%struct.b_t*> [#uses=1]
+ %0 = load %struct.b_t*, %struct.b_t** null, align 4 ; <%struct.b_t*> [#uses=1]
%1 = getelementptr inbounds %struct.b_t, %struct.b_t* %0, i32 %cNum, i32 5 ; <i64*> [#uses=1]
- %2 = load i64* %1, align 4 ; <i64> [#uses=1]
+ %2 = load i64, i64* %1, align 4 ; <i64> [#uses=1]
%3 = icmp ult i64 %2, %max ; <i1> [#uses=1]
%4 = getelementptr inbounds %struct.a_t, %struct.a_t* null, i32 0, i32 7 ; <i64**> [#uses=1]
- %5 = load i64** %4, align 4 ; <i64*> [#uses=0]
- %6 = load i64* null, align 4 ; <i64> [#uses=1]
+ %5 = load i64*, i64** %4, align 4 ; <i64*> [#uses=0]
+ %6 = load i64, i64* null, align 4 ; <i64> [#uses=1]
br i1 %3, label %bb2, label %bb
bb: ; preds = %entry
diff --git a/llvm/test/CodeGen/X86/2010-02-11-NonTemporal.ll b/llvm/test/CodeGen/X86/2010-02-11-NonTemporal.ll
index f9cca8c70c7..5d74db1160c 100644
--- a/llvm/test/CodeGen/X86/2010-02-11-NonTemporal.ll
+++ b/llvm/test/CodeGen/X86/2010-02-11-NonTemporal.ll
@@ -11,8 +11,8 @@ define void @sub_(i32* noalias %n) {
%i = alloca i32, align 4
%"$LCS_0" = alloca i64, align 8
%"$LCS_S2" = alloca <2 x double>, align 16
- %r9 = load <2 x double>* %"$LCS_S2", align 8
- %r10 = load i64* %"$LCS_0", align 8
+ %r9 = load <2 x double>, <2 x double>* %"$LCS_S2", align 8
+ %r10 = load i64, i64* %"$LCS_0", align 8
%r11 = inttoptr i64 %r10 to <2 x double>*
store <2 x double> %r9, <2 x double>* %r11, align 16, !nontemporal !0
br label %"file movnt.f90, line 18, bb5"
diff --git a/llvm/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll b/llvm/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll
index 739a27a3e17..193f8cfcd52 100644
--- a/llvm/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll
+++ b/llvm/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll
@@ -228,7 +228,7 @@ entry:
unreachable
"67": ; preds = %"65"
- %1 = load i32* undef, align 4 ; <i32> [#uses=0]
+ %1 = load i32, i32* undef, align 4 ; <i32> [#uses=0]
br label %"100"
"82": ; preds = %"61", %"60", %"59"
diff --git a/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll b/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll
index d39023dc725..c3b12edf4ce 100644
--- a/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll
+++ b/llvm/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll
@@ -23,23 +23,23 @@ declare fastcc void @l298(i32 %r10, i32 %r9, i32 %r4) noreturn nounwind
define fastcc void @l186(%tupl* %r1) noreturn nounwind {
entry:
%ptr1 = getelementptr %tupl, %tupl* %r1, i32 0, i32 0
- %r2 = load i32* %ptr1
+ %r2 = load i32, i32* %ptr1
%ptr3 = getelementptr %tupl, %tupl* %r1, i32 0, i32 1
- %r3 = load i32* %ptr3
+ %r3 = load i32, i32* %ptr3
%ptr5 = getelementptr %tupl, %tupl* %r1, i32 0, i32 2
- %r4 = load i32* %ptr5
+ %r4 = load i32, i32* %ptr5
%ptr7 = getelementptr %tupl, %tupl* %r1, i32 0, i32 3
- %r5 = load i32* %ptr7
+ %r5 = load i32, i32* %ptr7
%ptr9 = getelementptr %tupl, %tupl* %r1, i32 0, i32 4
- %r6 = load i32* %ptr9
+ %r6 = load i32, i32* %ptr9
%ptr11 = getelementptr %tupl, %tupl* %r1, i32 0, i32 5
- %r7 = load i32* %ptr11
+ %r7 = load i32, i32* %ptr11
%ptr13 = getelementptr %tupl, %tupl* %r1, i32 0, i32 6
- %r8 = load i32* %ptr13
+ %r8 = load i32, i32* %ptr13
%ptr15 = getelementptr %tupl, %tupl* %r1, i32 0, i32 7
- %r9 = load i32* %ptr15
+ %r9 = load i32, i32* %ptr15
%ptr17 = getelementptr %tupl, %tupl* %r1, i32 0, i32 8
- %r10 = load i32* %ptr17
+ %r10 = load i32, i32* %ptr17
%cond = icmp eq i32 %r10, 3
br i1 %cond, label %true, label %false
diff --git a/llvm/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll b/llvm/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll
index 4a26ba088e5..4e4e006fd62 100644
--- a/llvm/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll
+++ b/llvm/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll
@@ -16,7 +16,7 @@ entry:
br i1 undef, label %for.end, label %for.body
for.body: ; preds = %if.end40, %entry
- %tmp6 = load i8* undef, align 2 ; <i8> [#uses=3]
+ %tmp6 = load i8, i8* undef, align 2 ; <i8> [#uses=3]
%conv11 = sext i8 %tmp6 to i64 ; <i64> [#uses=1]
%cmp15 = icmp slt i64 %conv11, undef ; <i1> [#uses=1]
br i1 %cmp15, label %if.end, label %if.then
@@ -29,7 +29,7 @@ if.then: ; preds = %for.body
if.end: ; preds = %if.then, %for.body
%index.0 = phi i8 [ 0, %if.then ], [ %tmp6, %for.body ] ; <i8> [#uses=1]
store i8 %index.0, i8* undef
- %tmp24 = load i8* undef ; <i8> [#uses=2]
+ %tmp24 = load i8, i8* undef ; <i8> [#uses=2]
br i1 undef, label %if.end40, label %if.then36
if.then36: ; preds = %if.end
diff --git a/llvm/test/CodeGen/X86/2010-03-17-ISelBug.ll b/llvm/test/CodeGen/X86/2010-03-17-ISelBug.ll
index febf1dbdd28..e1d3c10a80e 100644
--- a/llvm/test/CodeGen/X86/2010-03-17-ISelBug.ll
+++ b/llvm/test/CodeGen/X86/2010-03-17-ISelBug.ll
@@ -9,7 +9,7 @@
define i32* @t() align 2 nounwind {
entry:
%operation = alloca %struct.PPOperation, align 8 ; <%struct.PPOperation*> [#uses=2]
- %0 = load i32*** null, align 4 ; [#uses=1]
+ %0 = load i32**, i32*** null, align 4 ; [#uses=1]
%1 = ptrtoint i32** %0 to i32 ; <i32> [#uses=1]
%2 = sub nsw i32 %1, undef ; <i32> [#uses=2]
br i1 false, label %bb20, label %bb.nph380
diff --git a/llvm/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll b/llvm/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll
index 864ebf120f6..2ba4d9aaded 100644
--- a/llvm/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll
+++ b/llvm/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll
@@ -19,7 +19,7 @@ invcont64: ; preds = %bb58
br i1 undef, label %invcont65, label %bb.i.i
bb.i.i: ; preds = %invcont64
- %1 = load <4 x float>* undef, align 16 ; <<4 x float>> [#uses=5]
+ %1 = load <4 x float>, <4 x float>* undef, align 16 ; <<4 x float>> [#uses=5]
br i1 undef, label %bb.nph.i.i, label %invcont65
bb.nph.i.i: ; preds = %bb.i.i
diff --git a/llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll b/llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll
index 6d25317697e..5adf99e3e47 100644
--- a/llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2010-04-08-CoalescerBug.ll
@@ -15,7 +15,7 @@ entry:
; CHECK: addq $12, %rsi
%BitValueArray = alloca [32 x i32], align 4
%tmp2 = getelementptr inbounds %struct.F, %struct.F* %this, i64 0, i32 0
- %tmp3 = load %struct.FC** %tmp2, align 8
+ %tmp3 = load %struct.FC*, %struct.FC** %tmp2, align 8
%tmp4 = getelementptr inbounds %struct.FC, %struct.FC* %tmp3, i64 0, i32 1, i64 0
%tmp5 = bitcast [32 x i32]* %BitValueArray to i8*
%tmp6 = bitcast i32* %tmp4 to i8*
diff --git a/llvm/test/CodeGen/X86/2010-04-13-AnalyzeBranchCrash.ll b/llvm/test/CodeGen/X86/2010-04-13-AnalyzeBranchCrash.ll
index fadbd219198..6c8dbbe5eff 100644
--- a/llvm/test/CodeGen/X86/2010-04-13-AnalyzeBranchCrash.ll
+++ b/llvm/test/CodeGen/X86/2010-04-13-AnalyzeBranchCrash.ll
@@ -12,7 +12,7 @@ entry:
]
if.then: ; preds = %entry, %entry
- %tmp69 = load float* null, align 4 ; <float> [#uses=1]
+ %tmp69 = load float, float* null, align 4 ; <float> [#uses=1]
%cmp19 = icmp eq %1* null, %scroller ; <i1> [#uses=2]
%cond = select i1 %cmp19, float %tmp69, float 0.000000e+00 ; <float> [#uses=1]
%call36 = call i64 bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i64 (i8*, i8*)*)(i8* undef, i8* undef) nounwind optsize ; <i64> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll b/llvm/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
index 08ee91ba30d..aeb2f2c398d 100644
--- a/llvm/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
+++ b/llvm/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
@@ -47,7 +47,7 @@ try.handler: ; preds = %entry
match: ; preds = %try.handler
%4 = call i8* @__cxa_begin_catch(i8* %exc1) ; <i8*> [#uses=1]
%5 = bitcast i8* %4 to i32* ; <i32*> [#uses=1]
- %6 = load i32* %5 ; <i32> [#uses=1]
+ %6 = load i32, i32* %5 ; <i32> [#uses=1]
store i32 %6, i32* %0
%call = invoke i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), %struct.S* %s2)
to label %invoke.cont2 unwind label %match.handler ; <i32> [#uses=0]
@@ -80,7 +80,7 @@ invoke.cont5: ; preds = %match.end
br label %cleanup.switch
cleanup.switch: ; preds = %invoke.cont5
- %tmp = load i32* %cleanup.dst ; <i32> [#uses=1]
+ %tmp = load i32, i32* %cleanup.dst ; <i32> [#uses=1]
switch i32 %tmp, label %cleanup.end [
i32 1, label %cleanup.pad
i32 2, label %cleanup.pad4
@@ -99,7 +99,7 @@ finally: ; preds = %catch.next, %cleanu
br label %cleanup.switch9
cleanup.switch9: ; preds = %finally
- %tmp8 = load i32* %cleanup.dst7 ; <i32> [#uses=1]
+ %tmp8 = load i32, i32* %cleanup.dst7 ; <i32> [#uses=1]
switch i32 %tmp8, label %cleanup.end10 [
i32 1, label %finally.end
i32 2, label %finally.throw
@@ -109,7 +109,7 @@ cleanup.end10: ; preds = %cleanup.switch9
br label %finally.end
finally.throw: ; preds = %cleanup.switch9
- %8 = load i8** %_rethrow ; <i8*> [#uses=1]
+ %8 = load i8*, i8** %_rethrow ; <i8*> [#uses=1]
call void @_Unwind_Resume_or_Rethrow(i8* %8)
unreachable
@@ -117,9 +117,9 @@ finally.end: ; preds = %cleanup.end10, %cle
%tmp11 = getelementptr inbounds %struct.S, %struct.S* %s1, i32 0, i32 0 ; <[2 x i8*]*> [#uses=1]
%arraydecay = getelementptr inbounds [2 x i8*], [2 x i8*]* %tmp11, i32 0, i32 0 ; <i8**> [#uses=1]
%arrayidx = getelementptr inbounds i8*, i8** %arraydecay, i32 1 ; <i8**> [#uses=1]
- %tmp12 = load i8** %arrayidx ; <i8*> [#uses=1]
+ %tmp12 = load i8*, i8** %arrayidx ; <i8*> [#uses=1]
store i8* %tmp12, i8** %retval
- %9 = load i8** %retval ; <i8*> [#uses=1]
+ %9 = load i8*, i8** %retval ; <i8*> [#uses=1]
ret i8* %9
}
diff --git a/llvm/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll b/llvm/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll
index 86be390b822..5a9c0210934 100644
--- a/llvm/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll
+++ b/llvm/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll
@@ -23,9 +23,9 @@ entry:
store i8* %asmresult, i8** %ret
store i8* %asmresult1, i8** %p
store i32 %asmresult2, i32* %t
- %tmp = load i8** %ret ; <i8*> [#uses=1]
+ %tmp = load i8*, i8** %ret ; <i8*> [#uses=1]
store i8* %tmp, i8** %retval
- %1 = load i8** %retval ; <i8*> [#uses=1]
+ %1 = load i8*, i8** %retval ; <i8*> [#uses=1]
ret i8* %1
}
diff --git a/llvm/test/CodeGen/X86/2010-05-07-ldconvert.ll b/llvm/test/CodeGen/X86/2010-05-07-ldconvert.ll
index 0ba6a8fd6d7..a0c3c95ef60 100644
--- a/llvm/test/CodeGen/X86/2010-05-07-ldconvert.ll
+++ b/llvm/test/CodeGen/X86/2010-05-07-ldconvert.ll
@@ -9,7 +9,7 @@ entry:
%tmp = call x86_fp80 @llvm.powi.f80(x86_fp80 0xK3FFF8000000000000000, i32 -64) ; <x86_fp80> [#uses=1]
%conv = fptosi x86_fp80 %tmp to i32 ; <i32> [#uses=1]
store i32 %conv, i32* %r
- %tmp1 = load i32* %r ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %r ; <i32> [#uses=1]
%tobool = icmp ne i32 %tmp1, 0 ; <i1> [#uses=1]
br i1 %tobool, label %if.then, label %if.end
@@ -18,7 +18,7 @@ if.then: ; preds = %entry
br label %if.end
if.end: ; preds = %if.then, %entry
- %0 = load i32* %retval ; <i32> [#uses=1]
+ %0 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %0
}
diff --git a/llvm/test/CodeGen/X86/2010-05-10-DAGCombinerBug.ll b/llvm/test/CodeGen/X86/2010-05-10-DAGCombinerBug.ll
index e719da304c5..a6fe310bffa 100644
--- a/llvm/test/CodeGen/X86/2010-05-10-DAGCombinerBug.ll
+++ b/llvm/test/CodeGen/X86/2010-05-10-DAGCombinerBug.ll
@@ -4,7 +4,7 @@
define i32 @CXB30130(i32 %num1, i16* nocapture %num2, float* nocapture %num3, double* nocapture %num4) nounwind ssp {
entry:
- %0 = load i16* %num2, align 2 ; <i16> [#uses=2]
+ %0 = load i16, i16* %num2, align 2 ; <i16> [#uses=2]
%1 = mul nsw i16 %0, %0 ; <i16> [#uses=1]
store i16 %1, i16* %num2, align 2
ret i32 undef
diff --git a/llvm/test/CodeGen/X86/2010-05-16-nosseconversion.ll b/llvm/test/CodeGen/X86/2010-05-16-nosseconversion.ll
index 889575cea3a..2d3f0eb1c5f 100644
--- a/llvm/test/CodeGen/X86/2010-05-16-nosseconversion.ll
+++ b/llvm/test/CodeGen/X86/2010-05-16-nosseconversion.ll
@@ -5,7 +5,7 @@
define i32 @foo() nounwind readonly ssp {
entry:
- %0 = load i64* @x, align 8 ; <i64> [#uses=1]
+ %0 = load i64, i64* @x, align 8 ; <i64> [#uses=1]
%1 = uitofp i64 %0 to double ; <double> [#uses=1]
%2 = fptosi double %1 to i32 ; <i32> [#uses=1]
ret i32 %2
diff --git a/llvm/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll b/llvm/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
index 8fd33680f1c..9abccf83fa5 100644
--- a/llvm/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
+++ b/llvm/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
@@ -11,7 +11,7 @@ define i8* @bar(%struct.a* %myvar) nounwind optsize noinline ssp {
entry:
tail call void @llvm.dbg.value(metadata %struct.a* %myvar, i64 0, metadata !8, metadata !{!"0x102"})
%0 = getelementptr inbounds %struct.a, %struct.a* %myvar, i64 0, i32 0, !dbg !28 ; <i32*> [#uses=1]
- %1 = load i32* %0, align 8, !dbg !28 ; <i32> [#uses=1]
+ %1 = load i32, i32* %0, align 8, !dbg !28 ; <i32> [#uses=1]
tail call void @foo(i32 %1) nounwind optsize noinline ssp, !dbg !28
%2 = bitcast %struct.a* %myvar to i8*, !dbg !30 ; <i8*> [#uses=1]
ret i8* %2, !dbg !30
diff --git a/llvm/test/CodeGen/X86/2010-05-26-FP_TO_INT-crash.ll b/llvm/test/CodeGen/X86/2010-05-26-FP_TO_INT-crash.ll
index 38dcb806cc9..ac18195dc5a 100644
--- a/llvm/test/CodeGen/X86/2010-05-26-FP_TO_INT-crash.ll
+++ b/llvm/test/CodeGen/X86/2010-05-26-FP_TO_INT-crash.ll
@@ -7,7 +7,7 @@ module asm "\09.ident\09\22GCC: (GNU) 4.5.1 20100510 (prerelease) LLVM: 104604:1
define i32 @f2(double %x) nounwind {
entry:
- %0 = load double* undef, align 64 ; <double> [#uses=1]
+ %0 = load double, double* undef, align 64 ; <double> [#uses=1]
%1 = fptoui double %0 to i16 ; <i16> [#uses=1]
%2 = zext i16 %1 to i32 ; <i32> [#uses=1]
%3 = add nsw i32 0, %2 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll b/llvm/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll
index b5679e66569..5a4b389acb3 100644
--- a/llvm/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll
+++ b/llvm/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll
@@ -2,6 +2,6 @@
; CHECK: %fs:
define i32 @test1(i32 addrspace(257)* %arg) nounwind {
- %tmp = load i32 addrspace(257)* %arg
+ %tmp = load i32, i32 addrspace(257)* %arg
ret i32 %tmp
}
diff --git a/llvm/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll b/llvm/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll
index 74a7610e659..5bf7397ab73 100644
--- a/llvm/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll
+++ b/llvm/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll
@@ -10,17 +10,17 @@ entry:
%retval = alloca i32, align 4 ; <i32*> [#uses=3]
%v = alloca i32, align 4 ; <i32*> [#uses=3]
store i32 0, i32* %retval
- %zero = load i32* %retval
+ %zero = load i32, i32* %retval
; The earlyclobber register EC0 should not be spilled before the inline asm.
; Yes, check-not can refer to FileCheck variables defined in the future.
; CHECK-NOT: [[EC0]]{{.*}}(%rsp)
; CHECK: bsr {{[^,]*}}, [[EC0:%...]]
%0 = call i32 asm "bsr $1, $0\0A\09cmovz $2, $0", "=&r,ro,r,~{cc},~{dirflag},~{fpsr},~{flags}"(i32 %zero, i32 -1) nounwind, !srcloc !0 ; <i32> [#uses=1]
store i32 %0, i32* %v
- %tmp = load i32* %v ; <i32> [#uses=1]
+ %tmp = load i32, i32* %v ; <i32> [#uses=1]
%call1 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([23 x i8]* @.str, i32 0, i32 0), i32 %tmp) ; <i32> [#uses=0]
store i32 0, i32* %retval
- %1 = load i32* %retval ; <i32> [#uses=1]
+ %1 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %0
}
diff --git a/llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll b/llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
index ad5931ebf50..ffb51572a30 100644
--- a/llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
+++ b/llvm/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
@@ -7,7 +7,7 @@
define i32 @func(%struct.type* %s) nounwind optsize ssp {
entry:
%tmp1 = getelementptr inbounds %struct.type, %struct.type* %s, i32 0, i32 1
- %tmp2 = load i32* %tmp1, align 8
+ %tmp2 = load i32, i32* %tmp1, align 8
%tmp3 = icmp eq i32 %tmp2, 10
%tmp4 = getelementptr inbounds %struct.type, %struct.type* %s, i32 0, i32 40
br i1 %tmp3, label %bb, label %entry.bb1_crit_edge
diff --git a/llvm/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll b/llvm/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
index 3470a06a543..8f5f083209b 100644
--- a/llvm/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
+++ b/llvm/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
@@ -6,10 +6,10 @@ define void @_SEH2FrameHandler() nounwind {
entry:
%target.addr.i = alloca i8*, align 4 ; <i8**> [#uses=2]
%frame = alloca %struct.__SEH2Frame*, align 4 ; <%struct.__SEH2Frame**> [#uses=1]
- %tmp = load %struct.__SEH2Frame** %frame ; <%struct.__SEH2Frame*> [#uses=1]
+ %tmp = load %struct.__SEH2Frame*, %struct.__SEH2Frame** %frame ; <%struct.__SEH2Frame*> [#uses=1]
%conv = bitcast %struct.__SEH2Frame* %tmp to i8* ; <i8*> [#uses=1]
store i8* %conv, i8** %target.addr.i
- %tmp.i = load i8** %target.addr.i ; <i8*> [#uses=1]
+ %tmp.i = load i8*, i8** %target.addr.i ; <i8*> [#uses=1]
call void asm sideeffect "push %ebp\0Apush $$0\0Apush $$0\0Apush $$Return${:uid}\0Apush $0\0Acall ${1:c}\0AReturn${:uid}: pop %ebp\0A", "imr,imr,~{ax},~{bx},~{cx},~{dx},~{si},~{di},~{flags},~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %tmp.i, void (...)* @RtlUnwind) nounwind, !srcloc !0
ret void
}
diff --git a/llvm/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll b/llvm/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
index a0798ae10d7..023c77aedd4 100644
--- a/llvm/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
+++ b/llvm/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
@@ -6,6 +6,6 @@ entry:
; CHECK: GCROOT %eax
%_r = alloca i32, align 4 ; <i32*> [#uses=2]
call void asm "/* GCROOT $0 */", "=*imr,0,~{dirflag},~{fpsr},~{flags}"(i32* %_r, i32 4) nounwind
- %0 = load i32* %_r, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %_r, align 4 ; <i32> [#uses=1]
ret i32 %0
}
diff --git a/llvm/test/CodeGen/X86/2010-07-02-UnfoldBug.ll b/llvm/test/CodeGen/X86/2010-07-02-UnfoldBug.ll
index 79219dcfe60..954f25f6e16 100644
--- a/llvm/test/CodeGen/X86/2010-07-02-UnfoldBug.ll
+++ b/llvm/test/CodeGen/X86/2010-07-02-UnfoldBug.ll
@@ -61,7 +61,7 @@ bb22: ; preds = %bb24.preheader
br i1 undef, label %bb2.i.i, label %bb.i.i49
bb.i.i49: ; preds = %bb22
- %0 = load float* undef, align 4 ; <float> [#uses=1]
+ %0 = load float, float* undef, align 4 ; <float> [#uses=1]
%1 = insertelement <4 x float> undef, float %0, i32 0 ; <<4 x float>> [#uses=1]
%2 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> <float 1.000000e+00, float undef, float undef, float undef>, <4 x float> %1) nounwind readnone ; <<4 x float>> [#uses=1]
%3 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %2, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>) nounwind readnone ; <<4 x float>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2010-07-11-FPStackLoneUse.ll b/llvm/test/CodeGen/X86/2010-07-11-FPStackLoneUse.ll
index e96da94f5a3..33a89a04c7d 100644
--- a/llvm/test/CodeGen/X86/2010-07-11-FPStackLoneUse.ll
+++ b/llvm/test/CodeGen/X86/2010-07-11-FPStackLoneUse.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-apple-darwin10.0.0"
define void @_ZN7QVectorIdE4fillERKdi(double* nocapture %t) nounwind ssp align 2 {
entry:
- %tmp2 = load double* %t ; <double> [#uses=1]
+ %tmp2 = load double, double* %t ; <double> [#uses=1]
br i1 undef, label %if.end, label %if.then
if.then: ; preds = %entry
diff --git a/llvm/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll b/llvm/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll
index 12a8274fb56..cc6354e0795 100644
--- a/llvm/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll
+++ b/llvm/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll
@@ -7,7 +7,7 @@
define i32 @main() nounwind {
entry:
- %tmp = load i64* @g_16 ; <i64> [#uses=1]
+ %tmp = load i64, i64* @g_16 ; <i64> [#uses=1]
%not.lnot = icmp ne i64 %tmp, 0 ; <i1> [#uses=1]
%conv = sext i1 %not.lnot to i64 ; <i64> [#uses=1]
%and = and i64 %conv, 150 ; <i64> [#uses=1]
@@ -20,7 +20,7 @@ entry:
; CHECK-NEXT: jle
entry.if.end_crit_edge: ; preds = %entry
- %tmp4.pre = load i32* @g_38 ; <i32> [#uses=1]
+ %tmp4.pre = load i32, i32* @g_38 ; <i32> [#uses=1]
br label %if.end
if.then: ; preds = %entry
diff --git a/llvm/test/CodeGen/X86/2010-08-04-StackVariable.ll b/llvm/test/CodeGen/X86/2010-08-04-StackVariable.ll
index aa2906180bd..541512d4a8e 100644
--- a/llvm/test/CodeGen/X86/2010-08-04-StackVariable.ll
+++ b/llvm/test/CodeGen/X86/2010-08-04-StackVariable.ll
@@ -13,13 +13,13 @@ entry:
bb: ; preds = %entry
%1 = getelementptr inbounds %struct.SVal, %struct.SVal* %location, i32 0, i32 1, !dbg !29 ; <i32*> [#uses=1]
- %2 = load i32* %1, align 8, !dbg !29 ; <i32> [#uses=1]
+ %2 = load i32, i32* %1, align 8, !dbg !29 ; <i32> [#uses=1]
%3 = add i32 %2, %i, !dbg !29 ; <i32> [#uses=1]
br label %bb2, !dbg !29
bb1: ; preds = %entry
%4 = getelementptr inbounds %struct.SVal, %struct.SVal* %location, i32 0, i32 1, !dbg !30 ; <i32*> [#uses=1]
- %5 = load i32* %4, align 8, !dbg !30 ; <i32> [#uses=1]
+ %5 = load i32, i32* %4, align 8, !dbg !30 ; <i32> [#uses=1]
%6 = sub i32 %5, 1, !dbg !30 ; <i32> [#uses=1]
br label %bb2, !dbg !30
@@ -58,11 +58,11 @@ entry:
store i32 1, i32* %1, align 8, !dbg !42
%2 = getelementptr inbounds %struct.SVal, %struct.SVal* %0, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
%3 = getelementptr inbounds %struct.SVal, %struct.SVal* %v, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
- %4 = load i8** %3, align 8, !dbg !43 ; <i8*> [#uses=1]
+ %4 = load i8*, i8** %3, align 8, !dbg !43 ; <i8*> [#uses=1]
store i8* %4, i8** %2, align 8, !dbg !43
%5 = getelementptr inbounds %struct.SVal, %struct.SVal* %0, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
%6 = getelementptr inbounds %struct.SVal, %struct.SVal* %v, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
- %7 = load i32* %6, align 8, !dbg !43 ; <i32> [#uses=1]
+ %7 = load i32, i32* %6, align 8, !dbg !43 ; <i32> [#uses=1]
store i32 %7, i32* %5, align 8, !dbg !43
%8 = call i32 @_Z3fooi4SVal(i32 2, %struct.SVal* noalias %0) nounwind, !dbg !43 ; <i32> [#uses=0]
call void @llvm.dbg.value(metadata i32 %8, i64 0, metadata !44, metadata !{!"0x102"}), !dbg !43
diff --git a/llvm/test/CodeGen/X86/2010-09-01-RemoveCopyByCommutingDef.ll b/llvm/test/CodeGen/X86/2010-09-01-RemoveCopyByCommutingDef.ll
index e5542baf2ee..b05664d758c 100644
--- a/llvm/test/CodeGen/X86/2010-09-01-RemoveCopyByCommutingDef.ll
+++ b/llvm/test/CodeGen/X86/2010-09-01-RemoveCopyByCommutingDef.ll
@@ -8,7 +8,7 @@ define void @f(i32* %w, i32* %h, i8* %_this, i8* %image) nounwind ssp {
%x1 = tail call i64 @g(i8* %_this, i8* %image) nounwind ; <i64> [#uses=3]
%tmp1 = trunc i64 %x1 to i32 ; <i32> [#uses=1]
; CHECK: movl (%r{{.*}}), %
- %x4 = load i32* %h, align 4 ; <i32> [#uses=1]
+ %x4 = load i32, i32* %h, align 4 ; <i32> [#uses=1]
; The imull clobbers a 32-bit register.
; CHECK: imull %{{...}}, %e[[CLOBBER:..]]
diff --git a/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll b/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
index 11cad5fa8cb..b7380196bd9 100644
--- a/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
+++ b/llvm/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
@@ -9,8 +9,8 @@ entry:
%a = alloca [64 x i8]
%b = getelementptr inbounds [64 x i8], [64 x i8]* %a, i64 0, i32 0
%c = getelementptr inbounds [64 x i8], [64 x i8]* %a, i64 0, i32 30
- %d = load i8* %b, align 8
- %e = load i8* %c, align 8
+ %d = load i8, i8* %b, align 8
+ %e = load i8, i8* %c, align 8
%f = bitcast [64 x i8]* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %f, i8* %in, i64 64, i32 8, i1 false) nounwind
store i8 %d, i8* %b, align 8
diff --git a/llvm/test/CodeGen/X86/2010-11-09-MOVLPS.ll b/llvm/test/CodeGen/X86/2010-11-09-MOVLPS.ll
index 1ed5f189097..4b937333c8e 100644
--- a/llvm/test/CodeGen/X86/2010-11-09-MOVLPS.ll
+++ b/llvm/test/CodeGen/X86/2010-11-09-MOVLPS.ll
@@ -21,9 +21,9 @@ entry:
store i8* %a, i8** %a_addr
store %0* %b, %0** %b_addr
store %0* %c, %0** %c_addr
- %0 = load i8** %a_addr, align 64
- %1 = load %0** %b_addr, align 64
- %2 = load %0** %c_addr, align 64
+ %0 = load i8*, i8** %a_addr, align 64
+ %1 = load %0*, %0** %b_addr, align 64
+ %2 = load %0*, %0** %c_addr, align 64
%"ssa point" = bitcast i32 0 to i32
br label %"2"
@@ -31,10 +31,10 @@ entry:
%3 = bitcast i8* %0 to <2 x i32>*
%4 = getelementptr inbounds %0, %0* %1, i32 0, i32 0
%5 = bitcast %"int[]"* %4 to <4 x float>*
- %6 = load <4 x float>* %5, align 16
+ %6 = load <4 x float>, <4 x float>* %5, align 16
%7 = bitcast <2 x i32>* %3 to <2 x float>*
%8 = bitcast <2 x float>* %7 to double*
- %9 = load double* %8
+ %9 = load double, double* %8
%10 = insertelement <2 x double> undef, double %9, i32 0
%11 = insertelement <2 x double> %10, double undef, i32 1
%12 = bitcast <2 x double> %11 to <4 x float>
@@ -48,10 +48,10 @@ entry:
%19 = bitcast i8* %18 to <2 x i32>*
%20 = getelementptr inbounds %0, %0* %2, i32 0, i32 0
%21 = bitcast %"int[]"* %20 to <4 x float>*
- %22 = load <4 x float>* %21, align 16
+ %22 = load <4 x float>, <4 x float>* %21, align 16
%23 = bitcast <2 x i32>* %19 to <2 x float>*
%24 = bitcast <2 x float>* %23 to double*
- %25 = load double* %24
+ %25 = load double, double* %24
%26 = insertelement <2 x double> undef, double %25, i32 0
%27 = insertelement <2 x double> %26, double undef, i32 1
%28 = bitcast <2 x double> %27 to <4 x float>
diff --git a/llvm/test/CodeGen/X86/2010-11-18-SelectOfExtload.ll b/llvm/test/CodeGen/X86/2010-11-18-SelectOfExtload.ll
index 6d54c7e2982..331e83bb506 100644
--- a/llvm/test/CodeGen/X86/2010-11-18-SelectOfExtload.ll
+++ b/llvm/test/CodeGen/X86/2010-11-18-SelectOfExtload.ll
@@ -4,11 +4,11 @@
@s = external global i8
define i32 @foo(i1 %cond) {
; CHECK: @foo
- %u_base = load i8* @u
+ %u_base = load i8, i8* @u
%u_val = zext i8 %u_base to i32
; CHECK: movzbl
; CHECK: movsbl
- %s_base = load i8* @s
+ %s_base = load i8, i8* @s
%s_val = sext i8 %s_base to i32
%val = select i1 %cond, i32 %u_val, i32 %s_val
ret i32 %val
diff --git a/llvm/test/CodeGen/X86/2011-02-12-shuffle.ll b/llvm/test/CodeGen/X86/2011-02-12-shuffle.ll
index b4d56d193ca..40e36678a55 100644
--- a/llvm/test/CodeGen/X86/2011-02-12-shuffle.ll
+++ b/llvm/test/CodeGen/X86/2011-02-12-shuffle.ll
@@ -9,7 +9,7 @@ entry:
br i1 undef, label %if.end, label %UnifiedReturnBlock
if.end: ; preds = %entry
- %tmp1067 = load <16 x i32> addrspace(1)* null, align 64
+ %tmp1067 = load <16 x i32>, <16 x i32> addrspace(1)* null, align 64
%tmp1082 = shufflevector <16 x i32> <i32 0, i32 0, i32 0, i32 undef, i32 undef, i32 0, i32 0, i32 undef, i32 0, i32 0, i32 undef, i32 undef, i32 0, i32 undef, i32 undef, i32 undef>,
<16 x i32> %tmp1067,
<16 x i32> <i32 0, i32 1, i32 2, i32 undef, i32 26, i32 5, i32 6, i32 undef, i32 8, i32 9, i32 31, i32 30, i32 12, i32 undef, i32 undef, i32 undef>
diff --git a/llvm/test/CodeGen/X86/2011-03-02-DAGCombiner.ll b/llvm/test/CodeGen/X86/2011-03-02-DAGCombiner.ll
index c9251d2929a..86e579a2e13 100644
--- a/llvm/test/CodeGen/X86/2011-03-02-DAGCombiner.ll
+++ b/llvm/test/CodeGen/X86/2011-03-02-DAGCombiner.ll
@@ -13,23 +13,23 @@ entry:
%K = alloca %0, align 4
store i32 0, i32* %retval
%0 = bitcast %0* %K to i32*
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%2 = and i32 %1, -121
%3 = or i32 %2, 32
store i32 %3, i32* %0, align 4
%4 = bitcast %0* %K to i32*
- %5 = load i32* %4, align 4
+ %5 = load i32, i32* %4, align 4
%6 = lshr i32 %5, 3
%bf.clear = and i32 %6, 15
%conv = sitofp i32 %bf.clear to float
%f = getelementptr inbounds %struct.anon, %struct.anon* %F, i32 0, i32 0
- %tmp = load float* %f, align 4
+ %tmp = load float, float* %f, align 4
%sub = fsub float %tmp, %conv
store float %sub, float* %f, align 4
%ld = getelementptr inbounds %struct.anon, %struct.anon* %F, i32 0, i32 1
- %tmp1 = load x86_fp80* %ld, align 16
+ %tmp1 = load x86_fp80, x86_fp80* %ld, align 16
%7 = bitcast %0* %K to i32*
- %8 = load i32* %7, align 4
+ %8 = load i32, i32* %7, align 4
%9 = lshr i32 %8, 7
%bf.clear2 = and i32 %9, 1
%conv3 = uitofp i32 %bf.clear2 to x86_fp80
@@ -39,12 +39,12 @@ entry:
%10 = bitcast %0* %K to i32*
%11 = and i32 %bf.value, 1
%12 = shl i32 %11, 7
- %13 = load i32* %10, align 4
+ %13 = load i32, i32* %10, align 4
%14 = and i32 %13, -129
%15 = or i32 %14, %12
store i32 %15, i32* %10, align 4
%call = call i32 (...)* @iequals(i32 1841, i32 %bf.value, i32 0)
- %16 = load i32* %retval
+ %16 = load i32, i32* %retval
ret i32 %16
}
diff --git a/llvm/test/CodeGen/X86/2011-03-09-Physreg-Coalescing.ll b/llvm/test/CodeGen/X86/2011-03-09-Physreg-Coalescing.ll
index 67c99ed5838..9fe6a774fbb 100644
--- a/llvm/test/CodeGen/X86/2011-03-09-Physreg-Coalescing.ll
+++ b/llvm/test/CodeGen/X86/2011-03-09-Physreg-Coalescing.ll
@@ -12,7 +12,7 @@ declare fastcc i8* @save_string(i8* %d, i8* nocapture %s) nounwind
define i32 @cvtchar(i8* nocapture %sp) nounwind {
%temp.i = alloca [2 x i8], align 1
- %tmp1 = load i8* %sp, align 1
+ %tmp1 = load i8, i8* %sp, align 1
%div = udiv i8 %tmp1, 10
%rem = urem i8 %div, 10
%arrayidx.i = getelementptr inbounds [2 x i8], [2 x i8]* %temp.i, i32 0, i32 0
diff --git a/llvm/test/CodeGen/X86/2011-04-13-SchedCmpJmp.ll b/llvm/test/CodeGen/X86/2011-04-13-SchedCmpJmp.ll
index a71879232db..ed64ea93b39 100644
--- a/llvm/test/CodeGen/X86/2011-04-13-SchedCmpJmp.ll
+++ b/llvm/test/CodeGen/X86/2011-04-13-SchedCmpJmp.ll
@@ -17,7 +17,7 @@ declare hidden fastcc void @_ZN3JSCL23returnToThrowTrampolineEPNS_12JSGlobalData
; CHECK: je
define i32 @cti_op_eq(i8** nocapture %args) nounwind ssp {
entry:
- %0 = load i8** null, align 8
+ %0 = load i8*, i8** null, align 8
%tmp13 = bitcast i8* %0 to %"class.JSC::CodeLocationCall"*
%tobool.i.i.i = icmp ugt i8* undef, inttoptr (i64 281474976710655 to i8*)
%or.cond.i = and i1 %tobool.i.i.i, undef
@@ -34,7 +34,7 @@ if.end.i: ; preds = %entry
br i1 undef, label %land.rhs.i121.i, label %_ZNK3JSC7JSValue8isStringEv.exit122.i
land.rhs.i121.i: ; preds = %if.end.i
- %tmp.i.i117.i = load %"class.JSC::Structure"** undef, align 8
+ %tmp.i.i117.i = load %"class.JSC::Structure"*, %"class.JSC::Structure"** undef, align 8
br label %_ZNK3JSC7JSValue8isStringEv.exit122.i
_ZNK3JSC7JSValue8isStringEv.exit122.i: ; preds = %land.rhs.i121.i, %if.end.i
@@ -48,7 +48,7 @@ if.then.i92.i: ; preds = %_ZNK3JSC7JSValue8is
_ZN3JSC7JSValue19equalSlowCaseInlineEPNS_9ExecStateES0_S0_.exit: ; preds = %_ZNK3JSC7JSValue8isStringEv.exit122.i, %if.then.i.i.i, %if.then.i
- %1 = load i8** undef, align 8
+ %1 = load i8*, i8** undef, align 8
br i1 undef, label %do.end39, label %do.body27
do.body27: ; preds = %_ZN3JSC7JSValue19equalSlowCaseInlineEPNS_9ExecStateES0_S0_.exit
diff --git a/llvm/test/CodeGen/X86/2011-05-09-loaduse.ll b/llvm/test/CodeGen/X86/2011-05-09-loaduse.ll
index c772e4c7f4e..a94a9812431 100644
--- a/llvm/test/CodeGen/X86/2011-05-09-loaduse.ll
+++ b/llvm/test/CodeGen/X86/2011-05-09-loaduse.ll
@@ -5,7 +5,7 @@
;CHECK: ret
define float @test(<4 x float>* %A) nounwind {
entry:
- %T = load <4 x float>* %A
+ %T = load <4 x float>, <4 x float>* %A
%R = extractelement <4 x float> %T, i32 3
store <4 x float><float 0.0, float 0.0, float 0.0, float 0.0>, <4 x float>* %A
ret float %R
diff --git a/llvm/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll b/llvm/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll
index f9b1970333f..6cdcd581d40 100644
--- a/llvm/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll
+++ b/llvm/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll
@@ -41,7 +41,7 @@ cond.false156.i: ; preds = %for.end.i, %land.en
cond.end166.i: ; preds = %cond.false156.i, %cond.true138.i
%idxprom1113.i = phi i64 [ %idxprom1114.i, %cond.false156.i ], [ undef, %cond.true138.i ]
- %tmp235.i = load %struct.state** getelementptr inbounds (%struct.dfa* @aux_temp, i64 0, i32 2), align 8
+ %tmp235.i = load %struct.state*, %struct.state** getelementptr inbounds (%struct.dfa* @aux_temp, i64 0, i32 2), align 8
%att.i = getelementptr inbounds %struct.state, %struct.state* %tmp235.i, i64 %idxprom1113.i, i32 0
store i32 0, i32* %att.i, align 4
ret void
diff --git a/llvm/test/CodeGen/X86/2011-05-27-CrossClassCoalescing.ll b/llvm/test/CodeGen/X86/2011-05-27-CrossClassCoalescing.ll
index 8096394ffd2..414bd243ebc 100644
--- a/llvm/test/CodeGen/X86/2011-05-27-CrossClassCoalescing.ll
+++ b/llvm/test/CodeGen/X86/2011-05-27-CrossClassCoalescing.ll
@@ -20,7 +20,7 @@ land.lhs.true: ; preds = %do.body.i
for.body.i: ; preds = %for.inc.i, %if.then
%tmp3524.i = phi i32 [ 0, %land.lhs.true ], [ %tmp351.i, %for.inc.i ]
- %tmp6.i12 = load i32* undef, align 4
+ %tmp6.i12 = load i32, i32* undef, align 4
br i1 undef, label %for.inc.i, label %if.then.i17
if.then.i17: ; preds = %for.body.i
@@ -28,7 +28,7 @@ if.then.i17: ; preds = %for.body.i
%and14.i = and i32 %shr.i14, 255
%idxprom15.i = zext i32 %and14.i to i64
%arrayidx16.i = getelementptr inbounds [256 x i32], [256 x i32]* @bit_count, i64 0, i64 %idxprom15.i
- %tmp17.i15 = load i32* %arrayidx16.i, align 4
+ %tmp17.i15 = load i32, i32* %arrayidx16.i, align 4
%add.i = add i32 0, %tmp3524.i
%add24.i = add i32 %add.i, %tmp17.i15
%add31.i = add i32 %add24.i, 0
diff --git a/llvm/test/CodeGen/X86/2011-06-01-fildll.ll b/llvm/test/CodeGen/X86/2011-06-01-fildll.ll
index 3a0b05fce3a..30c743441c3 100644
--- a/llvm/test/CodeGen/X86/2011-06-01-fildll.ll
+++ b/llvm/test/CodeGen/X86/2011-06-01-fildll.ll
@@ -7,7 +7,7 @@ define float @f(i64* nocapture %x) nounwind readonly ssp {
entry:
; CHECK: movl
; CHECK-NOT: movl
- %tmp1 = load i64* %x, align 4
+ %tmp1 = load i64, i64* %x, align 4
; CHECK: fildll
%conv = sitofp i64 %tmp1 to float
%add = fadd float %conv, 1.000000e+00
diff --git a/llvm/test/CodeGen/X86/2011-06-03-x87chain.ll b/llvm/test/CodeGen/X86/2011-06-03-x87chain.ll
index f75166843f3..c78e8e38a56 100644
--- a/llvm/test/CodeGen/X86/2011-06-03-x87chain.ll
+++ b/llvm/test/CodeGen/X86/2011-06-03-x87chain.ll
@@ -2,7 +2,7 @@
define float @chainfail1(i64* nocapture %a, i64* nocapture %b, i32 %x, i32 %y, float* nocapture %f) nounwind uwtable noinline ssp {
entry:
- %tmp1 = load i64* %a, align 8
+ %tmp1 = load i64, i64* %a, align 8
; Insure x87 ops are properly chained, order preserved.
; CHECK: fildll
%conv = sitofp i64 %tmp1 to float
@@ -23,7 +23,7 @@ entry:
%sub = add nsw i32 %mul, -1
%idxprom = sext i32 %sub to i64
%arrayidx = getelementptr inbounds i64, i64* %a, i64 %idxprom
- %tmp4 = load i64* %arrayidx, align 8
+ %tmp4 = load i64, i64* %arrayidx, align 8
; CHECK: fildll
%conv = sitofp i64 %tmp4 to float
store float %conv, float* %f, align 4
@@ -35,7 +35,7 @@ entry:
br i1 undef, label %while.end, label %while.body
while.body: ; preds = %while.body, %entry
- %x.1.copyload = load i24* undef, align 1
+ %x.1.copyload = load i24, i24* undef, align 1
%conv = sitofp i24 %x.1.copyload to float
%div = fmul float %conv, 0x3E80000000000000
store float %div, float* undef, align 4
diff --git a/llvm/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll b/llvm/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll
index 4b3490aed8d..1285d20b852 100644
--- a/llvm/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll
+++ b/llvm/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll
@@ -32,17 +32,17 @@ bb8: ; preds = %bb23, %bb
store i8* bitcast (%0* @0 to i8*), i8** %tmp15
%tmp16 = bitcast %3* %tmp7 to void ()*
store void ()* %tmp16, void ()** %tmp6, align 8
- %tmp17 = load void ()** %tmp6, align 8
+ %tmp17 = load void ()*, void ()** %tmp6, align 8
%tmp18 = bitcast void ()* %tmp17 to %6*
%tmp19 = getelementptr inbounds %6, %6* %tmp18, i32 0, i32 3
%tmp20 = bitcast %6* %tmp18 to i8*
- %tmp21 = load i8** %tmp19
+ %tmp21 = load i8*, i8** %tmp19
%tmp22 = bitcast i8* %tmp21 to void (i8*)*
call void %tmp22(i8* %tmp20)
br label %bb23
bb23: ; preds = %bb8
- %tmp24 = load i64* %tmp5, align 8
+ %tmp24 = load i64, i64* %tmp5, align 8
%tmp25 = add i64 %tmp24, 1
store i64 %tmp25, i64* %tmp5, align 8
%tmp26 = icmp ult i64 %tmp25, 10
diff --git a/llvm/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll b/llvm/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll
index cd9070a3510..f38ebf1da85 100644
--- a/llvm/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll
+++ b/llvm/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll
@@ -12,7 +12,7 @@ entry:
%tmp6 = add i64 %a, -2147483647
%.sum = add i64 %tmp6, %b
%tmp8 = getelementptr inbounds [39 x i8], [39 x i8]* %stack_main, i64 0, i64 %.sum
- %tmp9 = load i8* %tmp8, align 1
+ %tmp9 = load i8, i8* %tmp8, align 1
%tmp10 = sext i8 %tmp9 to i32
ret i32 %tmp10
}
diff --git a/llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll b/llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll
index 4e84e84c1aa..a086a79a02a 100644
--- a/llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll
+++ b/llvm/test/CodeGen/X86/2011-09-14-valcoalesce.ll
@@ -121,7 +121,7 @@ while.body.i188: ; preds = %for.end173.i, %if.e
while.body85.i: ; preds = %while.body85.i, %while.body.i188
%aFreq.0518.i = phi i32 [ %add93.i, %while.body85.i ], [ 0, %while.body.i188 ]
%inc87.i = add nsw i32 0, 1
- %tmp91.i = load i32* undef, align 4
+ %tmp91.i = load i32, i32* undef, align 4
%add93.i = add nsw i32 %tmp91.i, %aFreq.0518.i
%or.cond514.i = and i1 undef, false
br i1 %or.cond514.i, label %while.body85.i, label %while.end.i
diff --git a/llvm/test/CodeGen/X86/2011-09-21-setcc-bug.ll b/llvm/test/CodeGen/X86/2011-09-21-setcc-bug.ll
index a67c3f33886..e61715a4813 100644
--- a/llvm/test/CodeGen/X86/2011-09-21-setcc-bug.ll
+++ b/llvm/test/CodeGen/X86/2011-09-21-setcc-bug.ll
@@ -3,10 +3,10 @@
; Make sure we are not crashing on this code.
define void @load_4_i8(<4 x i8>* %k, <4 x i8>* %y, <4 x double>* %A1, <4 x double>* %A0) {
- %A = load <4 x i8>* %k
- %B = load <4 x i8>* %y
- %C = load <4 x double>* %A0
- %D= load <4 x double>* %A1
+ %A = load <4 x i8>, <4 x i8>* %k
+ %B = load <4 x i8>, <4 x i8>* %y
+ %C = load <4 x double>, <4 x double>* %A0
+ %D= load <4 x double>, <4 x double>* %A1
%M = icmp uge <4 x i8> %A, %B
%T = select <4 x i1> %M, <4 x double> %C, <4 x double> %D
store <4 x double> %T, <4 x double>* undef
@@ -15,10 +15,10 @@ define void @load_4_i8(<4 x i8>* %k, <4 x i8>* %y, <4 x double>* %A1, <4 x doubl
define void @load_256_i8(<256 x i8>* %k, <256 x i8>* %y, <256 x double>* %A1, <256 x double>* %A0) {
- %A = load <256 x i8>* %k
- %B = load <256 x i8>* %y
- %C = load <256 x double>* %A0
- %D= load <256 x double>* %A1
+ %A = load <256 x i8>, <256 x i8>* %k
+ %B = load <256 x i8>, <256 x i8>* %y
+ %C = load <256 x double>, <256 x double>* %A0
+ %D= load <256 x double>, <256 x double>* %A1
%M = icmp uge <256 x i8> %A, %B
%T = select <256 x i1> %M, <256 x double> %C, <256 x double> %D
store <256 x double> %T, <256 x double>* undef
diff --git a/llvm/test/CodeGen/X86/2011-10-11-srl.ll b/llvm/test/CodeGen/X86/2011-10-11-srl.ll
index 434f88c14b6..ff58afca041 100644
--- a/llvm/test/CodeGen/X86/2011-10-11-srl.ll
+++ b/llvm/test/CodeGen/X86/2011-10-11-srl.ll
@@ -3,7 +3,7 @@
target triple = "x86_64-unknown-linux-gnu"
define void @m387(<2 x i8>* %p, <2 x i16>* %q) {
- %t = load <2 x i8>* %p
+ %t = load <2 x i8>, <2 x i8>* %p
%r = sext <2 x i8> %t to <2 x i16>
store <2 x i16> %r, <2 x i16>* %q
ret void
diff --git a/llvm/test/CodeGen/X86/2011-10-12-MachineCSE.ll b/llvm/test/CodeGen/X86/2011-10-12-MachineCSE.ll
index ff565ef5ff3..a9432e6da58 100644
--- a/llvm/test/CodeGen/X86/2011-10-12-MachineCSE.ll
+++ b/llvm/test/CodeGen/X86/2011-10-12-MachineCSE.ll
@@ -16,15 +16,15 @@ target triple = "x86_64-apple-macosx10.7.2"
define %struct.rtx_def* @gen_add3_insn(%struct.rtx_def* %r0, %struct.rtx_def* %r1, %struct.rtx_def* %c) nounwind uwtable ssp {
entry:
%0 = bitcast %struct.rtx_def* %r0 to i32*
- %1 = load i32* %0, align 8
+ %1 = load i32, i32* %0, align 8
%2 = lshr i32 %1, 16
%bf.clear = and i32 %2, 255
%idxprom = sext i32 %bf.clear to i64
- %3 = load %struct.optab** getelementptr inbounds ([49 x %struct.optab*]* @optab_table, i32 0, i64 0), align 8
+ %3 = load %struct.optab*, %struct.optab** getelementptr inbounds ([49 x %struct.optab*]* @optab_table, i32 0, i64 0), align 8
%handlers = getelementptr inbounds %struct.optab, %struct.optab* %3, i32 0, i32 1
%arrayidx = getelementptr inbounds [59 x %struct.anon.3], [59 x %struct.anon.3]* %handlers, i32 0, i64 %idxprom
%insn_code = getelementptr inbounds %struct.anon.3, %struct.anon.3* %arrayidx, i32 0, i32 0
- %4 = load i32* %insn_code, align 4
+ %4 = load i32, i32* %insn_code, align 4
%cmp = icmp eq i32 %4, 1317
br i1 %cmp, label %if.then, label %lor.lhs.false
@@ -32,19 +32,19 @@ lor.lhs.false: ; preds = %entry
%idxprom1 = sext i32 %4 to i64
%arrayidx2 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom1
%operand = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx2, i32 0, i32 3
- %5 = load %struct.insn_operand_data** %operand, align 8
+ %5 = load %struct.insn_operand_data*, %struct.insn_operand_data** %operand, align 8
%arrayidx3 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %5, i64 0
%predicate = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %arrayidx3, i32 0, i32 0
- %6 = load i32 (%struct.rtx_def*, i32)** %predicate, align 8
+ %6 = load i32 (%struct.rtx_def*, i32)*, i32 (%struct.rtx_def*, i32)** %predicate, align 8
%idxprom4 = sext i32 %4 to i64
%arrayidx5 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom4
%operand6 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx5, i32 0, i32 3
- %7 = load %struct.insn_operand_data** %operand6, align 8
+ %7 = load %struct.insn_operand_data*, %struct.insn_operand_data** %operand6, align 8
%arrayidx7 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %7, i64 0
%8 = bitcast %struct.insn_operand_data* %arrayidx7 to i8*
%bf.field.offs = getelementptr i8, i8* %8, i32 16
%9 = bitcast i8* %bf.field.offs to i32*
- %10 = load i32* %9, align 8
+ %10 = load i32, i32* %9, align 8
%bf.clear8 = and i32 %10, 65535
%call = tail call i32 %6(%struct.rtx_def* %r0, i32 %bf.clear8)
%tobool = icmp ne i32 %call, 0
@@ -54,19 +54,19 @@ lor.lhs.false9: ; preds = %lor.lhs.false
%idxprom10 = sext i32 %4 to i64
%arrayidx11 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom10
%operand12 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx11, i32 0, i32 3
- %11 = load %struct.insn_operand_data** %operand12, align 8
+ %11 = load %struct.insn_operand_data*, %struct.insn_operand_data** %operand12, align 8
%arrayidx13 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %11, i64 1
%predicate14 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %arrayidx13, i32 0, i32 0
- %12 = load i32 (%struct.rtx_def*, i32)** %predicate14, align 8
+ %12 = load i32 (%struct.rtx_def*, i32)*, i32 (%struct.rtx_def*, i32)** %predicate14, align 8
%idxprom15 = sext i32 %4 to i64
%arrayidx16 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom15
%operand17 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx16, i32 0, i32 3
- %13 = load %struct.insn_operand_data** %operand17, align 8
+ %13 = load %struct.insn_operand_data*, %struct.insn_operand_data** %operand17, align 8
%arrayidx18 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %13, i64 1
%14 = bitcast %struct.insn_operand_data* %arrayidx18 to i8*
%bf.field.offs19 = getelementptr i8, i8* %14, i32 16
%15 = bitcast i8* %bf.field.offs19 to i32*
- %16 = load i32* %15, align 8
+ %16 = load i32, i32* %15, align 8
%bf.clear20 = and i32 %16, 65535
%call21 = tail call i32 %12(%struct.rtx_def* %r1, i32 %bf.clear20)
%tobool22 = icmp ne i32 %call21, 0
@@ -76,19 +76,19 @@ lor.lhs.false23: ; preds = %lor.lhs.false9
%idxprom24 = sext i32 %4 to i64
%arrayidx25 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom24
%operand26 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx25, i32 0, i32 3
- %17 = load %struct.insn_operand_data** %operand26, align 8
+ %17 = load %struct.insn_operand_data*, %struct.insn_operand_data** %operand26, align 8
%arrayidx27 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %17, i64 2
%predicate28 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %arrayidx27, i32 0, i32 0
- %18 = load i32 (%struct.rtx_def*, i32)** %predicate28, align 8
+ %18 = load i32 (%struct.rtx_def*, i32)*, i32 (%struct.rtx_def*, i32)** %predicate28, align 8
%idxprom29 = sext i32 %4 to i64
%arrayidx30 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom29
%operand31 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx30, i32 0, i32 3
- %19 = load %struct.insn_operand_data** %operand31, align 8
+ %19 = load %struct.insn_operand_data*, %struct.insn_operand_data** %operand31, align 8
%arrayidx32 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %19, i64 2
%20 = bitcast %struct.insn_operand_data* %arrayidx32 to i8*
%bf.field.offs33 = getelementptr i8, i8* %20, i32 16
%21 = bitcast i8* %bf.field.offs33 to i32*
- %22 = load i32* %21, align 8
+ %22 = load i32, i32* %21, align 8
%bf.clear34 = and i32 %22, 65535
%call35 = tail call i32 %18(%struct.rtx_def* %c, i32 %bf.clear34)
%tobool36 = icmp ne i32 %call35, 0
@@ -101,7 +101,7 @@ if.end: ; preds = %lor.lhs.false23
%idxprom37 = sext i32 %4 to i64
%arrayidx38 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom37
%genfun = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx38, i32 0, i32 2
- %23 = load %struct.rtx_def* (%struct.rtx_def*, ...)** %genfun, align 8
+ %23 = load %struct.rtx_def* (%struct.rtx_def*, ...)*, %struct.rtx_def* (%struct.rtx_def*, ...)** %genfun, align 8
%call39 = tail call %struct.rtx_def* (%struct.rtx_def*, ...)* %23(%struct.rtx_def* %r0, %struct.rtx_def* %r1, %struct.rtx_def* %c)
br label %return
diff --git a/llvm/test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll b/llvm/test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll
index e7d1e194d9c..c9dc050d0b4 100644
--- a/llvm/test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll
+++ b/llvm/test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll
@@ -15,11 +15,11 @@ entry:
store <4 x float> <float 0x4008CCCCC0000000, float 0x40099999A0000000, float 0x400A666660000000, float 0x400B333340000000>, <4 x float>* %p3, align 16
store <4 x float> <float 0x4010666660000000, float 0x4010CCCCC0000000, float 0x4011333340000000, float 0x40119999A0000000>, <4 x float>* %p4, align 16
store <4 x float> <float 0x4014666660000000, float 0x4014CCCCC0000000, float 0x4015333340000000, float 0x40159999A0000000>, <4 x float>* %p5, align 16
- %0 = load <4 x float>* %p1, align 16
- %1 = load <4 x float>* %p2, align 16
- %2 = load <4 x float>* %p3, align 16
- %3 = load <4 x float>* %p4, align 16
- %4 = load <4 x float>* %p5, align 16
+ %0 = load <4 x float>, <4 x float>* %p1, align 16
+ %1 = load <4 x float>, <4 x float>* %p2, align 16
+ %2 = load <4 x float>, <4 x float>* %p3, align 16
+ %3 = load <4 x float>, <4 x float>* %p4, align 16
+ %4 = load <4 x float>, <4 x float>* %p5, align 16
; CHECK: movups {{%xmm[0-7]}}, (%esp)
; CHECK-NEXT: calll _dovectortest
call void @dovectortest(<4 x float> %0, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4)
diff --git a/llvm/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll b/llvm/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll
index 07a6910c65e..7e450a8e06a 100644
--- a/llvm/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll
+++ b/llvm/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll
@@ -18,8 +18,8 @@ define i32 @main() nounwind uwtable {
entry:
; CHECK: pmovsxbq i(%rip), %
; CHECK: pmovsxbq j(%rip), %
- %0 = load <2 x i8>* @i, align 8
- %1 = load <2 x i8>* @j, align 8
+ %0 = load <2 x i8>, <2 x i8>* @i, align 8
+ %1 = load <2 x i8>, <2 x i8>* @j, align 8
%div = sdiv <2 x i8> %1, %0
store <2 x i8> %div, <2 x i8>* getelementptr inbounds (%union.anon* @res, i32 0, i32 0), align 8
ret i32 0
diff --git a/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll b/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll
index 7eaa5bb7e88..da3c3222826 100644
--- a/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll
+++ b/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll
@@ -49,7 +49,7 @@ define void @full_test() {
br label %B1
B1: ; preds = %entry
- %0 = load <2 x float>* %Cy119
+ %0 = load <2 x float>, <2 x float>* %Cy119
%1 = fptosi <2 x float> %0 to <2 x i32>
%2 = sitofp <2 x i32> %1 to <2 x float>
%3 = fcmp ogt <2 x float> %0, zeroinitializer
@@ -58,7 +58,7 @@ define void @full_test() {
%6 = fcmp oeq <2 x float> %2, %0
%7 = select <2 x i1> %6, <2 x float> %0, <2 x float> %5
store <2 x float> %7, <2 x float>* %Cy118
- %8 = load <2 x float>* %Cy118
+ %8 = load <2 x float>, <2 x float>* %Cy118
store <2 x float> %8, <2 x float>* %Cy11a
ret void
}
diff --git a/llvm/test/CodeGen/X86/2011-10-27-tstore.ll b/llvm/test/CodeGen/X86/2011-10-27-tstore.ll
index 6dea92b6307..290b4d0cb00 100644
--- a/llvm/test/CodeGen/X86/2011-10-27-tstore.ll
+++ b/llvm/test/CodeGen/X86/2011-10-27-tstore.ll
@@ -8,7 +8,7 @@ target triple = "x86_64-unknown-linux-gnu"
;CHECK: ret
define void @ltstore(<4 x i32>* %pA, <2 x i32>* %pB) {
entry:
- %in = load <4 x i32>* %pA
+ %in = load <4 x i32>, <4 x i32>* %pA
%j = shufflevector <4 x i32> %in, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
store <2 x i32> %j, <2 x i32>* %pB
ret void
diff --git a/llvm/test/CodeGen/X86/2011-11-22-AVX2-Domains.ll b/llvm/test/CodeGen/X86/2011-11-22-AVX2-Domains.ll
index 8174109378d..dffd6d1cee5 100644
--- a/llvm/test/CodeGen/X86/2011-11-22-AVX2-Domains.ll
+++ b/llvm/test/CodeGen/X86/2011-11-22-AVX2-Domains.ll
@@ -18,9 +18,9 @@ if_else: ; preds = %allocas
br i1 undef, label %for_loop156.lr.ph, label %if_exit
for_loop156.lr.ph: ; preds = %if_else
- %val_6.i21244 = load i16* undef, align 2
+ %val_6.i21244 = load i16, i16* undef, align 2
%0 = insertelement <8 x i16> undef, i16 %val_6.i21244, i32 6
- %val_7.i21248 = load i16* undef, align 2
+ %val_7.i21248 = load i16, i16* undef, align 2
%1 = insertelement <8 x i16> %0, i16 %val_7.i21248, i32 7
%uint2uint32.i20206 = zext <8 x i16> %1 to <8 x i32>
%bitop5.i20208 = and <8 x i32> %uint2uint32.i20206, <i32 31744, i32 31744, i32 31744, i32 31744, i32 31744, i32 31744, i32 31744, i32 31744>
@@ -39,26 +39,26 @@ for_loop156.lr.ph: ; preds = %if_else
%binop407 = fadd <8 x float> %binop406, <float -2.000000e+00, float -2.000000e+00, float -2.000000e+00, float -2.000000e+00, float -2.000000e+00, float -2.000000e+00, float -2.000000e+00, float -2.000000e+00>
%binop408 = fmul <8 x float> zeroinitializer, %binop407
%binop411 = fsub <8 x float> <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>, undef
- %val_4.i21290 = load i16* undef, align 2
+ %val_4.i21290 = load i16, i16* undef, align 2
%2 = insertelement <8 x i16> undef, i16 %val_4.i21290, i32 4
- %val_5.i21294 = load i16* undef, align 2
+ %val_5.i21294 = load i16, i16* undef, align 2
%3 = insertelement <8 x i16> %2, i16 %val_5.i21294, i32 5
- %val_6.i21298 = load i16* undef, align 2
+ %val_6.i21298 = load i16, i16* undef, align 2
%4 = insertelement <8 x i16> %3, i16 %val_6.i21298, i32 6
%ptr_7.i21301 = inttoptr i64 undef to i16*
- %val_7.i21302 = load i16* %ptr_7.i21301, align 2
+ %val_7.i21302 = load i16, i16* %ptr_7.i21301, align 2
%5 = insertelement <8 x i16> %4, i16 %val_7.i21302, i32 7
%uint2uint32.i20218 = zext <8 x i16> %5 to <8 x i32>
- %structelement561 = load i8** undef, align 8
+ %structelement561 = load i8*, i8** undef, align 8
%ptr2int563 = ptrtoint i8* %structelement561 to i64
%smear.ptr_smear7571 = insertelement <8 x i64> undef, i64 %ptr2int563, i32 7
%new_ptr582 = add <8 x i64> %smear.ptr_smear7571, zeroinitializer
- %val_5.i21509 = load i8* null, align 1
+ %val_5.i21509 = load i8, i8* null, align 1
%6 = insertelement <8 x i8> undef, i8 %val_5.i21509, i32 5
%7 = insertelement <8 x i8> %6, i8 undef, i32 6
%iptr_7.i21515 = extractelement <8 x i64> %new_ptr582, i32 7
%ptr_7.i21516 = inttoptr i64 %iptr_7.i21515 to i8*
- %val_7.i21517 = load i8* %ptr_7.i21516, align 1
+ %val_7.i21517 = load i8, i8* %ptr_7.i21516, align 1
%8 = insertelement <8 x i8> %7, i8 %val_7.i21517, i32 7
%uint2float.i20245 = uitofp <8 x i8> %8 to <8 x float>
%binop.i20246 = fmul <8 x float> %uint2float.i20245, <float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000, float 0x3F70101020000000>
diff --git a/llvm/test/CodeGen/X86/2011-12-08-AVXISelBugs.ll b/llvm/test/CodeGen/X86/2011-12-08-AVXISelBugs.ll
index 1561784dee3..ab1b46c99d9 100644
--- a/llvm/test/CodeGen/X86/2011-12-08-AVXISelBugs.ll
+++ b/llvm/test/CodeGen/X86/2011-12-08-AVXISelBugs.ll
@@ -13,7 +13,7 @@ loop: ; preds = %loop.cond
br i1 undef, label %0, label %t1.exit
; <label>:0 ; preds = %loop
- %1 = load <16 x i32> addrspace(1)* undef, align 64
+ %1 = load <16 x i32>, <16 x i32> addrspace(1)* undef, align 64
%2 = shufflevector <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>, <16 x i32> %1, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 0, i32 0>
store <16 x i32> %2, <16 x i32> addrspace(1)* undef, align 64
br label %t1.exit
@@ -29,7 +29,7 @@ define void @t2() nounwind {
br i1 undef, label %1, label %4
; <label>:1 ; preds = %0
- %2 = load <16 x i32> addrspace(1)* undef, align 64
+ %2 = load <16 x i32>, <16 x i32> addrspace(1)* undef, align 64
%3 = shufflevector <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>, <16 x i32> %2, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 20, i32 0, i32 0, i32 0, i32 0>
store <16 x i32> %3, <16 x i32> addrspace(1)* undef, align 64
br label %4
@@ -50,7 +50,7 @@ loop: ; preds = %loop.cond
; <label>:0 ; preds = %loop
%1 = shufflevector <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 25, i32 0>
- %2 = load <16 x i32> addrspace(1)* undef, align 64
+ %2 = load <16 x i32>, <16 x i32> addrspace(1)* undef, align 64
%3 = shufflevector <16 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>, <16 x i32> %2, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 28, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
store <16 x i32> %3, <16 x i32> addrspace(1)* undef, align 64
br label %t2.exit
@@ -64,7 +64,7 @@ return: ; preds = %loop.cond
define <3 x i64> @t4() nounwind {
entry:
- %0 = load <2 x i64> addrspace(1)* undef, align 16
+ %0 = load <2 x i64>, <2 x i64> addrspace(1)* undef, align 16
%1 = extractelement <2 x i64> %0, i32 0
%2 = insertelement <3 x i64> <i64 undef, i64 0, i64 0>, i64 %1, i32 0
ret <3 x i64> %2
diff --git a/llvm/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll b/llvm/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll
index 14643e4ba8b..0944adb8b00 100644
--- a/llvm/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll
+++ b/llvm/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll
@@ -8,7 +8,7 @@
; CHECK-LABEL: test:
; CHECK: pextrd $2, %xmm
define <4 x i32> @test(<4 x i32>* %p) {
- %v = load <4 x i32>* %p
+ %v = load <4 x i32>, <4 x i32>* %p
%e = extractelement <4 x i32> %v, i32 2
%cmp = icmp eq i32 %e, 3
%sel = select i1 %cmp, <4 x i32> %v, <4 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll b/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
index 42a3b9b2c04..21443441c9f 100644
--- a/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
+++ b/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
@@ -109,7 +109,7 @@ bb49: ; preds = %bb49, %bb48
%tmp51 = add i32 %tmp50, undef
%tmp52 = add i32 %tmp50, undef
%tmp53 = getelementptr i32, i32* %tmp13, i32 %tmp52
- %tmp54 = load i32* %tmp53, align 4
+ %tmp54 = load i32, i32* %tmp53, align 4
%tmp55 = add i32 %tmp50, 1
%tmp56 = icmp eq i32 %tmp55, %tmp8
br i1 %tmp56, label %bb57, label %bb49
diff --git a/llvm/test/CodeGen/X86/2012-01-11-split-cv.ll b/llvm/test/CodeGen/X86/2012-01-11-split-cv.ll
index 69d4b93bb78..cb39ed91197 100644
--- a/llvm/test/CodeGen/X86/2012-01-11-split-cv.ll
+++ b/llvm/test/CodeGen/X86/2012-01-11-split-cv.ll
@@ -3,7 +3,7 @@
;CHECK-LABEL: add18i16:
define void @add18i16(<18 x i16>* nocapture sret %ret, <18 x i16>* %bp) nounwind {
;CHECK: vmovaps
- %b = load <18 x i16>* %bp, align 16
+ %b = load <18 x i16>, <18 x i16>* %bp, align 16
%x = add <18 x i16> zeroinitializer, %b
store <18 x i16> %x, <18 x i16>* %ret, align 16
;CHECK: ret
diff --git a/llvm/test/CodeGen/X86/2012-01-12-extract-sv.ll b/llvm/test/CodeGen/X86/2012-01-12-extract-sv.ll
index fa8e80f0bde..75409f22b14 100644
--- a/llvm/test/CodeGen/X86/2012-01-12-extract-sv.ll
+++ b/llvm/test/CodeGen/X86/2012-01-12-extract-sv.ll
@@ -3,7 +3,7 @@
; CHECK: endless_loop
define void @endless_loop() {
entry:
- %0 = load <8 x i32> addrspace(1)* undef, align 32
+ %0 = load <8 x i32>, <8 x i32> addrspace(1)* undef, align 32
%1 = shufflevector <8 x i32> %0, <8 x i32> undef, <16 x i32> <i32 4, i32 4, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = shufflevector <16 x i32> <i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 undef>, <16 x i32> %1, <16 x i32> <i32 16, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 17>
store <16 x i32> %2, <16 x i32> addrspace(1)* undef, align 64
diff --git a/llvm/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll b/llvm/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll
index b78c13f9d4e..16eef0a28aa 100644
--- a/llvm/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll
+++ b/llvm/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll
@@ -5,7 +5,7 @@
define void @baz() nounwind ssp {
entry:
- %0 = load i8** @ptr, align 4
+ %0 = load i8*, i8** @ptr, align 4
%cmp = icmp eq i8* %0, null
fence seq_cst
br i1 %cmp, label %if.then, label %if.else
diff --git a/llvm/test/CodeGen/X86/2012-02-12-dagco.ll b/llvm/test/CodeGen/X86/2012-02-12-dagco.ll
index 13723a22994..5d48c142dc1 100644
--- a/llvm/test/CodeGen/X86/2012-02-12-dagco.ll
+++ b/llvm/test/CodeGen/X86/2012-02-12-dagco.ll
@@ -3,9 +3,9 @@ target triple = "x86_64-unknown-linux-gnu"
; Make sure we are not crashing on this one
define void @dagco_crash() {
entry:
- %srcval.i411.i = load <4 x i64>* undef, align 1
+ %srcval.i411.i = load <4 x i64>, <4 x i64>* undef, align 1
%0 = extractelement <4 x i64> %srcval.i411.i, i32 3
- %srcval.i409.i = load <2 x i64>* undef, align 1
+ %srcval.i409.i = load <2 x i64>, <2 x i64>* undef, align 1
%1 = extractelement <2 x i64> %srcval.i409.i, i32 0
%2 = insertelement <8 x i64> undef, i64 %0, i32 5
%3 = insertelement <8 x i64> %2, i64 %1, i32 6
diff --git a/llvm/test/CodeGen/X86/2012-02-29-CoalescerBug.ll b/llvm/test/CodeGen/X86/2012-02-29-CoalescerBug.ll
index bdce85325f3..bbeb2a02219 100644
--- a/llvm/test/CodeGen/X86/2012-02-29-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2012-02-29-CoalescerBug.ll
@@ -14,9 +14,9 @@ target triple = "i386-apple-macosx10.7.0"
define void @fn2() nounwind optsize ssp {
entry:
store i64 0, i64* bitcast ([2 x [2 x %struct.S0]]* @d to i64*), align 4
- %0 = load i32* @c, align 4
+ %0 = load i32, i32* @c, align 4
%tobool2 = icmp eq i32 %0, 0
- %1 = load i32* @a, align 4
+ %1 = load i32, i32* @a, align 4
%tobool4 = icmp eq i32 %1, 0
br label %for.cond
diff --git a/llvm/test/CodeGen/X86/2012-03-26-PostRALICMBug.ll b/llvm/test/CodeGen/X86/2012-03-26-PostRALICMBug.ll
index 372441af13a..260f059492f 100644
--- a/llvm/test/CodeGen/X86/2012-03-26-PostRALICMBug.ll
+++ b/llvm/test/CodeGen/X86/2012-03-26-PostRALICMBug.ll
@@ -21,7 +21,7 @@ entry:
if.end: ; preds = %entry
%size5 = getelementptr inbounds %struct.ref_s, %struct.ref_s* %op, i64 0, i32 2
- %tmp6 = load i16* %size5, align 2
+ %tmp6 = load i16, i16* %size5, align 2
%tobool1 = icmp eq i16 %tmp6, 0
%1 = select i1 %tobool1, i32 1396, i32 -1910
%index10 = add i32 %index9, %1
@@ -29,12 +29,12 @@ if.end: ; preds = %entry
while.body.lr.ph: ; preds = %if.end
%refs = bitcast %struct.ref_s* %op to %struct.ref_s**
- %tmp9 = load %struct.ref_s** %refs, align 8
+ %tmp9 = load %struct.ref_s*, %struct.ref_s** %refs, align 8
%tmp4 = zext i16 %tmp6 to i64
%index13 = add i32 %index10, 1658
%2 = sext i32 %index13 to i64
%3 = getelementptr [3891 x i64], [3891 x i64]* @table, i64 0, i64 %2
- %blockaddress14 = load i64* %3, align 8
+ %blockaddress14 = load i64, i64* %3, align 8
%4 = inttoptr i64 %blockaddress14 to i8*
indirectbr i8* %4, [label %while.body]
@@ -50,7 +50,7 @@ while.body: ; preds = %while.body, %while.
%tmp8 = select i1 %exitcond5, i64 13, i64 0
%5 = sext i32 %index15 to i64
%6 = getelementptr [3891 x i64], [3891 x i64]* @table, i64 0, i64 %5
- %blockaddress16 = load i64* %6, align 8
+ %blockaddress16 = load i64, i64* %6, align 8
%7 = inttoptr i64 %blockaddress16 to i8*
indirectbr i8* %7, [label %return, label %while.body]
diff --git a/llvm/test/CodeGen/X86/2012-04-26-sdglue.ll b/llvm/test/CodeGen/X86/2012-04-26-sdglue.ll
index 48981f9b87a..e0b09036395 100644
--- a/llvm/test/CodeGen/X86/2012-04-26-sdglue.ll
+++ b/llvm/test/CodeGen/X86/2012-04-26-sdglue.ll
@@ -14,9 +14,9 @@
;CHECK: ret
define void @func() nounwind ssp {
- %tmp = load <4 x float>* null, align 1
+ %tmp = load <4 x float>, <4 x float>* null, align 1
%tmp14 = getelementptr <4 x float>, <4 x float>* null, i32 2
- %tmp15 = load <4 x float>* %tmp14, align 1
+ %tmp15 = load <4 x float>, <4 x float>* %tmp14, align 1
%tmp16 = shufflevector <4 x float> %tmp, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
%tmp17 = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %tmp16, <4 x float> undef, i8 1)
%tmp18 = bitcast <4 x float> %tmp to <16 x i8>
diff --git a/llvm/test/CodeGen/X86/2012-07-10-extload64.ll b/llvm/test/CodeGen/X86/2012-07-10-extload64.ll
index 723302723b6..f33fc8c69ef 100644
--- a/llvm/test/CodeGen/X86/2012-07-10-extload64.ll
+++ b/llvm/test/CodeGen/X86/2012-07-10-extload64.ll
@@ -4,7 +4,7 @@
define void @load_store(<4 x i16>* %in) {
entry:
; CHECK: pmovzxwd
- %A27 = load <4 x i16>* %in, align 4
+ %A27 = load <4 x i16>, <4 x i16>* %in, align 4
%A28 = add <4 x i16> %A27, %A27
; CHECK: movlpd
store <4 x i16> %A28, <4 x i16>* %in, align 4
@@ -25,7 +25,7 @@ BB:
;CHECK-LABEL: load_64:
define <2 x i32> @load_64(<2 x i32>* %ptr) {
BB:
- %t = load <2 x i32>* %ptr
+ %t = load <2 x i32>, <2 x i32>* %ptr
ret <2 x i32> %t
;CHECK: pmovzxdq
;CHECK: ret
diff --git a/llvm/test/CodeGen/X86/2012-07-15-broadcastfold.ll b/llvm/test/CodeGen/X86/2012-07-15-broadcastfold.ll
index 1c39c747cdc..7c8c2f28348 100644
--- a/llvm/test/CodeGen/X86/2012-07-15-broadcastfold.ll
+++ b/llvm/test/CodeGen/X86/2012-07-15-broadcastfold.ll
@@ -9,7 +9,7 @@ declare x86_fastcallcc i64 @barrier()
;CHECK: ret
define <8 x float> @bcast_fold( float* %A) {
BB:
- %A0 = load float* %A
+ %A0 = load float, float* %A
%tt3 = call x86_fastcallcc i64 @barrier()
br i1 undef, label %work, label %exit
diff --git a/llvm/test/CodeGen/X86/2012-08-17-legalizer-crash.ll b/llvm/test/CodeGen/X86/2012-08-17-legalizer-crash.ll
index 0d18267fcde..a19aa52f302 100644
--- a/llvm/test/CodeGen/X86/2012-08-17-legalizer-crash.ll
+++ b/llvm/test/CodeGen/X86/2012-08-17-legalizer-crash.ll
@@ -12,9 +12,9 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @fn1() nounwind uwtable ssp {
entry:
- %0 = load %struct._GtkSheetRow** @a, align 8
+ %0 = load %struct._GtkSheetRow*, %struct._GtkSheetRow** @a, align 8
%1 = bitcast %struct._GtkSheetRow* %0 to i576*
- %srcval2 = load i576* %1, align 8
+ %srcval2 = load i576, i576* %1, align 8
%tobool = icmp ugt i576 %srcval2, 57586096570152913699974892898380567793532123114264532903689671329431521032595044740083720782129802971518987656109067457577065805510327036019308994315074097345724415
br i1 %tobool, label %if.then, label %if.end
diff --git a/llvm/test/CodeGen/X86/2012-09-28-CGPBug.ll b/llvm/test/CodeGen/X86/2012-09-28-CGPBug.ll
index 9f20d469513..2d29433c1b5 100644
--- a/llvm/test/CodeGen/X86/2012-09-28-CGPBug.ll
+++ b/llvm/test/CodeGen/X86/2012-09-28-CGPBug.ll
@@ -16,10 +16,10 @@
define void @h(i8*) nounwind ssp {
%2 = alloca i8*
store i8* %0, i8** %2
- %3 = load i8** %2
+ %3 = load i8*, i8** %2
%4 = bitcast i8* %3 to { i32, i32 }*
%5 = getelementptr { i32, i32 }, { i32, i32 }* %4, i32 0, i32 0
- %6 = load i32* %5
+ %6 = load i32, i32* %5
%7 = srem i32 %6, 2
%8 = icmp slt i32 %6, 2
%9 = select i1 %8, i32 %6, i32 %7
@@ -29,7 +29,7 @@ define void @h(i8*) nounwind ssp {
; <label>:11 ; preds = %1
%12 = zext i1 %10 to i32
%13 = getelementptr [4 x i32], [4 x i32]* @JT, i32 0, i32 %12
- %14 = load i32* %13
+ %14 = load i32, i32* %13
%15 = add i32 %14, ptrtoint (i8* blockaddress(@h, %11) to i32)
%16 = inttoptr i32 %15 to i8*
indirectbr i8* %16, [label %17, label %18]
diff --git a/llvm/test/CodeGen/X86/2012-10-02-DAGCycle.ll b/llvm/test/CodeGen/X86/2012-10-02-DAGCycle.ll
index 9efe77db45d..c43001eb8c2 100644
--- a/llvm/test/CodeGen/X86/2012-10-02-DAGCycle.ll
+++ b/llvm/test/CodeGen/X86/2012-10-02-DAGCycle.ll
@@ -9,9 +9,9 @@
define i32 @t(%TRp* inreg %rp) nounwind optsize ssp {
entry:
%handler = getelementptr inbounds %TRp, %TRp* %rp, i32 0, i32 1
- %0 = load %TRH** %handler, align 4
+ %0 = load %TRH*, %TRH** %handler, align 4
%sync = getelementptr inbounds %TRH, %TRH* %0, i32 0, i32 4
- %sync12 = load {}** %sync, align 4
+ %sync12 = load {}*, {}** %sync, align 4
%1 = bitcast {}* %sync12 to i32 (%TRp*)*
%call = tail call i32 %1(%TRp* inreg %rp) nounwind optsize
ret i32 %call
@@ -29,13 +29,13 @@ entry:
br i1 undef, label %if.then, label %if.end17
if.then: ; preds = %entry
- %vecnorm.sroa.2.8.copyload = load float* undef, align 4
+ %vecnorm.sroa.2.8.copyload = load float, float* undef, align 4
%cmp4 = fcmp olt float undef, 0x3D10000000000000
%vecnorm.sroa.2.8.copyload36 = select i1 %cmp4, float -1.000000e+00, float %vecnorm.sroa.2.8.copyload
%call.i.i.i = tail call float @sqrtf(float 0.000000e+00) nounwind readnone
%div.i.i = fdiv float 1.000000e+00, %call.i.i.i
%mul7.i.i.i = fmul float %div.i.i, %vecnorm.sroa.2.8.copyload36
- %1 = load float (%btConvexInternalShape*)** undef, align 8
+ %1 = load float (%btConvexInternalShape*)*, float (%btConvexInternalShape*)** undef, align 8
%call12 = tail call float %1(%btConvexInternalShape* %0)
%mul7.i.i = fmul float %call12, %mul7.i.i.i
%retval.sroa.0.4.insert = insertelement <2 x float> zeroinitializer, float undef, i32 1
diff --git a/llvm/test/CodeGen/X86/2012-10-03-DAGCycle.ll b/llvm/test/CodeGen/X86/2012-10-03-DAGCycle.ll
index 83ae87b7c76..da92565708c 100644
--- a/llvm/test/CodeGen/X86/2012-10-03-DAGCycle.ll
+++ b/llvm/test/CodeGen/X86/2012-10-03-DAGCycle.ll
@@ -13,11 +13,11 @@ define fastcc void @bar(%struct.pluto.0* %arg) nounwind uwtable ssp align 2 {
bb:
%tmp1 = alloca %struct.widget.375, align 8
%tmp2 = getelementptr inbounds %struct.pluto.0, %struct.pluto.0* %arg, i64 0, i32 1
- %tmp3 = load %struct.hoge.368** %tmp2, align 8
+ %tmp3 = load %struct.hoge.368*, %struct.hoge.368** %tmp2, align 8
store %struct.pluto.0* %arg, %struct.pluto.0** undef, align 8
%tmp = getelementptr inbounds %struct.widget.375, %struct.widget.375* %tmp1, i64 0, i32 2
%tmp4 = getelementptr %struct.pluto.0, %struct.pluto.0* %arg, i64 0, i32 0, i32 0
- %tmp5 = load %i8** %tmp4, align 8
+ %tmp5 = load %i8*, %i8** %tmp4, align 8
store %i8* %tmp5, %i8** %tmp, align 8
%tmp6 = getelementptr inbounds %struct.widget.375, %struct.widget.375* %tmp1, i64 0, i32 3
store %struct.hoge.368* %tmp3, %struct.hoge.368** %tmp6, align 8
diff --git a/llvm/test/CodeGen/X86/2012-10-18-crash-dagco.ll b/llvm/test/CodeGen/X86/2012-10-18-crash-dagco.ll
index 71b20609d22..fb29241035e 100644
--- a/llvm/test/CodeGen/X86/2012-10-18-crash-dagco.ll
+++ b/llvm/test/CodeGen/X86/2012-10-18-crash-dagco.ll
@@ -22,23 +22,23 @@ bb27: ; preds = %bb48, %bb
]
bb28: ; preds = %bb27, %bb26
- %tmp = load i32* null
+ %tmp = load i32, i32* null
%tmp29 = trunc i32 %tmp to i8
store i8* undef, i8** undef
- %tmp30 = load i32* null
+ %tmp30 = load i32, i32* null
%tmp31 = icmp eq i32 %tmp30, 0
%tmp32 = getelementptr inbounds [411 x i8], [411 x i8]* @global, i32 0, i32 undef
- %tmp33 = load i8* %tmp32, align 1
+ %tmp33 = load i8, i8* %tmp32, align 1
%tmp34 = getelementptr inbounds [411 x i8], [411 x i8]* @global, i32 0, i32 0
- %tmp35 = load i8* %tmp34, align 1
+ %tmp35 = load i8, i8* %tmp34, align 1
%tmp36 = select i1 %tmp31, i8 %tmp35, i8 %tmp33
%tmp37 = select i1 undef, i8 %tmp29, i8 %tmp36
%tmp38 = zext i8 %tmp37 to i32
%tmp39 = select i1 undef, i32 0, i32 %tmp38
%tmp40 = getelementptr inbounds i32, i32* null, i32 %tmp39
- %tmp41 = load i32* %tmp40, align 4
- %tmp42 = load i32* undef, align 4
- %tmp43 = load i32* undef
+ %tmp41 = load i32, i32* %tmp40, align 4
+ %tmp42 = load i32, i32* undef, align 4
+ %tmp43 = load i32, i32* undef
%tmp44 = xor i32 %tmp42, %tmp43
%tmp45 = lshr i32 %tmp44, 8
%tmp46 = lshr i32 %tmp44, 7
diff --git a/llvm/test/CodeGen/X86/2012-11-28-merge-store-alias.ll b/llvm/test/CodeGen/X86/2012-11-28-merge-store-alias.ll
index ce4852125f4..df4f0282bd4 100644
--- a/llvm/test/CodeGen/X86/2012-11-28-merge-store-alias.ll
+++ b/llvm/test/CodeGen/X86/2012-11-28-merge-store-alias.ll
@@ -21,7 +21,7 @@ define i32 @merge_stores_can() nounwind ssp {
store i32 0, i32* %O1_1
store i32 0, i32* %O1_2
- %ret = load i32* %ld_ptr ; <--- does not alias.
+ %ret = load i32, i32* %ld_ptr ; <--- does not alias.
store i32 0, i32* %O1_3
store i32 0, i32* %O1_4
@@ -44,7 +44,7 @@ define i32 @merge_stores_cant([10 x i32]* %in0, [10 x i32]* %in1) nounwind ssp {
store i32 0, i32* %O1_1
store i32 0, i32* %O1_2
- %ret = load i32* %ld_ptr ; <--- may alias
+ %ret = load i32, i32* %ld_ptr ; <--- may alias
store i32 0, i32* %O1_3
store i32 0, i32* %O1_4
diff --git a/llvm/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll b/llvm/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
index aa9fffd0bfd..2239ea153d9 100644
--- a/llvm/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
+++ b/llvm/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
@@ -18,7 +18,7 @@ define signext i16 @subdivp(%struct.node.0.27* nocapture %p, double %dsq, double
entry:
call void @llvm.dbg.declare(metadata %struct.hgstruct.2.29* %hg, metadata !4, metadata !{!"0x102"})
%type = getelementptr inbounds %struct.node.0.27, %struct.node.0.27* %p, i64 0, i32 0
- %0 = load i16* %type, align 2
+ %0 = load i16, i16* %type, align 2
%cmp = icmp eq i16 %0, 1
br i1 %cmp, label %return, label %for.cond.preheader
diff --git a/llvm/test/CodeGen/X86/2012-11-30-misched-dbg.ll b/llvm/test/CodeGen/X86/2012-11-30-misched-dbg.ll
index 86118132dfc..3104d5ae1d2 100644
--- a/llvm/test/CodeGen/X86/2012-11-30-misched-dbg.ll
+++ b/llvm/test/CodeGen/X86/2012-11-30-misched-dbg.ll
@@ -45,7 +45,7 @@ if.then3344:
if.then4073: ; preds = %if.then3344
call void @llvm.dbg.declare(metadata [20 x i8]* %num14075, metadata !4, metadata !{!"0x102"})
%arraydecay4078 = getelementptr inbounds [20 x i8], [20 x i8]* %num14075, i64 0, i64 0
- %0 = load i32* undef, align 4
+ %0 = load i32, i32* undef, align 4
%add4093 = add nsw i32 %0, 0
%conv4094 = sitofp i32 %add4093 to float
%div4095 = fdiv float %conv4094, 5.670000e+02
diff --git a/llvm/test/CodeGen/X86/2012-12-06-python27-miscompile.ll b/llvm/test/CodeGen/X86/2012-12-06-python27-miscompile.ll
index d73f95eda00..b80ae3ae2b7 100644
--- a/llvm/test/CodeGen/X86/2012-12-06-python27-miscompile.ll
+++ b/llvm/test/CodeGen/X86/2012-12-06-python27-miscompile.ll
@@ -15,7 +15,7 @@ entry:
%used = getelementptr inbounds i64, i64* %so, i32 3
store i64 0, i64* %used, align 8
%fill = getelementptr inbounds i64, i64* %so, i32 2
- %L = load i64* %fill, align 8
+ %L = load i64, i64* %fill, align 8
store i64 0, i64* %fill, align 8
%cmp28 = icmp sgt i64 %L, 0
%R = sext i1 %cmp28 to i32
diff --git a/llvm/test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll b/llvm/test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll
index f0e5d35b426..e5a64b5ae87 100644
--- a/llvm/test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll
+++ b/llvm/test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll
@@ -8,7 +8,7 @@ define void @test() nounwind noimplicitfloat {
entry:
; CHECK-NOT: xmm
; CHECK: ret
- %0 = load %struct1** undef, align 8
+ %0 = load %struct1*, %struct1** undef, align 8
%1 = getelementptr inbounds %struct1, %struct1* %0, i64 0, i32 0
store i32* null, i32** %1, align 8
%2 = getelementptr inbounds %struct1, %struct1* %0, i64 0, i32 1
diff --git a/llvm/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll b/llvm/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll
index 0ff9d3951d5..35ee84b864c 100644
--- a/llvm/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll
+++ b/llvm/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @main() #0 {
entry:
- %0 = load <8 x float>* bitcast ([8 x float]* @b to <8 x float>*), align 32
+ %0 = load <8 x float>, <8 x float>* bitcast ([8 x float]* @b to <8 x float>*), align 32
%bitcast.i = extractelement <8 x float> %0, i32 0
%vecinit.i.i = insertelement <4 x float> undef, float %bitcast.i, i32 0
%vecinit2.i.i = insertelement <4 x float> %vecinit.i.i, float 0.000000e+00, i32 1
diff --git a/llvm/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll b/llvm/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll
index 9cd150a2f56..5ef867d4f9d 100644
--- a/llvm/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll
+++ b/llvm/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll
@@ -34,7 +34,7 @@
; CHECK: ret
define i64 @test_bitcast(i64 (i64, i64, i64)** %arg, i1 %bool, i64 %arg2) {
entry:
- %loaded_ptr = load i64 (i64, i64, i64)** %arg, align 8
+ %loaded_ptr = load i64 (i64, i64, i64)*, i64 (i64, i64, i64)** %arg, align 8
%raw = bitcast i64 (i64, i64, i64)* %loaded_ptr to i8*
switch i1 %bool, label %default [
i1 true, label %label_true
@@ -73,7 +73,7 @@ label_end:
; CHECK: ret
define i64 @test_inttoptr(i64 (i64, i64, i64)** %arg, i1 %bool, i64 %arg2) {
entry:
- %loaded_ptr = load i64 (i64, i64, i64)** %arg, align 8
+ %loaded_ptr = load i64 (i64, i64, i64)*, i64 (i64, i64, i64)** %arg, align 8
%raw = ptrtoint i64 (i64, i64, i64)* %loaded_ptr to i64
switch i1 %bool, label %default [
i1 true, label %label_true
@@ -112,7 +112,7 @@ label_end:
; CHECK: ret
define i64 @test_ptrtoint(i64 (i64, i64, i64)** %arg, i1 %bool, i64 %arg2) {
entry:
- %loaded_ptr = load i64 (i64, i64, i64)** %arg, align 8
+ %loaded_ptr = load i64 (i64, i64, i64)*, i64 (i64, i64, i64)** %arg, align 8
%raw = bitcast i64 (i64, i64, i64)* %loaded_ptr to i8*
switch i1 %bool, label %default [
i1 true, label %label_true
diff --git a/llvm/test/CodeGen/X86/Atomics-64.ll b/llvm/test/CodeGen/X86/Atomics-64.ll
index c392e947407..6d367a71d01 100644
--- a/llvm/test/CodeGen/X86/Atomics-64.ll
+++ b/llvm/test/CodeGen/X86/Atomics-64.ll
@@ -308,331 +308,331 @@ return: ; preds = %entry
define void @test_op_and_fetch() nounwind {
entry:
- %0 = load i8* @uc, align 1
+ %0 = load i8, i8* @uc, align 1
%1 = zext i8 %0 to i32
%2 = trunc i32 %1 to i8
%3 = atomicrmw add i8* @sc, i8 %2 monotonic
%4 = add i8 %3, %2
store i8 %4, i8* @sc, align 1
- %5 = load i8* @uc, align 1
+ %5 = load i8, i8* @uc, align 1
%6 = zext i8 %5 to i32
%7 = trunc i32 %6 to i8
%8 = atomicrmw add i8* @uc, i8 %7 monotonic
%9 = add i8 %8, %7
store i8 %9, i8* @uc, align 1
- %10 = load i8* @uc, align 1
+ %10 = load i8, i8* @uc, align 1
%11 = zext i8 %10 to i32
%12 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%13 = trunc i32 %11 to i16
%14 = atomicrmw add i16* %12, i16 %13 monotonic
%15 = add i16 %14, %13
store i16 %15, i16* @ss, align 2
- %16 = load i8* @uc, align 1
+ %16 = load i8, i8* @uc, align 1
%17 = zext i8 %16 to i32
%18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%19 = trunc i32 %17 to i16
%20 = atomicrmw add i16* %18, i16 %19 monotonic
%21 = add i16 %20, %19
store i16 %21, i16* @us, align 2
- %22 = load i8* @uc, align 1
+ %22 = load i8, i8* @uc, align 1
%23 = zext i8 %22 to i32
%24 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%25 = atomicrmw add i32* %24, i32 %23 monotonic
%26 = add i32 %25, %23
store i32 %26, i32* @si, align 4
- %27 = load i8* @uc, align 1
+ %27 = load i8, i8* @uc, align 1
%28 = zext i8 %27 to i32
%29 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%30 = atomicrmw add i32* %29, i32 %28 monotonic
%31 = add i32 %30, %28
store i32 %31, i32* @ui, align 4
- %32 = load i8* @uc, align 1
+ %32 = load i8, i8* @uc, align 1
%33 = zext i8 %32 to i64
%34 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%35 = atomicrmw add i64* %34, i64 %33 monotonic
%36 = add i64 %35, %33
store i64 %36, i64* @sl, align 8
- %37 = load i8* @uc, align 1
+ %37 = load i8, i8* @uc, align 1
%38 = zext i8 %37 to i64
%39 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%40 = atomicrmw add i64* %39, i64 %38 monotonic
%41 = add i64 %40, %38
store i64 %41, i64* @ul, align 8
- %42 = load i8* @uc, align 1
+ %42 = load i8, i8* @uc, align 1
%43 = zext i8 %42 to i64
%44 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%45 = atomicrmw add i64* %44, i64 %43 monotonic
%46 = add i64 %45, %43
store i64 %46, i64* @sll, align 8
- %47 = load i8* @uc, align 1
+ %47 = load i8, i8* @uc, align 1
%48 = zext i8 %47 to i64
%49 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%50 = atomicrmw add i64* %49, i64 %48 monotonic
%51 = add i64 %50, %48
store i64 %51, i64* @ull, align 8
- %52 = load i8* @uc, align 1
+ %52 = load i8, i8* @uc, align 1
%53 = zext i8 %52 to i32
%54 = trunc i32 %53 to i8
%55 = atomicrmw sub i8* @sc, i8 %54 monotonic
%56 = sub i8 %55, %54
store i8 %56, i8* @sc, align 1
- %57 = load i8* @uc, align 1
+ %57 = load i8, i8* @uc, align 1
%58 = zext i8 %57 to i32
%59 = trunc i32 %58 to i8
%60 = atomicrmw sub i8* @uc, i8 %59 monotonic
%61 = sub i8 %60, %59
store i8 %61, i8* @uc, align 1
- %62 = load i8* @uc, align 1
+ %62 = load i8, i8* @uc, align 1
%63 = zext i8 %62 to i32
%64 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%65 = trunc i32 %63 to i16
%66 = atomicrmw sub i16* %64, i16 %65 monotonic
%67 = sub i16 %66, %65
store i16 %67, i16* @ss, align 2
- %68 = load i8* @uc, align 1
+ %68 = load i8, i8* @uc, align 1
%69 = zext i8 %68 to i32
%70 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%71 = trunc i32 %69 to i16
%72 = atomicrmw sub i16* %70, i16 %71 monotonic
%73 = sub i16 %72, %71
store i16 %73, i16* @us, align 2
- %74 = load i8* @uc, align 1
+ %74 = load i8, i8* @uc, align 1
%75 = zext i8 %74 to i32
%76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%77 = atomicrmw sub i32* %76, i32 %75 monotonic
%78 = sub i32 %77, %75
store i32 %78, i32* @si, align 4
- %79 = load i8* @uc, align 1
+ %79 = load i8, i8* @uc, align 1
%80 = zext i8 %79 to i32
%81 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%82 = atomicrmw sub i32* %81, i32 %80 monotonic
%83 = sub i32 %82, %80
store i32 %83, i32* @ui, align 4
- %84 = load i8* @uc, align 1
+ %84 = load i8, i8* @uc, align 1
%85 = zext i8 %84 to i64
%86 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%87 = atomicrmw sub i64* %86, i64 %85 monotonic
%88 = sub i64 %87, %85
store i64 %88, i64* @sl, align 8
- %89 = load i8* @uc, align 1
+ %89 = load i8, i8* @uc, align 1
%90 = zext i8 %89 to i64
%91 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%92 = atomicrmw sub i64* %91, i64 %90 monotonic
%93 = sub i64 %92, %90
store i64 %93, i64* @ul, align 8
- %94 = load i8* @uc, align 1
+ %94 = load i8, i8* @uc, align 1
%95 = zext i8 %94 to i64
%96 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%97 = atomicrmw sub i64* %96, i64 %95 monotonic
%98 = sub i64 %97, %95
store i64 %98, i64* @sll, align 8
- %99 = load i8* @uc, align 1
+ %99 = load i8, i8* @uc, align 1
%100 = zext i8 %99 to i64
%101 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%102 = atomicrmw sub i64* %101, i64 %100 monotonic
%103 = sub i64 %102, %100
store i64 %103, i64* @ull, align 8
- %104 = load i8* @uc, align 1
+ %104 = load i8, i8* @uc, align 1
%105 = zext i8 %104 to i32
%106 = trunc i32 %105 to i8
%107 = atomicrmw or i8* @sc, i8 %106 monotonic
%108 = or i8 %107, %106
store i8 %108, i8* @sc, align 1
- %109 = load i8* @uc, align 1
+ %109 = load i8, i8* @uc, align 1
%110 = zext i8 %109 to i32
%111 = trunc i32 %110 to i8
%112 = atomicrmw or i8* @uc, i8 %111 monotonic
%113 = or i8 %112, %111
store i8 %113, i8* @uc, align 1
- %114 = load i8* @uc, align 1
+ %114 = load i8, i8* @uc, align 1
%115 = zext i8 %114 to i32
%116 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%117 = trunc i32 %115 to i16
%118 = atomicrmw or i16* %116, i16 %117 monotonic
%119 = or i16 %118, %117
store i16 %119, i16* @ss, align 2
- %120 = load i8* @uc, align 1
+ %120 = load i8, i8* @uc, align 1
%121 = zext i8 %120 to i32
%122 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%123 = trunc i32 %121 to i16
%124 = atomicrmw or i16* %122, i16 %123 monotonic
%125 = or i16 %124, %123
store i16 %125, i16* @us, align 2
- %126 = load i8* @uc, align 1
+ %126 = load i8, i8* @uc, align 1
%127 = zext i8 %126 to i32
%128 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%129 = atomicrmw or i32* %128, i32 %127 monotonic
%130 = or i32 %129, %127
store i32 %130, i32* @si, align 4
- %131 = load i8* @uc, align 1
+ %131 = load i8, i8* @uc, align 1
%132 = zext i8 %131 to i32
%133 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%134 = atomicrmw or i32* %133, i32 %132 monotonic
%135 = or i32 %134, %132
store i32 %135, i32* @ui, align 4
- %136 = load i8* @uc, align 1
+ %136 = load i8, i8* @uc, align 1
%137 = zext i8 %136 to i64
%138 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%139 = atomicrmw or i64* %138, i64 %137 monotonic
%140 = or i64 %139, %137
store i64 %140, i64* @sl, align 8
- %141 = load i8* @uc, align 1
+ %141 = load i8, i8* @uc, align 1
%142 = zext i8 %141 to i64
%143 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%144 = atomicrmw or i64* %143, i64 %142 monotonic
%145 = or i64 %144, %142
store i64 %145, i64* @ul, align 8
- %146 = load i8* @uc, align 1
+ %146 = load i8, i8* @uc, align 1
%147 = zext i8 %146 to i64
%148 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%149 = atomicrmw or i64* %148, i64 %147 monotonic
%150 = or i64 %149, %147
store i64 %150, i64* @sll, align 8
- %151 = load i8* @uc, align 1
+ %151 = load i8, i8* @uc, align 1
%152 = zext i8 %151 to i64
%153 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%154 = atomicrmw or i64* %153, i64 %152 monotonic
%155 = or i64 %154, %152
store i64 %155, i64* @ull, align 8
- %156 = load i8* @uc, align 1
+ %156 = load i8, i8* @uc, align 1
%157 = zext i8 %156 to i32
%158 = trunc i32 %157 to i8
%159 = atomicrmw xor i8* @sc, i8 %158 monotonic
%160 = xor i8 %159, %158
store i8 %160, i8* @sc, align 1
- %161 = load i8* @uc, align 1
+ %161 = load i8, i8* @uc, align 1
%162 = zext i8 %161 to i32
%163 = trunc i32 %162 to i8
%164 = atomicrmw xor i8* @uc, i8 %163 monotonic
%165 = xor i8 %164, %163
store i8 %165, i8* @uc, align 1
- %166 = load i8* @uc, align 1
+ %166 = load i8, i8* @uc, align 1
%167 = zext i8 %166 to i32
%168 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%169 = trunc i32 %167 to i16
%170 = atomicrmw xor i16* %168, i16 %169 monotonic
%171 = xor i16 %170, %169
store i16 %171, i16* @ss, align 2
- %172 = load i8* @uc, align 1
+ %172 = load i8, i8* @uc, align 1
%173 = zext i8 %172 to i32
%174 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%175 = trunc i32 %173 to i16
%176 = atomicrmw xor i16* %174, i16 %175 monotonic
%177 = xor i16 %176, %175
store i16 %177, i16* @us, align 2
- %178 = load i8* @uc, align 1
+ %178 = load i8, i8* @uc, align 1
%179 = zext i8 %178 to i32
%180 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%181 = atomicrmw xor i32* %180, i32 %179 monotonic
%182 = xor i32 %181, %179
store i32 %182, i32* @si, align 4
- %183 = load i8* @uc, align 1
+ %183 = load i8, i8* @uc, align 1
%184 = zext i8 %183 to i32
%185 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%186 = atomicrmw xor i32* %185, i32 %184 monotonic
%187 = xor i32 %186, %184
store i32 %187, i32* @ui, align 4
- %188 = load i8* @uc, align 1
+ %188 = load i8, i8* @uc, align 1
%189 = zext i8 %188 to i64
%190 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%191 = atomicrmw xor i64* %190, i64 %189 monotonic
%192 = xor i64 %191, %189
store i64 %192, i64* @sl, align 8
- %193 = load i8* @uc, align 1
+ %193 = load i8, i8* @uc, align 1
%194 = zext i8 %193 to i64
%195 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%196 = atomicrmw xor i64* %195, i64 %194 monotonic
%197 = xor i64 %196, %194
store i64 %197, i64* @ul, align 8
- %198 = load i8* @uc, align 1
+ %198 = load i8, i8* @uc, align 1
%199 = zext i8 %198 to i64
%200 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%201 = atomicrmw xor i64* %200, i64 %199 monotonic
%202 = xor i64 %201, %199
store i64 %202, i64* @sll, align 8
- %203 = load i8* @uc, align 1
+ %203 = load i8, i8* @uc, align 1
%204 = zext i8 %203 to i64
%205 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%206 = atomicrmw xor i64* %205, i64 %204 monotonic
%207 = xor i64 %206, %204
store i64 %207, i64* @ull, align 8
- %208 = load i8* @uc, align 1
+ %208 = load i8, i8* @uc, align 1
%209 = zext i8 %208 to i32
%210 = trunc i32 %209 to i8
%211 = atomicrmw and i8* @sc, i8 %210 monotonic
%212 = and i8 %211, %210
store i8 %212, i8* @sc, align 1
- %213 = load i8* @uc, align 1
+ %213 = load i8, i8* @uc, align 1
%214 = zext i8 %213 to i32
%215 = trunc i32 %214 to i8
%216 = atomicrmw and i8* @uc, i8 %215 monotonic
%217 = and i8 %216, %215
store i8 %217, i8* @uc, align 1
- %218 = load i8* @uc, align 1
+ %218 = load i8, i8* @uc, align 1
%219 = zext i8 %218 to i32
%220 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%221 = trunc i32 %219 to i16
%222 = atomicrmw and i16* %220, i16 %221 monotonic
%223 = and i16 %222, %221
store i16 %223, i16* @ss, align 2
- %224 = load i8* @uc, align 1
+ %224 = load i8, i8* @uc, align 1
%225 = zext i8 %224 to i32
%226 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%227 = trunc i32 %225 to i16
%228 = atomicrmw and i16* %226, i16 %227 monotonic
%229 = and i16 %228, %227
store i16 %229, i16* @us, align 2
- %230 = load i8* @uc, align 1
+ %230 = load i8, i8* @uc, align 1
%231 = zext i8 %230 to i32
%232 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%233 = atomicrmw and i32* %232, i32 %231 monotonic
%234 = and i32 %233, %231
store i32 %234, i32* @si, align 4
- %235 = load i8* @uc, align 1
+ %235 = load i8, i8* @uc, align 1
%236 = zext i8 %235 to i32
%237 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%238 = atomicrmw and i32* %237, i32 %236 monotonic
%239 = and i32 %238, %236
store i32 %239, i32* @ui, align 4
- %240 = load i8* @uc, align 1
+ %240 = load i8, i8* @uc, align 1
%241 = zext i8 %240 to i64
%242 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%243 = atomicrmw and i64* %242, i64 %241 monotonic
%244 = and i64 %243, %241
store i64 %244, i64* @sl, align 8
- %245 = load i8* @uc, align 1
+ %245 = load i8, i8* @uc, align 1
%246 = zext i8 %245 to i64
%247 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%248 = atomicrmw and i64* %247, i64 %246 monotonic
%249 = and i64 %248, %246
store i64 %249, i64* @ul, align 8
- %250 = load i8* @uc, align 1
+ %250 = load i8, i8* @uc, align 1
%251 = zext i8 %250 to i64
%252 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%253 = atomicrmw and i64* %252, i64 %251 monotonic
%254 = and i64 %253, %251
store i64 %254, i64* @sll, align 8
- %255 = load i8* @uc, align 1
+ %255 = load i8, i8* @uc, align 1
%256 = zext i8 %255 to i64
%257 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%258 = atomicrmw and i64* %257, i64 %256 monotonic
%259 = and i64 %258, %256
store i64 %259, i64* @ull, align 8
- %260 = load i8* @uc, align 1
+ %260 = load i8, i8* @uc, align 1
%261 = zext i8 %260 to i32
%262 = trunc i32 %261 to i8
%263 = atomicrmw nand i8* @sc, i8 %262 monotonic
%264 = xor i8 %263, -1
%265 = and i8 %264, %262
store i8 %265, i8* @sc, align 1
- %266 = load i8* @uc, align 1
+ %266 = load i8, i8* @uc, align 1
%267 = zext i8 %266 to i32
%268 = trunc i32 %267 to i8
%269 = atomicrmw nand i8* @uc, i8 %268 monotonic
%270 = xor i8 %269, -1
%271 = and i8 %270, %268
store i8 %271, i8* @uc, align 1
- %272 = load i8* @uc, align 1
+ %272 = load i8, i8* @uc, align 1
%273 = zext i8 %272 to i32
%274 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%275 = trunc i32 %273 to i16
@@ -640,7 +640,7 @@ entry:
%277 = xor i16 %276, -1
%278 = and i16 %277, %275
store i16 %278, i16* @ss, align 2
- %279 = load i8* @uc, align 1
+ %279 = load i8, i8* @uc, align 1
%280 = zext i8 %279 to i32
%281 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%282 = trunc i32 %280 to i16
@@ -648,42 +648,42 @@ entry:
%284 = xor i16 %283, -1
%285 = and i16 %284, %282
store i16 %285, i16* @us, align 2
- %286 = load i8* @uc, align 1
+ %286 = load i8, i8* @uc, align 1
%287 = zext i8 %286 to i32
%288 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%289 = atomicrmw nand i32* %288, i32 %287 monotonic
%290 = xor i32 %289, -1
%291 = and i32 %290, %287
store i32 %291, i32* @si, align 4
- %292 = load i8* @uc, align 1
+ %292 = load i8, i8* @uc, align 1
%293 = zext i8 %292 to i32
%294 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%295 = atomicrmw nand i32* %294, i32 %293 monotonic
%296 = xor i32 %295, -1
%297 = and i32 %296, %293
store i32 %297, i32* @ui, align 4
- %298 = load i8* @uc, align 1
+ %298 = load i8, i8* @uc, align 1
%299 = zext i8 %298 to i64
%300 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%301 = atomicrmw nand i64* %300, i64 %299 monotonic
%302 = xor i64 %301, -1
%303 = and i64 %302, %299
store i64 %303, i64* @sl, align 8
- %304 = load i8* @uc, align 1
+ %304 = load i8, i8* @uc, align 1
%305 = zext i8 %304 to i64
%306 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%307 = atomicrmw nand i64* %306, i64 %305 monotonic
%308 = xor i64 %307, -1
%309 = and i64 %308, %305
store i64 %309, i64* @ul, align 8
- %310 = load i8* @uc, align 1
+ %310 = load i8, i8* @uc, align 1
%311 = zext i8 %310 to i64
%312 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%313 = atomicrmw nand i64* %312, i64 %311 monotonic
%314 = xor i64 %313, -1
%315 = and i64 %314, %311
store i64 %315, i64* @sll, align 8
- %316 = load i8* @uc, align 1
+ %316 = load i8, i8* @uc, align 1
%317 = zext i8 %316 to i64
%318 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%319 = atomicrmw nand i64* %318, i64 %317 monotonic
@@ -698,28 +698,28 @@ return: ; preds = %entry
define void @test_compare_and_swap() nounwind {
entry:
- %0 = load i8* @sc, align 1
+ %0 = load i8, i8* @sc, align 1
%1 = zext i8 %0 to i32
- %2 = load i8* @uc, align 1
+ %2 = load i8, i8* @uc, align 1
%3 = zext i8 %2 to i32
%4 = trunc i32 %3 to i8
%5 = trunc i32 %1 to i8
%pair6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic monotonic
%6 = extractvalue { i8, i1 } %pair6, 0
store i8 %6, i8* @sc, align 1
- %7 = load i8* @sc, align 1
+ %7 = load i8, i8* @sc, align 1
%8 = zext i8 %7 to i32
- %9 = load i8* @uc, align 1
+ %9 = load i8, i8* @uc, align 1
%10 = zext i8 %9 to i32
%11 = trunc i32 %10 to i8
%12 = trunc i32 %8 to i8
%pair13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic monotonic
%13 = extractvalue { i8, i1 } %pair13, 0
store i8 %13, i8* @uc, align 1
- %14 = load i8* @sc, align 1
+ %14 = load i8, i8* @sc, align 1
%15 = sext i8 %14 to i16
%16 = zext i16 %15 to i32
- %17 = load i8* @uc, align 1
+ %17 = load i8, i8* @uc, align 1
%18 = zext i8 %17 to i32
%19 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
%20 = trunc i32 %18 to i16
@@ -727,10 +727,10 @@ entry:
%pair22 = cmpxchg i16* %19, i16 %20, i16 %21 monotonic monotonic
%22 = extractvalue { i16, i1 } %pair22, 0
store i16 %22, i16* @ss, align 2
- %23 = load i8* @sc, align 1
+ %23 = load i8, i8* @sc, align 1
%24 = sext i8 %23 to i16
%25 = zext i16 %24 to i32
- %26 = load i8* @uc, align 1
+ %26 = load i8, i8* @uc, align 1
%27 = zext i8 %26 to i32
%28 = bitcast i8* bitcast (i16* @us to i8*) to i16*
%29 = trunc i32 %27 to i16
@@ -738,57 +738,57 @@ entry:
%pair31 = cmpxchg i16* %28, i16 %29, i16 %30 monotonic monotonic
%31 = extractvalue { i16, i1 } %pair31, 0
store i16 %31, i16* @us, align 2
- %32 = load i8* @sc, align 1
+ %32 = load i8, i8* @sc, align 1
%33 = sext i8 %32 to i32
- %34 = load i8* @uc, align 1
+ %34 = load i8, i8* @uc, align 1
%35 = zext i8 %34 to i32
%36 = bitcast i8* bitcast (i32* @si to i8*) to i32*
%pair37 = cmpxchg i32* %36, i32 %35, i32 %33 monotonic monotonic
%37 = extractvalue { i32, i1 } %pair37, 0
store i32 %37, i32* @si, align 4
- %38 = load i8* @sc, align 1
+ %38 = load i8, i8* @sc, align 1
%39 = sext i8 %38 to i32
- %40 = load i8* @uc, align 1
+ %40 = load i8, i8* @uc, align 1
%41 = zext i8 %40 to i32
%42 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
%pair43 = cmpxchg i32* %42, i32 %41, i32 %39 monotonic monotonic
%43 = extractvalue { i32, i1 } %pair43, 0
store i32 %43, i32* @ui, align 4
- %44 = load i8* @sc, align 1
+ %44 = load i8, i8* @sc, align 1
%45 = sext i8 %44 to i64
- %46 = load i8* @uc, align 1
+ %46 = load i8, i8* @uc, align 1
%47 = zext i8 %46 to i64
%48 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
%pair49 = cmpxchg i64* %48, i64 %47, i64 %45 monotonic monotonic
%49 = extractvalue { i64, i1 } %pair49, 0
store i64 %49, i64* @sl, align 8
- %50 = load i8* @sc, align 1
+ %50 = load i8, i8* @sc, align 1
%51 = sext i8 %50 to i64
- %52 = load i8* @uc, align 1
+ %52 = load i8, i8* @uc, align 1
%53 = zext i8 %52 to i64
%54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
%pair55 = cmpxchg i64* %54, i64 %53, i64 %51 monotonic monotonic
%55 = extractvalue { i64, i1 } %pair55, 0
store i64 %55, i64* @ul, align 8
- %56 = load i8* @sc, align 1
+ %56 = load i8, i8* @sc, align 1
%57 = sext i8 %56 to i64
- %58 = load i8* @uc, align 1
+ %58 = load i8, i8* @uc, align 1
%59 = zext i8 %58 to i64
%60 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
%pair61 = cmpxchg i64* %60, i64 %59, i64 %57 monotonic monotonic
%61 = extractvalue { i64, i1 } %pair61, 0
store i64 %61, i64* @sll, align 8
- %62 = load i8* @sc, align 1
+ %62 = load i8, i8* @sc, align 1
%63 = sext i8 %62 to i64
- %64 = load i8* @uc, align 1
+ %64 = load i8, i8* @uc, align 1
%65 = zext i8 %64 to i64
%66 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
%pair67 = cmpxchg i64* %66, i64 %65, i64 %63 monotonic monotonic
%67 = extractvalue { i64, i1 } %pair67, 0
store i64 %67, i64* @ull, align 8
- %68 = load i8* @sc, align 1
+ %68 = load i8, i8* @sc, align 1
%69 = zext i8 %68 to i32
- %70 = load i8* @uc, align 1
+ %70 = load i8, i8* @uc, align 1
%71 = zext i8 %70 to i32
%72 = trunc i32 %71 to i8
%73 = trunc i32 %69 to i8
@@ -798,9 +798,9 @@ entry:
%76 = zext i1 %75 to i8
%77 = zext i8 %76 to i32
store i32 %77, i32* @ui, align 4
- %78 = load i8* @sc, align 1
+ %78 = load i8, i8* @sc, align 1
%79 = zext i8 %78 to i32
- %80 = load i8* @uc, align 1
+ %80 = load i8, i8* @uc, align 1
%81 = zext i8 %80 to i32
%82 = trunc i32 %81 to i8
%83 = trunc i32 %79 to i8
@@ -810,10 +810,10 @@ entry:
%86 = zext i1 %85 to i8
%87 = zext i8 %86 to i32
store i32 %87, i32* @ui, align 4
- %88 = load i8* @sc, align 1
+ %88 = load i8, i8* @sc, align 1
%89 = sext i8 %88 to i16
%90 = zext i16 %89 to i32
- %91 = load i8* @uc, align 1
+ %91 = load i8, i8* @uc, align 1
%92 = zext i8 %91 to i32
%93 = trunc i32 %92 to i8
%94 = trunc i32 %90 to i8
@@ -823,10 +823,10 @@ entry:
%97 = zext i1 %96 to i8
%98 = zext i8 %97 to i32
store i32 %98, i32* @ui, align 4
- %99 = load i8* @sc, align 1
+ %99 = load i8, i8* @sc, align 1
%100 = sext i8 %99 to i16
%101 = zext i16 %100 to i32
- %102 = load i8* @uc, align 1
+ %102 = load i8, i8* @uc, align 1
%103 = zext i8 %102 to i32
%104 = trunc i32 %103 to i8
%105 = trunc i32 %101 to i8
@@ -836,9 +836,9 @@ entry:
%108 = zext i1 %107 to i8
%109 = zext i8 %108 to i32
store i32 %109, i32* @ui, align 4
- %110 = load i8* @sc, align 1
+ %110 = load i8, i8* @sc, align 1
%111 = sext i8 %110 to i32
- %112 = load i8* @uc, align 1
+ %112 = load i8, i8* @uc, align 1
%113 = zext i8 %112 to i32
%114 = trunc i32 %113 to i8
%115 = trunc i32 %111 to i8
@@ -848,9 +848,9 @@ entry:
%118 = zext i1 %117 to i8
%119 = zext i8 %118 to i32
store i32 %119, i32* @ui, align 4
- %120 = load i8* @sc, align 1
+ %120 = load i8, i8* @sc, align 1
%121 = sext i8 %120 to i32
- %122 = load i8* @uc, align 1
+ %122 = load i8, i8* @uc, align 1
%123 = zext i8 %122 to i32
%124 = trunc i32 %123 to i8
%125 = trunc i32 %121 to i8
@@ -860,9 +860,9 @@ entry:
%128 = zext i1 %127 to i8
%129 = zext i8 %128 to i32
store i32 %129, i32* @ui, align 4
- %130 = load i8* @sc, align 1
+ %130 = load i8, i8* @sc, align 1
%131 = sext i8 %130 to i64
- %132 = load i8* @uc, align 1
+ %132 = load i8, i8* @uc, align 1
%133 = zext i8 %132 to i64
%134 = trunc i64 %133 to i8
%135 = trunc i64 %131 to i8
@@ -872,9 +872,9 @@ entry:
%138 = zext i1 %137 to i8
%139 = zext i8 %138 to i32
store i32 %139, i32* @ui, align 4
- %140 = load i8* @sc, align 1
+ %140 = load i8, i8* @sc, align 1
%141 = sext i8 %140 to i64
- %142 = load i8* @uc, align 1
+ %142 = load i8, i8* @uc, align 1
%143 = zext i8 %142 to i64
%144 = trunc i64 %143 to i8
%145 = trunc i64 %141 to i8
@@ -884,9 +884,9 @@ entry:
%148 = zext i1 %147 to i8
%149 = zext i8 %148 to i32
store i32 %149, i32* @ui, align 4
- %150 = load i8* @sc, align 1
+ %150 = load i8, i8* @sc, align 1
%151 = sext i8 %150 to i64
- %152 = load i8* @uc, align 1
+ %152 = load i8, i8* @uc, align 1
%153 = zext i8 %152 to i64
%154 = trunc i64 %153 to i8
%155 = trunc i64 %151 to i8
@@ -896,9 +896,9 @@ entry:
%158 = zext i1 %157 to i8
%159 = zext i8 %158 to i32
store i32 %159, i32* @ui, align 4
- %160 = load i8* @sc, align 1
+ %160 = load i8, i8* @sc, align 1
%161 = sext i8 %160 to i64
- %162 = load i8* @uc, align 1
+ %162 = load i8, i8* @uc, align 1
%163 = zext i8 %162 to i64
%164 = trunc i64 %163 to i8
%165 = trunc i64 %161 to i8
diff --git a/llvm/test/CodeGen/X86/GC/alloc_loop.ll b/llvm/test/CodeGen/X86/GC/alloc_loop.ll
index fb78ba2cd10..2a505e80aac 100644
--- a/llvm/test/CodeGen/X86/GC/alloc_loop.ll
+++ b/llvm/test/CodeGen/X86/GC/alloc_loop.ll
@@ -31,8 +31,8 @@ entry:
store i8** %tmp.2, i8*** %B
;; *B = A;
- %B.1 = load i8*** %B
- %A.1 = load i8** %A
+ %B.1 = load i8**, i8*** %B
+ %A.1 = load i8*, i8** %A
call void @llvm.gcwrite(i8* %A.1, i8* %B.upgrd.1, i8** %B.1)
br label %AllocLoop
diff --git a/llvm/test/CodeGen/X86/GC/argpromotion.ll b/llvm/test/CodeGen/X86/GC/argpromotion.ll
index c63ce222b86..37baf325007 100644
--- a/llvm/test/CodeGen/X86/GC/argpromotion.ll
+++ b/llvm/test/CodeGen/X86/GC/argpromotion.ll
@@ -14,6 +14,6 @@ define internal i32 @f(i32* %xp) gc "example" {
entry:
%var = alloca i8*
call void @llvm.gcroot(i8** %var, i8* null)
- %x = load i32* %xp
+ %x = load i32, i32* %xp
ret i32 %x
}
diff --git a/llvm/test/CodeGen/X86/GC/inline.ll b/llvm/test/CodeGen/X86/GC/inline.ll
index 91d435fb789..9d74c1faa39 100644
--- a/llvm/test/CodeGen/X86/GC/inline.ll
+++ b/llvm/test/CodeGen/X86/GC/inline.ll
@@ -16,7 +16,7 @@ define internal i32 @g() gc "example" {
%obj.2 = bitcast %IntArray* %obj to i8* ; <i8*> [#uses=1]
store i8* %obj.2, i8** %root
%Length.ptr = getelementptr %IntArray, %IntArray* %obj, i32 0, i32 0 ; <i32*> [#uses=1]
- %Length = load i32* %Length.ptr ; <i32> [#uses=1]
+ %Length = load i32, i32* %Length.ptr ; <i32> [#uses=1]
ret i32 %Length
}
diff --git a/llvm/test/CodeGen/X86/GC/inline2.ll b/llvm/test/CodeGen/X86/GC/inline2.ll
index be35d8fa854..034c985a1df 100644
--- a/llvm/test/CodeGen/X86/GC/inline2.ll
+++ b/llvm/test/CodeGen/X86/GC/inline2.ll
@@ -17,7 +17,7 @@ define internal i32 @g() gc "example" {
%obj.2 = bitcast %IntArray* %obj to i8* ; <i8*> [#uses=1]
store i8* %obj.2, i8** %root
%Length.ptr = getelementptr %IntArray, %IntArray* %obj, i32 0, i32 0 ; <i32*> [#uses=1]
- %Length = load i32* %Length.ptr ; <i32> [#uses=1]
+ %Length = load i32, i32* %Length.ptr ; <i32> [#uses=1]
ret i32 %Length
}
diff --git a/llvm/test/CodeGen/X86/MachineBranchProb.ll b/llvm/test/CodeGen/X86/MachineBranchProb.ll
index cf41ef2ea3a..9b4e73716f4 100644
--- a/llvm/test/CodeGen/X86/MachineBranchProb.ll
+++ b/llvm/test/CodeGen/X86/MachineBranchProb.ll
@@ -13,7 +13,7 @@ for.cond2: ; preds = %for.inc, %for.cond
%i.1 = phi i32 [ %inc19, %for.inc ], [ 0, %for.cond ]
%bit.0 = phi i32 [ %shl, %for.inc ], [ 1, %for.cond ]
%tobool = icmp eq i32 %bit.0, 0
- %v3 = load i32* @max_regno, align 4
+ %v3 = load i32, i32* @max_regno, align 4
%cmp4 = icmp eq i32 %i.1, %v3
%or.cond = or i1 %tobool, %cmp4
br i1 %or.cond, label %for.inc20, label %for.inc, !prof !0
diff --git a/llvm/test/CodeGen/X86/MachineSink-DbgValue.ll b/llvm/test/CodeGen/X86/MachineSink-DbgValue.ll
index 3a2c58f97e8..86335a59ae4 100644
--- a/llvm/test/CodeGen/X86/MachineSink-DbgValue.ll
+++ b/llvm/test/CodeGen/X86/MachineSink-DbgValue.ll
@@ -5,7 +5,7 @@ target triple = "x86_64-apple-macosx10.7.0"
define i32 @foo(i32 %i, i32* nocapture %c) nounwind uwtable readonly ssp {
tail call void @llvm.dbg.value(metadata i32 %i, i64 0, metadata !6, metadata !{!"0x102"}), !dbg !12
- %ab = load i32* %c, align 1, !dbg !14
+ %ab = load i32, i32* %c, align 1, !dbg !14
tail call void @llvm.dbg.value(metadata i32* %c, i64 0, metadata !7, metadata !{!"0x102"}), !dbg !13
tail call void @llvm.dbg.value(metadata i32 %ab, i64 0, metadata !10, metadata !{!"0x102"}), !dbg !14
%cd = icmp eq i32 %i, 42, !dbg !15
diff --git a/llvm/test/CodeGen/X86/MachineSink-eflags.ll b/llvm/test/CodeGen/X86/MachineSink-eflags.ll
index e9043d9be33..4e52c8c5f7d 100644
--- a/llvm/test/CodeGen/X86/MachineSink-eflags.ll
+++ b/llvm/test/CodeGen/X86/MachineSink-eflags.ll
@@ -16,18 +16,18 @@ entry:
%i2 = alloca i8*, align 8
%b.i = alloca [16 x <2 x double>], align 16
%conv = bitcast i8* %_stubArgs to i32*
- %tmp1 = load i32* %conv, align 4
+ %tmp1 = load i32, i32* %conv, align 4
%ptr8 = getelementptr i8, i8* %_stubArgs, i64 16
%i4 = bitcast i8* %ptr8 to <2 x double>*
%ptr20 = getelementptr i8, i8* %_stubArgs, i64 48
%i7 = bitcast i8* %ptr20 to <2 x double> addrspace(1)**
- %tmp21 = load <2 x double> addrspace(1)** %i7, align 8
+ %tmp21 = load <2 x double> addrspace(1)*, <2 x double> addrspace(1)** %i7, align 8
%ptr28 = getelementptr i8, i8* %_stubArgs, i64 64
%i9 = bitcast i8* %ptr28 to i32*
- %tmp29 = load i32* %i9, align 4
+ %tmp29 = load i32, i32* %i9, align 4
%ptr32 = getelementptr i8, i8* %_stubArgs, i64 68
%i10 = bitcast i8* %ptr32 to i32*
- %tmp33 = load i32* %i10, align 4
+ %tmp33 = load i32, i32* %i10, align 4
%tmp17.i = mul i32 10, 20
%tmp19.i = add i32 %tmp17.i, %tmp33
%conv21.i = zext i32 %tmp19.i to i64
@@ -49,14 +49,14 @@ entry:
%conv160.i = zext i32 %i39 to i64
%tmp22.sum652.i = add i64 %conv160.i, %conv21.i
%arrayidx161.i = getelementptr <2 x double>, <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum652.i
- %tmp162.i = load <2 x double> addrspace(1)* %arrayidx161.i, align 16
+ %tmp162.i = load <2 x double>, <2 x double> addrspace(1)* %arrayidx161.i, align 16
%tmp222.i = add i32 %tmp154.i, 1
%i43 = mul i32 %tmp222.i, %tmp29
%i44 = add i32 %tmp158.i, %i43
%conv228.i = zext i32 %i44 to i64
%tmp22.sum656.i = add i64 %conv228.i, %conv21.i
%arrayidx229.i = getelementptr <2 x double>, <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum656.i
- %tmp230.i = load <2 x double> addrspace(1)* %arrayidx229.i, align 16
+ %tmp230.i = load <2 x double>, <2 x double> addrspace(1)* %arrayidx229.i, align 16
%cmp432.i = icmp ult i32 %tmp156.i, %tmp1
; %shl.i should not be sinked below the compare.
diff --git a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
index 02fbbff8094..aff6fbc254b 100644
--- a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
+++ b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
@@ -166,8 +166,8 @@ define void @merge_loads_i16(i32 %count, %struct.A* noalias nocapture %q, %struc
; <label>:4 ; preds = %4, %.lr.ph
%i.02 = phi i32 [ 0, %.lr.ph ], [ %9, %4 ]
%.01 = phi %struct.A* [ %p, %.lr.ph ], [ %10, %4 ]
- %5 = load i8* %2, align 1
- %6 = load i8* %3, align 1
+ %5 = load i8, i8* %2, align 1
+ %6 = load i8, i8* %3, align 1
%7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
store i8 %5, i8* %7, align 1
%8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
@@ -200,11 +200,11 @@ define void @no_merge_loads(i32 %count, %struct.A* noalias nocapture %q, %struct
a4: ; preds = %4, %.lr.ph
%i.02 = phi i32 [ 0, %.lr.ph ], [ %a9, %a4 ]
%.01 = phi %struct.A* [ %p, %.lr.ph ], [ %a10, %a4 ]
- %a5 = load i8* %2, align 1
+ %a5 = load i8, i8* %2, align 1
%a7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
store i8 %a5, i8* %a7, align 1
%a8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
- %a6 = load i8* %3, align 1
+ %a6 = load i8, i8* %3, align 1
store i8 %a6, i8* %a8, align 1
%a9 = add nsw i32 %i.02, 1
%a10 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 1
@@ -234,8 +234,8 @@ define void @merge_loads_integer(i32 %count, %struct.B* noalias nocapture %q, %s
; <label>:4 ; preds = %4, %.lr.ph
%i.02 = phi i32 [ 0, %.lr.ph ], [ %9, %4 ]
%.01 = phi %struct.B* [ %p, %.lr.ph ], [ %10, %4 ]
- %5 = load i32* %2
- %6 = load i32* %3
+ %5 = load i32, i32* %2
+ %6 = load i32, i32* %3
%7 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 0
store i32 %5, i32* %7
%8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
@@ -274,10 +274,10 @@ block4: ; preds = %4, %.lr.ph
%a8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
%a9 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 2
%a10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 3
- %b1 = load i32* %a2
- %b2 = load i32* %a3
- %b3 = load i32* %a4
- %b4 = load i32* %a5
+ %b1 = load i32, i32* %a2
+ %b2 = load i32, i32* %a3
+ %b3 = load i32, i32* %a4
+ %b4 = load i32, i32* %a5
store i32 %b1, i32* %a7
store i32 %b2, i32* %a8
store i32 %b3, i32* %a9
@@ -321,10 +321,10 @@ block4: ; preds = %4, %.lr.ph
%a8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
%a9 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 2
%a10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 3
- %b1 = load i32* %a2, align 1
- %b2 = load i32* %a3, align 1
- %b3 = load i32* %a4, align 1
- %b4 = load i32* %a5, align 1
+ %b1 = load i32, i32* %a2, align 1
+ %b2 = load i32, i32* %a3, align 1
+ %b3 = load i32, i32* %a4, align 1
+ %b4 = load i32, i32* %a5, align 1
store i32 %b1, i32* %a7, align 1
store i32 %b2, i32* %a8, align 1
store i32 %b3, i32* %a9, align 1
@@ -351,12 +351,12 @@ define void @MergeLoadStoreBaseIndexOffset(i64* %a, i8* %b, i8* %c, i32 %n) {
%.08 = phi i8* [ %b, %0 ], [ %10, %1 ]
%.0 = phi i64* [ %a, %0 ], [ %2, %1 ]
%2 = getelementptr inbounds i64, i64* %.0, i64 1
- %3 = load i64* %.0, align 1
+ %3 = load i64, i64* %.0, align 1
%4 = getelementptr inbounds i8, i8* %c, i64 %3
- %5 = load i8* %4, align 1
+ %5 = load i8, i8* %4, align 1
%6 = add i64 %3, 1
%7 = getelementptr inbounds i8, i8* %c, i64 %6
- %8 = load i8* %7, align 1
+ %8 = load i8, i8* %7, align 1
store i8 %5, i8* %.08, align 1
%9 = getelementptr inbounds i8, i8* %.08, i64 1
store i8 %8, i8* %9, align 1
@@ -383,13 +383,13 @@ define void @MergeLoadStoreBaseIndexOffsetSext(i8* %a, i8* %b, i8* %c, i32 %n) {
%.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
%.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
%2 = getelementptr inbounds i8, i8* %.0, i64 1
- %3 = load i8* %.0, align 1
+ %3 = load i8, i8* %.0, align 1
%4 = sext i8 %3 to i64
%5 = getelementptr inbounds i8, i8* %c, i64 %4
- %6 = load i8* %5, align 1
+ %6 = load i8, i8* %5, align 1
%7 = add i64 %4, 1
%8 = getelementptr inbounds i8, i8* %c, i64 %7
- %9 = load i8* %8, align 1
+ %9 = load i8, i8* %8, align 1
store i8 %6, i8* %.08, align 1
%10 = getelementptr inbounds i8, i8* %.08, i64 1
store i8 %9, i8* %10, align 1
@@ -415,14 +415,14 @@ define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
%.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
%.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
%2 = getelementptr inbounds i8, i8* %.0, i64 1
- %3 = load i8* %.0, align 1
+ %3 = load i8, i8* %.0, align 1
%4 = sext i8 %3 to i64
%5 = getelementptr inbounds i8, i8* %c, i64 %4
- %6 = load i8* %5, align 1
+ %6 = load i8, i8* %5, align 1
%7 = add i8 %3, 1
%wrap.4 = sext i8 %7 to i64
%8 = getelementptr inbounds i8, i8* %c, i64 %wrap.4
- %9 = load i8* %8, align 1
+ %9 = load i8, i8* %8, align 1
store i8 %6, i8* %.08, align 1
%10 = getelementptr inbounds i8, i8* %.08, i64 1
store i8 %9, i8* %10, align 1
@@ -477,11 +477,11 @@ define void @merge_vec_element_and_scalar_load([6 x i64]* %array) {
%idx4 = getelementptr inbounds [6 x i64], [6 x i64]* %array, i64 0, i64 4
%idx5 = getelementptr inbounds [6 x i64], [6 x i64]* %array, i64 0, i64 5
- %a0 = load i64* %idx0, align 8
+ %a0 = load i64, i64* %idx0, align 8
store i64 %a0, i64* %idx4, align 8
%b = bitcast i64* %idx1 to <2 x i64>*
- %v = load <2 x i64>* %b, align 8
+ %v = load <2 x i64>, <2 x i64>* %b, align 8
%a1 = extractelement <2 x i64> %v, i32 0
store i64 %a1, i64* %idx5, align 8
ret void
diff --git a/llvm/test/CodeGen/X86/StackColoring.ll b/llvm/test/CodeGen/X86/StackColoring.ll
index e783877178f..414ccf469eb 100644
--- a/llvm/test/CodeGen/X86/StackColoring.ll
+++ b/llvm/test/CodeGen/X86/StackColoring.ll
@@ -414,7 +414,7 @@ define i32 @shady_range(i32 %argc, i8** nocapture %argv) uwtable {
%z2 = getelementptr inbounds [4 x %struct.Klass], [4 x %struct.Klass]* %a.i, i64 0, i64 0, i32 0
call void @llvm.lifetime.start(i64 -1, i8* %a8)
call void @llvm.lifetime.start(i64 -1, i8* %b8)
- %z3 = load i32* %z2, align 16
+ %z3 = load i32, i32* %z2, align 16
%r = call i32 @foo(i32 %z3, i8* %a8)
%r2 = call i32 @foo(i32 %z3, i8* %b8)
call void @llvm.lifetime.end(i64 -1, i8* %a8)
diff --git a/llvm/test/CodeGen/X86/SwitchLowering.ll b/llvm/test/CodeGen/X86/SwitchLowering.ll
index abc339f3e6b..5f17d9d8572 100644
--- a/llvm/test/CodeGen/X86/SwitchLowering.ll
+++ b/llvm/test/CodeGen/X86/SwitchLowering.ll
@@ -10,7 +10,7 @@ bb: ; preds = %bb, %entry
%CurPtr_addr.0.rec = bitcast i32 %indvar to i32 ; <i32> [#uses=1]
%gep.upgrd.1 = zext i32 %indvar to i64 ; <i64> [#uses=1]
%CurPtr_addr.0 = getelementptr i8, i8* %CurPtr, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
- %tmp = load i8* %CurPtr_addr.0 ; <i8> [#uses=3]
+ %tmp = load i8, i8* %CurPtr_addr.0 ; <i8> [#uses=3]
%tmp2.rec = add i32 %CurPtr_addr.0.rec, 1 ; <i32> [#uses=1]
%tmp2 = getelementptr i8, i8* %CurPtr, i32 %tmp2.rec ; <i8*> [#uses=1]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/SwizzleShuff.ll b/llvm/test/CodeGen/X86/SwizzleShuff.ll
index d387850abb9..e4c35c58210 100644
--- a/llvm/test/CodeGen/X86/SwizzleShuff.ll
+++ b/llvm/test/CodeGen/X86/SwizzleShuff.ll
@@ -6,8 +6,8 @@
; CHECK: xorl
; CHECK: ret
define void @pull_bitcast (<4 x i8>* %pA, <4 x i8>* %pB) {
- %A = load <4 x i8>* %pA
- %B = load <4 x i8>* %pB
+ %A = load <4 x i8>, <4 x i8>* %pA
+ %B = load <4 x i8>, <4 x i8>* %pB
%C = xor <4 x i8> %A, %B
store <4 x i8> %C, <4 x i8>* %pA
ret void
@@ -22,8 +22,8 @@ define void @pull_bitcast (<4 x i8>* %pA, <4 x i8>* %pB) {
; CHECK-NEXT: pxor
; CHECK-NEXT: ret
define <4 x i32> @multi_use_swizzle (<4 x i32>* %pA, <4 x i32>* %pB) {
- %A = load <4 x i32>* %pA
- %B = load <4 x i32>* %pB
+ %A = load <4 x i32>, <4 x i32>* %pA
+ %B = load <4 x i32>, <4 x i32>* %pB
%S = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 1, i32 5, i32 6>
%S1 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 2>
%S2 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 2, i32 1, i32 0, i32 2>
@@ -35,9 +35,9 @@ define <4 x i32> @multi_use_swizzle (<4 x i32>* %pA, <4 x i32>* %pB) {
; CHECK: xorl
; CHECK: ret
define <4 x i8> @pull_bitcast2 (<4 x i8>* %pA, <4 x i8>* %pB, <4 x i8>* %pC) {
- %A = load <4 x i8>* %pA
+ %A = load <4 x i8>, <4 x i8>* %pA
store <4 x i8> %A, <4 x i8>* %pC
- %B = load <4 x i8>* %pB
+ %B = load <4 x i8>, <4 x i8>* %pB
%C = xor <4 x i8> %A, %B
store <4 x i8> %C, <4 x i8>* %pA
ret <4 x i8> %C
@@ -49,8 +49,8 @@ define <4 x i8> @pull_bitcast2 (<4 x i8>* %pA, <4 x i8>* %pB, <4 x i8>* %pC) {
; CHECK-NOT: pshufd
; CHECK: ret
define <4 x i32> @reverse_1 (<4 x i32>* %pA, <4 x i32>* %pB) {
- %A = load <4 x i32>* %pA
- %B = load <4 x i32>* %pB
+ %A = load <4 x i32>, <4 x i32>* %pA
+ %B = load <4 x i32>, <4 x i32>* %pB
%S = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
%S1 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
ret <4 x i32> %S1
@@ -61,8 +61,8 @@ define <4 x i32> @reverse_1 (<4 x i32>* %pA, <4 x i32>* %pB) {
; CHECK: pshufd
; CHECK: ret
define <4 x i32> @no_reverse_shuff (<4 x i32>* %pA, <4 x i32>* %pB) {
- %A = load <4 x i32>* %pA
- %B = load <4 x i32>* %pB
+ %A = load <4 x i32>, <4 x i32>* %pA
+ %B = load <4 x i32>, <4 x i32>* %pB
%S = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
%S1 = shufflevector <4 x i32> %S, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 3, i32 2>
ret <4 x i32> %S1
diff --git a/llvm/test/CodeGen/X86/abi-isel.ll b/llvm/test/CodeGen/X86/abi-isel.ll
index 752b6e66b58..234419b1397 100644
--- a/llvm/test/CodeGen/X86/abi-isel.ll
+++ b/llvm/test/CodeGen/X86/abi-isel.ll
@@ -33,7 +33,7 @@
define void @foo00() nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 0), align 4
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @src, i32 0, i64 0), align 4
store i32 %0, i32* getelementptr ([131072 x i32]* @dst, i32 0, i64 0), align 4
ret void
@@ -105,7 +105,7 @@ entry:
define void @fxo00() nounwind {
entry:
- %0 = load i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 0), align 4
+ %0 = load i32, i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 0), align 4
store i32 %0, i32* getelementptr ([32 x i32]* @xdst, i32 0, i64 0), align 4
ret void
@@ -297,8 +297,8 @@ entry:
define void @foo02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 0), align 4
+ %0 = load i32*, i32** @ptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @src, i32 0, i64 0), align 4
store i32 %1, i32* %0, align 4
ret void
; LINUX-64-STATIC-LABEL: foo02:
@@ -379,8 +379,8 @@ entry:
define void @fxo02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 0), align 4
+ %0 = load i32*, i32** @ptr, align 8
+ %1 = load i32, i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 0), align 4
store i32 %1, i32* %0, align 4
; LINUX-64-STATIC-LABEL: fxo02:
; LINUX-64-STATIC: movl xsrc(%rip), %
@@ -461,7 +461,7 @@ entry:
define void @foo03() nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 0), align 32
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 0), align 32
store i32 %0, i32* getelementptr ([131072 x i32]* @ddst, i32 0, i64 0), align 32
ret void
; LINUX-64-STATIC-LABEL: foo03:
@@ -576,8 +576,8 @@ entry:
define void @foo05() nounwind {
entry:
- %0 = load i32** @dptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 0), align 32
+ %0 = load i32*, i32** @dptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 0), align 32
store i32 %1, i32* %0, align 4
ret void
; LINUX-64-STATIC-LABEL: foo05:
@@ -648,7 +648,7 @@ entry:
define void @foo06() nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 0), align 4
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 0), align 4
store i32 %0, i32* getelementptr ([131072 x i32]* @ldst, i32 0, i64 0), align 4
ret void
; LINUX-64-STATIC-LABEL: foo06:
@@ -760,8 +760,8 @@ entry:
define void @foo08() nounwind {
entry:
- %0 = load i32** @lptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 0), align 4
+ %0 = load i32*, i32** @lptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 0), align 4
store i32 %1, i32* %0, align 4
ret void
; LINUX-64-STATIC-LABEL: foo08:
@@ -830,7 +830,7 @@ entry:
define void @qux00() nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 16), align 4
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @src, i32 0, i64 16), align 4
store i32 %0, i32* getelementptr ([131072 x i32]* @dst, i32 0, i64 16), align 4
ret void
; LINUX-64-STATIC-LABEL: qux00:
@@ -901,7 +901,7 @@ entry:
define void @qxx00() nounwind {
entry:
- %0 = load i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 16), align 4
+ %0 = load i32, i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 16), align 4
store i32 %0, i32* getelementptr ([32 x i32]* @xdst, i32 0, i64 16), align 4
ret void
; LINUX-64-STATIC-LABEL: qxx00:
@@ -1104,8 +1104,8 @@ entry:
define void @qux02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 16), align 4
+ %0 = load i32*, i32** @ptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @src, i32 0, i64 16), align 4
%2 = getelementptr i32, i32* %0, i64 16
store i32 %1, i32* %2, align 4
; LINUX-64-STATIC-LABEL: qux02:
@@ -1187,8 +1187,8 @@ entry:
define void @qxx02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 16), align 4
+ %0 = load i32*, i32** @ptr, align 8
+ %1 = load i32, i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 16), align 4
%2 = getelementptr i32, i32* %0, i64 16
store i32 %1, i32* %2, align 4
; LINUX-64-STATIC-LABEL: qxx02:
@@ -1270,7 +1270,7 @@ entry:
define void @qux03() nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 16), align 32
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 16), align 32
store i32 %0, i32* getelementptr ([131072 x i32]* @ddst, i32 0, i64 16), align 32
ret void
; LINUX-64-STATIC-LABEL: qux03:
@@ -1386,8 +1386,8 @@ entry:
define void @qux05() nounwind {
entry:
- %0 = load i32** @dptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 16), align 32
+ %0 = load i32*, i32** @dptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 16), align 32
%2 = getelementptr i32, i32* %0, i64 16
store i32 %1, i32* %2, align 4
; LINUX-64-STATIC-LABEL: qux05:
@@ -1459,7 +1459,7 @@ entry:
define void @qux06() nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 16), align 4
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 16), align 4
store i32 %0, i32* getelementptr ([131072 x i32]* @ldst, i32 0, i64 16), align 4
ret void
; LINUX-64-STATIC-LABEL: qux06:
@@ -1571,8 +1571,8 @@ entry:
define void @qux08() nounwind {
entry:
- %0 = load i32** @lptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 16), align 4
+ %0 = load i32*, i32** @lptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 16), align 4
%2 = getelementptr i32, i32* %0, i64 16
store i32 %1, i32* %2, align 4
; LINUX-64-STATIC-LABEL: qux08:
@@ -1643,7 +1643,7 @@ entry:
define void @ind00(i64 %i) nounwind {
entry:
%0 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %i
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%2 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %i
store i32 %1, i32* %2, align 4
ret void
@@ -1721,7 +1721,7 @@ entry:
define void @ixd00(i64 %i) nounwind {
entry:
%0 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %i
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%2 = getelementptr [32 x i32], [32 x i32]* @xdst, i64 0, i64 %i
store i32 %1, i32* %2, align 4
ret void
@@ -1950,9 +1950,9 @@ entry:
define void @ind02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %i
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr i32, i32* %0, i64 %i
store i32 %2, i32* %3, align 4
ret void
@@ -2039,9 +2039,9 @@ entry:
define void @ixd02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %i
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr i32, i32* %0, i64 %i
store i32 %2, i32* %3, align 4
ret void
@@ -2129,7 +2129,7 @@ entry:
define void @ind03(i64 %i) nounwind {
entry:
%0 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %i
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%2 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %i
store i32 %1, i32* %2, align 4
ret void
@@ -2271,9 +2271,9 @@ entry:
define void @ind05(i64 %i) nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %i
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr i32, i32* %0, i64 %i
store i32 %2, i32* %3, align 4
ret void
@@ -2354,7 +2354,7 @@ entry:
define void @ind06(i64 %i) nounwind {
entry:
%0 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %i
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%2 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %i
store i32 %1, i32* %2, align 4
ret void
@@ -2495,9 +2495,9 @@ entry:
define void @ind08(i64 %i) nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %i
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr i32, i32* %0, i64 %i
store i32 %2, i32* %3, align 4
ret void
@@ -2578,7 +2578,7 @@ define void @off00(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
%1 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -2657,7 +2657,7 @@ define void @oxf00(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
%1 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [32 x i32], [32 x i32]* @xdst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -2888,10 +2888,10 @@ entry:
define void @off02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -2978,10 +2978,10 @@ entry:
define void @oxf02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -3070,7 +3070,7 @@ define void @off03(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
%1 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -3213,10 +3213,10 @@ entry:
define void @off05(i64 %i) nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -3298,7 +3298,7 @@ define void @off06(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
%1 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -3440,10 +3440,10 @@ entry:
define void @off08(i64 %i) nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -3522,7 +3522,7 @@ entry:
define void @moo00(i64 %i) nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 65536), align 4
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @src, i32 0, i64 65536), align 4
store i32 %0, i32* getelementptr ([131072 x i32]* @dst, i32 0, i64 65536), align 4
ret void
; LINUX-64-STATIC-LABEL: moo00:
@@ -3659,8 +3659,8 @@ entry:
define void @moo02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 65536), align 4
+ %0 = load i32*, i32** @ptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @src, i32 0, i64 65536), align 4
%2 = getelementptr i32, i32* %0, i64 65536
store i32 %1, i32* %2, align 4
ret void
@@ -3742,7 +3742,7 @@ entry:
define void @moo03(i64 %i) nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 65536), align 32
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 65536), align 32
store i32 %0, i32* getelementptr ([131072 x i32]* @ddst, i32 0, i64 65536), align 32
ret void
; LINUX-64-STATIC-LABEL: moo03:
@@ -3858,8 +3858,8 @@ entry:
define void @moo05(i64 %i) nounwind {
entry:
- %0 = load i32** @dptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 65536), align 32
+ %0 = load i32*, i32** @dptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 65536), align 32
%2 = getelementptr i32, i32* %0, i64 65536
store i32 %1, i32* %2, align 4
ret void
@@ -3931,7 +3931,7 @@ entry:
define void @moo06(i64 %i) nounwind {
entry:
- %0 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 65536), align 4
+ %0 = load i32, i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 65536), align 4
store i32 %0, i32* getelementptr ([131072 x i32]* @ldst, i32 0, i64 65536), align 4
ret void
; LINUX-64-STATIC-LABEL: moo06:
@@ -4043,8 +4043,8 @@ entry:
define void @moo08(i64 %i) nounwind {
entry:
- %0 = load i32** @lptr, align 8
- %1 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 65536), align 4
+ %0 = load i32*, i32** @lptr, align 8
+ %1 = load i32, i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 65536), align 4
%2 = getelementptr i32, i32* %0, i64 65536
store i32 %1, i32* %2, align 4
ret void
@@ -4116,7 +4116,7 @@ define void @big00(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
%1 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -4270,10 +4270,10 @@ entry:
define void @big02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = add i64 %i, 65536
%2 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -4362,7 +4362,7 @@ define void @big03(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
%1 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -4505,10 +4505,10 @@ entry:
define void @big05(i64 %i) nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = add i64 %i, 65536
%2 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -4590,7 +4590,7 @@ define void @big06(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
%1 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %0
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
@@ -4732,10 +4732,10 @@ entry:
define void @big08(i64 %i) nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = add i64 %i, 65536
%2 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %1
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
%4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
@@ -5519,7 +5519,7 @@ entry:
define i8* @har02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = bitcast i32* %0 to i8*
ret i8* %1
; LINUX-64-STATIC-LABEL: har02:
@@ -5668,7 +5668,7 @@ entry:
define i8* @har05() nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = bitcast i32* %0 to i8*
ret i8* %1
; LINUX-64-STATIC-LABEL: har05:
@@ -5812,7 +5812,7 @@ entry:
define i8* @har08() nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = bitcast i32* %0 to i8*
ret i8* %1
; LINUX-64-STATIC-LABEL: har08:
@@ -6073,7 +6073,7 @@ entry:
define i8* @bat02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = getelementptr i32, i32* %0, i64 16
%2 = bitcast i32* %1 to i8*
ret i8* %2
@@ -6235,7 +6235,7 @@ entry:
define i8* @bat05() nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = getelementptr i32, i32* %0, i64 16
%2 = bitcast i32* %1 to i8*
ret i8* %2
@@ -6390,7 +6390,7 @@ entry:
define i8* @bat08() nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = getelementptr i32, i32* %0, i64 16
%2 = bitcast i32* %1 to i8*
ret i8* %2
@@ -6609,7 +6609,7 @@ entry:
define i8* @bam02() nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = getelementptr i32, i32* %0, i64 65536
%2 = bitcast i32* %1 to i8*
ret i8* %2
@@ -6771,7 +6771,7 @@ entry:
define i8* @bam05() nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = getelementptr i32, i32* %0, i64 65536
%2 = bitcast i32* %1 to i8*
ret i8* %2
@@ -6926,7 +6926,7 @@ entry:
define i8* @bam08() nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = getelementptr i32, i32* %0, i64 65536
%2 = bitcast i32* %1 to i8*
ret i8* %2
@@ -7230,7 +7230,7 @@ entry:
define i8* @cat02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
@@ -7420,7 +7420,7 @@ entry:
define i8* @cat05(i64 %i) nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
@@ -7605,7 +7605,7 @@ entry:
define i8* @cat08(i64 %i) nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = add i64 %i, 16
%2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
@@ -7915,7 +7915,7 @@ entry:
define i8* @cam02(i64 %i) nounwind {
entry:
- %0 = load i32** @ptr, align 8
+ %0 = load i32*, i32** @ptr, align 8
%1 = add i64 %i, 65536
%2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
@@ -8105,7 +8105,7 @@ entry:
define i8* @cam05(i64 %i) nounwind {
entry:
- %0 = load i32** @dptr, align 8
+ %0 = load i32*, i32** @dptr, align 8
%1 = add i64 %i, 65536
%2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
@@ -8290,7 +8290,7 @@ entry:
define i8* @cam08(i64 %i) nounwind {
entry:
- %0 = load i32** @lptr, align 8
+ %0 = load i32*, i32** @lptr, align 8
%1 = add i64 %i, 65536
%2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
@@ -9180,9 +9180,9 @@ entry:
define void @icaller() nounwind {
entry:
- %0 = load void ()** @ifunc, align 8
+ %0 = load void ()*, void ()** @ifunc, align 8
call void %0() nounwind
- %1 = load void ()** @ifunc, align 8
+ %1 = load void ()*, void ()** @ifunc, align 8
call void %1() nounwind
ret void
; LINUX-64-STATIC-LABEL: icaller:
@@ -9270,9 +9270,9 @@ entry:
define void @dicaller() nounwind {
entry:
- %0 = load void ()** @difunc, align 8
+ %0 = load void ()*, void ()** @difunc, align 8
call void %0() nounwind
- %1 = load void ()** @difunc, align 8
+ %1 = load void ()*, void ()** @difunc, align 8
call void %1() nounwind
ret void
; LINUX-64-STATIC-LABEL: dicaller:
@@ -9353,9 +9353,9 @@ entry:
define void @licaller() nounwind {
entry:
- %0 = load void ()** @lifunc, align 8
+ %0 = load void ()*, void ()** @lifunc, align 8
call void %0() nounwind
- %1 = load void ()** @lifunc, align 8
+ %1 = load void ()*, void ()** @lifunc, align 8
call void %1() nounwind
ret void
; LINUX-64-STATIC-LABEL: licaller:
@@ -9435,9 +9435,9 @@ entry:
define void @itailcaller() nounwind {
entry:
- %0 = load void ()** @ifunc, align 8
+ %0 = load void ()*, void ()** @ifunc, align 8
call void %0() nounwind
- %1 = load void ()** @ifunc, align 8
+ %1 = load void ()*, void ()** @ifunc, align 8
call void %1() nounwind
ret void
; LINUX-64-STATIC-LABEL: itailcaller:
@@ -9525,7 +9525,7 @@ entry:
define void @ditailcaller() nounwind {
entry:
- %0 = load void ()** @difunc, align 8
+ %0 = load void ()*, void ()** @difunc, align 8
call void %0() nounwind
ret void
; LINUX-64-STATIC-LABEL: ditailcaller:
@@ -9593,7 +9593,7 @@ entry:
define void @litailcaller() nounwind {
entry:
- %0 = load void ()** @lifunc, align 8
+ %0 = load void ()*, void ()** @lifunc, align 8
call void %0() nounwind
ret void
; LINUX-64-STATIC-LABEL: litailcaller:
diff --git a/llvm/test/CodeGen/X86/addr-mode-matcher.ll b/llvm/test/CodeGen/X86/addr-mode-matcher.ll
index dc5052d043a..83d6858bda1 100644
--- a/llvm/test/CodeGen/X86/addr-mode-matcher.ll
+++ b/llvm/test/CodeGen/X86/addr-mode-matcher.ll
@@ -26,14 +26,14 @@ bb1692:
%tmp1702 = and i32 %tmp1701, 1020
%tmp1703 = getelementptr inbounds [1028 x i8], [1028 x i8]* null, i32 0, i32 %tmp1702
%tmp1704 = bitcast i8* %tmp1703 to i32*
- %load1 = load i32* %tmp1704, align 4
+ %load1 = load i32, i32* %tmp1704, align 4
; %load2 = (load (shl (and %xor, 255), 2))
%tmp1698 = and i32 %xor, 255
%tmp1706 = shl i32 %tmp1698, 2
%tmp1707 = getelementptr inbounds [1028 x i8], [1028 x i8]* null, i32 0, i32 %tmp1706
%tmp1708 = bitcast i8* %tmp1707 to i32*
- %load2 = load i32* %tmp1708, align 4
+ %load2 = load i32, i32* %tmp1708, align 4
%tmp1710 = or i32 %load2, %a
diff --git a/llvm/test/CodeGen/X86/address-type-promotion-constantexpr.ll b/llvm/test/CodeGen/X86/address-type-promotion-constantexpr.ll
index 32f29bd3cad..58baf31d906 100644
--- a/llvm/test/CodeGen/X86/address-type-promotion-constantexpr.ll
+++ b/llvm/test/CodeGen/X86/address-type-promotion-constantexpr.ll
@@ -10,7 +10,7 @@
; CHECK: xor %eax, %eax
define i32 @main() {
entry:
- %foo = load i8* getelementptr ([2 x i8]* @b, i64 0, i64 sext (i8 or (i8 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32]* @c, i64 0, i64 1), i32* @a) to i8), i8 1) to i64)), align 1
+ %foo = load i8, i8* getelementptr ([2 x i8]* @b, i64 0, i64 sext (i8 or (i8 zext (i1 icmp eq (i32* getelementptr inbounds ([2 x i32]* @c, i64 0, i64 1), i32* @a) to i8), i8 1) to i64)), align 1
ret i32 0
}
diff --git a/llvm/test/CodeGen/X86/aliases.ll b/llvm/test/CodeGen/X86/aliases.ll
index 82a8e482b7f..6ce24e241c1 100644
--- a/llvm/test/CodeGen/X86/aliases.ll
+++ b/llvm/test/CodeGen/X86/aliases.ll
@@ -64,9 +64,9 @@ define i32 @foo_f() {
; CHECK-DAG: .globl test
define i32 @test() {
entry:
- %tmp = load i32* @foo1
- %tmp1 = load i32* @foo2
- %tmp0 = load i32* @bar_i
+ %tmp = load i32, i32* @foo1
+ %tmp1 = load i32, i32* @foo2
+ %tmp0 = load i32, i32* @bar_i
%tmp2 = call i32 @foo_f()
%tmp3 = add i32 %tmp, %tmp2
%tmp4 = call %FunTy* @bar_f()
diff --git a/llvm/test/CodeGen/X86/aligned-variadic.ll b/llvm/test/CodeGen/X86/aligned-variadic.ll
index 60d0f71e0b9..29415922062 100644
--- a/llvm/test/CodeGen/X86/aligned-variadic.ll
+++ b/llvm/test/CodeGen/X86/aligned-variadic.ll
@@ -12,7 +12,7 @@ entry:
%arraydecay1 = bitcast [1 x %struct.__va_list_tag]* %va to i8*
call void @llvm.va_start(i8* %arraydecay1)
%overflow_arg_area_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i64 0, i64 0, i32 2
- %overflow_arg_area = load i8** %overflow_arg_area_p, align 8
+ %overflow_arg_area = load i8*, i8** %overflow_arg_area_p, align 8
%overflow_arg_area.next = getelementptr i8, i8* %overflow_arg_area, i64 24
store i8* %overflow_arg_area.next, i8** %overflow_arg_area_p, align 8
; X32: leal 68(%esp), [[REG:%.*]]
diff --git a/llvm/test/CodeGen/X86/and-su.ll b/llvm/test/CodeGen/X86/and-su.ll
index 70c24615a7e..bdbab153501 100644
--- a/llvm/test/CodeGen/X86/and-su.ll
+++ b/llvm/test/CodeGen/X86/and-su.ll
@@ -6,7 +6,7 @@ define fastcc i32 @foo(i32* %p) nounwind {
; CHECK-LABEL: foo:
; CHECK: andl $10, %eax
; CHECK: je
- %t0 = load i32* %p
+ %t0 = load i32, i32* %p
%t2 = and i32 %t0, 10
%t3 = icmp ne i32 %t2, 0
br i1 %t3, label %bb63, label %bb76
diff --git a/llvm/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll b/llvm/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll
index 6237b66a5ea..2e144f87c33 100644
--- a/llvm/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll
+++ b/llvm/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll
@@ -36,34 +36,34 @@
define void @func() #0 {
entry:
store i32 0, i32* @sum, align 4
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
store i32 %0, i32* @i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %1 = load i32* @i, align 4
- %2 = load i32* @b, align 4
+ %1 = load i32, i32* @i, align 4
+ %2 = load i32, i32* @b, align 4
%cmp = icmp slt i32 %1, %2
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %3 = load i32 (i32, i32, i32, i32, i32, i32, i32, i32)** @funcp, align 4
- %4 = load i32* @i, align 4
- %5 = load i32* @b, align 4
- %6 = load i32* @c, align 4
- %7 = load i32* @d, align 4
- %8 = load i32* @e, align 4
- %9 = load i32* @f, align 4
- %10 = load i32* @g, align 4
- %11 = load i32* @h, align 4
+ %3 = load i32 (i32, i32, i32, i32, i32, i32, i32, i32)*, i32 (i32, i32, i32, i32, i32, i32, i32, i32)** @funcp, align 4
+ %4 = load i32, i32* @i, align 4
+ %5 = load i32, i32* @b, align 4
+ %6 = load i32, i32* @c, align 4
+ %7 = load i32, i32* @d, align 4
+ %8 = load i32, i32* @e, align 4
+ %9 = load i32, i32* @f, align 4
+ %10 = load i32, i32* @g, align 4
+ %11 = load i32, i32* @h, align 4
%call = call i32 %3(i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11)
- %12 = load i32* @sum, align 4
+ %12 = load i32, i32* @sum, align 4
%add = add nsw i32 %12, %call
store i32 %add, i32* @sum, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %13 = load i32* @i, align 4
+ %13 = load i32, i32* @i, align 4
%inc = add nsw i32 %13, 1
store i32 %inc, i32* @i, align 4
br label %for.cond
diff --git a/llvm/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll b/llvm/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll
index a196d8175aa..e82626ced20 100644
--- a/llvm/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll
+++ b/llvm/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll
@@ -42,43 +42,43 @@
define void @func() #0 {
entry:
store i32 0, i32* @sum, align 4
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
store i32 %0, i32* @i, align 4
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %1 = load i32* @i, align 4
- %2 = load i32* @b, align 4
+ %1 = load i32, i32* @i, align 4
+ %2 = load i32, i32* @b, align 4
%cmp = icmp slt i32 %1, %2
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %3 = load i32 (i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)** @funcp, align 8
- %4 = load i32* @a, align 4
- %5 = load i32* @i, align 4
- %6 = load i32* @i, align 4
+ %3 = load i32 (i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)*, i32 (i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)** @funcp, align 8
+ %4 = load i32, i32* @a, align 4
+ %5 = load i32, i32* @i, align 4
+ %6 = load i32, i32* @i, align 4
%mul = mul nsw i32 %6, 2
- %7 = load i32* @i, align 4
- %8 = load i32* @b, align 4
+ %7 = load i32, i32* @i, align 4
+ %8 = load i32, i32* @b, align 4
%div = sdiv i32 %7, %8
- %9 = load i32* @c, align 4
- %10 = load i32* @d, align 4
- %11 = load i32* @e, align 4
- %12 = load i32* @f, align 4
- %13 = load i32* @g, align 4
- %14 = load i32* @h, align 4
- %15 = load i32* @j, align 4
- %16 = load i32* @k, align 4
- %17 = load i32* @l, align 4
- %18 = load i32* @n, align 4
+ %9 = load i32, i32* @c, align 4
+ %10 = load i32, i32* @d, align 4
+ %11 = load i32, i32* @e, align 4
+ %12 = load i32, i32* @f, align 4
+ %13 = load i32, i32* @g, align 4
+ %14 = load i32, i32* @h, align 4
+ %15 = load i32, i32* @j, align 4
+ %16 = load i32, i32* @k, align 4
+ %17 = load i32, i32* @l, align 4
+ %18 = load i32, i32* @n, align 4
%call = call i32 %3(i32 %4, i32 %5, i32 %mul, i32 %div, i32 %9, i32 %10, i32 %11, i32 %12, i32 %13, i32 %14, i32 %15, i32 %16, i32 %17, i32 %18)
- %19 = load i32* @sum, align 4
+ %19 = load i32, i32* @sum, align 4
%add = add nsw i32 %19, %call
store i32 %add, i32* @sum, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %20 = load i32* @i, align 4
+ %20 = load i32, i32* @i, align 4
%inc = add nsw i32 %20, 1
store i32 %inc, i32* @i, align 4
br label %for.cond
diff --git a/llvm/test/CodeGen/X86/atom-call-reg-indirect.ll b/llvm/test/CodeGen/X86/atom-call-reg-indirect.ll
index 48f2d4c1134..663b6f1eee5 100644
--- a/llvm/test/CodeGen/X86/atom-call-reg-indirect.ll
+++ b/llvm/test/CodeGen/X86/atom-call-reg-indirect.ll
@@ -14,8 +14,8 @@ define i32 @test1() #0 {
entry:
%call = tail call %class.A* @_Z3facv()
%0 = bitcast %class.A* %call to void (%class.A*)***
- %vtable = load void (%class.A*)*** %0, align 8
- %1 = load void (%class.A*)** %vtable, align 8
+ %vtable = load void (%class.A*)**, void (%class.A*)*** %0, align 8
+ %1 = load void (%class.A*)*, void (%class.A*)** %vtable, align 8
;ATOM32: movl (%ecx), %ecx
;ATOM32: calll *%ecx
;ATOM-NOT32: calll *(%ecx)
@@ -38,8 +38,8 @@ declare %class.A* @_Z3facv() #1
define i32 @test2() #0 {
;ATOM-LABEL: test2:
entry:
- %0 = load void (i32)*** @p, align 8
- %1 = load void (i32)** %0, align 8
+ %0 = load void (i32)**, void (i32)*** @p, align 8
+ %1 = load void (i32)*, void (i32)** %0, align 8
;ATOM32: movl (%eax), %eax
;ATOM32: calll *%eax
;ATOM-NOT: calll *(%eax)
diff --git a/llvm/test/CodeGen/X86/atom-cmpb.ll b/llvm/test/CodeGen/X86/atom-cmpb.ll
index 6177cff4045..baf0f5e87fc 100644
--- a/llvm/test/CodeGen/X86/atom-cmpb.ll
+++ b/llvm/test/CodeGen/X86/atom-cmpb.ll
@@ -12,9 +12,9 @@
define i8 @run_test(i8* %rd_p) {
entry:
%incdec.ptr = getelementptr inbounds i8, i8* %rd_p, i64 1
- %ld1 = load i8* %rd_p, align 1
+ %ld1 = load i8, i8* %rd_p, align 1
%incdec.ptr1 = getelementptr inbounds i8, i8* %rd_p, i64 2
- %ld2 = load i8* %incdec.ptr, align 1
+ %ld2 = load i8, i8* %incdec.ptr, align 1
%x4 = xor i8 %ld1, -1
%x5 = xor i8 %ld2, -1
%cmp34 = icmp ult i8 %ld2, %ld1
diff --git a/llvm/test/CodeGen/X86/atom-fixup-lea1.ll b/llvm/test/CodeGen/X86/atom-fixup-lea1.ll
index b89aa023f6d..f862fa6dfc3 100644
--- a/llvm/test/CodeGen/X86/atom-fixup-lea1.ll
+++ b/llvm/test/CodeGen/X86/atom-fixup-lea1.ll
@@ -26,7 +26,7 @@ for.body:
%i.06 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%sum.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %array, i32 %i.06
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.05
%inc = add nsw i32 %i.06, 1
%exitcond = icmp eq i32 %inc, %n
diff --git a/llvm/test/CodeGen/X86/atom-fixup-lea2.ll b/llvm/test/CodeGen/X86/atom-fixup-lea2.ll
index c9823a1b987..ec826138873 100644
--- a/llvm/test/CodeGen/X86/atom-fixup-lea2.ll
+++ b/llvm/test/CodeGen/X86/atom-fixup-lea2.ll
@@ -38,31 +38,31 @@ entry:
%n = alloca %struct.node_t, align 4
call void bitcast (void (%struct.node_t*, ...)* @getnode to void (%struct.node_t*)*)(%struct.node_t* sret %n)
%array = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 4
- %0 = load i32** %array, align 4
+ %0 = load i32*, i32** %array, align 4
%cmp = icmp eq i32* %0, null
br i1 %cmp, label %if.end, label %land.lhs.true
land.lhs.true:
%p = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 3
- %1 = load i32* %p, align 4
+ %1 = load i32, i32* %p, align 4
%cmp1 = icmp sgt i32 %1, 0
br i1 %cmp1, label %land.lhs.true2, label %if.end
land.lhs.true2:
%k = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 0
- %2 = load i32* %k, align 4
+ %2 = load i32, i32* %k, align 4
%cmp3 = icmp sgt i32 %2, 0
br i1 %cmp3, label %land.lhs.true4, label %if.end
land.lhs.true4:
%n5 = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 2
- %3 = load i32* %n5, align 4
+ %3 = load i32, i32* %n5, align 4
%cmp6 = icmp sgt i32 %3, 0
br i1 %cmp6, label %land.lhs.true7, label %if.end
land.lhs.true7:
%m = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 1
- %4 = load i32* %m, align 4
+ %4 = load i32, i32* %m, align 4
%cmp8 = icmp sgt i32 %4, 0
br i1 %cmp8, label %if.then, label %if.end
@@ -73,7 +73,7 @@ if.then:
%add15 = add nsw i32 %1, %5
%6 = inttoptr i32 %add15 to i32*
%arrayidx = getelementptr inbounds i32, i32* %6, i32 %add12
- %7 = load i32* %arrayidx, align 4
+ %7 = load i32, i32* %arrayidx, align 4
br label %if.end
if.end:
diff --git a/llvm/test/CodeGen/X86/atom-fixup-lea3.ll b/llvm/test/CodeGen/X86/atom-fixup-lea3.ll
index f51ee9164b7..ed2df277480 100644
--- a/llvm/test/CodeGen/X86/atom-fixup-lea3.ll
+++ b/llvm/test/CodeGen/X86/atom-fixup-lea3.ll
@@ -26,7 +26,7 @@ entry:
br i1 %cmp7, label %for.body.lr.ph, label %for.end
for.body.lr.ph: ; preds = %entry
- %.pre = load i32* %m, align 4
+ %.pre = load i32, i32* %m, align 4
br label %for.body
for.body: ; preds = %for.body, %for.body.lr.ph
@@ -35,11 +35,11 @@ for.body: ; preds = %for.body, %for.body
%j.09 = phi i32 [ 0, %for.body.lr.ph ], [ %inc1, %for.body ]
%inc1 = add nsw i32 %j.09, 1
%arrayidx = getelementptr inbounds i32, i32* %array2, i32 %j.09
- %1 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %1
store i32 %add, i32* %m, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %array, i32 %inc1
- %2 = load i32* %arrayidx2, align 4
+ %2 = load i32, i32* %arrayidx2, align 4
%add3 = add nsw i32 %2, %sum.010
%exitcond = icmp eq i32 %inc1, %n
br i1 %exitcond, label %for.end, label %for.body
diff --git a/llvm/test/CodeGen/X86/atom-fixup-lea4.ll b/llvm/test/CodeGen/X86/atom-fixup-lea4.ll
index be80e47f61c..f0da1d2015c 100644
--- a/llvm/test/CodeGen/X86/atom-fixup-lea4.ll
+++ b/llvm/test/CodeGen/X86/atom-fixup-lea4.ll
@@ -10,7 +10,7 @@ define linkonce_odr void @_ZN12ValueWrapperIS_IS_IS_IdEEEEC2Ev(%struct.ValueWrap
entry:
%this.addr = alloca %struct.ValueWrapper.6*, align 8
store %struct.ValueWrapper.6* %this, %struct.ValueWrapper.6** %this.addr, align 8
- %this1 = load %struct.ValueWrapper.6** %this.addr
+ %this1 = load %struct.ValueWrapper.6*, %struct.ValueWrapper.6** %this.addr
%value = getelementptr inbounds %struct.ValueWrapper.6, %struct.ValueWrapper.6* %this1, i32 0, i32 0
call void @_ZN12ValueWrapperIS_IS_IdEEEC2Ev(%struct.ValueWrapper.7* %value)
ret void
diff --git a/llvm/test/CodeGen/X86/atom-lea-addw-bug.ll b/llvm/test/CodeGen/X86/atom-lea-addw-bug.ll
index 5cda2df432f..d8147e5fbef 100644
--- a/llvm/test/CodeGen/X86/atom-lea-addw-bug.ll
+++ b/llvm/test/CodeGen/X86/atom-lea-addw-bug.ll
@@ -5,9 +5,9 @@ target triple = "x86_64-apple-darwin12.5.0"
define i32 @DoLayout() {
entry:
- %tmp1 = load i16* undef, align 2
- %tmp17 = load i16* null, align 2
- %tmp19 = load i16* undef, align 2
+ %tmp1 = load i16, i16* undef, align 2
+ %tmp17 = load i16, i16* null, align 2
+ %tmp19 = load i16, i16* undef, align 2
%shl = shl i16 %tmp19, 1
%add55 = add i16 %tmp17, %tmp1
%add57 = add i16 %add55, %shl
diff --git a/llvm/test/CodeGen/X86/atom-sched.ll b/llvm/test/CodeGen/X86/atom-sched.ll
index fd18472bff8..b81359e2832 100644
--- a/llvm/test/CodeGen/X86/atom-sched.ll
+++ b/llvm/test/CodeGen/X86/atom-sched.ll
@@ -21,12 +21,12 @@ define void @func() nounwind uwtable {
; CHECK: movl
; CHECK: imull
entry:
- %0 = load i32* @b, align 4
- %1 = load i32* @c, align 4
+ %0 = load i32, i32* @b, align 4
+ %1 = load i32, i32* @c, align 4
%mul = mul nsw i32 %0, %1
store i32 %mul, i32* @a, align 4
- %2 = load i32* @e, align 4
- %3 = load i32* @f, align 4
+ %2 = load i32, i32* @e, align 4
+ %3 = load i32, i32* @f, align 4
%mul1 = mul nsw i32 %2, %3
store i32 %mul1, i32* @d, align 4
ret void
diff --git a/llvm/test/CodeGen/X86/atomic-dagsched.ll b/llvm/test/CodeGen/X86/atomic-dagsched.ll
index 750b73d4823..97bb1afa47a 100644
--- a/llvm/test/CodeGen/X86/atomic-dagsched.ll
+++ b/llvm/test/CodeGen/X86/atomic-dagsched.ll
@@ -2,10 +2,10 @@
define void @test(i8** %a, i64* %b, i64 %c, i64 %d) nounwind {
entry:
- %ptrtoarg4 = load i8** %a, align 8
+ %ptrtoarg4 = load i8*, i8** %a, align 8
%brglist1 = getelementptr i8*, i8** %a, i64 1
- %ptrtoarg25 = load i8** %brglist1, align 8
- %0 = load i64* %b, align 8
+ %ptrtoarg25 = load i8*, i8** %brglist1, align 8
+ %0 = load i64, i64* %b, align 8
%1 = mul i64 %0, 4
%scevgep = getelementptr i8, i8* %ptrtoarg25, i64 %1
%2 = mul i64 %d, 4
@@ -18,8 +18,8 @@ loop.cond: ; preds = %test.exit, %entry
br i1 %3, label %return, label %loop
loop: ; preds = %loop.cond
- %4 = load i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
- %5 = load i64* %4, align 8
+ %4 = load i64*, i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
+ %5 = load i64, i64* %4, align 8
%vector.size.i = ashr i64 %5, 3
%num.vector.wi.i = shl i64 %vector.size.i, 3
%6 = icmp eq i64 %vector.size.i, 0
@@ -36,7 +36,7 @@ vector_kernel_entry.i: ; preds = %vector_kernel_entry
%asr.iv = phi i64 [ %asr.iv.next, %vector_kernel_entry.i ], [ %vector.size.i, %dim_0_vector_pre_head.i ]
%8 = addrspacecast i8* %ptrtoarg4 to i32 addrspace(1)*
%asr.iv911 = addrspacecast i8* %asr.iv9 to <8 x i32> addrspace(1)*
- %9 = load <8 x i32> addrspace(1)* %asr.iv911, align 4
+ %9 = load <8 x i32>, <8 x i32> addrspace(1)* %asr.iv911, align 4
%extract8vector_func.i = extractelement <8 x i32> %9, i32 0
%extract9vector_func.i = extractelement <8 x i32> %9, i32 1
%extract10vector_func.i = extractelement <8 x i32> %9, i32 2
@@ -65,8 +65,8 @@ scalarIf.i: ; preds = %vector_kernel_entry
br i1 %18, label %test.exit, label %dim_0_pre_head.i
dim_0_pre_head.i: ; preds = %scalarIf.i
- %19 = load i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
- %20 = load i64* %19, align 8
+ %19 = load i64*, i64* addrspace(256)* inttoptr (i64 264 to i64* addrspace(256)*), align 8
+ %20 = load i64, i64* %19, align 8
%21 = trunc i64 %20 to i32
%22 = mul i64 %vector.size.i, 8
br label %scalar_kernel_entry.i
@@ -76,7 +76,7 @@ scalar_kernel_entry.i: ; preds = %scalar_kernel_entry
%23 = addrspacecast i8* %asr.iv6 to i32 addrspace(1)*
%24 = addrspacecast i8* %ptrtoarg4 to i32 addrspace(1)*
%scevgep16 = getelementptr i32, i32 addrspace(1)* %23, i64 %asr.iv12
- %25 = load i32 addrspace(1)* %scevgep16, align 4
+ %25 = load i32, i32 addrspace(1)* %scevgep16, align 4
%26 = atomicrmw min i32 addrspace(1)* %24, i32 %25 seq_cst
%scevgep15 = getelementptr i32, i32 addrspace(1)* %23, i64 %asr.iv12
store i32 %21, i32 addrspace(1)* %scevgep15, align 4
diff --git a/llvm/test/CodeGen/X86/atomic-load-store-wide.ll b/llvm/test/CodeGen/X86/atomic-load-store-wide.ll
index ad1a5c6d026..5c463978ae5 100644
--- a/llvm/test/CodeGen/X86/atomic-load-store-wide.ll
+++ b/llvm/test/CodeGen/X86/atomic-load-store-wide.ll
@@ -16,6 +16,6 @@ define i64 @test2(i64* %ptr) {
; CHECK-LABEL: test2
; CHECK: lock
; CHECK-NEXT: cmpxchg8b
- %val = load atomic i64* %ptr seq_cst, align 8
+ %val = load atomic i64, i64* %ptr seq_cst, align 8
ret i64 %val
}
diff --git a/llvm/test/CodeGen/X86/atomic-load-store.ll b/llvm/test/CodeGen/X86/atomic-load-store.ll
index 86a744ed00f..dab79bd274c 100644
--- a/llvm/test/CodeGen/X86/atomic-load-store.ll
+++ b/llvm/test/CodeGen/X86/atomic-load-store.ll
@@ -18,6 +18,6 @@ define void @test2(i32* %ptr, i32 %val1) {
define i32 @test3(i32* %ptr) {
; CHECK: test3
; CHECK: movl (%rdi), %eax
- %val = load atomic i32* %ptr seq_cst, align 4
+ %val = load atomic i32, i32* %ptr seq_cst, align 4
ret i32 %val
}
diff --git a/llvm/test/CodeGen/X86/atomic-or.ll b/llvm/test/CodeGen/X86/atomic-or.ll
index 1687e07d57e..07838468ade 100644
--- a/llvm/test/CodeGen/X86/atomic-or.ll
+++ b/llvm/test/CodeGen/X86/atomic-or.ll
@@ -6,7 +6,7 @@ define void @t1(i64* %p, i32 %b) nounwind {
entry:
%p.addr = alloca i64*, align 8
store i64* %p, i64** %p.addr, align 8
- %tmp = load i64** %p.addr, align 8
+ %tmp = load i64*, i64** %p.addr, align 8
; CHECK-LABEL: t1:
; CHECK: movl $2147483648, %eax
; CHECK: lock
@@ -19,7 +19,7 @@ define void @t2(i64* %p, i32 %b) nounwind {
entry:
%p.addr = alloca i64*, align 8
store i64* %p, i64** %p.addr, align 8
- %tmp = load i64** %p.addr, align 8
+ %tmp = load i64*, i64** %p.addr, align 8
; CHECK-LABEL: t2:
; CHECK: lock
; CHECK-NEXT: orq $2147483644, (%r{{.*}})
diff --git a/llvm/test/CodeGen/X86/atomic-pointer.ll b/llvm/test/CodeGen/X86/atomic-pointer.ll
index ec3e6c3a8c1..66e021742fc 100644
--- a/llvm/test/CodeGen/X86/atomic-pointer.ll
+++ b/llvm/test/CodeGen/X86/atomic-pointer.ll
@@ -6,7 +6,7 @@ define i32* @test_atomic_ptr_load(i32** %a0) {
; CHECK: movl
; CHECK: ret
0:
- %0 = load atomic i32** %a0 seq_cst, align 4
+ %0 = load atomic i32*, i32** %a0 seq_cst, align 4
ret i32* %0
}
diff --git a/llvm/test/CodeGen/X86/atomic128.ll b/llvm/test/CodeGen/X86/atomic128.ll
index 741d2904229..dea7d482f98 100644
--- a/llvm/test/CodeGen/X86/atomic128.ll
+++ b/llvm/test/CodeGen/X86/atomic128.ll
@@ -249,7 +249,7 @@ define i128 @atomic_load_seq_cst(i128* %p) {
; CHECK: lock
; CHECK: cmpxchg16b (%rdi)
- %r = load atomic i128* %p seq_cst, align 16
+ %r = load atomic i128, i128* %p seq_cst, align 16
ret i128 %r
}
@@ -262,7 +262,7 @@ define i128 @atomic_load_relaxed(i128* %p) {
; CHECK: lock
; CHECK: cmpxchg16b (%rdi)
- %r = load atomic i128* %p monotonic, align 16
+ %r = load atomic i128, i128* %p monotonic, align 16
ret i128 %r
}
diff --git a/llvm/test/CodeGen/X86/atomic_mi.ll b/llvm/test/CodeGen/X86/atomic_mi.ll
index 19e019eaddc..7a6204fc893 100644
--- a/llvm/test/CodeGen/X86/atomic_mi.ll
+++ b/llvm/test/CodeGen/X86/atomic_mi.ll
@@ -103,7 +103,7 @@ define void @add_8(i8* %p) {
; X32-NOT: lock
; X32: addb
; X32-NOT: movb
- %1 = load atomic i8* %p seq_cst, align 1
+ %1 = load atomic i8, i8* %p seq_cst, align 1
%2 = add i8 %1, 2
store atomic i8 %2, i8* %p release, align 1
ret void
@@ -116,7 +116,7 @@ define void @add_16(i16* %p) {
; X64-NOT: addw
; X32-LABEL: add_16
; X32-NOT: addw
- %1 = load atomic i16* %p acquire, align 2
+ %1 = load atomic i16, i16* %p acquire, align 2
%2 = add i16 %1, 2
store atomic i16 %2, i16* %p release, align 2
ret void
@@ -131,7 +131,7 @@ define void @add_32(i32* %p) {
; X32-NOT: lock
; X32: addl
; X32-NOT: movl
- %1 = load atomic i32* %p acquire, align 4
+ %1 = load atomic i32, i32* %p acquire, align 4
%2 = add i32 %1, 2
store atomic i32 %2, i32* %p monotonic, align 4
ret void
@@ -144,7 +144,7 @@ define void @add_64(i64* %p) {
; X64-NOT: movq
; We do not check X86-32 as it cannot do 'addq'.
; X32-LABEL: add_64
- %1 = load atomic i64* %p acquire, align 8
+ %1 = load atomic i64, i64* %p acquire, align 8
%2 = add i64 %1, 2
store atomic i64 %2, i64* %p release, align 8
ret void
@@ -155,7 +155,7 @@ define void @add_32_seq_cst(i32* %p) {
; X64: xchgl
; X32-LABEL: add_32_seq_cst
; X32: xchgl
- %1 = load atomic i32* %p monotonic, align 4
+ %1 = load atomic i32, i32* %p monotonic, align 4
%2 = add i32 %1, 2
store atomic i32 %2, i32* %p seq_cst, align 4
ret void
@@ -172,7 +172,7 @@ define void @and_8(i8* %p) {
; X32-NOT: lock
; X32: andb
; X32-NOT: movb
- %1 = load atomic i8* %p monotonic, align 1
+ %1 = load atomic i8, i8* %p monotonic, align 1
%2 = and i8 %1, 2
store atomic i8 %2, i8* %p release, align 1
ret void
@@ -185,7 +185,7 @@ define void @and_16(i16* %p) {
; X64-NOT: andw
; X32-LABEL: and_16
; X32-NOT: andw
- %1 = load atomic i16* %p acquire, align 2
+ %1 = load atomic i16, i16* %p acquire, align 2
%2 = and i16 %1, 2
store atomic i16 %2, i16* %p release, align 2
ret void
@@ -200,7 +200,7 @@ define void @and_32(i32* %p) {
; X32-NOT: lock
; X32: andl
; X32-NOT: movl
- %1 = load atomic i32* %p acquire, align 4
+ %1 = load atomic i32, i32* %p acquire, align 4
%2 = and i32 %1, 2
store atomic i32 %2, i32* %p release, align 4
ret void
@@ -213,7 +213,7 @@ define void @and_64(i64* %p) {
; X64-NOT: movq
; We do not check X86-32 as it cannot do 'andq'.
; X32-LABEL: and_64
- %1 = load atomic i64* %p acquire, align 8
+ %1 = load atomic i64, i64* %p acquire, align 8
%2 = and i64 %1, 2
store atomic i64 %2, i64* %p release, align 8
ret void
@@ -224,7 +224,7 @@ define void @and_32_seq_cst(i32* %p) {
; X64: xchgl
; X32-LABEL: and_32_seq_cst
; X32: xchgl
- %1 = load atomic i32* %p monotonic, align 4
+ %1 = load atomic i32, i32* %p monotonic, align 4
%2 = and i32 %1, 2
store atomic i32 %2, i32* %p seq_cst, align 4
ret void
@@ -241,7 +241,7 @@ define void @or_8(i8* %p) {
; X32-NOT: lock
; X32: orb
; X32-NOT: movb
- %1 = load atomic i8* %p acquire, align 1
+ %1 = load atomic i8, i8* %p acquire, align 1
%2 = or i8 %1, 2
store atomic i8 %2, i8* %p release, align 1
ret void
@@ -252,7 +252,7 @@ define void @or_16(i16* %p) {
; X64-NOT: orw
; X32-LABEL: or_16
; X32-NOT: orw
- %1 = load atomic i16* %p acquire, align 2
+ %1 = load atomic i16, i16* %p acquire, align 2
%2 = or i16 %1, 2
store atomic i16 %2, i16* %p release, align 2
ret void
@@ -267,7 +267,7 @@ define void @or_32(i32* %p) {
; X32-NOT: lock
; X32: orl
; X32-NOT: movl
- %1 = load atomic i32* %p acquire, align 4
+ %1 = load atomic i32, i32* %p acquire, align 4
%2 = or i32 %1, 2
store atomic i32 %2, i32* %p release, align 4
ret void
@@ -280,7 +280,7 @@ define void @or_64(i64* %p) {
; X64-NOT: movq
; We do not check X86-32 as it cannot do 'orq'.
; X32-LABEL: or_64
- %1 = load atomic i64* %p acquire, align 8
+ %1 = load atomic i64, i64* %p acquire, align 8
%2 = or i64 %1, 2
store atomic i64 %2, i64* %p release, align 8
ret void
@@ -291,7 +291,7 @@ define void @or_32_seq_cst(i32* %p) {
; X64: xchgl
; X32-LABEL: or_32_seq_cst
; X32: xchgl
- %1 = load atomic i32* %p monotonic, align 4
+ %1 = load atomic i32, i32* %p monotonic, align 4
%2 = or i32 %1, 2
store atomic i32 %2, i32* %p seq_cst, align 4
ret void
@@ -308,7 +308,7 @@ define void @xor_8(i8* %p) {
; X32-NOT: lock
; X32: xorb
; X32-NOT: movb
- %1 = load atomic i8* %p acquire, align 1
+ %1 = load atomic i8, i8* %p acquire, align 1
%2 = xor i8 %1, 2
store atomic i8 %2, i8* %p release, align 1
ret void
@@ -319,7 +319,7 @@ define void @xor_16(i16* %p) {
; X64-NOT: xorw
; X32-LABEL: xor_16
; X32-NOT: xorw
- %1 = load atomic i16* %p acquire, align 2
+ %1 = load atomic i16, i16* %p acquire, align 2
%2 = xor i16 %1, 2
store atomic i16 %2, i16* %p release, align 2
ret void
@@ -334,7 +334,7 @@ define void @xor_32(i32* %p) {
; X32-NOT: lock
; X32: xorl
; X32-NOT: movl
- %1 = load atomic i32* %p acquire, align 4
+ %1 = load atomic i32, i32* %p acquire, align 4
%2 = xor i32 %1, 2
store atomic i32 %2, i32* %p release, align 4
ret void
@@ -347,7 +347,7 @@ define void @xor_64(i64* %p) {
; X64-NOT: movq
; We do not check X86-32 as it cannot do 'xorq'.
; X32-LABEL: xor_64
- %1 = load atomic i64* %p acquire, align 8
+ %1 = load atomic i64, i64* %p acquire, align 8
%2 = xor i64 %1, 2
store atomic i64 %2, i64* %p release, align 8
ret void
@@ -358,7 +358,7 @@ define void @xor_32_seq_cst(i32* %p) {
; X64: xchgl
; X32-LABEL: xor_32_seq_cst
; X32: xchgl
- %1 = load atomic i32* %p monotonic, align 4
+ %1 = load atomic i32, i32* %p monotonic, align 4
%2 = xor i32 %1, 2
store atomic i32 %2, i32* %p seq_cst, align 4
ret void
@@ -378,7 +378,7 @@ define void @inc_8(i8* %p) {
; SLOW_INC-LABEL: inc_8
; SLOW_INC-NOT: incb
; SLOW_INC-NOT: movb
- %1 = load atomic i8* %p seq_cst, align 1
+ %1 = load atomic i8, i8* %p seq_cst, align 1
%2 = add i8 %1, 1
store atomic i8 %2, i8* %p release, align 1
ret void
@@ -393,7 +393,7 @@ define void @inc_16(i16* %p) {
; X32-NOT: incw
; SLOW_INC-LABEL: inc_16
; SLOW_INC-NOT: incw
- %1 = load atomic i16* %p acquire, align 2
+ %1 = load atomic i16, i16* %p acquire, align 2
%2 = add i16 %1, 1
store atomic i16 %2, i16* %p release, align 2
ret void
@@ -411,7 +411,7 @@ define void @inc_32(i32* %p) {
; SLOW_INC-LABEL: inc_32
; SLOW_INC-NOT: incl
; SLOW_INC-NOT: movl
- %1 = load atomic i32* %p acquire, align 4
+ %1 = load atomic i32, i32* %p acquire, align 4
%2 = add i32 %1, 1
store atomic i32 %2, i32* %p monotonic, align 4
ret void
@@ -427,7 +427,7 @@ define void @inc_64(i64* %p) {
; SLOW_INC-LABEL: inc_64
; SLOW_INC-NOT: incq
; SLOW_INC-NOT: movq
- %1 = load atomic i64* %p acquire, align 8
+ %1 = load atomic i64, i64* %p acquire, align 8
%2 = add i64 %1, 1
store atomic i64 %2, i64* %p release, align 8
ret void
@@ -438,7 +438,7 @@ define void @inc_32_seq_cst(i32* %p) {
; X64: xchgl
; X32-LABEL: inc_32_seq_cst
; X32: xchgl
- %1 = load atomic i32* %p monotonic, align 4
+ %1 = load atomic i32, i32* %p monotonic, align 4
%2 = add i32 %1, 1
store atomic i32 %2, i32* %p seq_cst, align 4
ret void
@@ -458,7 +458,7 @@ define void @dec_8(i8* %p) {
; SLOW_INC-LABEL: dec_8
; SLOW_INC-NOT: decb
; SLOW_INC-NOT: movb
- %1 = load atomic i8* %p seq_cst, align 1
+ %1 = load atomic i8, i8* %p seq_cst, align 1
%2 = sub i8 %1, 1
store atomic i8 %2, i8* %p release, align 1
ret void
@@ -473,7 +473,7 @@ define void @dec_16(i16* %p) {
; X32-NOT: decw
; SLOW_INC-LABEL: dec_16
; SLOW_INC-NOT: decw
- %1 = load atomic i16* %p acquire, align 2
+ %1 = load atomic i16, i16* %p acquire, align 2
%2 = sub i16 %1, 1
store atomic i16 %2, i16* %p release, align 2
ret void
@@ -491,7 +491,7 @@ define void @dec_32(i32* %p) {
; SLOW_INC-LABEL: dec_32
; SLOW_INC-NOT: decl
; SLOW_INC-NOT: movl
- %1 = load atomic i32* %p acquire, align 4
+ %1 = load atomic i32, i32* %p acquire, align 4
%2 = sub i32 %1, 1
store atomic i32 %2, i32* %p monotonic, align 4
ret void
@@ -507,7 +507,7 @@ define void @dec_64(i64* %p) {
; SLOW_INC-LABEL: dec_64
; SLOW_INC-NOT: decq
; SLOW_INC-NOT: movq
- %1 = load atomic i64* %p acquire, align 8
+ %1 = load atomic i64, i64* %p acquire, align 8
%2 = sub i64 %1, 1
store atomic i64 %2, i64* %p release, align 8
ret void
@@ -518,7 +518,7 @@ define void @dec_32_seq_cst(i32* %p) {
; X64: xchgl
; X32-LABEL: dec_32_seq_cst
; X32: xchgl
- %1 = load atomic i32* %p monotonic, align 4
+ %1 = load atomic i32, i32* %p monotonic, align 4
%2 = sub i32 %1, 1
store atomic i32 %2, i32* %p seq_cst, align 4
ret void
diff --git a/llvm/test/CodeGen/X86/atomic_op.ll b/llvm/test/CodeGen/X86/atomic_op.ll
index d0ab28aa61f..aa895dedfe4 100644
--- a/llvm/test/CodeGen/X86/atomic_op.ll
+++ b/llvm/test/CodeGen/X86/atomic_op.ll
@@ -22,7 +22,7 @@ entry:
store i32 3855, i32* %ort
store i32 3855, i32* %xort
store i32 4, i32* %temp
- %tmp = load i32* %temp
+ %tmp = load i32, i32* %temp
; CHECK: lock
; CHECK: xaddl
%0 = atomicrmw add i32* %val1, i32 %tmp monotonic
diff --git a/llvm/test/CodeGen/X86/avoid-loop-align-2.ll b/llvm/test/CodeGen/X86/avoid-loop-align-2.ll
index 3e1e4527d0b..e02f3569c89 100644
--- a/llvm/test/CodeGen/X86/avoid-loop-align-2.ll
+++ b/llvm/test/CodeGen/X86/avoid-loop-align-2.ll
@@ -13,7 +13,7 @@ entry:
bb.nph12: ; preds = %entry
%1 = icmp eq i32 %b, 0 ; <i1> [#uses=1]
- %2 = load i32** @x, align 8 ; <i32*> [#uses=1]
+ %2 = load i32*, i32** @x, align 8 ; <i32*> [#uses=1]
br i1 %1, label %bb2.preheader, label %bb2.preheader.us
bb2.preheader.us: ; preds = %bb2.bb3_crit_edge.us, %bb.nph12
@@ -27,7 +27,7 @@ bb1.us: ; preds = %bb1.us, %bb2.preheader.us
%tmp17 = add i32 %indvar, %tmp16 ; <i32> [#uses=1]
%tmp. = zext i32 %tmp17 to i64 ; <i64> [#uses=1]
%3 = getelementptr i32, i32* %2, i64 %tmp. ; <i32*> [#uses=1]
- %4 = load i32* %3, align 4 ; <i32> [#uses=2]
+ %4 = load i32, i32* %3, align 4 ; <i32> [#uses=2]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %b ; <i1> [#uses=1]
br i1 %exitcond, label %bb2.bb3_crit_edge.us, label %bb1.us
diff --git a/llvm/test/CodeGen/X86/avoid-loop-align.ll b/llvm/test/CodeGen/X86/avoid-loop-align.ll
index c14eebbd40c..5d00ed049f3 100644
--- a/llvm/test/CodeGen/X86/avoid-loop-align.ll
+++ b/llvm/test/CodeGen/X86/avoid-loop-align.ll
@@ -22,7 +22,7 @@ bb: ; preds = %bb1, %bb1
bb1: ; preds = %bb, %entry
%P.0.rec = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%P.0 = getelementptr i8, i8* %tmp1, i32 %P.0.rec ; <i8*> [#uses=3]
- %tmp2 = load i8* %P.0, align 1 ; <i8> [#uses=1]
+ %tmp2 = load i8, i8* %P.0, align 1 ; <i8> [#uses=1]
switch i8 %tmp2, label %bb4 [
i8 12, label %bb
i8 42, label %bb
diff --git a/llvm/test/CodeGen/X86/avoid_complex_am.ll b/llvm/test/CodeGen/X86/avoid_complex_am.ll
index fa90a35dae0..fafa236b8dd 100644
--- a/llvm/test/CodeGen/X86/avoid_complex_am.ll
+++ b/llvm/test/CodeGen/X86/avoid_complex_am.ll
@@ -20,12 +20,12 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next, %for.body ]
%tmp = add nsw i64 %indvars.iv, -1
%arrayidx = getelementptr inbounds double, double* %b, i64 %tmp
- %tmp1 = load double* %arrayidx, align 8
+ %tmp1 = load double, double* %arrayidx, align 8
; The induction variable should carry the scaling factor: 1.
; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 1
%indvars.iv.next = add i64 %indvars.iv, 1
%arrayidx2 = getelementptr inbounds double, double* %c, i64 %indvars.iv.next
- %tmp2 = load double* %arrayidx2, align 8
+ %tmp2 = load double, double* %arrayidx2, align 8
%mul = fmul double %tmp1, %tmp2
%arrayidx4 = getelementptr inbounds double, double* %a, i64 %indvars.iv
store double %mul, double* %arrayidx4, align 8
diff --git a/llvm/test/CodeGen/X86/avx-arith.ll b/llvm/test/CodeGen/X86/avx-arith.ll
index a9da1ec067c..792a9987e10 100644
--- a/llvm/test/CodeGen/X86/avx-arith.ll
+++ b/llvm/test/CodeGen/X86/avx-arith.ll
@@ -38,7 +38,7 @@ entry:
; CHECK: vsubpd (%
define <4 x double> @subpd256fold(<4 x double> %y, <4 x double>* nocapture %x) nounwind uwtable readonly ssp {
entry:
- %tmp2 = load <4 x double>* %x, align 32
+ %tmp2 = load <4 x double>, <4 x double>* %x, align 32
%sub.i = fsub <4 x double> %y, %tmp2
ret <4 x double> %sub.i
}
@@ -53,7 +53,7 @@ entry:
; CHECK: vsubps (%
define <8 x float> @subps256fold(<8 x float> %y, <8 x float>* nocapture %x) nounwind uwtable readonly ssp {
entry:
- %tmp2 = load <8 x float>* %x, align 32
+ %tmp2 = load <8 x float>, <8 x float>* %x, align 32
%sub.i = fsub <8 x float> %y, %tmp2
ret <8 x float> %sub.i
}
@@ -264,7 +264,7 @@ declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
define <4 x float> @int_sqrt_ss() {
; CHECK: int_sqrt_ss
; CHECK: vsqrtss
- %x0 = load float addrspace(1)* undef, align 8
+ %x0 = load float, float addrspace(1)* undef, align 8
%x1 = insertelement <4 x float> undef, float %x0, i32 0
%x2 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %x1) nounwind
ret <4 x float> %x2
diff --git a/llvm/test/CodeGen/X86/avx-basic.ll b/llvm/test/CodeGen/X86/avx-basic.ll
index 8e82551ee4d..5307527b0a0 100644
--- a/llvm/test/CodeGen/X86/avx-basic.ll
+++ b/llvm/test/CodeGen/X86/avx-basic.ll
@@ -57,10 +57,10 @@ entry:
define <8 x i32> @VMOVZQI2PQI([0 x float]* nocapture %aFOO) nounwind {
allocas:
%ptrcast.i33.i = bitcast [0 x float]* %aFOO to i32*
- %val.i34.i = load i32* %ptrcast.i33.i, align 4
+ %val.i34.i = load i32, i32* %ptrcast.i33.i, align 4
%ptroffset.i22.i992 = getelementptr [0 x float], [0 x float]* %aFOO, i64 0, i64 1
%ptrcast.i23.i = bitcast float* %ptroffset.i22.i992 to i32*
- %val.i24.i = load i32* %ptrcast.i23.i, align 4
+ %val.i24.i = load i32, i32* %ptrcast.i23.i, align 4
%updatedret.i30.i = insertelement <8 x i32> undef, i32 %val.i34.i, i32 1
ret <8 x i32> %updatedret.i30.i
}
diff --git a/llvm/test/CodeGen/X86/avx-bitcast.ll b/llvm/test/CodeGen/X86/avx-bitcast.ll
index c9d828c1f6e..bb3e5a5bcbc 100644
--- a/llvm/test/CodeGen/X86/avx-bitcast.ll
+++ b/llvm/test/CodeGen/X86/avx-bitcast.ll
@@ -3,7 +3,7 @@
; CHECK: vmovsd (%
; CHECK-NEXT: vmovq %xmm
define i64 @bitcasti64tof64() {
- %a = load double* undef
+ %a = load double, double* undef
%b = bitcast double %a to i64
ret i64 %b
}
diff --git a/llvm/test/CodeGen/X86/avx-cvt.ll b/llvm/test/CodeGen/X86/avx-cvt.ll
index 10ab9713e8c..9f154abdf7a 100644
--- a/llvm/test/CodeGen/X86/avx-cvt.ll
+++ b/llvm/test/CodeGen/X86/avx-cvt.ll
@@ -47,7 +47,7 @@ define <4 x double> @fpext00(<4 x float> %b) nounwind {
; CHECK: vcvtsi2sdq (%
define double @funcA(i64* nocapture %e) nounwind uwtable readonly ssp {
entry:
- %tmp1 = load i64* %e, align 8
+ %tmp1 = load i64, i64* %e, align 8
%conv = sitofp i64 %tmp1 to double
ret double %conv
}
@@ -55,7 +55,7 @@ entry:
; CHECK: vcvtsi2sdl (%
define double @funcB(i32* nocapture %e) nounwind uwtable readonly ssp {
entry:
- %tmp1 = load i32* %e, align 4
+ %tmp1 = load i32, i32* %e, align 4
%conv = sitofp i32 %tmp1 to double
ret double %conv
}
@@ -63,7 +63,7 @@ entry:
; CHECK: vcvtsi2ssl (%
define float @funcC(i32* nocapture %e) nounwind uwtable readonly ssp {
entry:
- %tmp1 = load i32* %e, align 4
+ %tmp1 = load i32, i32* %e, align 4
%conv = sitofp i32 %tmp1 to float
ret float %conv
}
@@ -71,7 +71,7 @@ entry:
; CHECK: vcvtsi2ssq (%
define float @funcD(i64* nocapture %e) nounwind uwtable readonly ssp {
entry:
- %tmp1 = load i64* %e, align 8
+ %tmp1 = load i64, i64* %e, align 8
%conv = sitofp i64 %tmp1 to float
ret float %conv
}
@@ -81,7 +81,7 @@ define void @fpext() nounwind uwtable {
entry:
%f = alloca float, align 4
%d = alloca double, align 8
- %tmp = load float* %f, align 4
+ %tmp = load float, float* %f, align 4
%conv = fpext float %tmp to double
store double %conv, double* %d, align 8
ret void
diff --git a/llvm/test/CodeGen/X86/avx-intel-ocl.ll b/llvm/test/CodeGen/X86/avx-intel-ocl.ll
index 70ec1248cdd..b2836d30a08 100644
--- a/llvm/test/CodeGen/X86/avx-intel-ocl.ll
+++ b/llvm/test/CodeGen/X86/avx-intel-ocl.ll
@@ -33,7 +33,7 @@ define <16 x float> @testf16_inp(<16 x float> %a, <16 x float> %b) nounwind {
%y = alloca <16 x float>, align 16
%x = fadd <16 x float> %a, %b
%1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
- %2 = load <16 x float>* %y, align 16
+ %2 = load <16 x float>, <16 x float>* %y, align 16
%3 = fadd <16 x float> %2, %1
ret <16 x float> %3
}
@@ -58,7 +58,7 @@ define <16 x float> @testf16_regs(<16 x float> %a, <16 x float> %b) nounwind {
%y = alloca <16 x float>, align 16
%x = fadd <16 x float> %a, %b
%1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
- %2 = load <16 x float>* %y, align 16
+ %2 = load <16 x float>, <16 x float>* %y, align 16
%3 = fadd <16 x float> %1, %b
%4 = fadd <16 x float> %2, %3
ret <16 x float> %4
diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll b/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll
index 3ecf709c87e..8f63a082573 100644
--- a/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ b/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -1126,8 +1126,8 @@ define i32 @test_x86_sse42_pcmpestri128_load(<16 x i8>* %a0, <16 x i8>* %a2) {
; CHECK: movl $7
; CHECK: vpcmpestri $7, (
; CHECK: movl
- %1 = load <16 x i8>* %a0
- %2 = load <16 x i8>* %a2
+ %1 = load <16 x i8>, <16 x i8>* %a0
+ %2 = load <16 x i8>, <16 x i8>* %a2
%res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %1, i32 7, <16 x i8> %2, i32 7, i8 7) ; <i32> [#uses=1]
ret i32 %res
}
@@ -1204,7 +1204,7 @@ define <16 x i8> @test_x86_sse42_pcmpestrm128_load(<16 x i8> %a0, <16 x i8>* %a2
; CHECK: movl $7
; CHECK: vpcmpestrm $7,
; CHECK-NOT: vmov
- %1 = load <16 x i8>* %a2
+ %1 = load <16 x i8>, <16 x i8>* %a2
%res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %1, i32 7, i8 7) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}
@@ -1222,8 +1222,8 @@ declare i32 @llvm.x86.sse42.pcmpistri128(<16 x i8>, <16 x i8>, i8) nounwind read
define i32 @test_x86_sse42_pcmpistri128_load(<16 x i8>* %a0, <16 x i8>* %a1) {
; CHECK: vpcmpistri $7, (
; CHECK: movl
- %1 = load <16 x i8>* %a0
- %2 = load <16 x i8>* %a1
+ %1 = load <16 x i8>, <16 x i8>* %a0
+ %2 = load <16 x i8>, <16 x i8>* %a1
%res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %1, <16 x i8> %2, i8 7) ; <i32> [#uses=1]
ret i32 %res
}
@@ -1286,7 +1286,7 @@ declare <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8>, <16 x i8>, i8) nounwin
define <16 x i8> @test_x86_sse42_pcmpistrm128_load(<16 x i8> %a0, <16 x i8>* %a1) {
; CHECK: vpcmpistrm $7, (
; CHECK-NOT: vmov
- %1 = load <16 x i8>* %a1
+ %1 = load <16 x i8>, <16 x i8>* %a1
%res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %1, i8 7) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}
@@ -2330,7 +2330,7 @@ define <4 x float> @test_x86_avx_vpermilvar_ps(<4 x float> %a0, <4 x i32> %a1) {
}
define <4 x float> @test_x86_avx_vpermilvar_ps_load(<4 x float> %a0, <4 x i32>* %a1) {
; CHECK: vpermilps
- %a2 = load <4 x i32>* %a1
+ %a2 = load <4 x i32>, <4 x i32>* %a1
%res = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %a0, <4 x i32> %a2) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
diff --git a/llvm/test/CodeGen/X86/avx-load-store.ll b/llvm/test/CodeGen/X86/avx-load-store.ll
index a6775aba098..ee5bd0e68e7 100644
--- a/llvm/test/CodeGen/X86/avx-load-store.ll
+++ b/llvm/test/CodeGen/X86/avx-load-store.ll
@@ -10,10 +10,10 @@
define void @test_256_load(double* nocapture %d, float* nocapture %f, <4 x i64>* nocapture %i) nounwind uwtable ssp {
entry:
%0 = bitcast double* %d to <4 x double>*
- %tmp1.i = load <4 x double>* %0, align 32
+ %tmp1.i = load <4 x double>, <4 x double>* %0, align 32
%1 = bitcast float* %f to <8 x float>*
- %tmp1.i17 = load <8 x float>* %1, align 32
- %tmp1.i16 = load <4 x i64>* %i, align 32
+ %tmp1.i17 = load <8 x float>, <8 x float>* %1, align 32
+ %tmp1.i16 = load <4 x i64>, <4 x i64>* %i, align 32
tail call void @dummy(<4 x double> %tmp1.i, <8 x float> %tmp1.i17, <4 x i64> %tmp1.i16) nounwind
store <4 x double> %tmp1.i, <4 x double>* %0, align 32
store <8 x float> %tmp1.i17, <8 x float>* %1, align 32
@@ -29,7 +29,7 @@ declare void @dummy(<4 x double>, <8 x float>, <4 x i64>)
; CHECK: mov00
define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
- %val = load float* %ptr
+ %val = load float, float* %ptr
; CHECK: vinsertps
; CHECK: vinsertf128
%i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
@@ -39,7 +39,7 @@ define <8 x float> @mov00(<8 x float> %v, float * %ptr) nounwind {
; CHECK: mov01
define <4 x double> @mov01(<4 x double> %v, double * %ptr) nounwind {
- %val = load double* %ptr
+ %val = load double, double* %ptr
; CHECK: vmovlpd
; CHECK: vinsertf128
%i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
@@ -122,7 +122,7 @@ cif_mixed_test_any_check: ; preds = %cif_mask_mixed
; CHECK: vmovups
; CHECK: vmovups
define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
- %b = load <8 x i32>* %bp, align 1
+ %b = load <8 x i32>, <8 x i32>* %bp, align 1
%x = add <8 x i32> zeroinitializer, %b
store <8 x i32> %x, <8 x i32>* %ret, align 1
ret void
@@ -132,7 +132,7 @@ define void @add8i32(<8 x i32>* %ret, <8 x i32>* %bp) nounwind {
; CHECK: vmovaps ({{.*}}), %ymm{{.*}}
; CHECK: vmovaps %ymm{{.*}}, ({{.*}})
define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
- %b = load <4 x i64>* %bp, align 64
+ %b = load <4 x i64>, <4 x i64>* %bp, align 64
%x = add <4 x i64> zeroinitializer, %b
store <4 x i64> %x, <4 x i64>* %ret, align 64
ret void
@@ -144,7 +144,7 @@ define void @add4i64a64(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
; CHECK: vmovaps %xmm{{.*}}, {{.*}}({{.*}})
define void @add4i64a16(<4 x i64>* %ret, <4 x i64>* %bp) nounwind {
- %b = load <4 x i64>* %bp, align 16
+ %b = load <4 x i64>, <4 x i64>* %bp, align 16
%x = add <4 x i64> zeroinitializer, %b
store <4 x i64> %x, <4 x i64>* %ret, align 16
ret void
diff --git a/llvm/test/CodeGen/X86/avx-logic.ll b/llvm/test/CodeGen/X86/avx-logic.ll
index 115cefb1b5e..062956fbbc6 100644
--- a/llvm/test/CodeGen/X86/avx-logic.ll
+++ b/llvm/test/CodeGen/X86/avx-logic.ll
@@ -142,7 +142,7 @@ entry:
; CHECK: vandnpd (%
define <4 x double> @andnotpd256fold(<4 x double> %y, <4 x double>* nocapture %x) nounwind uwtable readonly ssp {
entry:
- %tmp2 = load <4 x double>* %x, align 32
+ %tmp2 = load <4 x double>, <4 x double>* %x, align 32
%0 = bitcast <4 x double> %y to <4 x i64>
%neg.i = xor <4 x i64> %0, <i64 -1, i64 -1, i64 -1, i64 -1>
%1 = bitcast <4 x double> %tmp2 to <4 x i64>
@@ -167,7 +167,7 @@ entry:
; CHECK: vandnps (%
define <8 x float> @andnotps256fold(<8 x float> %y, <8 x float>* nocapture %x) nounwind uwtable readonly ssp {
entry:
- %tmp2 = load <8 x float>* %x, align 32
+ %tmp2 = load <8 x float>, <8 x float>* %x, align 32
%0 = bitcast <8 x float> %y to <8 x i32>
%neg.i = xor <8 x i32> %0, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%1 = bitcast <8 x float> %tmp2 to <8 x i32>
diff --git a/llvm/test/CodeGen/X86/avx-splat.ll b/llvm/test/CodeGen/X86/avx-splat.ll
index 3b24d950638..3ea7e386c42 100644
--- a/llvm/test/CodeGen/X86/avx-splat.ll
+++ b/llvm/test/CodeGen/X86/avx-splat.ll
@@ -58,7 +58,7 @@ for_exit499: ; preds = %for_test505.prehead
load.i1247: ; preds = %for_exit499
%ptr1227 = getelementptr [18 x [18 x float]], [18 x [18 x float]]* %udx495, i64 0, i64 1, i64 1
%ptr.i1237 = bitcast float* %ptr1227 to i32*
- %val.i1238 = load i32* %ptr.i1237, align 4
+ %val.i1238 = load i32, i32* %ptr.i1237, align 4
%ret6.i1245 = insertelement <8 x i32> undef, i32 %val.i1238, i32 6
%ret7.i1246 = insertelement <8 x i32> %ret6.i1245, i32 %val.i1238, i32 7
%phitmp = bitcast <8 x i32> %ret7.i1246 to <8 x float>
diff --git a/llvm/test/CodeGen/X86/avx-unpack.ll b/llvm/test/CodeGen/X86/avx-unpack.ll
index 20f53453226..6924d98b38b 100644
--- a/llvm/test/CodeGen/X86/avx-unpack.ll
+++ b/llvm/test/CodeGen/X86/avx-unpack.ll
@@ -70,8 +70,8 @@ entry:
; CHECK: vunpckhps (%
define <8 x i32> @unpackhips2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtable readnone ssp {
entry:
- %a = load <8 x i32>* %src1
- %b = load <8 x i32>* %src2
+ %a = load <8 x i32>, <8 x i32>* %src1
+ %b = load <8 x i32>, <8 x i32>* %src2
%shuffle.i = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
ret <8 x i32> %shuffle.i
}
@@ -86,8 +86,8 @@ entry:
; CHECK: vunpckhpd (%
define <4 x i64> @unpackhipd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtable readnone ssp {
entry:
- %a = load <4 x i64>* %src1
- %b = load <4 x i64>* %src2
+ %a = load <4 x i64>, <4 x i64>* %src1
+ %b = load <4 x i64>, <4 x i64>* %src2
%shuffle.i = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
ret <4 x i64> %shuffle.i
}
@@ -102,8 +102,8 @@ entry:
; CHECK: vunpcklps (%
define <8 x i32> @unpacklops2(<8 x i32>* %src1, <8 x i32>* %src2) nounwind uwtable readnone ssp {
entry:
- %a = load <8 x i32>* %src1
- %b = load <8 x i32>* %src2
+ %a = load <8 x i32>, <8 x i32>* %src1
+ %b = load <8 x i32>, <8 x i32>* %src2
%shuffle.i = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
ret <8 x i32> %shuffle.i
}
@@ -118,8 +118,8 @@ entry:
; CHECK: vunpcklpd (%
define <4 x i64> @unpacklopd2(<4 x i64>* %src1, <4 x i64>* %src2) nounwind uwtable readnone ssp {
entry:
- %a = load <4 x i64>* %src1
- %b = load <4 x i64>* %src2
+ %a = load <4 x i64>, <4 x i64>* %src1
+ %b = load <4 x i64>, <4 x i64>* %src2
%shuffle.i = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
ret <4 x i64> %shuffle.i
}
diff --git a/llvm/test/CodeGen/X86/avx-varargs-x86_64.ll b/llvm/test/CodeGen/X86/avx-varargs-x86_64.ll
index f73174dd2bc..f550733ca4f 100644
--- a/llvm/test/CodeGen/X86/avx-varargs-x86_64.ll
+++ b/llvm/test/CodeGen/X86/avx-varargs-x86_64.ll
@@ -9,7 +9,7 @@ declare i32 @f(i32, ...)
; CHECK: vmovaps %ymm0, (%rsp)
define void @test1() nounwind uwtable ssp {
entry:
- %0 = load <8 x float>* @x, align 32
+ %0 = load <8 x float>, <8 x float>* @x, align 32
%call = call i32 (i32, ...)* @f(i32 1, <8 x float> %0)
ret void
}
diff --git a/llvm/test/CodeGen/X86/avx-vbroadcast.ll b/llvm/test/CodeGen/X86/avx-vbroadcast.ll
index 2ebe6fda37a..8b8c11b8587 100644
--- a/llvm/test/CodeGen/X86/avx-vbroadcast.ll
+++ b/llvm/test/CodeGen/X86/avx-vbroadcast.ll
@@ -3,7 +3,7 @@
; CHECK: vbroadcastsd (%
define <4 x i64> @A(i64* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i64* %ptr, align 8
+ %q = load i64, i64* %ptr, align 8
%vecinit.i = insertelement <4 x i64> undef, i64 %q, i32 0
%vecinit2.i = insertelement <4 x i64> %vecinit.i, i64 %q, i32 1
%vecinit4.i = insertelement <4 x i64> %vecinit2.i, i64 %q, i32 2
@@ -14,7 +14,7 @@ entry:
; CHECK: vbroadcastss (%
define <8 x i32> @B(i32* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i32* %ptr, align 4
+ %q = load i32, i32* %ptr, align 4
%vecinit.i = insertelement <8 x i32> undef, i32 %q, i32 0
%vecinit2.i = insertelement <8 x i32> %vecinit.i, i32 %q, i32 1
%vecinit4.i = insertelement <8 x i32> %vecinit2.i, i32 %q, i32 2
@@ -25,7 +25,7 @@ entry:
; CHECK: vbroadcastsd (%
define <4 x double> @C(double* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load double* %ptr, align 8
+ %q = load double, double* %ptr, align 8
%vecinit.i = insertelement <4 x double> undef, double %q, i32 0
%vecinit2.i = insertelement <4 x double> %vecinit.i, double %q, i32 1
%vecinit4.i = insertelement <4 x double> %vecinit2.i, double %q, i32 2
@@ -36,7 +36,7 @@ entry:
; CHECK: vbroadcastss (%
define <8 x float> @D(float* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load float* %ptr, align 4
+ %q = load float, float* %ptr, align 4
%vecinit.i = insertelement <8 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <8 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <8 x float> %vecinit2.i, float %q, i32 2
@@ -49,7 +49,7 @@ entry:
; CHECK: vbroadcastss (%
define <4 x float> @e(float* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load float* %ptr, align 4
+ %q = load float, float* %ptr, align 4
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
@@ -73,7 +73,7 @@ define <4 x float> @_e2(float* %ptr) nounwind uwtable readnone ssp {
; CHECK: vbroadcastss (%
define <4 x i32> @F(i32* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i32* %ptr, align 4
+ %q = load i32, i32* %ptr, align 4
%vecinit.i = insertelement <4 x i32> undef, i32 %q, i32 0
%vecinit2.i = insertelement <4 x i32> %vecinit.i, i32 %q, i32 1
%vecinit4.i = insertelement <4 x i32> %vecinit2.i, i32 %q, i32 2
@@ -88,7 +88,7 @@ entry:
; CHECK: ret
define <2 x i64> @G(i64* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i64* %ptr, align 8
+ %q = load i64, i64* %ptr, align 8
%vecinit.i = insertelement <2 x i64> undef, i64 %q, i32 0
%vecinit2.i = insertelement <2 x i64> %vecinit.i, i64 %q, i32 1
ret <2 x i64> %vecinit2.i
@@ -107,7 +107,7 @@ define <4 x i32> @H(<4 x i32> %a) {
; CHECK: ret
define <2 x double> @I(double* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load double* %ptr, align 4
+ %q = load double, double* %ptr, align 4
%vecinit.i = insertelement <2 x double> undef, double %q, i32 0
%vecinit2.i = insertelement <2 x double> %vecinit.i, double %q, i32 1
ret <2 x double> %vecinit2.i
@@ -118,13 +118,13 @@ entry:
; CHECK: ret
define <4 x float> @_RR(float* %ptr, i32* %k) nounwind uwtable readnone ssp {
entry:
- %q = load float* %ptr, align 4
+ %q = load float, float* %ptr, align 4
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
%vecinit6.i = insertelement <4 x float> %vecinit4.i, float %q, i32 3
; force a chain
- %j = load i32* %k, align 4
+ %j = load i32, i32* %k, align 4
store i32 %j, i32* undef
ret <4 x float> %vecinit6.i
}
@@ -135,7 +135,7 @@ entry:
; CHECK: ret
define <4 x float> @_RR2(float* %ptr, i32* %k) nounwind uwtable readnone ssp {
entry:
- %q = load float* %ptr, align 4
+ %q = load float, float* %ptr, align 4
%v = insertelement <4 x float> undef, float %q, i32 0
%t = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %t
@@ -151,7 +151,7 @@ entry:
; CHECK: vbroadcastss (%
; CHECK-NEXT: ret
define <8 x float> @splat_concat1(float* %p) {
- %1 = load float* %p, align 4
+ %1 = load float, float* %p, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
%3 = insertelement <4 x float> %2, float %1, i32 1
%4 = insertelement <4 x float> %3, float %1, i32 2
@@ -165,7 +165,7 @@ define <8 x float> @splat_concat1(float* %p) {
; CHECK: vbroadcastss (%
; CHECK-NEXT: ret
define <8 x float> @splat_concat2(float* %p) {
- %1 = load float* %p, align 4
+ %1 = load float, float* %p, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
%3 = insertelement <4 x float> %2, float %1, i32 1
%4 = insertelement <4 x float> %3, float %1, i32 2
@@ -183,7 +183,7 @@ define <8 x float> @splat_concat2(float* %p) {
; CHECK: vbroadcastsd (%
; CHECK-NEXT: ret
define <4 x double> @splat_concat3(double* %p) {
- %1 = load double* %p, align 8
+ %1 = load double, double* %p, align 8
%2 = insertelement <2 x double> undef, double %1, i32 0
%3 = insertelement <2 x double> %2, double %1, i32 1
%4 = shufflevector <2 x double> %3, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -195,7 +195,7 @@ define <4 x double> @splat_concat3(double* %p) {
; CHECK: vbroadcastsd (%
; CHECK-NEXT: ret
define <4 x double> @splat_concat4(double* %p) {
- %1 = load double* %p, align 8
+ %1 = load double, double* %p, align 8
%2 = insertelement <2 x double> undef, double %1, i32 0
%3 = insertelement <2 x double> %2, double %1, i32 1
%4 = insertelement <2 x double> undef, double %1, i32 0
diff --git a/llvm/test/CodeGen/X86/avx-vinsertf128.ll b/llvm/test/CodeGen/X86/avx-vinsertf128.ll
index e1984feb7f3..d0f8f4ebaea 100644
--- a/llvm/test/CodeGen/X86/avx-vinsertf128.ll
+++ b/llvm/test/CodeGen/X86/avx-vinsertf128.ll
@@ -112,7 +112,7 @@ define <8 x float> @vinsertf128_combine(float* nocapture %f) nounwind uwtable re
entry:
%add.ptr = getelementptr inbounds float, float* %f, i64 4
%0 = bitcast float* %add.ptr to <4 x float>*
- %1 = load <4 x float>* %0, align 16
+ %1 = load <4 x float>, <4 x float>* %0, align 16
%2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %1, i8 1)
ret <8 x float> %2
}
@@ -125,7 +125,7 @@ define <8 x float> @vinsertf128_ucombine(float* nocapture %f) nounwind uwtable r
entry:
%add.ptr = getelementptr inbounds float, float* %f, i64 4
%0 = bitcast float* %add.ptr to <4 x float>*
- %1 = load <4 x float>* %0, align 8
+ %1 = load <4 x float>, <4 x float>* %0, align 8
%2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %1, i8 1)
ret <8 x float> %2
}
diff --git a/llvm/test/CodeGen/X86/avx-vperm2x128.ll b/llvm/test/CodeGen/X86/avx-vperm2x128.ll
index 43303ca57c4..ef3f7952477 100644
--- a/llvm/test/CodeGen/X86/avx-vperm2x128.ll
+++ b/llvm/test/CodeGen/X86/avx-vperm2x128.ll
@@ -160,8 +160,8 @@ define <16 x i16> @E5i(<16 x i16>* %a, <16 x i16>* %b) nounwind uwtable readnone
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
entry:
- %c = load <16 x i16>* %a
- %d = load <16 x i16>* %b
+ %c = load <16 x i16>, <16 x i16>* %a
+ %d = load <16 x i16>, <16 x i16>* %b
%c2 = add <16 x i16> %c, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%shuffle = shufflevector <16 x i16> %c2, <16 x i16> %d, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <16 x i16> %shuffle
diff --git a/llvm/test/CodeGen/X86/avx-vzeroupper.ll b/llvm/test/CodeGen/X86/avx-vzeroupper.ll
index a2163a254e1..4f0c6001a7c 100644
--- a/llvm/test/CodeGen/X86/avx-vzeroupper.ll
+++ b/llvm/test/CodeGen/X86/avx-vzeroupper.ll
@@ -24,7 +24,7 @@ entry:
; CHECK: _test01
define <8 x float> @test01(<4 x float> %a, <4 x float> %b, <8 x float> %c) nounwind uwtable ssp {
entry:
- %tmp = load <4 x float>* @x, align 16
+ %tmp = load <4 x float>, <4 x float>* @x, align 16
; CHECK: vzeroupper
; CHECK-NEXT: callq _do_sse
%call = tail call <4 x float> @do_sse(<4 x float> %tmp) nounwind
@@ -73,7 +73,7 @@ for.body:
%call5 = tail call <4 x float> @do_sse(<4 x float> %c.017) nounwind
; CHECK-NEXT: callq _do_sse
%call7 = tail call <4 x float> @do_sse(<4 x float> %call5) nounwind
- %tmp11 = load <8 x float>* @g, align 32
+ %tmp11 = load <8 x float>, <8 x float>* @g, align 32
%0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %tmp11, i8 1) nounwind
; CHECK: vzeroupper
; CHECK-NEXT: callq _do_sse
diff --git a/llvm/test/CodeGen/X86/avx.ll b/llvm/test/CodeGen/X86/avx.ll
index 1604f108731..f71ec5c10e6 100644
--- a/llvm/test/CodeGen/X86/avx.ll
+++ b/llvm/test/CodeGen/X86/avx.ll
@@ -34,7 +34,7 @@ define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocap
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK-NEXT: ret
- %1 = load <4 x float>* %pb, align 16
+ %1 = load <4 x float>, <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
ret <4 x float> %2
}
@@ -48,7 +48,7 @@ define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>
;; Try to match a bit more of the instr, since we need the load's offset.
; CHECK: insertps $96, 4(%{{...}}), %
; CHECK-NEXT: ret
- %1 = load <4 x float>* %pb, align 16
+ %1 = load <4 x float>, <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
ret <4 x float> %2
}
@@ -63,7 +63,7 @@ define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x floa
; CHECK: vinsertps $192, 12(%{{...}},%{{...}}), %
; CHECK-NEXT: ret
%1 = getelementptr inbounds <4 x float>, <4 x float>* %pb, i64 %index
- %2 = load <4 x float>* %1, align 16
+ %2 = load <4 x float>, <4 x float>* %1, align 16
%3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
ret <4 x float> %3
}
@@ -77,7 +77,7 @@ define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocap
; CHECK: insertps $48
; CHECK-NEXT: ret
%1 = getelementptr inbounds float, float* %fb, i64 %index
- %2 = load float* %1, align 4
+ %2 = load float, float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
%5 = insertelement <4 x float> %4, float %2, i32 2
@@ -93,7 +93,7 @@ define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, <4 x float
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK-NEXT: ret
- %1 = load <4 x float>* %b, align 4
+ %1 = load <4 x float>, <4 x float>* %b, align 4
%2 = extractelement <4 x float> %1, i32 0
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
@@ -120,7 +120,7 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; CHECK: vaddps
; CHECK-NEXT: ret
%1 = getelementptr inbounds float, float* %fb, i64 %index
- %2 = load float* %1, align 4
+ %2 = load float, float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
%5 = insertelement <4 x float> %4, float %2, i32 2
diff --git a/llvm/test/CodeGen/X86/avx1-logical-load-folding.ll b/llvm/test/CodeGen/X86/avx1-logical-load-folding.ll
index 32301b1bf9e..90e00c96539 100644
--- a/llvm/test/CodeGen/X86/avx1-logical-load-folding.ll
+++ b/llvm/test/CodeGen/X86/avx1-logical-load-folding.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-apple-macosx10.9.0"
; Function Attrs: nounwind ssp uwtable
define void @test1(float* %A, float* %C) #0 {
%tmp1 = bitcast float* %A to <8 x float>*
- %tmp2 = load <8 x float>* %tmp1, align 32
+ %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
%tmp4 = and <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
%tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
@@ -20,7 +20,7 @@ define void @test1(float* %A, float* %C) #0 {
; Function Attrs: nounwind ssp uwtable
define void @test2(float* %A, float* %C) #0 {
%tmp1 = bitcast float* %A to <8 x float>*
- %tmp2 = load <8 x float>* %tmp1, align 32
+ %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
%tmp4 = or <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
%tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
@@ -34,7 +34,7 @@ define void @test2(float* %A, float* %C) #0 {
; Function Attrs: nounwind ssp uwtable
define void @test3(float* %A, float* %C) #0 {
%tmp1 = bitcast float* %A to <8 x float>*
- %tmp2 = load <8 x float>* %tmp1, align 32
+ %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
%tmp4 = xor <8 x i32> %tmp3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
%tmp5 = bitcast <8 x i32> %tmp4 to <8 x float>
@@ -47,7 +47,7 @@ define void @test3(float* %A, float* %C) #0 {
define void @test4(float* %A, float* %C) #0 {
%tmp1 = bitcast float* %A to <8 x float>*
- %tmp2 = load <8 x float>* %tmp1, align 32
+ %tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
%tmp4 = xor <8 x i32> %tmp3, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%tmp5 = and <8 x i32> %tmp4, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
diff --git a/llvm/test/CodeGen/X86/avx2-conversions.ll b/llvm/test/CodeGen/X86/avx2-conversions.ll
index 5f17f1bfe37..9b6d5aa5eea 100644
--- a/llvm/test/CodeGen/X86/avx2-conversions.ll
+++ b/llvm/test/CodeGen/X86/avx2-conversions.ll
@@ -95,7 +95,7 @@ define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
; CHECK: vpmovsxdq (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
- %X = load <4 x i32>* %ptr
+ %X = load <4 x i32>, <4 x i32>* %ptr
%Y = sext <4 x i32> %X to <4 x i64>
ret <4 x i64>%Y
}
@@ -104,7 +104,7 @@ define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
; CHECK: vpmovsxbq (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
- %X = load <4 x i8>* %ptr
+ %X = load <4 x i8>, <4 x i8>* %ptr
%Y = sext <4 x i8> %X to <4 x i64>
ret <4 x i64>%Y
}
@@ -113,7 +113,7 @@ define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
; CHECK: vpmovsxwq (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
- %X = load <4 x i16>* %ptr
+ %X = load <4 x i16>, <4 x i16>* %ptr
%Y = sext <4 x i16> %X to <4 x i64>
ret <4 x i64>%Y
}
@@ -122,7 +122,7 @@ define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
; CHECK: vpmovsxwd (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
- %X = load <8 x i16>* %ptr
+ %X = load <8 x i16>, <8 x i16>* %ptr
%Y = sext <8 x i16> %X to <8 x i32>
ret <8 x i32>%Y
}
@@ -131,7 +131,7 @@ define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
; CHECK: vpmovsxbd (%r{{[^,]*}}), %ymm{{.*}}
; CHECK: ret
define <8 x i32> @load_sext_test5(<8 x i8> *%ptr) {
- %X = load <8 x i8>* %ptr
+ %X = load <8 x i8>, <8 x i8>* %ptr
%Y = sext <8 x i8> %X to <8 x i32>
ret <8 x i32>%Y
}
diff --git a/llvm/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll b/llvm/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
index 7301b7cbfc4..6bd6a5041d4 100644
--- a/llvm/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
@@ -3,7 +3,7 @@
define <16 x i16> @test_lvm_x86_avx2_pmovsxbw(<16 x i8>* %a) {
; CHECK-LABEL: test_lvm_x86_avx2_pmovsxbw
; CHECK: vpmovsxbw (%rdi), %ymm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8> %1)
ret <16 x i16> %2
}
@@ -11,7 +11,7 @@ define <16 x i16> @test_lvm_x86_avx2_pmovsxbw(<16 x i8>* %a) {
define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbd
; CHECK: vpmovsxbd (%rdi), %ymm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8> %1)
ret <8 x i32> %2
}
@@ -19,7 +19,7 @@ define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbq
; CHECK: vpmovsxbq (%rdi), %ymm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8> %1)
ret <4 x i64> %2
}
@@ -27,7 +27,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwd
; CHECK: vpmovsxwd (%rdi), %ymm0
- %1 = load <8 x i16>* %a, align 1
+ %1 = load <8 x i16>, <8 x i16>* %a, align 1
%2 = call <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16> %1)
ret <8 x i32> %2
}
@@ -35,7 +35,7 @@ define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(<8 x i16>* %a) {
define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwq
; CHECK: vpmovsxwq (%rdi), %ymm0
- %1 = load <8 x i16>* %a, align 1
+ %1 = load <8 x i16>, <8 x i16>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16> %1)
ret <4 x i64> %2
}
@@ -43,7 +43,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(<4 x i32>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxdq
; CHECK: vpmovsxdq (%rdi), %ymm0
- %1 = load <4 x i32>* %a, align 1
+ %1 = load <4 x i32>, <4 x i32>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32> %1)
ret <4 x i64> %2
}
@@ -51,7 +51,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(<4 x i32>* %a) {
define <16 x i16> @test_lvm_x86_avx2_pmovzxbw(<16 x i8>* %a) {
; CHECK-LABEL: test_lvm_x86_avx2_pmovzxbw
; CHECK: vpmovzxbw (%rdi), %ymm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8> %1)
ret <16 x i16> %2
}
@@ -59,7 +59,7 @@ define <16 x i16> @test_lvm_x86_avx2_pmovzxbw(<16 x i8>* %a) {
define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbd
; CHECK: vpmovzxbd (%rdi), %ymm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8> %1)
ret <8 x i32> %2
}
@@ -67,7 +67,7 @@ define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbq
; CHECK: vpmovzxbq (%rdi), %ymm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8> %1)
ret <4 x i64> %2
}
@@ -75,7 +75,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwd
; CHECK: vpmovzxwd (%rdi), %ymm0
- %1 = load <8 x i16>* %a, align 1
+ %1 = load <8 x i16>, <8 x i16>* %a, align 1
%2 = call <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16> %1)
ret <8 x i32> %2
}
@@ -83,7 +83,7 @@ define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(<8 x i16>* %a) {
define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwq
; CHECK: vpmovzxwq (%rdi), %ymm0
- %1 = load <8 x i16>* %a, align 1
+ %1 = load <8 x i16>, <8 x i16>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16> %1)
ret <4 x i64> %2
}
@@ -91,7 +91,7 @@ define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
define <4 x i64> @test_llvm_x86_avx2_pmovzxdq(<4 x i32>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxdq
; CHECK: vpmovzxdq (%rdi), %ymm0
- %1 = load <4 x i32>* %a, align 1
+ %1 = load <4 x i32>, <4 x i32>* %a, align 1
%2 = call <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32> %1)
ret <4 x i64> %2
}
diff --git a/llvm/test/CodeGen/X86/avx2-shift.ll b/llvm/test/CodeGen/X86/avx2-shift.ll
index 025d52ede0f..5adbb2ef665 100644
--- a/llvm/test/CodeGen/X86/avx2-shift.ll
+++ b/llvm/test/CodeGen/X86/avx2-shift.ll
@@ -130,7 +130,7 @@ define <16 x i16> @vshift07(<16 x i16> %a) nounwind readnone {
; CHECK: vpsravd (%
; CHECK: ret
define <4 x i32> @variable_sra0_load(<4 x i32> %x, <4 x i32>* %y) {
- %y1 = load <4 x i32>* %y
+ %y1 = load <4 x i32>, <4 x i32>* %y
%k = ashr <4 x i32> %x, %y1
ret <4 x i32> %k
}
@@ -139,7 +139,7 @@ define <4 x i32> @variable_sra0_load(<4 x i32> %x, <4 x i32>* %y) {
; CHECK: vpsravd (%
; CHECK: ret
define <8 x i32> @variable_sra1_load(<8 x i32> %x, <8 x i32>* %y) {
- %y1 = load <8 x i32>* %y
+ %y1 = load <8 x i32>, <8 x i32>* %y
%k = ashr <8 x i32> %x, %y1
ret <8 x i32> %k
}
@@ -148,7 +148,7 @@ define <8 x i32> @variable_sra1_load(<8 x i32> %x, <8 x i32>* %y) {
; CHECK: vpsllvd (%
; CHECK: ret
define <4 x i32> @variable_shl0_load(<4 x i32> %x, <4 x i32>* %y) {
- %y1 = load <4 x i32>* %y
+ %y1 = load <4 x i32>, <4 x i32>* %y
%k = shl <4 x i32> %x, %y1
ret <4 x i32> %k
}
@@ -156,7 +156,7 @@ define <4 x i32> @variable_shl0_load(<4 x i32> %x, <4 x i32>* %y) {
; CHECK: vpsllvd (%
; CHECK: ret
define <8 x i32> @variable_shl1_load(<8 x i32> %x, <8 x i32>* %y) {
- %y1 = load <8 x i32>* %y
+ %y1 = load <8 x i32>, <8 x i32>* %y
%k = shl <8 x i32> %x, %y1
ret <8 x i32> %k
}
@@ -164,7 +164,7 @@ define <8 x i32> @variable_shl1_load(<8 x i32> %x, <8 x i32>* %y) {
; CHECK: vpsllvq (%
; CHECK: ret
define <2 x i64> @variable_shl2_load(<2 x i64> %x, <2 x i64>* %y) {
- %y1 = load <2 x i64>* %y
+ %y1 = load <2 x i64>, <2 x i64>* %y
%k = shl <2 x i64> %x, %y1
ret <2 x i64> %k
}
@@ -172,7 +172,7 @@ define <2 x i64> @variable_shl2_load(<2 x i64> %x, <2 x i64>* %y) {
; CHECK: vpsllvq (%
; CHECK: ret
define <4 x i64> @variable_shl3_load(<4 x i64> %x, <4 x i64>* %y) {
- %y1 = load <4 x i64>* %y
+ %y1 = load <4 x i64>, <4 x i64>* %y
%k = shl <4 x i64> %x, %y1
ret <4 x i64> %k
}
@@ -180,7 +180,7 @@ define <4 x i64> @variable_shl3_load(<4 x i64> %x, <4 x i64>* %y) {
; CHECK: vpsrlvd (%
; CHECK: ret
define <4 x i32> @variable_srl0_load(<4 x i32> %x, <4 x i32>* %y) {
- %y1 = load <4 x i32>* %y
+ %y1 = load <4 x i32>, <4 x i32>* %y
%k = lshr <4 x i32> %x, %y1
ret <4 x i32> %k
}
@@ -188,7 +188,7 @@ define <4 x i32> @variable_srl0_load(<4 x i32> %x, <4 x i32>* %y) {
; CHECK: vpsrlvd (%
; CHECK: ret
define <8 x i32> @variable_srl1_load(<8 x i32> %x, <8 x i32>* %y) {
- %y1 = load <8 x i32>* %y
+ %y1 = load <8 x i32>, <8 x i32>* %y
%k = lshr <8 x i32> %x, %y1
ret <8 x i32> %k
}
@@ -196,7 +196,7 @@ define <8 x i32> @variable_srl1_load(<8 x i32> %x, <8 x i32>* %y) {
; CHECK: vpsrlvq (%
; CHECK: ret
define <2 x i64> @variable_srl2_load(<2 x i64> %x, <2 x i64>* %y) {
- %y1 = load <2 x i64>* %y
+ %y1 = load <2 x i64>, <2 x i64>* %y
%k = lshr <2 x i64> %x, %y1
ret <2 x i64> %k
}
@@ -204,7 +204,7 @@ define <2 x i64> @variable_srl2_load(<2 x i64> %x, <2 x i64>* %y) {
; CHECK: vpsrlvq (%
; CHECK: ret
define <4 x i64> @variable_srl3_load(<4 x i64> %x, <4 x i64>* %y) {
- %y1 = load <4 x i64>* %y
+ %y1 = load <4 x i64>, <4 x i64>* %y
%k = lshr <4 x i64> %x, %y1
ret <4 x i64> %k
}
diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll
index 83100a8691f..94dcdcabdd3 100644
--- a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll
+++ b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll
@@ -3,7 +3,7 @@
; CHECK: vpbroadcastb (%
define <16 x i8> @BB16(i8* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i8* %ptr, align 4
+ %q = load i8, i8* %ptr, align 4
%q0 = insertelement <16 x i8> undef, i8 %q, i32 0
%q1 = insertelement <16 x i8> %q0, i8 %q, i32 1
%q2 = insertelement <16 x i8> %q1, i8 %q, i32 2
@@ -25,7 +25,7 @@ entry:
; CHECK: vpbroadcastb (%
define <32 x i8> @BB32(i8* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i8* %ptr, align 4
+ %q = load i8, i8* %ptr, align 4
%q0 = insertelement <32 x i8> undef, i8 %q, i32 0
%q1 = insertelement <32 x i8> %q0, i8 %q, i32 1
%q2 = insertelement <32 x i8> %q1, i8 %q, i32 2
@@ -65,7 +65,7 @@ entry:
define <8 x i16> @W16(i16* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i16* %ptr, align 4
+ %q = load i16, i16* %ptr, align 4
%q0 = insertelement <8 x i16> undef, i16 %q, i32 0
%q1 = insertelement <8 x i16> %q0, i16 %q, i32 1
%q2 = insertelement <8 x i16> %q1, i16 %q, i32 2
@@ -79,7 +79,7 @@ entry:
; CHECK: vpbroadcastw (%
define <16 x i16> @WW16(i16* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i16* %ptr, align 4
+ %q = load i16, i16* %ptr, align 4
%q0 = insertelement <16 x i16> undef, i16 %q, i32 0
%q1 = insertelement <16 x i16> %q0, i16 %q, i32 1
%q2 = insertelement <16 x i16> %q1, i16 %q, i32 2
@@ -101,7 +101,7 @@ entry:
; CHECK: vbroadcastss (%
define <4 x i32> @D32(i32* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i32* %ptr, align 4
+ %q = load i32, i32* %ptr, align 4
%q0 = insertelement <4 x i32> undef, i32 %q, i32 0
%q1 = insertelement <4 x i32> %q0, i32 %q, i32 1
%q2 = insertelement <4 x i32> %q1, i32 %q, i32 2
@@ -111,7 +111,7 @@ entry:
; CHECK: vbroadcastss (%
define <8 x i32> @DD32(i32* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i32* %ptr, align 4
+ %q = load i32, i32* %ptr, align 4
%q0 = insertelement <8 x i32> undef, i32 %q, i32 0
%q1 = insertelement <8 x i32> %q0, i32 %q, i32 1
%q2 = insertelement <8 x i32> %q1, i32 %q, i32 2
@@ -125,7 +125,7 @@ entry:
; CHECK: vpbroadcastq (%
define <2 x i64> @Q64(i64* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i64* %ptr, align 4
+ %q = load i64, i64* %ptr, align 4
%q0 = insertelement <2 x i64> undef, i64 %q, i32 0
%q1 = insertelement <2 x i64> %q0, i64 %q, i32 1
ret <2 x i64> %q1
@@ -133,7 +133,7 @@ entry:
; CHECK: vbroadcastsd (%
define <4 x i64> @QQ64(i64* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load i64* %ptr, align 4
+ %q = load i64, i64* %ptr, align 4
%q0 = insertelement <4 x i64> undef, i64 %q, i32 0
%q1 = insertelement <4 x i64> %q0, i64 %q, i32 1
%q2 = insertelement <4 x i64> %q1, i64 %q, i32 2
@@ -145,7 +145,7 @@ entry:
; this used to crash
define <2 x double> @I(double* %ptr) nounwind uwtable readnone ssp {
entry:
- %q = load double* %ptr, align 4
+ %q = load double, double* %ptr, align 4
%vecinit.i = insertelement <2 x double> undef, double %q, i32 0
%vecinit2.i = insertelement <2 x double> %vecinit.i, double %q, i32 1
ret <2 x double> %vecinit2.i
@@ -431,8 +431,8 @@ eintry:
%__b.addr.i = alloca <2 x i64>, align 16
%vCr = alloca <2 x i64>, align 16
store <2 x i64> zeroinitializer, <2 x i64>* %vCr, align 16
- %tmp = load <2 x i64>* %vCr, align 16
- %tmp2 = load i8* %cV_R.addr, align 4
+ %tmp = load <2 x i64>, <2 x i64>* %vCr, align 16
+ %tmp2 = load i8, i8* %cV_R.addr, align 4
%splat.splatinsert = insertelement <16 x i8> undef, i8 %tmp2, i32 0
%splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
%tmp3 = bitcast <16 x i8> %splat.splat to <2 x i64>
@@ -450,8 +450,8 @@ eintry:
%__b.addr.i = alloca <4 x i64>, align 16
%vCr = alloca <4 x i64>, align 16
store <4 x i64> zeroinitializer, <4 x i64>* %vCr, align 16
- %tmp = load <4 x i64>* %vCr, align 16
- %tmp2 = load i8* %cV_R.addr, align 4
+ %tmp = load <4 x i64>, <4 x i64>* %vCr, align 16
+ %tmp2 = load i8, i8* %cV_R.addr, align 4
%splat.splatinsert = insertelement <32 x i8> undef, i8 %tmp2, i32 0
%splat.splat = shufflevector <32 x i8> %splat.splatinsert, <32 x i8> undef, <32 x i32> zeroinitializer
%tmp3 = bitcast <32 x i8> %splat.splat to <4 x i64>
@@ -469,8 +469,8 @@ entry:
%__b.addr.i = alloca <2 x i64>, align 16
%vCr = alloca <2 x i64>, align 16
store <2 x i64> zeroinitializer, <2 x i64>* %vCr, align 16
- %tmp = load <2 x i64>* %vCr, align 16
- %tmp2 = load i16* %cV_R.addr, align 4
+ %tmp = load <2 x i64>, <2 x i64>* %vCr, align 16
+ %tmp2 = load i16, i16* %cV_R.addr, align 4
%splat.splatinsert = insertelement <8 x i16> undef, i16 %tmp2, i32 0
%splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
%tmp3 = bitcast <8 x i16> %splat.splat to <2 x i64>
@@ -488,8 +488,8 @@ eintry:
%__b.addr.i = alloca <4 x i64>, align 16
%vCr = alloca <4 x i64>, align 16
store <4 x i64> zeroinitializer, <4 x i64>* %vCr, align 16
- %tmp = load <4 x i64>* %vCr, align 16
- %tmp2 = load i16* %cV_R.addr, align 4
+ %tmp = load <4 x i64>, <4 x i64>* %vCr, align 16
+ %tmp2 = load i16, i16* %cV_R.addr, align 4
%splat.splatinsert = insertelement <16 x i16> undef, i16 %tmp2, i32 0
%splat.splat = shufflevector <16 x i16> %splat.splatinsert, <16 x i16> undef, <16 x i32> zeroinitializer
%tmp3 = bitcast <16 x i16> %splat.splat to <4 x i64>
@@ -507,8 +507,8 @@ entry:
%__b.addr.i = alloca <2 x i64>, align 16
%vCr = alloca <2 x i64>, align 16
store <2 x i64> zeroinitializer, <2 x i64>* %vCr, align 16
- %tmp = load <2 x i64>* %vCr, align 16
- %tmp2 = load i32* %cV_R.addr, align 4
+ %tmp = load <2 x i64>, <2 x i64>* %vCr, align 16
+ %tmp2 = load i32, i32* %cV_R.addr, align 4
%splat.splatinsert = insertelement <4 x i32> undef, i32 %tmp2, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
%tmp3 = bitcast <4 x i32> %splat.splat to <2 x i64>
@@ -526,8 +526,8 @@ eintry:
%__b.addr.i = alloca <4 x i64>, align 16
%vCr = alloca <4 x i64>, align 16
store <4 x i64> zeroinitializer, <4 x i64>* %vCr, align 16
- %tmp = load <4 x i64>* %vCr, align 16
- %tmp2 = load i32* %cV_R.addr, align 4
+ %tmp = load <4 x i64>, <4 x i64>* %vCr, align 16
+ %tmp2 = load i32, i32* %cV_R.addr, align 4
%splat.splatinsert = insertelement <8 x i32> undef, i32 %tmp2, i32 0
%splat.splat = shufflevector <8 x i32> %splat.splatinsert, <8 x i32> undef, <8 x i32> zeroinitializer
%tmp3 = bitcast <8 x i32> %splat.splat to <4 x i64>
@@ -545,8 +545,8 @@ entry:
%__b.addr.i = alloca <2 x i64>, align 16
%vCr = alloca <2 x i64>, align 16
store <2 x i64> zeroinitializer, <2 x i64>* %vCr, align 16
- %tmp = load <2 x i64>* %vCr, align 16
- %tmp2 = load i64* %cV_R.addr, align 4
+ %tmp = load <2 x i64>, <2 x i64>* %vCr, align 16
+ %tmp2 = load i64, i64* %cV_R.addr, align 4
%splat.splatinsert = insertelement <2 x i64> undef, i64 %tmp2, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
store <2 x i64> %tmp, <2 x i64>* %__a.addr.i, align 16
@@ -563,8 +563,8 @@ eintry:
%__b.addr.i = alloca <4 x i64>, align 16
%vCr = alloca <4 x i64>, align 16
store <4 x i64> zeroinitializer, <4 x i64>* %vCr, align 16
- %tmp = load <4 x i64>* %vCr, align 16
- %tmp2 = load i64* %cV_R.addr, align 4
+ %tmp = load <4 x i64>, <4 x i64>* %vCr, align 16
+ %tmp2 = load i64, i64* %cV_R.addr, align 4
%splat.splatinsert = insertelement <4 x i64> undef, i64 %tmp2, i32 0
%splat.splat = shufflevector <4 x i64> %splat.splatinsert, <4 x i64> undef, <4 x i32> zeroinitializer
store <4 x i64> %tmp, <4 x i64>* %__a.addr.i, align 16
diff --git a/llvm/test/CodeGen/X86/avx512-arith.ll b/llvm/test/CodeGen/X86/avx512-arith.ll
index 94b08215b89..1ecd1007905 100644
--- a/llvm/test/CodeGen/X86/avx512-arith.ll
+++ b/llvm/test/CodeGen/X86/avx512-arith.ll
@@ -56,7 +56,7 @@ define <8 x double> @subpd512fold(<8 x double> %y, <8 x double>* %x) {
; CHECK-NEXT: vsubpd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
- %tmp2 = load <8 x double>* %x, align 8
+ %tmp2 = load <8 x double>, <8 x double>* %x, align 8
%sub.i = fsub <8 x double> %y, %tmp2
ret <8 x double> %sub.i
}
@@ -77,7 +77,7 @@ define <16 x float> @subps512fold(<16 x float> %y, <16 x float>* %x) {
; CHECK-NEXT: vsubps (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
- %tmp2 = load <16 x float>* %x, align 4
+ %tmp2 = load <16 x float>, <16 x float>* %x, align 4
%sub.i = fsub <16 x float> %y, %tmp2
ret <16 x float> %sub.i
}
@@ -193,7 +193,7 @@ define <8 x i64> @vpaddq_fold_test(<8 x i64> %i, <8 x i64>* %j) nounwind {
; CHECK: ## BB#0:
; CHECK-NEXT: vpaddq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
- %tmp = load <8 x i64>* %j, align 4
+ %tmp = load <8 x i64>, <8 x i64>* %j, align 4
%x = add <8 x i64> %i, %tmp
ret <8 x i64> %x
}
@@ -212,7 +212,7 @@ define <8 x i64> @vpaddq_broadcast2_test(<8 x i64> %i, i64* %j) nounwind {
; CHECK: ## BB#0:
; CHECK-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
- %tmp = load i64* %j
+ %tmp = load i64, i64* %j
%j.0 = insertelement <8 x i64> undef, i64 %tmp, i32 0
%j.1 = insertelement <8 x i64> %j.0, i64 %tmp, i32 1
%j.2 = insertelement <8 x i64> %j.1, i64 %tmp, i32 2
@@ -239,7 +239,7 @@ define <16 x i32> @vpaddd_fold_test(<16 x i32> %i, <16 x i32>* %j) nounwind {
; CHECK: ## BB#0:
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
- %tmp = load <16 x i32>* %j, align 4
+ %tmp = load <16 x i32>, <16 x i32>* %j, align 4
%x = add <16 x i32> %i, %tmp
ret <16 x i32> %x
}
@@ -287,7 +287,7 @@ define <16 x i32> @vpaddd_mask_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
- %j = load <16 x i32>* %j.ptr
+ %j = load <16 x i32>, <16 x i32>* %j.ptr
%x = add <16 x i32> %i, %j
%r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %i
ret <16 x i32> %r
@@ -314,7 +314,7 @@ define <16 x i32> @vpaddd_maskz_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
- %j = load <16 x i32>* %j.ptr
+ %j = load <16 x i32>, <16 x i32>* %j.ptr
%x = add <16 x i32> %i, %j
%r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> zeroinitializer
ret <16 x i32> %r
@@ -445,7 +445,7 @@ define <16 x i32> @andd512fold(<16 x i32> %y, <16 x i32>* %x) {
; CHECK-NEXT: vpandd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
- %a = load <16 x i32>* %x, align 4
+ %a = load <16 x i32>, <16 x i32>* %x, align 4
%b = and <16 x i32> %y, %a
ret <16 x i32> %b
}
@@ -456,7 +456,7 @@ define <8 x i64> @andqbrst(<8 x i64> %p1, i64* %ap) {
; CHECK-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
- %a = load i64* %ap, align 8
+ %a = load i64, i64* %ap, align 8
%b = insertelement <8 x i64> undef, i64 %a, i32 0
%c = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
%d = and <8 x i64> %p1, %c
@@ -593,7 +593,7 @@ define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i,
<8 x double>* %j, <8 x i64> %mask1)
nounwind {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
- %tmp = load <8 x double>* %j, align 8
+ %tmp = load <8 x double>, <8 x double>* %j, align 8
%x = fadd <8 x double> %i, %tmp
%r = select <8 x i1> %mask, <8 x double> %x, <8 x double> %dst
ret <8 x double> %r
@@ -605,7 +605,7 @@ define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i,
define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j,
<8 x i64> %mask1) nounwind {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
- %tmp = load <8 x double>* %j, align 8
+ %tmp = load <8 x double>, <8 x double>* %j, align 8
%x = fadd <8 x double> %i, %tmp
%r = select <8 x i1> %mask, <8 x double> %x, <8 x double> zeroinitializer
ret <8 x double> %r
@@ -615,7 +615,7 @@ define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j,
; CHECK: vaddpd (%rdi){1to8}, %zmm{{.*}}
; CHECK: ret
define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind {
- %tmp = load double* %j
+ %tmp = load double, double* %j
%b = insertelement <8 x double> undef, double %tmp, i32 0
%c = shufflevector <8 x double> %b, <8 x double> undef,
<8 x i32> zeroinitializer
@@ -629,7 +629,7 @@ define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind
define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double> %i,
double* %j, <8 x i64> %mask1) nounwind {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
- %tmp = load double* %j
+ %tmp = load double, double* %j
%b = insertelement <8 x double> undef, double %tmp, i32 0
%c = shufflevector <8 x double> %b, <8 x double> undef,
<8 x i32> zeroinitializer
@@ -644,7 +644,7 @@ define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double>
define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
<8 x i64> %mask1) nounwind {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
- %tmp = load double* %j
+ %tmp = load double, double* %j
%b = insertelement <8 x double> undef, double %tmp, i32 0
%c = shufflevector <8 x double> %b, <8 x double> undef,
<8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/X86/avx512-build-vector.ll b/llvm/test/CodeGen/X86/avx512-build-vector.ll
index 9e9ad31c916..8373c6da261 100644
--- a/llvm/test/CodeGen/X86/avx512-build-vector.ll
+++ b/llvm/test/CodeGen/X86/avx512-build-vector.ll
@@ -9,7 +9,7 @@ define <16 x i32> @test1(i32* %x) {
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4],ymm1[5,6,7]
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; CHECK-NEXT: retq
- %y = load i32* %x, align 4
+ %y = load i32, i32* %x, align 4
%res = insertelement <16 x i32>zeroinitializer, i32 %y, i32 4
ret <16 x i32>%res
}
diff --git a/llvm/test/CodeGen/X86/avx512-cvt.ll b/llvm/test/CodeGen/X86/avx512-cvt.ll
index 2b672a72d53..842b9f8494b 100644
--- a/llvm/test/CodeGen/X86/avx512-cvt.ll
+++ b/llvm/test/CodeGen/X86/avx512-cvt.ll
@@ -87,7 +87,7 @@ define <8 x double> @fpext00(<8 x float> %b) nounwind {
; CHECK: ret
define double @funcA(i64* nocapture %e) {
entry:
- %tmp1 = load i64* %e, align 8
+ %tmp1 = load i64, i64* %e, align 8
%conv = sitofp i64 %tmp1 to double
ret double %conv
}
@@ -97,7 +97,7 @@ entry:
; CHECK: ret
define double @funcB(i32* %e) {
entry:
- %tmp1 = load i32* %e, align 4
+ %tmp1 = load i32, i32* %e, align 4
%conv = sitofp i32 %tmp1 to double
ret double %conv
}
@@ -107,7 +107,7 @@ entry:
; CHECK: ret
define float @funcC(i32* %e) {
entry:
- %tmp1 = load i32* %e, align 4
+ %tmp1 = load i32, i32* %e, align 4
%conv = sitofp i32 %tmp1 to float
ret float %conv
}
@@ -117,7 +117,7 @@ entry:
; CHECK: ret
define float @i64tof32(i64* %e) {
entry:
- %tmp1 = load i64* %e, align 8
+ %tmp1 = load i64, i64* %e, align 8
%conv = sitofp i64 %tmp1 to float
ret float %conv
}
@@ -129,7 +129,7 @@ define void @fpext() {
entry:
%f = alloca float, align 4
%d = alloca double, align 8
- %tmp = load float* %f, align 4
+ %tmp = load float, float* %f, align 4
%conv = fpext float %tmp to double
store double %conv, double* %d, align 8
ret void
@@ -144,7 +144,7 @@ define void @fpround_scalar() nounwind uwtable {
entry:
%f = alloca float, align 4
%d = alloca double, align 8
- %tmp = load double* %d, align 8
+ %tmp = load double, double* %d, align 8
%conv = fptrunc double %tmp to float
store float %conv, float* %f, align 4
ret void
diff --git a/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll b/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
index 20bf7e4a16e..0e32a1c2806 100644
--- a/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
+++ b/llvm/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
@@ -170,7 +170,7 @@ define <8 x float> @gather_mask_qps_execdomain(<8 x i64> %ind, <8 x float> %src,
;CHECK: vscatterdpd
;CHECK: ret
define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
- %x = load <8 x double>* %src, align 64
+ %x = load <8 x double>, <8 x double>* %src, align 64
call void @llvm.x86.avx512.scatter.dpd.512 (i8* %stbuf, i8 %mask, <8 x i32>%ind, <8 x double> %x, i32 4)
ret void
}
@@ -180,7 +180,7 @@ define void @scatter_mask_dpd_execdomain(<8 x i32> %ind, <8 x double>* %src, i8
;CHECK: vscatterqpd
;CHECK: ret
define void @scatter_mask_qpd_execdomain(<8 x i64> %ind, <8 x double>* %src, i8 %mask, i8* %base, i8* %stbuf) {
- %x = load <8 x double>* %src, align 64
+ %x = load <8 x double>, <8 x double>* %src, align 64
call void @llvm.x86.avx512.scatter.qpd.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind, <8 x double> %x, i32 4)
ret void
}
@@ -190,7 +190,7 @@ define void @scatter_mask_qpd_execdomain(<8 x i64> %ind, <8 x double>* %src, i8
;CHECK: vscatterdps
;CHECK: ret
define void @scatter_mask_dps_execdomain(<16 x i32> %ind, <16 x float>* %src, i16 %mask, i8* %base, i8* %stbuf) {
- %x = load <16 x float>* %src, align 64
+ %x = load <16 x float>, <16 x float>* %src, align 64
call void @llvm.x86.avx512.scatter.dps.512 (i8* %stbuf, i16 %mask, <16 x i32>%ind, <16 x float> %x, i32 4)
ret void
}
@@ -200,7 +200,7 @@ define void @scatter_mask_dps_execdomain(<16 x i32> %ind, <16 x float>* %src, i1
;CHECK: vscatterqps
;CHECK: ret
define void @scatter_mask_qps_execdomain(<8 x i64> %ind, <8 x float>* %src, i8 %mask, i8* %base, i8* %stbuf) {
- %x = load <8 x float>* %src, align 32
+ %x = load <8 x float>, <8 x float>* %src, align 32
call void @llvm.x86.avx512.scatter.qps.512 (i8* %stbuf, i8 %mask, <8 x i64>%ind, <8 x float> %x, i32 4)
ret void
}
diff --git a/llvm/test/CodeGen/X86/avx512-i1test.ll b/llvm/test/CodeGen/X86/avx512-i1test.ll
index a2377380f0d..ba2f49b7942 100644
--- a/llvm/test/CodeGen/X86/avx512-i1test.ll
+++ b/llvm/test/CodeGen/X86/avx512-i1test.ll
@@ -18,7 +18,7 @@ bb56: ; preds = %L_10
br label %bb33
bb33: ; preds = %bb51, %bb56
- %r111 = load i64* undef, align 8
+ %r111 = load i64, i64* undef, align 8
br i1 undef, label %bb51, label %bb35
bb35: ; preds = %bb33
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index d6b887e4c5c..6498b20d803 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -6,7 +6,7 @@
;CHECK: vinsertf32x4
;CHECK: ret
define <16 x float> @test1(<16 x float> %x, float* %br, float %y) nounwind {
- %rrr = load float* %br
+ %rrr = load float, float* %br
%rrr2 = insertelement <16 x float> %x, float %rrr, i32 1
%rrr3 = insertelement <16 x float> %rrr2, float %y, i32 14
ret <16 x float> %rrr3
@@ -20,7 +20,7 @@ define <16 x float> @test1(<16 x float> %x, float* %br, float %y) nounwind {
;SKX: vinsertf64x2 $3
;CHECK: ret
define <8 x double> @test2(<8 x double> %x, double* %br, double %y) nounwind {
- %rrr = load double* %br
+ %rrr = load double, double* %br
%rrr2 = insertelement <8 x double> %x, double %rrr, i32 1
%rrr3 = insertelement <8 x double> %rrr2, double %y, i32 6
ret <8 x double> %rrr3
@@ -171,7 +171,7 @@ define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
;CHECK: kmovw
;CHECK: ret
define i16 @test15(i1 *%addr) {
- %x = load i1 * %addr, align 128
+ %x = load i1 , i1 * %addr, align 128
%x1 = insertelement <16 x i1> undef, i1 %x, i32 10
%x2 = bitcast <16 x i1>%x1 to i16
ret i16 %x2
@@ -183,7 +183,7 @@ define i16 @test15(i1 *%addr) {
;CHECK: korw
;CHECK: ret
define i16 @test16(i1 *%addr, i16 %a) {
- %x = load i1 * %addr, align 128
+ %x = load i1 , i1 * %addr, align 128
%a1 = bitcast i16 %a to <16 x i1>
%x1 = insertelement <16 x i1> %a1, i1 %x, i32 10
%x2 = bitcast <16 x i1>%x1 to i16
@@ -199,7 +199,7 @@ define i16 @test16(i1 *%addr, i16 %a) {
;SKX: korb
;CHECK: ret
define i8 @test17(i1 *%addr, i8 %a) {
- %x = load i1 * %addr, align 128
+ %x = load i1 , i1 * %addr, align 128
%a1 = bitcast i8 %a to <8 x i1>
%x1 = insertelement <8 x i1> %a1, i1 %x, i32 4
%x2 = bitcast <8 x i1>%x1 to i8
diff --git a/llvm/test/CodeGen/X86/avx512-intel-ocl.ll b/llvm/test/CodeGen/X86/avx512-intel-ocl.ll
index 3f2691ba299..2e1b27e4aec 100644
--- a/llvm/test/CodeGen/X86/avx512-intel-ocl.ll
+++ b/llvm/test/CodeGen/X86/avx512-intel-ocl.ll
@@ -30,7 +30,7 @@ define <16 x float> @testf16_inp(<16 x float> %a, <16 x float> %b) nounwind {
%y = alloca <16 x float>, align 16
%x = fadd <16 x float> %a, %b
%1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
- %2 = load <16 x float>* %y, align 16
+ %2 = load <16 x float>, <16 x float>* %y, align 16
%3 = fadd <16 x float> %2, %1
ret <16 x float> %3
}
@@ -53,7 +53,7 @@ define <16 x float> @testf16_regs(<16 x float> %a, <16 x float> %b) nounwind {
%y = alloca <16 x float>, align 16
%x = fadd <16 x float> %a, %b
%1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
- %2 = load <16 x float>* %y, align 16
+ %2 = load <16 x float>, <16 x float>* %y, align 16
%3 = fadd <16 x float> %1, %b
%4 = fadd <16 x float> %2, %3
ret <16 x float> %4
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
index b6375c1f618..46581f79af0 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
@@ -356,7 +356,7 @@ define <8 x double> @test_x86_mask_blend_pd_512(i8 %a0, <8 x double> %a1, <8 x d
define <8 x double> @test_x86_mask_blend_pd_512_memop(<8 x double> %a, <8 x double>* %ptr, i8 %mask) {
; CHECK-LABEL: test_x86_mask_blend_pd_512_memop
; CHECK: vblendmpd (%
- %b = load <8 x double>* %ptr
+ %b = load <8 x double>, <8 x double>* %ptr
%res = call <8 x double> @llvm.x86.avx512.mask.blend.pd.512(<8 x double> %a, <8 x double> %b, i8 %mask) ; <<8 x double>> [#uses=1]
ret <8 x double> %res
}
@@ -1435,7 +1435,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64>, <8 x i64>, <8 x i64>,
define <8 x i64> @test_x86_avx512_psrlv_q_memop(<8 x i64> %a0, <8 x i64>* %ptr) {
; CHECK-LABEL: test_x86_avx512_psrlv_q_memop
; CHECK: vpsrlvq (%
- %b = load <8 x i64>* %ptr
+ %b = load <8 x i64>, <8 x i64>* %ptr
%res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
ret <8 x i64> %res
}
diff --git a/llvm/test/CodeGen/X86/avx512-logic.ll b/llvm/test/CodeGen/X86/avx512-logic.ll
index bee4f52b321..140ce3b1ec5 100644
--- a/llvm/test/CodeGen/X86/avx512-logic.ll
+++ b/llvm/test/CodeGen/X86/avx512-logic.ll
@@ -83,7 +83,7 @@ define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
; CHECK: ret
define <16 x i32> @andd512fold(<16 x i32> %y, <16 x i32>* %x) {
entry:
- %a = load <16 x i32>* %x, align 4
+ %a = load <16 x i32>, <16 x i32>* %x, align 4
%b = and <16 x i32> %y, %a
ret <16 x i32> %b
}
@@ -93,7 +93,7 @@ entry:
; CHECK: ret
define <8 x i64> @andqbrst(<8 x i64> %p1, i64* %ap) {
entry:
- %a = load i64* %ap, align 8
+ %a = load i64, i64* %ap, align 8
%b = insertelement <8 x i64> undef, i64 %a, i32 0
%c = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
%d = and <8 x i64> %p1, %c
diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll
index 264d91503ac..c4e62517eb0 100644
--- a/llvm/test/CodeGen/X86/avx512-mask-op.ll
+++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll
@@ -34,7 +34,7 @@ define i8 @mask8(i8 %x) {
; CHECK: ret
define void @mask16_mem(i16* %ptr) {
- %x = load i16* %ptr, align 4
+ %x = load i16, i16* %ptr, align 4
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%ret = bitcast <16 x i1> %m1 to i16
@@ -51,7 +51,7 @@ define void @mask16_mem(i16* %ptr) {
; SKX-NEXT: kmovb %k{{[0-7]}}, ([[ARG1]])
define void @mask8_mem(i8* %ptr) {
- %x = load i8* %ptr, align 4
+ %x = load i8, i8* %ptr, align 4
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%ret = bitcast <8 x i1> %m1 to i8
@@ -128,7 +128,7 @@ entry:
%maskPtr = alloca <8 x i1>
store <8 x i1> <i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <8 x i1>* %maskPtr
- %mask = load <8 x i1>* %maskPtr
+ %mask = load <8 x i1>, <8 x i1>* %maskPtr
%mask_convert = bitcast <8 x i1> %mask to i8
ret i8 %mask_convert
}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/X86/avx512-mov.ll b/llvm/test/CodeGen/X86/avx512-mov.ll
index 93875e839e2..0cd8458f73f 100644
--- a/llvm/test/CodeGen/X86/avx512-mov.ll
+++ b/llvm/test/CodeGen/X86/avx512-mov.ll
@@ -28,7 +28,7 @@ define <2 x i64> @test3(i64 %x) {
; CHECK: vmovd (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x i32> @test4(i32* %x) {
- %y = load i32* %x
+ %y = load i32, i32* %x
%res = insertelement <4 x i32>undef, i32 %y, i32 0
ret <4 x i32>%res
}
@@ -53,7 +53,7 @@ define void @test6(double %x, double* %y) {
; CHECK: vmovss (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define float @test7(i32* %x) {
- %y = load i32* %x
+ %y = load i32, i32* %x
%res = bitcast i32 %y to float
ret float %res
}
@@ -78,7 +78,7 @@ define i64 @test9(<2 x i64> %x) {
; CHECK: vmovd (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x i32> @test10(i32* %x) {
- %y = load i32* %x, align 4
+ %y = load i32, i32* %x, align 4
%res = insertelement <4 x i32>zeroinitializer, i32 %y, i32 0
ret <4 x i32>%res
}
@@ -87,7 +87,7 @@ define <4 x i32> @test10(i32* %x) {
; CHECK: vmovss (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x float> @test11(float* %x) {
- %y = load float* %x, align 4
+ %y = load float, float* %x, align 4
%res = insertelement <4 x float>zeroinitializer, float %y, i32 0
ret <4 x float>%res
}
@@ -96,7 +96,7 @@ define <4 x float> @test11(float* %x) {
; CHECK: vmovsd (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <2 x double> @test12(double* %x) {
- %y = load double* %x, align 8
+ %y = load double, double* %x, align 8
%res = insertelement <2 x double>zeroinitializer, double %y, i32 0
ret <2 x double>%res
}
@@ -121,7 +121,7 @@ define <4 x i32> @test14(i32 %x) {
; CHECK: vmovd (%rdi), %xmm0 ## encoding: [0x62
; CHECK: ret
define <4 x i32> @test15(i32* %x) {
- %y = load i32* %x, align 4
+ %y = load i32, i32* %x, align 4
%res = insertelement <4 x i32>zeroinitializer, i32 %y, i32 0
ret <4 x i32>%res
}
@@ -131,7 +131,7 @@ define <4 x i32> @test15(i32* %x) {
; CHECK: ret
define <16 x i32> @test16(i8 * %addr) {
%vaddr = bitcast i8* %addr to <16 x i32>*
- %res = load <16 x i32>* %vaddr, align 1
+ %res = load <16 x i32>, <16 x i32>* %vaddr, align 1
ret <16 x i32>%res
}
@@ -140,7 +140,7 @@ define <16 x i32> @test16(i8 * %addr) {
; CHECK: ret
define <16 x i32> @test17(i8 * %addr) {
%vaddr = bitcast i8* %addr to <16 x i32>*
- %res = load <16 x i32>* %vaddr, align 64
+ %res = load <16 x i32>, <16 x i32>* %vaddr, align 64
ret <16 x i32>%res
}
@@ -176,7 +176,7 @@ define void @test20(i8 * %addr, <16 x i32> %data) {
; CHECK: ret
define <8 x i64> @test21(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x i64>*
- %res = load <8 x i64>* %vaddr, align 64
+ %res = load <8 x i64>, <8 x i64>* %vaddr, align 64
ret <8 x i64>%res
}
@@ -194,7 +194,7 @@ define void @test22(i8 * %addr, <8 x i64> %data) {
; CHECK: ret
define <8 x i64> @test23(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x i64>*
- %res = load <8 x i64>* %vaddr, align 1
+ %res = load <8 x i64>, <8 x i64>* %vaddr, align 1
ret <8 x i64>%res
}
@@ -212,7 +212,7 @@ define void @test24(i8 * %addr, <8 x double> %data) {
; CHECK: ret
define <8 x double> @test25(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x double>*
- %res = load <8 x double>* %vaddr, align 64
+ %res = load <8 x double>, <8 x double>* %vaddr, align 64
ret <8 x double>%res
}
@@ -230,7 +230,7 @@ define void @test26(i8 * %addr, <16 x float> %data) {
; CHECK: ret
define <16 x float> @test27(i8 * %addr) {
%vaddr = bitcast i8* %addr to <16 x float>*
- %res = load <16 x float>* %vaddr, align 64
+ %res = load <16 x float>, <16 x float>* %vaddr, align 64
ret <16 x float>%res
}
@@ -248,7 +248,7 @@ define void @test28(i8 * %addr, <8 x double> %data) {
; CHECK: ret
define <8 x double> @test29(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x double>*
- %res = load <8 x double>* %vaddr, align 1
+ %res = load <8 x double>, <8 x double>* %vaddr, align 1
ret <8 x double>%res
}
@@ -266,7 +266,7 @@ define void @test30(i8 * %addr, <16 x float> %data) {
; CHECK: ret
define <16 x float> @test31(i8 * %addr) {
%vaddr = bitcast i8* %addr to <16 x float>*
- %res = load <16 x float>* %vaddr, align 1
+ %res = load <16 x float>, <16 x float>* %vaddr, align 1
ret <16 x float>%res
}
@@ -276,7 +276,7 @@ define <16 x float> @test31(i8 * %addr) {
define <16 x i32> @test32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i32>*
- %r = load <16 x i32>* %vaddr, align 64
+ %r = load <16 x i32>, <16 x i32>* %vaddr, align 64
%res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> %old
ret <16 x i32>%res
}
@@ -287,7 +287,7 @@ define <16 x i32> @test32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
define <16 x i32> @test33(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i32>*
- %r = load <16 x i32>* %vaddr, align 1
+ %r = load <16 x i32>, <16 x i32>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> %old
ret <16 x i32>%res
}
@@ -298,7 +298,7 @@ define <16 x i32> @test33(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
define <16 x i32> @test34(i8 * %addr, <16 x i32> %mask1) {
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i32>*
- %r = load <16 x i32>* %vaddr, align 64
+ %r = load <16 x i32>, <16 x i32>* %vaddr, align 64
%res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> zeroinitializer
ret <16 x i32>%res
}
@@ -309,7 +309,7 @@ define <16 x i32> @test34(i8 * %addr, <16 x i32> %mask1) {
define <16 x i32> @test35(i8 * %addr, <16 x i32> %mask1) {
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i32>*
- %r = load <16 x i32>* %vaddr, align 1
+ %r = load <16 x i32>, <16 x i32>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x i32> %r, <16 x i32> zeroinitializer
ret <16 x i32>%res
}
@@ -320,7 +320,7 @@ define <16 x i32> @test35(i8 * %addr, <16 x i32> %mask1) {
define <8 x i64> @test36(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i64>*
- %r = load <8 x i64>* %vaddr, align 64
+ %r = load <8 x i64>, <8 x i64>* %vaddr, align 64
%res = select <8 x i1> %mask, <8 x i64> %r, <8 x i64> %old
ret <8 x i64>%res
}
@@ -331,7 +331,7 @@ define <8 x i64> @test36(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
define <8 x i64> @test37(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i64>*
- %r = load <8 x i64>* %vaddr, align 1
+ %r = load <8 x i64>, <8 x i64>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x i64> %r, <8 x i64> %old
ret <8 x i64>%res
}
@@ -342,7 +342,7 @@ define <8 x i64> @test37(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
define <8 x i64> @test38(i8 * %addr, <8 x i64> %mask1) {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i64>*
- %r = load <8 x i64>* %vaddr, align 64
+ %r = load <8 x i64>, <8 x i64>* %vaddr, align 64
%res = select <8 x i1> %mask, <8 x i64> %r, <8 x i64> zeroinitializer
ret <8 x i64>%res
}
@@ -353,7 +353,7 @@ define <8 x i64> @test38(i8 * %addr, <8 x i64> %mask1) {
define <8 x i64> @test39(i8 * %addr, <8 x i64> %mask1) {
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i64>*
- %r = load <8 x i64>* %vaddr, align 1
+ %r = load <8 x i64>, <8 x i64>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x i64> %r, <8 x i64> zeroinitializer
ret <8 x i64>%res
}
@@ -364,7 +364,7 @@ define <8 x i64> @test39(i8 * %addr, <8 x i64> %mask1) {
define <16 x float> @test40(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
%mask = fcmp one <16 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x float>*
- %r = load <16 x float>* %vaddr, align 64
+ %r = load <16 x float>, <16 x float>* %vaddr, align 64
%res = select <16 x i1> %mask, <16 x float> %r, <16 x float> %old
ret <16 x float>%res
}
@@ -375,7 +375,7 @@ define <16 x float> @test40(i8 * %addr, <16 x float> %old, <16 x float> %mask1)
define <16 x float> @test41(i8 * %addr, <16 x float> %old, <16 x float> %mask1) {
%mask = fcmp one <16 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x float>*
- %r = load <16 x float>* %vaddr, align 1
+ %r = load <16 x float>, <16 x float>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x float> %r, <16 x float> %old
ret <16 x float>%res
}
@@ -386,7 +386,7 @@ define <16 x float> @test41(i8 * %addr, <16 x float> %old, <16 x float> %mask1)
define <16 x float> @test42(i8 * %addr, <16 x float> %mask1) {
%mask = fcmp one <16 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x float>*
- %r = load <16 x float>* %vaddr, align 64
+ %r = load <16 x float>, <16 x float>* %vaddr, align 64
%res = select <16 x i1> %mask, <16 x float> %r, <16 x float> zeroinitializer
ret <16 x float>%res
}
@@ -397,7 +397,7 @@ define <16 x float> @test42(i8 * %addr, <16 x float> %mask1) {
define <16 x float> @test43(i8 * %addr, <16 x float> %mask1) {
%mask = fcmp one <16 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x float>*
- %r = load <16 x float>* %vaddr, align 1
+ %r = load <16 x float>, <16 x float>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x float> %r, <16 x float> zeroinitializer
ret <16 x float>%res
}
@@ -408,7 +408,7 @@ define <16 x float> @test43(i8 * %addr, <16 x float> %mask1) {
define <8 x double> @test44(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
%mask = fcmp one <8 x double> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x double>*
- %r = load <8 x double>* %vaddr, align 64
+ %r = load <8 x double>, <8 x double>* %vaddr, align 64
%res = select <8 x i1> %mask, <8 x double> %r, <8 x double> %old
ret <8 x double>%res
}
@@ -419,7 +419,7 @@ define <8 x double> @test44(i8 * %addr, <8 x double> %old, <8 x double> %mask1)
define <8 x double> @test45(i8 * %addr, <8 x double> %old, <8 x double> %mask1) {
%mask = fcmp one <8 x double> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x double>*
- %r = load <8 x double>* %vaddr, align 1
+ %r = load <8 x double>, <8 x double>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x double> %r, <8 x double> %old
ret <8 x double>%res
}
@@ -430,7 +430,7 @@ define <8 x double> @test45(i8 * %addr, <8 x double> %old, <8 x double> %mask1)
define <8 x double> @test46(i8 * %addr, <8 x double> %mask1) {
%mask = fcmp one <8 x double> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x double>*
- %r = load <8 x double>* %vaddr, align 64
+ %r = load <8 x double>, <8 x double>* %vaddr, align 64
%res = select <8 x i1> %mask, <8 x double> %r, <8 x double> zeroinitializer
ret <8 x double>%res
}
@@ -441,7 +441,7 @@ define <8 x double> @test46(i8 * %addr, <8 x double> %mask1) {
define <8 x double> @test47(i8 * %addr, <8 x double> %mask1) {
%mask = fcmp one <8 x double> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x double>*
- %r = load <8 x double>* %vaddr, align 1
+ %r = load <8 x double>, <8 x double>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x double> %r, <8 x double> zeroinitializer
ret <8 x double>%res
}
diff --git a/llvm/test/CodeGen/X86/avx512-round.ll b/llvm/test/CodeGen/X86/avx512-round.ll
index ffeb2a85e91..c4f417e75ab 100644
--- a/llvm/test/CodeGen/X86/avx512-round.ll
+++ b/llvm/test/CodeGen/X86/avx512-round.ll
@@ -99,7 +99,7 @@ declare float @llvm.floor.f32(float %p)
define float @floor_f32m(float* %aptr) {
; CHECK-LABEL: floor_f32m
; CHECK: vrndscaless $1, (%rdi), {{.*}}encoding: [0x62,0xf3,0x7d,0x08,0x0a,0x07,0x01]
- %a = load float* %aptr, align 4
+ %a = load float, float* %aptr, align 4
%res = call float @llvm.floor.f32(float %a)
ret float %res
}
diff --git a/llvm/test/CodeGen/X86/avx512-shift.ll b/llvm/test/CodeGen/X86/avx512-shift.ll
index 8cdcf8ad062..0636cd242f8 100644
--- a/llvm/test/CodeGen/X86/avx512-shift.ll
+++ b/llvm/test/CodeGen/X86/avx512-shift.ll
@@ -76,7 +76,7 @@ define <8 x i64> @variable_sra2(<8 x i64> %x, <8 x i64> %y) {
; CHECK: vpsravd (%
; CHECK: ret
define <16 x i32> @variable_sra01_load(<16 x i32> %x, <16 x i32>* %y) {
- %y1 = load <16 x i32>* %y
+ %y1 = load <16 x i32>, <16 x i32>* %y
%k = ashr <16 x i32> %x, %y1
ret <16 x i32> %k
}
@@ -85,7 +85,7 @@ define <16 x i32> @variable_sra01_load(<16 x i32> %x, <16 x i32>* %y) {
; CHECK: vpsllvd (%
; CHECK: ret
define <16 x i32> @variable_shl1_load(<16 x i32> %x, <16 x i32>* %y) {
- %y1 = load <16 x i32>* %y
+ %y1 = load <16 x i32>, <16 x i32>* %y
%k = shl <16 x i32> %x, %y1
ret <16 x i32> %k
}
@@ -93,7 +93,7 @@ define <16 x i32> @variable_shl1_load(<16 x i32> %x, <16 x i32>* %y) {
; CHECK: vpsrlvd (%
; CHECK: ret
define <16 x i32> @variable_srl0_load(<16 x i32> %x, <16 x i32>* %y) {
- %y1 = load <16 x i32>* %y
+ %y1 = load <16 x i32>, <16 x i32>* %y
%k = lshr <16 x i32> %x, %y1
ret <16 x i32> %k
}
@@ -102,7 +102,7 @@ define <16 x i32> @variable_srl0_load(<16 x i32> %x, <16 x i32>* %y) {
; CHECK: vpsrlvq (%
; CHECK: ret
define <8 x i64> @variable_srl3_load(<8 x i64> %x, <8 x i64>* %y) {
- %y1 = load <8 x i64>* %y
+ %y1 = load <8 x i64>, <8 x i64>* %y
%k = lshr <8 x i64> %x, %y1
ret <8 x i64> %k
}
diff --git a/llvm/test/CodeGen/X86/avx512-vbroadcast.ll b/llvm/test/CodeGen/X86/avx512-vbroadcast.ll
index 5bb82338d08..cc81d682079 100644
--- a/llvm/test/CodeGen/X86/avx512-vbroadcast.ll
+++ b/llvm/test/CodeGen/X86/avx512-vbroadcast.ll
@@ -64,7 +64,7 @@ define <16 x float> @_ss16xfloat_maskz(float %a, <16 x i32> %mask1) {
;CHECK: vbroadcastss (%{{.*}}, %zmm
;CHECK: ret
define <16 x float> @_ss16xfloat_load(float* %a.ptr) {
- %a = load float* %a.ptr
+ %a = load float, float* %a.ptr
%b = insertelement <16 x float> undef, float %a, i32 0
%c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
ret <16 x float> %c
@@ -74,7 +74,7 @@ define <16 x float> @_ss16xfloat_load(float* %a.ptr) {
;CHECK: vbroadcastss (%rdi), %zmm0 {%k1}
;CHECK: ret
define <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16 x i32> %mask1) {
- %a = load float* %a.ptr
+ %a = load float, float* %a.ptr
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%b = insertelement <16 x float> undef, float %a, i32 0
%c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
@@ -86,7 +86,7 @@ define <16 x float> @_ss16xfloat_mask_load(float* %a.ptr, <16 x float> %i, <16
;CHECK: vbroadcastss (%rdi), %zmm0 {%k1} {z}
;CHECK: ret
define <16 x float> @_ss16xfloat_maskz_load(float* %a.ptr, <16 x i32> %mask1) {
- %a = load float* %a.ptr
+ %a = load float, float* %a.ptr
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%b = insertelement <16 x float> undef, float %a, i32 0
%c = shufflevector <16 x float> %b, <16 x float> undef, <16 x i32> zeroinitializer
@@ -130,7 +130,7 @@ define <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
;CHECK: vbroadcastsd (%rdi), %zmm
;CHECK: ret
define <8 x double> @_sd8xdouble_load(double* %a.ptr) {
- %a = load double* %a.ptr
+ %a = load double, double* %a.ptr
%b = insertelement <8 x double> undef, double %a, i32 0
%c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
ret <8 x double> %c
@@ -140,7 +140,7 @@ define <8 x double> @_sd8xdouble_load(double* %a.ptr) {
;CHECK: vbroadcastsd (%rdi), %zmm0 {%k1}
;CHECK: ret
define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
- %a = load double* %a.ptr
+ %a = load double, double* %a.ptr
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%b = insertelement <8 x double> undef, double %a, i32 0
%c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
@@ -152,7 +152,7 @@ define <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1)
; CHECK-LABEL: _sd8xdouble_maskz_load:
; CHECK: vbroadcastsd (%rdi), %zmm0 {%k1} {z}
; CHECK: ret
- %a = load double* %a.ptr
+ %a = load double, double* %a.ptr
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%b = insertelement <8 x double> undef, double %a, i32 0
%c = shufflevector <8 x double> %b, <8 x double> undef, <8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
index b16f5c9663c..26e2c776b25 100644
--- a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -31,7 +31,7 @@ define <16 x i32> @test3(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %yp) nounwin
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
- %y = load <16 x i32>* %yp, align 4
+ %y = load <16 x i32>, <16 x i32>* %yp, align 4
%mask = icmp eq <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
@@ -215,7 +215,7 @@ define <16 x i32> @test17(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
- %y = load <16 x i32>* %y.ptr, align 4
+ %y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp sgt <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
@@ -228,7 +228,7 @@ define <16 x i32> @test18(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
- %y = load <16 x i32>* %y.ptr, align 4
+ %y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp sle <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
@@ -241,7 +241,7 @@ define <16 x i32> @test19(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
- %y = load <16 x i32>* %y.ptr, align 4
+ %y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp ule <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
ret <16 x i32> %max
@@ -286,7 +286,7 @@ define <8 x i64> @test22(<8 x i64> %x, <8 x i64>* %y.ptr, <8 x i64> %x1, <8 x i6
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sgt <8 x i64> %x1, %y1
- %y = load <8 x i64>* %y.ptr, align 4
+ %y = load <8 x i64>, <8 x i64>* %y.ptr, align 4
%mask0 = icmp sgt <8 x i64> %x, %y
%mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
%max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %x1
@@ -302,7 +302,7 @@ define <16 x i32> @test23(<16 x i32> %x, <16 x i32>* %y.ptr, <16 x i32> %x1, <16
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i32> %x1, %y1
- %y = load <16 x i32>* %y.ptr, align 4
+ %y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask0 = icmp ule <16 x i32> %x, %y
%mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
%max = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %x1
@@ -316,7 +316,7 @@ define <8 x i64> @test24(<8 x i64> %x, <8 x i64> %x1, i64* %yb.ptr) nounwind {
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
- %yb = load i64* %yb.ptr, align 4
+ %yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
%y = shufflevector <8 x i64> %y.0, <8 x i64> undef, <8 x i32> zeroinitializer
%mask = icmp eq <8 x i64> %x, %y
@@ -331,7 +331,7 @@ define <16 x i32> @test25(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1) nounwind
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
- %yb = load i32* %yb.ptr, align 4
+ %yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
%y = shufflevector <16 x i32> %y.0, <16 x i32> undef, <16 x i32> zeroinitializer
%mask = icmp sle <16 x i32> %x, %y
@@ -348,7 +348,7 @@ define <16 x i32> @test26(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1, <16 x i32
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i32> %x1, %y1
- %yb = load i32* %yb.ptr, align 4
+ %yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
%y = shufflevector <16 x i32> %y.0, <16 x i32> undef, <16 x i32> zeroinitializer
%mask0 = icmp sgt <16 x i32> %x, %y
@@ -366,7 +366,7 @@ define <8 x i64> @test27(<8 x i64> %x, i64* %yb.ptr, <8 x i64> %x1, <8 x i64> %y
; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i64> %x1, %y1
- %yb = load i64* %yb.ptr, align 4
+ %yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
%y = shufflevector <8 x i64> %y.0, <8 x i64> undef, <8 x i32> zeroinitializer
%mask0 = icmp sle <8 x i64> %x, %y
diff --git a/llvm/test/CodeGen/X86/avx512bw-arith.ll b/llvm/test/CodeGen/X86/avx512bw-arith.ll
index 94f68a2ddc2..52ebf27b6b7 100644
--- a/llvm/test/CodeGen/X86/avx512bw-arith.ll
+++ b/llvm/test/CodeGen/X86/avx512bw-arith.ll
@@ -12,7 +12,7 @@ define <64 x i8> @vpaddb512_test(<64 x i8> %i, <64 x i8> %j) nounwind readnone {
; CHECK: vpaddb (%rdi), %zmm{{.*}}
; CHECK: ret
define <64 x i8> @vpaddb512_fold_test(<64 x i8> %i, <64 x i8>* %j) nounwind {
- %tmp = load <64 x i8>* %j, align 4
+ %tmp = load <64 x i8>, <64 x i8>* %j, align 4
%x = add <64 x i8> %i, %tmp
ret <64 x i8> %x
}
@@ -29,7 +29,7 @@ define <32 x i16> @vpaddw512_test(<32 x i16> %i, <32 x i16> %j) nounwind readnon
; CHECK: vpaddw (%rdi), %zmm{{.*}}
; CHECK: ret
define <32 x i16> @vpaddw512_fold_test(<32 x i16> %i, <32 x i16>* %j) nounwind {
- %tmp = load <32 x i16>* %j, align 4
+ %tmp = load <32 x i16>, <32 x i16>* %j, align 4
%x = add <32 x i16> %i, %tmp
ret <32 x i16> %x
}
@@ -59,7 +59,7 @@ define <32 x i16> @vpaddw512_maskz_test(<32 x i16> %i, <32 x i16> %j, <32 x i16>
; CHECK: ret
define <32 x i16> @vpaddw512_mask_fold_test(<32 x i16> %i, <32 x i16>* %j.ptr, <32 x i16> %mask1) nounwind readnone {
%mask = icmp ne <32 x i16> %mask1, zeroinitializer
- %j = load <32 x i16>* %j.ptr
+ %j = load <32 x i16>, <32 x i16>* %j.ptr
%x = add <32 x i16> %i, %j
%r = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %i
ret <32 x i16> %r
@@ -70,7 +70,7 @@ define <32 x i16> @vpaddw512_mask_fold_test(<32 x i16> %i, <32 x i16>* %j.ptr, <
; CHECK: ret
define <32 x i16> @vpaddw512_maskz_fold_test(<32 x i16> %i, <32 x i16>* %j.ptr, <32 x i16> %mask1) nounwind readnone {
%mask = icmp ne <32 x i16> %mask1, zeroinitializer
- %j = load <32 x i16>* %j.ptr
+ %j = load <32 x i16>, <32 x i16>* %j.ptr
%x = add <32 x i16> %i, %j
%r = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> zeroinitializer
ret <32 x i16> %r
diff --git a/llvm/test/CodeGen/X86/avx512bw-mask-op.ll b/llvm/test/CodeGen/X86/avx512bw-mask-op.ll
index 9d7630c5d0a..0208011cf89 100644
--- a/llvm/test/CodeGen/X86/avx512bw-mask-op.ll
+++ b/llvm/test/CodeGen/X86/avx512bw-mask-op.ll
@@ -35,7 +35,7 @@ define i64 @mask64(i64 %x) {
}
define void @mask32_mem(i32* %ptr) {
- %x = load i32* %ptr, align 4
+ %x = load i32, i32* %ptr, align 4
%m0 = bitcast i32 %x to <32 x i1>
%m1 = xor <32 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1,
i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1,
@@ -52,7 +52,7 @@ define void @mask32_mem(i32* %ptr) {
}
define void @mask64_mem(i64* %ptr) {
- %x = load i64* %ptr, align 4
+ %x = load i64, i64* %ptr, align 4
%m0 = bitcast i64 %x to <64 x i1>
%m1 = xor <64 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1,
i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1,
diff --git a/llvm/test/CodeGen/X86/avx512bw-mov.ll b/llvm/test/CodeGen/X86/avx512bw-mov.ll
index 2ff6d280ab8..519b649ff53 100644
--- a/llvm/test/CodeGen/X86/avx512bw-mov.ll
+++ b/llvm/test/CodeGen/X86/avx512bw-mov.ll
@@ -5,7 +5,7 @@
; CHECK: ret
define <64 x i8> @test1(i8 * %addr) {
%vaddr = bitcast i8* %addr to <64 x i8>*
- %res = load <64 x i8>* %vaddr, align 1
+ %res = load <64 x i8>, <64 x i8>* %vaddr, align 1
ret <64 x i8>%res
}
@@ -24,7 +24,7 @@ define void @test2(i8 * %addr, <64 x i8> %data) {
define <64 x i8> @test3(i8 * %addr, <64 x i8> %old, <64 x i8> %mask1) {
%mask = icmp ne <64 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <64 x i8>*
- %r = load <64 x i8>* %vaddr, align 1
+ %r = load <64 x i8>, <64 x i8>* %vaddr, align 1
%res = select <64 x i1> %mask, <64 x i8> %r, <64 x i8> %old
ret <64 x i8>%res
}
@@ -35,7 +35,7 @@ define <64 x i8> @test3(i8 * %addr, <64 x i8> %old, <64 x i8> %mask1) {
define <64 x i8> @test4(i8 * %addr, <64 x i8> %mask1) {
%mask = icmp ne <64 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <64 x i8>*
- %r = load <64 x i8>* %vaddr, align 1
+ %r = load <64 x i8>, <64 x i8>* %vaddr, align 1
%res = select <64 x i1> %mask, <64 x i8> %r, <64 x i8> zeroinitializer
ret <64 x i8>%res
}
@@ -45,7 +45,7 @@ define <64 x i8> @test4(i8 * %addr, <64 x i8> %mask1) {
; CHECK: ret
define <32 x i16> @test5(i8 * %addr) {
%vaddr = bitcast i8* %addr to <32 x i16>*
- %res = load <32 x i16>* %vaddr, align 1
+ %res = load <32 x i16>, <32 x i16>* %vaddr, align 1
ret <32 x i16>%res
}
@@ -64,7 +64,7 @@ define void @test6(i8 * %addr, <32 x i16> %data) {
define <32 x i16> @test7(i8 * %addr, <32 x i16> %old, <32 x i16> %mask1) {
%mask = icmp ne <32 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <32 x i16>*
- %r = load <32 x i16>* %vaddr, align 1
+ %r = load <32 x i16>, <32 x i16>* %vaddr, align 1
%res = select <32 x i1> %mask, <32 x i16> %r, <32 x i16> %old
ret <32 x i16>%res
}
@@ -75,7 +75,7 @@ define <32 x i16> @test7(i8 * %addr, <32 x i16> %old, <32 x i16> %mask1) {
define <32 x i16> @test8(i8 * %addr, <32 x i16> %mask1) {
%mask = icmp ne <32 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <32 x i16>*
- %r = load <32 x i16>* %vaddr, align 1
+ %r = load <32 x i16>, <32 x i16>* %vaddr, align 1
%res = select <32 x i1> %mask, <32 x i16> %r, <32 x i16> zeroinitializer
ret <32 x i16>%res
}
diff --git a/llvm/test/CodeGen/X86/avx512bw-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512bw-vec-cmp.ll
index 6ba4db68662..141f5cc0921 100644
--- a/llvm/test/CodeGen/X86/avx512bw-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512bw-vec-cmp.ll
@@ -45,7 +45,7 @@ define <64 x i8> @test4(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1) nounwind {
; CHECK: vmovdqu16
; CHECK: ret
define <32 x i16> @test5(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %yp) nounwind {
- %y = load <32 x i16>* %yp, align 4
+ %y = load <32 x i16>, <32 x i16>* %yp, align 4
%mask = icmp eq <32 x i16> %x, %y
%max = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %x1
ret <32 x i16> %max
@@ -56,7 +56,7 @@ define <32 x i16> @test5(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %yp) nounwin
; CHECK: vmovdqu16
; CHECK: ret
define <32 x i16> @test6(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) nounwind {
- %y = load <32 x i16>* %y.ptr, align 4
+ %y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask = icmp sgt <32 x i16> %x, %y
%max = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %x1
ret <32 x i16> %max
@@ -67,7 +67,7 @@ define <32 x i16> @test6(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) noun
; CHECK: vmovdqu16
; CHECK: ret
define <32 x i16> @test7(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) nounwind {
- %y = load <32 x i16>* %y.ptr, align 4
+ %y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask = icmp sle <32 x i16> %x, %y
%max = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %x1
ret <32 x i16> %max
@@ -78,7 +78,7 @@ define <32 x i16> @test7(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) noun
; CHECK: vmovdqu16
; CHECK: ret
define <32 x i16> @test8(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) nounwind {
- %y = load <32 x i16>* %y.ptr, align 4
+ %y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask = icmp ule <32 x i16> %x, %y
%max = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %x1
ret <32 x i16> %max
@@ -114,7 +114,7 @@ define <64 x i8> @test10(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1, <64 x i8> %y
; CHECK: ret
define <64 x i8> @test11(<64 x i8> %x, <64 x i8>* %y.ptr, <64 x i8> %x1, <64 x i8> %y1) nounwind {
%mask1 = icmp sgt <64 x i8> %x1, %y1
- %y = load <64 x i8>* %y.ptr, align 4
+ %y = load <64 x i8>, <64 x i8>* %y.ptr, align 4
%mask0 = icmp sgt <64 x i8> %x, %y
%mask = select <64 x i1> %mask0, <64 x i1> %mask1, <64 x i1> zeroinitializer
%max = select <64 x i1> %mask, <64 x i8> %x, <64 x i8> %x1
@@ -127,7 +127,7 @@ define <64 x i8> @test11(<64 x i8> %x, <64 x i8>* %y.ptr, <64 x i8> %x1, <64 x i
; CHECK: ret
define <32 x i16> @test12(<32 x i16> %x, <32 x i16>* %y.ptr, <32 x i16> %x1, <32 x i16> %y1) nounwind {
%mask1 = icmp sge <32 x i16> %x1, %y1
- %y = load <32 x i16>* %y.ptr, align 4
+ %y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask0 = icmp ule <32 x i16> %x, %y
%mask = select <32 x i1> %mask0, <32 x i1> %mask1, <32 x i1> zeroinitializer
%max = select <32 x i1> %mask, <32 x i16> %x, <32 x i16> %x1
diff --git a/llvm/test/CodeGen/X86/avx512bwvl-arith.ll b/llvm/test/CodeGen/X86/avx512bwvl-arith.ll
index 96f01409f5b..c0650e17610 100644
--- a/llvm/test/CodeGen/X86/avx512bwvl-arith.ll
+++ b/llvm/test/CodeGen/X86/avx512bwvl-arith.ll
@@ -14,7 +14,7 @@ define <32 x i8> @vpaddb256_test(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; CHECK: vpaddb (%rdi), %ymm{{.*}}
; CHECK: ret
define <32 x i8> @vpaddb256_fold_test(<32 x i8> %i, <32 x i8>* %j) nounwind {
- %tmp = load <32 x i8>* %j, align 4
+ %tmp = load <32 x i8>, <32 x i8>* %j, align 4
%x = add <32 x i8> %i, %tmp
ret <32 x i8> %x
}
@@ -31,7 +31,7 @@ define <16 x i16> @vpaddw256_test(<16 x i16> %i, <16 x i16> %j) nounwind readnon
; CHECK: vpaddw (%rdi), %ymm{{.*}}
; CHECK: ret
define <16 x i16> @vpaddw256_fold_test(<16 x i16> %i, <16 x i16>* %j) nounwind {
- %tmp = load <16 x i16>* %j, align 4
+ %tmp = load <16 x i16>, <16 x i16>* %j, align 4
%x = add <16 x i16> %i, %tmp
ret <16 x i16> %x
}
@@ -61,7 +61,7 @@ define <16 x i16> @vpaddw256_maskz_test(<16 x i16> %i, <16 x i16> %j, <16 x i16>
; CHECK: ret
define <16 x i16> @vpaddw256_mask_fold_test(<16 x i16> %i, <16 x i16>* %j.ptr, <16 x i16> %mask1) nounwind readnone {
%mask = icmp ne <16 x i16> %mask1, zeroinitializer
- %j = load <16 x i16>* %j.ptr
+ %j = load <16 x i16>, <16 x i16>* %j.ptr
%x = add <16 x i16> %i, %j
%r = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %i
ret <16 x i16> %r
@@ -72,7 +72,7 @@ define <16 x i16> @vpaddw256_mask_fold_test(<16 x i16> %i, <16 x i16>* %j.ptr, <
; CHECK: ret
define <16 x i16> @vpaddw256_maskz_fold_test(<16 x i16> %i, <16 x i16>* %j.ptr, <16 x i16> %mask1) nounwind readnone {
%mask = icmp ne <16 x i16> %mask1, zeroinitializer
- %j = load <16 x i16>* %j.ptr
+ %j = load <16 x i16>, <16 x i16>* %j.ptr
%x = add <16 x i16> %i, %j
%r = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> zeroinitializer
ret <16 x i16> %r
@@ -116,7 +116,7 @@ define <16 x i8> @vpaddb128_test(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
; CHECK: vpaddb (%rdi), %xmm{{.*}}
; CHECK: ret
define <16 x i8> @vpaddb128_fold_test(<16 x i8> %i, <16 x i8>* %j) nounwind {
- %tmp = load <16 x i8>* %j, align 4
+ %tmp = load <16 x i8>, <16 x i8>* %j, align 4
%x = add <16 x i8> %i, %tmp
ret <16 x i8> %x
}
@@ -133,7 +133,7 @@ define <8 x i16> @vpaddw128_test(<8 x i16> %i, <8 x i16> %j) nounwind readnone {
; CHECK: vpaddw (%rdi), %xmm{{.*}}
; CHECK: ret
define <8 x i16> @vpaddw128_fold_test(<8 x i16> %i, <8 x i16>* %j) nounwind {
- %tmp = load <8 x i16>* %j, align 4
+ %tmp = load <8 x i16>, <8 x i16>* %j, align 4
%x = add <8 x i16> %i, %tmp
ret <8 x i16> %x
}
@@ -163,7 +163,7 @@ define <8 x i16> @vpaddw128_maskz_test(<8 x i16> %i, <8 x i16> %j, <8 x i16> %ma
; CHECK: ret
define <8 x i16> @vpaddw128_mask_fold_test(<8 x i16> %i, <8 x i16>* %j.ptr, <8 x i16> %mask1) nounwind readnone {
%mask = icmp ne <8 x i16> %mask1, zeroinitializer
- %j = load <8 x i16>* %j.ptr
+ %j = load <8 x i16>, <8 x i16>* %j.ptr
%x = add <8 x i16> %i, %j
%r = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %i
ret <8 x i16> %r
@@ -174,7 +174,7 @@ define <8 x i16> @vpaddw128_mask_fold_test(<8 x i16> %i, <8 x i16>* %j.ptr, <8 x
; CHECK: ret
define <8 x i16> @vpaddw128_maskz_fold_test(<8 x i16> %i, <8 x i16>* %j.ptr, <8 x i16> %mask1) nounwind readnone {
%mask = icmp ne <8 x i16> %mask1, zeroinitializer
- %j = load <8 x i16>* %j.ptr
+ %j = load <8 x i16>, <8 x i16>* %j.ptr
%x = add <8 x i16> %i, %j
%r = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> zeroinitializer
ret <8 x i16> %r
diff --git a/llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll b/llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll
index dbb911792af..cffa0a5e9b7 100644
--- a/llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll
@@ -830,7 +830,7 @@ define <2 x double> @test_mask_vfmsubadd128_pd(<2 x double> %a0, <2 x double> %a
define <2 x double> @test_mask_vfmsubadd128rm_pd(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmsubadd128rm_pd
; CHECK: vfmsubadd213pd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa7,0x07]
- %a2 = load <2 x double>* %ptr_a2
+ %a2 = load <2 x double>, <2 x double>* %ptr_a2
%res = call <2 x double> @llvm.x86.fma.mask.vfmsubadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
ret <2 x double> %res
}
@@ -838,7 +838,7 @@ declare <8 x double> @llvm.x86.fma.mask.vfmsubadd.pd.512(<8 x double>, <8 x doub
define <8 x double> @test_mask_vfmsubaddrm_pd(<8 x double> %a0, <8 x double> %a1, <8 x double>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmsubaddrm_pd
; CHECK: vfmsubadd213pd (%rdi), %zmm1, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x49,0xa7,0x07]
- %a2 = load <8 x double>* %ptr_a2, align 8
+ %a2 = load <8 x double>, <8 x double>* %ptr_a2, align 8
%res = call <8 x double> @llvm.x86.fma.mask.vfmsubadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
ret <8 x double> %res
}
@@ -860,7 +860,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rz(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mask_vfmadd128_ps_rmk(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmk
; CHECK: vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
- %a2 = load <4 x float>* %ptr_a2
+ %a2 = load <4 x float>, <4 x float>* %ptr_a2
%res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
ret <4 x float> %res
}
@@ -868,7 +868,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmk(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mask_vfmadd128_ps_rmka(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmka
; CHECK: vfmadd213ps (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x09,0xa8,0x07]
- %a2 = load <4 x float>* %ptr_a2, align 8
+ %a2 = load <4 x float>, <4 x float>* %ptr_a2, align 8
%res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind
ret <4 x float> %res
}
@@ -876,7 +876,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmka(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mask_vfmadd128_ps_rmkz(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmkz
; CHECK: vfmadd213ps (%rdi), %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0x07]
- %a2 = load <4 x float>* %ptr_a2
+ %a2 = load <4 x float>, <4 x float>* %ptr_a2
%res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
ret <4 x float> %res
}
@@ -884,7 +884,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmkz(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mask_vfmadd128_ps_rmkza(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmkza
; CHECK: vfmadd213ps (%rdi), %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0x71,0xa8,0x07]
- %a2 = load <4 x float>* %ptr_a2, align 4
+ %a2 = load <4 x float>, <4 x float>* %ptr_a2, align 4
%res = call <4 x float> @llvm.x86.fma.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
ret <4 x float> %res
}
@@ -892,7 +892,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmkza(<4 x float> %a0, <4 x float> %a
define <4 x float> @test_mask_vfmadd128_ps_rmb(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmb
; CHECK: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
- %q = load float* %ptr_a2
+ %q = load float, float* %ptr_a2
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
@@ -904,7 +904,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmb(<4 x float> %a0, <4 x float> %a1,
define <4 x float> @test_mask_vfmadd128_ps_rmba(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmba
; CHECK: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x75,0x19,0xa8,0x07]
- %q = load float* %ptr_a2, align 4
+ %q = load float, float* %ptr_a2, align 4
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
@@ -916,7 +916,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmba(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mask_vfmadd128_ps_rmbz(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmbz
; CHECK: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
- %q = load float* %ptr_a2
+ %q = load float, float* %ptr_a2
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
@@ -928,7 +928,7 @@ define <4 x float> @test_mask_vfmadd128_ps_rmbz(<4 x float> %a0, <4 x float> %a1
define <4 x float> @test_mask_vfmadd128_ps_rmbza(<4 x float> %a0, <4 x float> %a1, float* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_ps_rmbza
; CHECK: vfmadd213ps (%rdi){1to4}, %xmm1, %xmm0 ## encoding: [0x62,0xf2,0x75,0x18,0xa8,0x07]
- %q = load float* %ptr_a2, align 4
+ %q = load float, float* %ptr_a2, align 4
%vecinit.i = insertelement <4 x float> undef, float %q, i32 0
%vecinit2.i = insertelement <4 x float> %vecinit.i, float %q, i32 1
%vecinit4.i = insertelement <4 x float> %vecinit2.i, float %q, i32 2
@@ -954,7 +954,7 @@ define <2 x double> @test_mask_vfmadd128_pd_rz(<2 x double> %a0, <2 x double> %a
define <2 x double> @test_mask_vfmadd128_pd_rmk(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd128_pd_rmk
; CHECK: vfmadd213pd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0xa8,0x07]
- %a2 = load <2 x double>* %ptr_a2
+ %a2 = load <2 x double>, <2 x double>* %ptr_a2
%res = call <2 x double> @llvm.x86.fma.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind
ret <2 x double> %res
}
@@ -962,7 +962,7 @@ define <2 x double> @test_mask_vfmadd128_pd_rmk(<2 x double> %a0, <2 x double> %
define <2 x double> @test_mask_vfmadd128_pd_rmkz(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd128_pd_rmkz
; CHECK: vfmadd213pd (%rdi), %xmm1, %xmm0 ## encoding: [0xc4,0xe2,0xf1,0xa8,0x07]
- %a2 = load <2 x double>* %ptr_a2
+ %a2 = load <2 x double>, <2 x double>* %ptr_a2
%res = call <2 x double> @llvm.x86.fma.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
ret <2 x double> %res
}
@@ -984,7 +984,7 @@ define <4 x double> @test_mask_vfmadd256_pd_rz(<4 x double> %a0, <4 x double> %a
define <4 x double> @test_mask_vfmadd256_pd_rmk(<4 x double> %a0, <4 x double> %a1, <4 x double>* %ptr_a2, i8 %mask) {
; CHECK-LABEL: test_mask_vfmadd256_pd_rmk
; CHECK: vfmadd213pd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0xa8,0x07]
- %a2 = load <4 x double>* %ptr_a2
+ %a2 = load <4 x double>, <4 x double>* %ptr_a2
%res = call <4 x double> @llvm.x86.fma.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind
ret <4 x double> %res
}
@@ -992,7 +992,7 @@ define <4 x double> @test_mask_vfmadd256_pd_rmk(<4 x double> %a0, <4 x double> %
define <4 x double> @test_mask_vfmadd256_pd_rmkz(<4 x double> %a0, <4 x double> %a1, <4 x double>* %ptr_a2) {
; CHECK-LABEL: test_mask_vfmadd256_pd_rmkz
; CHECK: vfmadd213pd (%rdi), %ymm1, %ymm0 ## encoding: [0xc4,0xe2,0xf5,0xa8,0x07]
- %a2 = load <4 x double>* %ptr_a2
+ %a2 = load <4 x double>, <4 x double>* %ptr_a2
%res = call <4 x double> @llvm.x86.fma.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind
ret <4 x double> %res
}
diff --git a/llvm/test/CodeGen/X86/avx512bwvl-mov.ll b/llvm/test/CodeGen/X86/avx512bwvl-mov.ll
index 835844fc821..8a9a4fa5e5e 100644
--- a/llvm/test/CodeGen/X86/avx512bwvl-mov.ll
+++ b/llvm/test/CodeGen/X86/avx512bwvl-mov.ll
@@ -5,7 +5,7 @@
; CHECK: ret
define <32 x i8> @test_256_1(i8 * %addr) {
%vaddr = bitcast i8* %addr to <32 x i8>*
- %res = load <32 x i8>* %vaddr, align 1
+ %res = load <32 x i8>, <32 x i8>* %vaddr, align 1
ret <32 x i8>%res
}
@@ -24,7 +24,7 @@ define void @test_256_2(i8 * %addr, <32 x i8> %data) {
define <32 x i8> @test_256_3(i8 * %addr, <32 x i8> %old, <32 x i8> %mask1) {
%mask = icmp ne <32 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <32 x i8>*
- %r = load <32 x i8>* %vaddr, align 1
+ %r = load <32 x i8>, <32 x i8>* %vaddr, align 1
%res = select <32 x i1> %mask, <32 x i8> %r, <32 x i8> %old
ret <32 x i8>%res
}
@@ -35,7 +35,7 @@ define <32 x i8> @test_256_3(i8 * %addr, <32 x i8> %old, <32 x i8> %mask1) {
define <32 x i8> @test_256_4(i8 * %addr, <32 x i8> %mask1) {
%mask = icmp ne <32 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <32 x i8>*
- %r = load <32 x i8>* %vaddr, align 1
+ %r = load <32 x i8>, <32 x i8>* %vaddr, align 1
%res = select <32 x i1> %mask, <32 x i8> %r, <32 x i8> zeroinitializer
ret <32 x i8>%res
}
@@ -45,7 +45,7 @@ define <32 x i8> @test_256_4(i8 * %addr, <32 x i8> %mask1) {
; CHECK: ret
define <16 x i16> @test_256_5(i8 * %addr) {
%vaddr = bitcast i8* %addr to <16 x i16>*
- %res = load <16 x i16>* %vaddr, align 1
+ %res = load <16 x i16>, <16 x i16>* %vaddr, align 1
ret <16 x i16>%res
}
@@ -64,7 +64,7 @@ define void @test_256_6(i8 * %addr, <16 x i16> %data) {
define <16 x i16> @test_256_7(i8 * %addr, <16 x i16> %old, <16 x i16> %mask1) {
%mask = icmp ne <16 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i16>*
- %r = load <16 x i16>* %vaddr, align 1
+ %r = load <16 x i16>, <16 x i16>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x i16> %r, <16 x i16> %old
ret <16 x i16>%res
}
@@ -75,7 +75,7 @@ define <16 x i16> @test_256_7(i8 * %addr, <16 x i16> %old, <16 x i16> %mask1) {
define <16 x i16> @test_256_8(i8 * %addr, <16 x i16> %mask1) {
%mask = icmp ne <16 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i16>*
- %r = load <16 x i16>* %vaddr, align 1
+ %r = load <16 x i16>, <16 x i16>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x i16> %r, <16 x i16> zeroinitializer
ret <16 x i16>%res
}
@@ -85,7 +85,7 @@ define <16 x i16> @test_256_8(i8 * %addr, <16 x i16> %mask1) {
; CHECK: ret
define <16 x i8> @test_128_1(i8 * %addr) {
%vaddr = bitcast i8* %addr to <16 x i8>*
- %res = load <16 x i8>* %vaddr, align 1
+ %res = load <16 x i8>, <16 x i8>* %vaddr, align 1
ret <16 x i8>%res
}
@@ -104,7 +104,7 @@ define void @test_128_2(i8 * %addr, <16 x i8> %data) {
define <16 x i8> @test_128_3(i8 * %addr, <16 x i8> %old, <16 x i8> %mask1) {
%mask = icmp ne <16 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i8>*
- %r = load <16 x i8>* %vaddr, align 1
+ %r = load <16 x i8>, <16 x i8>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x i8> %r, <16 x i8> %old
ret <16 x i8>%res
}
@@ -115,7 +115,7 @@ define <16 x i8> @test_128_3(i8 * %addr, <16 x i8> %old, <16 x i8> %mask1) {
define <16 x i8> @test_128_4(i8 * %addr, <16 x i8> %mask1) {
%mask = icmp ne <16 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i8>*
- %r = load <16 x i8>* %vaddr, align 1
+ %r = load <16 x i8>, <16 x i8>* %vaddr, align 1
%res = select <16 x i1> %mask, <16 x i8> %r, <16 x i8> zeroinitializer
ret <16 x i8>%res
}
@@ -125,7 +125,7 @@ define <16 x i8> @test_128_4(i8 * %addr, <16 x i8> %mask1) {
; CHECK: ret
define <8 x i16> @test_128_5(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x i16>*
- %res = load <8 x i16>* %vaddr, align 1
+ %res = load <8 x i16>, <8 x i16>* %vaddr, align 1
ret <8 x i16>%res
}
@@ -144,7 +144,7 @@ define void @test_128_6(i8 * %addr, <8 x i16> %data) {
define <8 x i16> @test_128_7(i8 * %addr, <8 x i16> %old, <8 x i16> %mask1) {
%mask = icmp ne <8 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i16>*
- %r = load <8 x i16>* %vaddr, align 1
+ %r = load <8 x i16>, <8 x i16>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x i16> %r, <8 x i16> %old
ret <8 x i16>%res
}
@@ -155,7 +155,7 @@ define <8 x i16> @test_128_7(i8 * %addr, <8 x i16> %old, <8 x i16> %mask1) {
define <8 x i16> @test_128_8(i8 * %addr, <8 x i16> %mask1) {
%mask = icmp ne <8 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i16>*
- %r = load <8 x i16>* %vaddr, align 1
+ %r = load <8 x i16>, <8 x i16>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x i16> %r, <8 x i16> zeroinitializer
ret <8 x i16>%res
}
diff --git a/llvm/test/CodeGen/X86/avx512bwvl-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
index 2d13a166a72..9bf02fa41d9 100644
--- a/llvm/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
@@ -45,7 +45,7 @@ define <32 x i8> @test256_4(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1) nounwind
; CHECK: vmovdqu16
; CHECK: ret
define <16 x i16> @test256_5(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %yp) nounwind {
- %y = load <16 x i16>* %yp, align 4
+ %y = load <16 x i16>, <16 x i16>* %yp, align 4
%mask = icmp eq <16 x i16> %x, %y
%max = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %x1
ret <16 x i16> %max
@@ -56,7 +56,7 @@ define <16 x i16> @test256_5(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %yp) nou
; CHECK: vmovdqu16
; CHECK: ret
define <16 x i16> @test256_6(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr) nounwind {
- %y = load <16 x i16>* %y.ptr, align 4
+ %y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask = icmp sgt <16 x i16> %x, %y
%max = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %x1
ret <16 x i16> %max
@@ -67,7 +67,7 @@ define <16 x i16> @test256_6(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr)
; CHECK: vmovdqu16
; CHECK: ret
define <16 x i16> @test256_7(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr) nounwind {
- %y = load <16 x i16>* %y.ptr, align 4
+ %y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask = icmp sle <16 x i16> %x, %y
%max = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %x1
ret <16 x i16> %max
@@ -78,7 +78,7 @@ define <16 x i16> @test256_7(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr)
; CHECK: vmovdqu16
; CHECK: ret
define <16 x i16> @test256_8(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr) nounwind {
- %y = load <16 x i16>* %y.ptr, align 4
+ %y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask = icmp ule <16 x i16> %x, %y
%max = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %x1
ret <16 x i16> %max
@@ -114,7 +114,7 @@ define <32 x i8> @test256_10(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1, <32 x i8
; CHECK: ret
define <32 x i8> @test256_11(<32 x i8> %x, <32 x i8>* %y.ptr, <32 x i8> %x1, <32 x i8> %y1) nounwind {
%mask1 = icmp sgt <32 x i8> %x1, %y1
- %y = load <32 x i8>* %y.ptr, align 4
+ %y = load <32 x i8>, <32 x i8>* %y.ptr, align 4
%mask0 = icmp sgt <32 x i8> %x, %y
%mask = select <32 x i1> %mask0, <32 x i1> %mask1, <32 x i1> zeroinitializer
%max = select <32 x i1> %mask, <32 x i8> %x, <32 x i8> %x1
@@ -127,7 +127,7 @@ define <32 x i8> @test256_11(<32 x i8> %x, <32 x i8>* %y.ptr, <32 x i8> %x1, <32
; CHECK: ret
define <16 x i16> @test256_12(<16 x i16> %x, <16 x i16>* %y.ptr, <16 x i16> %x1, <16 x i16> %y1) nounwind {
%mask1 = icmp sge <16 x i16> %x1, %y1
- %y = load <16 x i16>* %y.ptr, align 4
+ %y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask0 = icmp ule <16 x i16> %x, %y
%mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
%max = select <16 x i1> %mask, <16 x i16> %x, <16 x i16> %x1
@@ -179,7 +179,7 @@ define <16 x i8> @test128_4(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1) nounwind
; CHECK: vmovdqu16
; CHECK: ret
define <8 x i16> @test128_5(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %yp) nounwind {
- %y = load <8 x i16>* %yp, align 4
+ %y = load <8 x i16>, <8 x i16>* %yp, align 4
%mask = icmp eq <8 x i16> %x, %y
%max = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %x1
ret <8 x i16> %max
@@ -190,7 +190,7 @@ define <8 x i16> @test128_5(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %yp) nounwin
; CHECK: vmovdqu16
; CHECK: ret
define <8 x i16> @test128_6(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) nounwind {
- %y = load <8 x i16>* %y.ptr, align 4
+ %y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask = icmp sgt <8 x i16> %x, %y
%max = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %x1
ret <8 x i16> %max
@@ -201,7 +201,7 @@ define <8 x i16> @test128_6(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) noun
; CHECK: vmovdqu16
; CHECK: ret
define <8 x i16> @test128_7(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) nounwind {
- %y = load <8 x i16>* %y.ptr, align 4
+ %y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask = icmp sle <8 x i16> %x, %y
%max = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %x1
ret <8 x i16> %max
@@ -212,7 +212,7 @@ define <8 x i16> @test128_7(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) noun
; CHECK: vmovdqu16
; CHECK: ret
define <8 x i16> @test128_8(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) nounwind {
- %y = load <8 x i16>* %y.ptr, align 4
+ %y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask = icmp ule <8 x i16> %x, %y
%max = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %x1
ret <8 x i16> %max
@@ -248,7 +248,7 @@ define <16 x i8> @test128_10(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1, <16 x i8
; CHECK: ret
define <16 x i8> @test128_11(<16 x i8> %x, <16 x i8>* %y.ptr, <16 x i8> %x1, <16 x i8> %y1) nounwind {
%mask1 = icmp sgt <16 x i8> %x1, %y1
- %y = load <16 x i8>* %y.ptr, align 4
+ %y = load <16 x i8>, <16 x i8>* %y.ptr, align 4
%mask0 = icmp sgt <16 x i8> %x, %y
%mask = select <16 x i1> %mask0, <16 x i1> %mask1, <16 x i1> zeroinitializer
%max = select <16 x i1> %mask, <16 x i8> %x, <16 x i8> %x1
@@ -261,7 +261,7 @@ define <16 x i8> @test128_11(<16 x i8> %x, <16 x i8>* %y.ptr, <16 x i8> %x1, <16
; CHECK: ret
define <8 x i16> @test128_12(<8 x i16> %x, <8 x i16>* %y.ptr, <8 x i16> %x1, <8 x i16> %y1) nounwind {
%mask1 = icmp sge <8 x i16> %x1, %y1
- %y = load <8 x i16>* %y.ptr, align 4
+ %y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask0 = icmp ule <8 x i16> %x, %y
%mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
%max = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %x1
diff --git a/llvm/test/CodeGen/X86/avx512dq-mask-op.ll b/llvm/test/CodeGen/X86/avx512dq-mask-op.ll
index 32a2633f8d0..b4d11bc0b77 100644
--- a/llvm/test/CodeGen/X86/avx512dq-mask-op.ll
+++ b/llvm/test/CodeGen/X86/avx512dq-mask-op.ll
@@ -11,7 +11,7 @@ define i8 @mask8(i8 %x) {
}
define void @mask8_mem(i8* %ptr) {
- %x = load i8* %ptr, align 4
+ %x = load i8, i8* %ptr, align 4
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
%ret = bitcast <8 x i1> %m1 to i8
diff --git a/llvm/test/CodeGen/X86/avx512er-intrinsics.ll b/llvm/test/CodeGen/X86/avx512er-intrinsics.ll
index ce402b48478..dcde9c4153e 100644
--- a/llvm/test/CodeGen/X86/avx512er-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512er-intrinsics.ll
@@ -99,7 +99,7 @@ declare <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double>, <2 x double>, <2
define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr ) {
; CHECK: vrsqrt28sd (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x07]
- %mem = load double * %ptr, align 8
+ %mem = load double , double * %ptr, align 8
%mem_v = insertelement <2 x double> undef, double %mem, i32 0
%res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 7, i32 4) ;
ret <2 x double> %res
@@ -108,7 +108,7 @@ define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr )
define <2 x double> @test_rsqrt28_sd_maskz_mem_offset(<2 x double> %a0, double* %ptr ) {
; CHECK: vrsqrt28sd 144(%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x47,0x12]
%ptr1 = getelementptr double, double* %ptr, i32 18
- %mem = load double * %ptr1, align 8
+ %mem = load double , double * %ptr1, align 8
%mem_v = insertelement <2 x double> undef, double %mem, i32 0
%res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 7, i32 4) ;
ret <2 x double> %res
diff --git a/llvm/test/CodeGen/X86/avx512vl-arith.ll b/llvm/test/CodeGen/X86/avx512vl-arith.ll
index 1f7da7814cc..ef01d8656da 100644
--- a/llvm/test/CodeGen/X86/avx512vl-arith.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-arith.ll
@@ -14,7 +14,7 @@ define <4 x i64> @vpaddq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; CHECK: vpaddq (%rdi), %ymm{{.*}}
; CHECK: ret
define <4 x i64> @vpaddq256_fold_test(<4 x i64> %i, <4 x i64>* %j) nounwind {
- %tmp = load <4 x i64>* %j, align 4
+ %tmp = load <4 x i64>, <4 x i64>* %j, align 4
%x = add <4 x i64> %i, %tmp
ret <4 x i64> %x
}
@@ -31,7 +31,7 @@ define <4 x i64> @vpaddq256_broadcast_test(<4 x i64> %i) nounwind {
; CHECK: vpaddq (%rdi){1to4}, %ymm{{.*}}
; CHECK: ret
define <4 x i64> @vpaddq256_broadcast2_test(<4 x i64> %i, i64* %j.ptr) nounwind {
- %j = load i64* %j.ptr
+ %j = load i64, i64* %j.ptr
%j.0 = insertelement <4 x i64> undef, i64 %j, i32 0
%j.v = shufflevector <4 x i64> %j.0, <4 x i64> undef, <4 x i32> zeroinitializer
%x = add <4 x i64> %i, %j.v
@@ -50,7 +50,7 @@ define <8 x i32> @vpaddd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; CHECK: vpaddd (%rdi), %ymm{{.*}}
; CHECK: ret
define <8 x i32> @vpaddd256_fold_test(<8 x i32> %i, <8 x i32>* %j) nounwind {
- %tmp = load <8 x i32>* %j, align 4
+ %tmp = load <8 x i32>, <8 x i32>* %j, align 4
%x = add <8 x i32> %i, %tmp
ret <8 x i32> %x
}
@@ -88,7 +88,7 @@ define <8 x i32> @vpaddd256_maskz_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %ma
; CHECK: ret
define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
- %j = load <8 x i32>* %j.ptr
+ %j = load <8 x i32>, <8 x i32>* %j.ptr
%x = add <8 x i32> %i, %j
%r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i
ret <8 x i32> %r
@@ -109,7 +109,7 @@ define <8 x i32> @vpaddd256_mask_broadcast_test(<8 x i32> %i, <8 x i32> %mask1)
; CHECK: ret
define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
- %j = load <8 x i32>* %j.ptr
+ %j = load <8 x i32>, <8 x i32>* %j.ptr
%x = add <8 x i32> %i, %j
%r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
ret <8 x i32> %r
@@ -341,7 +341,7 @@ define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %
<4 x double>* %j, <4 x i64> %mask1)
nounwind {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
- %tmp = load <4 x double>* %j
+ %tmp = load <4 x double>, <4 x double>* %j
%x = fadd <4 x double> %i, %tmp
%r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
ret <4 x double> %r
@@ -353,7 +353,7 @@ define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %
define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j,
<4 x i64> %mask1) nounwind {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
- %tmp = load <4 x double>* %j
+ %tmp = load <4 x double>, <4 x double>* %j
%x = fadd <4 x double> %i, %tmp
%r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer
ret <4 x double> %r
@@ -363,7 +363,7 @@ define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %
; CHECK: vaddpd (%rdi){1to4}, %ymm{{.*}}
; CHECK: ret
define <4 x double> @test_broadcast2_vaddpd_256(<4 x double> %i, double* %j) nounwind {
- %tmp = load double* %j
+ %tmp = load double, double* %j
%b = insertelement <4 x double> undef, double %tmp, i32 0
%c = shufflevector <4 x double> %b, <4 x double> undef,
<4 x i32> zeroinitializer
@@ -377,7 +377,7 @@ define <4 x double> @test_broadcast2_vaddpd_256(<4 x double> %i, double* %j) nou
define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i,
double* %j, <4 x i64> %mask1) nounwind {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
- %tmp = load double* %j
+ %tmp = load double, double* %j
%b = insertelement <4 x double> undef, double %tmp, i32 0
%c = shufflevector <4 x double> %b, <4 x double> undef,
<4 x i32> zeroinitializer
@@ -392,7 +392,7 @@ define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x doub
define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j,
<4 x i64> %mask1) nounwind {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
- %tmp = load double* %j
+ %tmp = load double, double* %j
%b = insertelement <4 x double> undef, double %tmp, i32 0
%c = shufflevector <4 x double> %b, <4 x double> undef,
<4 x i32> zeroinitializer
@@ -415,7 +415,7 @@ define <2 x i64> @vpaddq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone {
; CHECK: vpaddq (%rdi), %xmm{{.*}}
; CHECK: ret
define <2 x i64> @vpaddq128_fold_test(<2 x i64> %i, <2 x i64>* %j) nounwind {
- %tmp = load <2 x i64>* %j, align 4
+ %tmp = load <2 x i64>, <2 x i64>* %j, align 4
%x = add <2 x i64> %i, %tmp
ret <2 x i64> %x
}
@@ -424,7 +424,7 @@ define <2 x i64> @vpaddq128_fold_test(<2 x i64> %i, <2 x i64>* %j) nounwind {
; CHECK: vpaddq (%rdi){1to2}, %xmm{{.*}}
; CHECK: ret
define <2 x i64> @vpaddq128_broadcast2_test(<2 x i64> %i, i64* %j) nounwind {
- %tmp = load i64* %j
+ %tmp = load i64, i64* %j
%j.0 = insertelement <2 x i64> undef, i64 %tmp, i32 0
%j.1 = insertelement <2 x i64> %j.0, i64 %tmp, i32 1
%x = add <2 x i64> %i, %j.1
@@ -443,7 +443,7 @@ define <4 x i32> @vpaddd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone {
; CHECK: vpaddd (%rdi), %xmm{{.*}}
; CHECK: ret
define <4 x i32> @vpaddd128_fold_test(<4 x i32> %i, <4 x i32>* %j) nounwind {
- %tmp = load <4 x i32>* %j, align 4
+ %tmp = load <4 x i32>, <4 x i32>* %j, align 4
%x = add <4 x i32> %i, %tmp
ret <4 x i32> %x
}
@@ -481,7 +481,7 @@ define <4 x i32> @vpaddd128_maskz_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %ma
; CHECK: ret
define <4 x i32> @vpaddd128_mask_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
- %j = load <4 x i32>* %j.ptr
+ %j = load <4 x i32>, <4 x i32>* %j.ptr
%x = add <4 x i32> %i, %j
%r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i
ret <4 x i32> %r
@@ -502,7 +502,7 @@ define <4 x i32> @vpaddd128_mask_broadcast_test(<4 x i32> %i, <4 x i32> %mask1)
; CHECK: ret
define <4 x i32> @vpaddd128_maskz_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
- %j = load <4 x i32>* %j.ptr
+ %j = load <4 x i32>, <4 x i32>* %j.ptr
%x = add <4 x i32> %i, %j
%r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer
ret <4 x i32> %r
@@ -735,7 +735,7 @@ define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %
<2 x double>* %j, <2 x i64> %mask1)
nounwind {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
- %tmp = load <2 x double>* %j
+ %tmp = load <2 x double>, <2 x double>* %j
%x = fadd <2 x double> %i, %tmp
%r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
ret <2 x double> %r
@@ -747,7 +747,7 @@ define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %
define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %j,
<2 x i64> %mask1) nounwind {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
- %tmp = load <2 x double>* %j
+ %tmp = load <2 x double>, <2 x double>* %j
%x = fadd <2 x double> %i, %tmp
%r = select <2 x i1> %mask, <2 x double> %x, <2 x double> zeroinitializer
ret <2 x double> %r
@@ -757,7 +757,7 @@ define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %
; CHECK: vaddpd (%rdi){1to2}, %xmm{{.*}}
; CHECK: ret
define <2 x double> @test_broadcast2_vaddpd_128(<2 x double> %i, double* %j) nounwind {
- %tmp = load double* %j
+ %tmp = load double, double* %j
%j.0 = insertelement <2 x double> undef, double %tmp, i64 0
%j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1
%x = fadd <2 x double> %j.1, %i
@@ -771,7 +771,7 @@ define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x doub
double* %j, <2 x i64> %mask1)
nounwind {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
- %tmp = load double* %j
+ %tmp = load double, double* %j
%j.0 = insertelement <2 x double> undef, double %tmp, i64 0
%j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1
%x = fadd <2 x double> %j.1, %i
@@ -785,7 +785,7 @@ define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x doub
define <2 x double> @test_maskz_broadcast_vaddpd_128(<2 x double> %i, double* %j,
<2 x i64> %mask1) nounwind {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
- %tmp = load double* %j
+ %tmp = load double, double* %j
%j.0 = insertelement <2 x double> undef, double %tmp, i64 0
%j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1
%x = fadd <2 x double> %j.1, %i
diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll
index fe347bd31a8..38a7e7afa5c 100644
--- a/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -805,7 +805,7 @@ define <4 x double> @test_x86_mask_blend_pd_256(i8 %a0, <4 x double> %a1, <4 x d
define <4 x double> @test_x86_mask_blend_pd_256_memop(<4 x double> %a, <4 x double>* %ptr, i8 %mask) {
; CHECK-LABEL: test_x86_mask_blend_pd_256_memop
; CHECK: vblendmpd (%
- %b = load <4 x double>* %ptr
+ %b = load <4 x double>, <4 x double>* %ptr
%res = call <4 x double> @llvm.x86.avx512.mask.blend.pd.256(<4 x double> %a, <4 x double> %b, i8 %mask) ; <<4 x double>> [#uses=1]
ret <4 x double> %res
}
@@ -843,7 +843,7 @@ define <2 x double> @test_x86_mask_blend_pd_128(i8 %a0, <2 x double> %a1, <2 x d
define <2 x double> @test_x86_mask_blend_pd_128_memop(<2 x double> %a, <2 x double>* %ptr, i8 %mask) {
; CHECK-LABEL: test_x86_mask_blend_pd_128_memop
; CHECK: vblendmpd (%
- %b = load <2 x double>* %ptr
+ %b = load <2 x double>, <2 x double>* %ptr
%res = call <2 x double> @llvm.x86.avx512.mask.blend.pd.128(<2 x double> %a, <2 x double> %b, i8 %mask) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
diff --git a/llvm/test/CodeGen/X86/avx512vl-mov.ll b/llvm/test/CodeGen/X86/avx512vl-mov.ll
index 32246568ac2..18fa0a142a2 100644
--- a/llvm/test/CodeGen/X86/avx512vl-mov.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-mov.ll
@@ -5,7 +5,7 @@
; CHECK: ret
define <8 x i32> @test_256_1(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x i32>*
- %res = load <8 x i32>* %vaddr, align 1
+ %res = load <8 x i32>, <8 x i32>* %vaddr, align 1
ret <8 x i32>%res
}
@@ -14,7 +14,7 @@ define <8 x i32> @test_256_1(i8 * %addr) {
; CHECK: ret
define <8 x i32> @test_256_2(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x i32>*
- %res = load <8 x i32>* %vaddr, align 32
+ %res = load <8 x i32>, <8 x i32>* %vaddr, align 32
ret <8 x i32>%res
}
@@ -50,7 +50,7 @@ define void @test_256_5(i8 * %addr, <8 x i32> %data) {
; CHECK: ret
define <4 x i64> @test_256_6(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x i64>*
- %res = load <4 x i64>* %vaddr, align 32
+ %res = load <4 x i64>, <4 x i64>* %vaddr, align 32
ret <4 x i64>%res
}
@@ -68,7 +68,7 @@ define void @test_256_7(i8 * %addr, <4 x i64> %data) {
; CHECK: ret
define <4 x i64> @test_256_8(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x i64>*
- %res = load <4 x i64>* %vaddr, align 1
+ %res = load <4 x i64>, <4 x i64>* %vaddr, align 1
ret <4 x i64>%res
}
@@ -86,7 +86,7 @@ define void @test_256_9(i8 * %addr, <4 x double> %data) {
; CHECK: ret
define <4 x double> @test_256_10(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x double>*
- %res = load <4 x double>* %vaddr, align 32
+ %res = load <4 x double>, <4 x double>* %vaddr, align 32
ret <4 x double>%res
}
@@ -104,7 +104,7 @@ define void @test_256_11(i8 * %addr, <8 x float> %data) {
; CHECK: ret
define <8 x float> @test_256_12(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x float>*
- %res = load <8 x float>* %vaddr, align 32
+ %res = load <8 x float>, <8 x float>* %vaddr, align 32
ret <8 x float>%res
}
@@ -122,7 +122,7 @@ define void @test_256_13(i8 * %addr, <4 x double> %data) {
; CHECK: ret
define <4 x double> @test_256_14(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x double>*
- %res = load <4 x double>* %vaddr, align 1
+ %res = load <4 x double>, <4 x double>* %vaddr, align 1
ret <4 x double>%res
}
@@ -140,7 +140,7 @@ define void @test_256_15(i8 * %addr, <8 x float> %data) {
; CHECK: ret
define <8 x float> @test_256_16(i8 * %addr) {
%vaddr = bitcast i8* %addr to <8 x float>*
- %res = load <8 x float>* %vaddr, align 1
+ %res = load <8 x float>, <8 x float>* %vaddr, align 1
ret <8 x float>%res
}
@@ -150,7 +150,7 @@ define <8 x float> @test_256_16(i8 * %addr) {
define <8 x i32> @test_256_17(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i32>*
- %r = load <8 x i32>* %vaddr, align 32
+ %r = load <8 x i32>, <8 x i32>* %vaddr, align 32
%res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> %old
ret <8 x i32>%res
}
@@ -161,7 +161,7 @@ define <8 x i32> @test_256_17(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
define <8 x i32> @test_256_18(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i32>*
- %r = load <8 x i32>* %vaddr, align 1
+ %r = load <8 x i32>, <8 x i32>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> %old
ret <8 x i32>%res
}
@@ -172,7 +172,7 @@ define <8 x i32> @test_256_18(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
define <8 x i32> @test_256_19(i8 * %addr, <8 x i32> %mask1) {
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i32>*
- %r = load <8 x i32>* %vaddr, align 32
+ %r = load <8 x i32>, <8 x i32>* %vaddr, align 32
%res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> zeroinitializer
ret <8 x i32>%res
}
@@ -183,7 +183,7 @@ define <8 x i32> @test_256_19(i8 * %addr, <8 x i32> %mask1) {
define <8 x i32> @test_256_20(i8 * %addr, <8 x i32> %mask1) {
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i32>*
- %r = load <8 x i32>* %vaddr, align 1
+ %r = load <8 x i32>, <8 x i32>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> zeroinitializer
ret <8 x i32>%res
}
@@ -194,7 +194,7 @@ define <8 x i32> @test_256_20(i8 * %addr, <8 x i32> %mask1) {
define <4 x i64> @test_256_21(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i64>*
- %r = load <4 x i64>* %vaddr, align 32
+ %r = load <4 x i64>, <4 x i64>* %vaddr, align 32
%res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> %old
ret <4 x i64>%res
}
@@ -205,7 +205,7 @@ define <4 x i64> @test_256_21(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
define <4 x i64> @test_256_22(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i64>*
- %r = load <4 x i64>* %vaddr, align 1
+ %r = load <4 x i64>, <4 x i64>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> %old
ret <4 x i64>%res
}
@@ -216,7 +216,7 @@ define <4 x i64> @test_256_22(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
define <4 x i64> @test_256_23(i8 * %addr, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i64>*
- %r = load <4 x i64>* %vaddr, align 32
+ %r = load <4 x i64>, <4 x i64>* %vaddr, align 32
%res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> zeroinitializer
ret <4 x i64>%res
}
@@ -227,7 +227,7 @@ define <4 x i64> @test_256_23(i8 * %addr, <4 x i64> %mask1) {
define <4 x i64> @test_256_24(i8 * %addr, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i64>*
- %r = load <4 x i64>* %vaddr, align 1
+ %r = load <4 x i64>, <4 x i64>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> zeroinitializer
ret <4 x i64>%res
}
@@ -238,7 +238,7 @@ define <4 x i64> @test_256_24(i8 * %addr, <4 x i64> %mask1) {
define <8 x float> @test_256_25(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
%mask = fcmp one <8 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x float>*
- %r = load <8 x float>* %vaddr, align 32
+ %r = load <8 x float>, <8 x float>* %vaddr, align 32
%res = select <8 x i1> %mask, <8 x float> %r, <8 x float> %old
ret <8 x float>%res
}
@@ -249,7 +249,7 @@ define <8 x float> @test_256_25(i8 * %addr, <8 x float> %old, <8 x float> %mask1
define <8 x float> @test_256_26(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
%mask = fcmp one <8 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x float>*
- %r = load <8 x float>* %vaddr, align 1
+ %r = load <8 x float>, <8 x float>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x float> %r, <8 x float> %old
ret <8 x float>%res
}
@@ -260,7 +260,7 @@ define <8 x float> @test_256_26(i8 * %addr, <8 x float> %old, <8 x float> %mask1
define <8 x float> @test_256_27(i8 * %addr, <8 x float> %mask1) {
%mask = fcmp one <8 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x float>*
- %r = load <8 x float>* %vaddr, align 32
+ %r = load <8 x float>, <8 x float>* %vaddr, align 32
%res = select <8 x i1> %mask, <8 x float> %r, <8 x float> zeroinitializer
ret <8 x float>%res
}
@@ -271,7 +271,7 @@ define <8 x float> @test_256_27(i8 * %addr, <8 x float> %mask1) {
define <8 x float> @test_256_28(i8 * %addr, <8 x float> %mask1) {
%mask = fcmp one <8 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x float>*
- %r = load <8 x float>* %vaddr, align 1
+ %r = load <8 x float>, <8 x float>* %vaddr, align 1
%res = select <8 x i1> %mask, <8 x float> %r, <8 x float> zeroinitializer
ret <8 x float>%res
}
@@ -282,7 +282,7 @@ define <8 x float> @test_256_28(i8 * %addr, <8 x float> %mask1) {
define <4 x double> @test_256_29(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x double>*
- %r = load <4 x double>* %vaddr, align 32
+ %r = load <4 x double>, <4 x double>* %vaddr, align 32
%res = select <4 x i1> %mask, <4 x double> %r, <4 x double> %old
ret <4 x double>%res
}
@@ -293,7 +293,7 @@ define <4 x double> @test_256_29(i8 * %addr, <4 x double> %old, <4 x i64> %mask1
define <4 x double> @test_256_30(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x double>*
- %r = load <4 x double>* %vaddr, align 1
+ %r = load <4 x double>, <4 x double>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x double> %r, <4 x double> %old
ret <4 x double>%res
}
@@ -304,7 +304,7 @@ define <4 x double> @test_256_30(i8 * %addr, <4 x double> %old, <4 x i64> %mask1
define <4 x double> @test_256_31(i8 * %addr, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x double>*
- %r = load <4 x double>* %vaddr, align 32
+ %r = load <4 x double>, <4 x double>* %vaddr, align 32
%res = select <4 x i1> %mask, <4 x double> %r, <4 x double> zeroinitializer
ret <4 x double>%res
}
@@ -315,7 +315,7 @@ define <4 x double> @test_256_31(i8 * %addr, <4 x i64> %mask1) {
define <4 x double> @test_256_32(i8 * %addr, <4 x i64> %mask1) {
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x double>*
- %r = load <4 x double>* %vaddr, align 1
+ %r = load <4 x double>, <4 x double>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x double> %r, <4 x double> zeroinitializer
ret <4 x double>%res
}
@@ -325,7 +325,7 @@ define <4 x double> @test_256_32(i8 * %addr, <4 x i64> %mask1) {
; CHECK: ret
define <4 x i32> @test_128_1(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x i32>*
- %res = load <4 x i32>* %vaddr, align 1
+ %res = load <4 x i32>, <4 x i32>* %vaddr, align 1
ret <4 x i32>%res
}
@@ -334,7 +334,7 @@ define <4 x i32> @test_128_1(i8 * %addr) {
; CHECK: ret
define <4 x i32> @test_128_2(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x i32>*
- %res = load <4 x i32>* %vaddr, align 16
+ %res = load <4 x i32>, <4 x i32>* %vaddr, align 16
ret <4 x i32>%res
}
@@ -370,7 +370,7 @@ define void @test_128_5(i8 * %addr, <4 x i32> %data) {
; CHECK: ret
define <2 x i64> @test_128_6(i8 * %addr) {
%vaddr = bitcast i8* %addr to <2 x i64>*
- %res = load <2 x i64>* %vaddr, align 16
+ %res = load <2 x i64>, <2 x i64>* %vaddr, align 16
ret <2 x i64>%res
}
@@ -388,7 +388,7 @@ define void @test_128_7(i8 * %addr, <2 x i64> %data) {
; CHECK: ret
define <2 x i64> @test_128_8(i8 * %addr) {
%vaddr = bitcast i8* %addr to <2 x i64>*
- %res = load <2 x i64>* %vaddr, align 1
+ %res = load <2 x i64>, <2 x i64>* %vaddr, align 1
ret <2 x i64>%res
}
@@ -406,7 +406,7 @@ define void @test_128_9(i8 * %addr, <2 x double> %data) {
; CHECK: ret
define <2 x double> @test_128_10(i8 * %addr) {
%vaddr = bitcast i8* %addr to <2 x double>*
- %res = load <2 x double>* %vaddr, align 16
+ %res = load <2 x double>, <2 x double>* %vaddr, align 16
ret <2 x double>%res
}
@@ -424,7 +424,7 @@ define void @test_128_11(i8 * %addr, <4 x float> %data) {
; CHECK: ret
define <4 x float> @test_128_12(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x float>*
- %res = load <4 x float>* %vaddr, align 16
+ %res = load <4 x float>, <4 x float>* %vaddr, align 16
ret <4 x float>%res
}
@@ -442,7 +442,7 @@ define void @test_128_13(i8 * %addr, <2 x double> %data) {
; CHECK: ret
define <2 x double> @test_128_14(i8 * %addr) {
%vaddr = bitcast i8* %addr to <2 x double>*
- %res = load <2 x double>* %vaddr, align 1
+ %res = load <2 x double>, <2 x double>* %vaddr, align 1
ret <2 x double>%res
}
@@ -460,7 +460,7 @@ define void @test_128_15(i8 * %addr, <4 x float> %data) {
; CHECK: ret
define <4 x float> @test_128_16(i8 * %addr) {
%vaddr = bitcast i8* %addr to <4 x float>*
- %res = load <4 x float>* %vaddr, align 1
+ %res = load <4 x float>, <4 x float>* %vaddr, align 1
ret <4 x float>%res
}
@@ -470,7 +470,7 @@ define <4 x float> @test_128_16(i8 * %addr) {
define <4 x i32> @test_128_17(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i32>*
- %r = load <4 x i32>* %vaddr, align 16
+ %r = load <4 x i32>, <4 x i32>* %vaddr, align 16
%res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> %old
ret <4 x i32>%res
}
@@ -481,7 +481,7 @@ define <4 x i32> @test_128_17(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
define <4 x i32> @test_128_18(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i32>*
- %r = load <4 x i32>* %vaddr, align 1
+ %r = load <4 x i32>, <4 x i32>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> %old
ret <4 x i32>%res
}
@@ -492,7 +492,7 @@ define <4 x i32> @test_128_18(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
define <4 x i32> @test_128_19(i8 * %addr, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i32>*
- %r = load <4 x i32>* %vaddr, align 16
+ %r = load <4 x i32>, <4 x i32>* %vaddr, align 16
%res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> zeroinitializer
ret <4 x i32>%res
}
@@ -503,7 +503,7 @@ define <4 x i32> @test_128_19(i8 * %addr, <4 x i32> %mask1) {
define <4 x i32> @test_128_20(i8 * %addr, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i32>*
- %r = load <4 x i32>* %vaddr, align 1
+ %r = load <4 x i32>, <4 x i32>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> zeroinitializer
ret <4 x i32>%res
}
@@ -514,7 +514,7 @@ define <4 x i32> @test_128_20(i8 * %addr, <4 x i32> %mask1) {
define <2 x i64> @test_128_21(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x i64>*
- %r = load <2 x i64>* %vaddr, align 16
+ %r = load <2 x i64>, <2 x i64>* %vaddr, align 16
%res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> %old
ret <2 x i64>%res
}
@@ -525,7 +525,7 @@ define <2 x i64> @test_128_21(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
define <2 x i64> @test_128_22(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x i64>*
- %r = load <2 x i64>* %vaddr, align 1
+ %r = load <2 x i64>, <2 x i64>* %vaddr, align 1
%res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> %old
ret <2 x i64>%res
}
@@ -536,7 +536,7 @@ define <2 x i64> @test_128_22(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
define <2 x i64> @test_128_23(i8 * %addr, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x i64>*
- %r = load <2 x i64>* %vaddr, align 16
+ %r = load <2 x i64>, <2 x i64>* %vaddr, align 16
%res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> zeroinitializer
ret <2 x i64>%res
}
@@ -547,7 +547,7 @@ define <2 x i64> @test_128_23(i8 * %addr, <2 x i64> %mask1) {
define <2 x i64> @test_128_24(i8 * %addr, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x i64>*
- %r = load <2 x i64>* %vaddr, align 1
+ %r = load <2 x i64>, <2 x i64>* %vaddr, align 1
%res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> zeroinitializer
ret <2 x i64>%res
}
@@ -558,7 +558,7 @@ define <2 x i64> @test_128_24(i8 * %addr, <2 x i64> %mask1) {
define <4 x float> @test_128_25(i8 * %addr, <4 x float> %old, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x float>*
- %r = load <4 x float>* %vaddr, align 16
+ %r = load <4 x float>, <4 x float>* %vaddr, align 16
%res = select <4 x i1> %mask, <4 x float> %r, <4 x float> %old
ret <4 x float>%res
}
@@ -569,7 +569,7 @@ define <4 x float> @test_128_25(i8 * %addr, <4 x float> %old, <4 x i32> %mask1)
define <4 x float> @test_128_26(i8 * %addr, <4 x float> %old, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x float>*
- %r = load <4 x float>* %vaddr, align 1
+ %r = load <4 x float>, <4 x float>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x float> %r, <4 x float> %old
ret <4 x float>%res
}
@@ -580,7 +580,7 @@ define <4 x float> @test_128_26(i8 * %addr, <4 x float> %old, <4 x i32> %mask1)
define <4 x float> @test_128_27(i8 * %addr, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x float>*
- %r = load <4 x float>* %vaddr, align 16
+ %r = load <4 x float>, <4 x float>* %vaddr, align 16
%res = select <4 x i1> %mask, <4 x float> %r, <4 x float> zeroinitializer
ret <4 x float>%res
}
@@ -591,7 +591,7 @@ define <4 x float> @test_128_27(i8 * %addr, <4 x i32> %mask1) {
define <4 x float> @test_128_28(i8 * %addr, <4 x i32> %mask1) {
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x float>*
- %r = load <4 x float>* %vaddr, align 1
+ %r = load <4 x float>, <4 x float>* %vaddr, align 1
%res = select <4 x i1> %mask, <4 x float> %r, <4 x float> zeroinitializer
ret <4 x float>%res
}
@@ -602,7 +602,7 @@ define <4 x float> @test_128_28(i8 * %addr, <4 x i32> %mask1) {
define <2 x double> @test_128_29(i8 * %addr, <2 x double> %old, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x double>*
- %r = load <2 x double>* %vaddr, align 16
+ %r = load <2 x double>, <2 x double>* %vaddr, align 16
%res = select <2 x i1> %mask, <2 x double> %r, <2 x double> %old
ret <2 x double>%res
}
@@ -613,7 +613,7 @@ define <2 x double> @test_128_29(i8 * %addr, <2 x double> %old, <2 x i64> %mask1
define <2 x double> @test_128_30(i8 * %addr, <2 x double> %old, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x double>*
- %r = load <2 x double>* %vaddr, align 1
+ %r = load <2 x double>, <2 x double>* %vaddr, align 1
%res = select <2 x i1> %mask, <2 x double> %r, <2 x double> %old
ret <2 x double>%res
}
@@ -624,7 +624,7 @@ define <2 x double> @test_128_30(i8 * %addr, <2 x double> %old, <2 x i64> %mask1
define <2 x double> @test_128_31(i8 * %addr, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x double>*
- %r = load <2 x double>* %vaddr, align 16
+ %r = load <2 x double>, <2 x double>* %vaddr, align 16
%res = select <2 x i1> %mask, <2 x double> %r, <2 x double> zeroinitializer
ret <2 x double>%res
}
@@ -635,7 +635,7 @@ define <2 x double> @test_128_31(i8 * %addr, <2 x i64> %mask1) {
define <2 x double> @test_128_32(i8 * %addr, <2 x i64> %mask1) {
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x double>*
- %r = load <2 x double>* %vaddr, align 1
+ %r = load <2 x double>, <2 x double>* %vaddr, align 1
%res = select <2 x i1> %mask, <2 x double> %r, <2 x double> zeroinitializer
ret <2 x double>%res
}
diff --git a/llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll
index b6b508559ca..aed8cb1cf55 100644
--- a/llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-vec-cmp.ll
@@ -45,7 +45,7 @@ define <4 x i64> @test256_4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind
; CHECK: vmovdqa32
; CHECK: ret
define <8 x i32> @test256_5(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwind {
- %y = load <8 x i32>* %yp, align 4
+ %y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
ret <8 x i32> %max
@@ -56,7 +56,7 @@ define <8 x i32> @test256_5(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwin
; CHECK: vmovdqa32
; CHECK: ret
define <8 x i32> @test256_6(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
- %y = load <8 x i32>* %y.ptr, align 4
+ %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sgt <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
ret <8 x i32> %max
@@ -67,7 +67,7 @@ define <8 x i32> @test256_6(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
; CHECK: vmovdqa32
; CHECK: ret
define <8 x i32> @test256_7(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
- %y = load <8 x i32>* %y.ptr, align 4
+ %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sle <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
ret <8 x i32> %max
@@ -78,7 +78,7 @@ define <8 x i32> @test256_7(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
; CHECK: vmovdqa32
; CHECK: ret
define <8 x i32> @test256_8(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nounwind {
- %y = load <8 x i32>* %y.ptr, align 4
+ %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp ule <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
ret <8 x i32> %max
@@ -114,7 +114,7 @@ define <4 x i64> @test256_10(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64
; CHECK: ret
define <4 x i64> @test256_11(<4 x i64> %x, <4 x i64>* %y.ptr, <4 x i64> %x1, <4 x i64> %y1) nounwind {
%mask1 = icmp sgt <4 x i64> %x1, %y1
- %y = load <4 x i64>* %y.ptr, align 4
+ %y = load <4 x i64>, <4 x i64>* %y.ptr, align 4
%mask0 = icmp sgt <4 x i64> %x, %y
%mask = select <4 x i1> %mask0, <4 x i1> %mask1, <4 x i1> zeroinitializer
%max = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %x1
@@ -127,7 +127,7 @@ define <4 x i64> @test256_11(<4 x i64> %x, <4 x i64>* %y.ptr, <4 x i64> %x1, <4
; CHECK: ret
define <8 x i32> @test256_12(<8 x i32> %x, <8 x i32>* %y.ptr, <8 x i32> %x1, <8 x i32> %y1) nounwind {
%mask1 = icmp sge <8 x i32> %x1, %y1
- %y = load <8 x i32>* %y.ptr, align 4
+ %y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask0 = icmp ule <8 x i32> %x, %y
%mask = select <8 x i1> %mask0, <8 x i1> %mask1, <8 x i1> zeroinitializer
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %x1
@@ -139,7 +139,7 @@ define <8 x i32> @test256_12(<8 x i32> %x, <8 x i32>* %y.ptr, <8 x i32> %x1, <8
; CHECK: vmovdqa64
; CHECK: ret
define <4 x i64> @test256_13(<4 x i64> %x, <4 x i64> %x1, i64* %yb.ptr) nounwind {
- %yb = load i64* %yb.ptr, align 4
+ %yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <4 x i64> undef, i64 %yb, i32 0
%y = shufflevector <4 x i64> %y.0, <4 x i64> undef, <4 x i32> zeroinitializer
%mask = icmp eq <4 x i64> %x, %y
@@ -152,7 +152,7 @@ define <4 x i64> @test256_13(<4 x i64> %x, <4 x i64> %x1, i64* %yb.ptr) nounwind
; CHECK: vmovdqa32
; CHECK: ret
define <8 x i32> @test256_14(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1) nounwind {
- %yb = load i32* %yb.ptr, align 4
+ %yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <8 x i32> undef, i32 %yb, i32 0
%y = shufflevector <8 x i32> %y.0, <8 x i32> undef, <8 x i32> zeroinitializer
%mask = icmp sle <8 x i32> %x, %y
@@ -166,7 +166,7 @@ define <8 x i32> @test256_14(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1) nounwind
; CHECK: ret
define <8 x i32> @test256_15(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1, <8 x i32> %y1) nounwind {
%mask1 = icmp sge <8 x i32> %x1, %y1
- %yb = load i32* %yb.ptr, align 4
+ %yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <8 x i32> undef, i32 %yb, i32 0
%y = shufflevector <8 x i32> %y.0, <8 x i32> undef, <8 x i32> zeroinitializer
%mask0 = icmp sgt <8 x i32> %x, %y
@@ -181,7 +181,7 @@ define <8 x i32> @test256_15(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1, <8 x i32
; CHECK: ret
define <4 x i64> @test256_16(<4 x i64> %x, i64* %yb.ptr, <4 x i64> %x1, <4 x i64> %y1) nounwind {
%mask1 = icmp sge <4 x i64> %x1, %y1
- %yb = load i64* %yb.ptr, align 4
+ %yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <4 x i64> undef, i64 %yb, i32 0
%y = shufflevector <4 x i64> %y.0, <4 x i64> undef, <4 x i32> zeroinitializer
%mask0 = icmp sgt <4 x i64> %x, %y
@@ -235,7 +235,7 @@ define <2 x i64> @test128_4(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1) nounwind
; CHECK: vmovdqa32
; CHECK: ret
define <4 x i32> @test128_5(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwind {
- %y = load <4 x i32>* %yp, align 4
+ %y = load <4 x i32>, <4 x i32>* %yp, align 4
%mask = icmp eq <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
ret <4 x i32> %max
@@ -246,7 +246,7 @@ define <4 x i32> @test128_5(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwin
; CHECK: vmovdqa32
; CHECK: ret
define <4 x i32> @test128_6(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
- %y = load <4 x i32>* %y.ptr, align 4
+ %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sgt <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
ret <4 x i32> %max
@@ -257,7 +257,7 @@ define <4 x i32> @test128_6(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
; CHECK: vmovdqa32
; CHECK: ret
define <4 x i32> @test128_7(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
- %y = load <4 x i32>* %y.ptr, align 4
+ %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sle <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
ret <4 x i32> %max
@@ -268,7 +268,7 @@ define <4 x i32> @test128_7(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
; CHECK: vmovdqa32
; CHECK: ret
define <4 x i32> @test128_8(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nounwind {
- %y = load <4 x i32>* %y.ptr, align 4
+ %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ule <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
ret <4 x i32> %max
@@ -304,7 +304,7 @@ define <2 x i64> @test128_10(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64
; CHECK: ret
define <2 x i64> @test128_11(<2 x i64> %x, <2 x i64>* %y.ptr, <2 x i64> %x1, <2 x i64> %y1) nounwind {
%mask1 = icmp sgt <2 x i64> %x1, %y1
- %y = load <2 x i64>* %y.ptr, align 4
+ %y = load <2 x i64>, <2 x i64>* %y.ptr, align 4
%mask0 = icmp sgt <2 x i64> %x, %y
%mask = select <2 x i1> %mask0, <2 x i1> %mask1, <2 x i1> zeroinitializer
%max = select <2 x i1> %mask, <2 x i64> %x, <2 x i64> %x1
@@ -317,7 +317,7 @@ define <2 x i64> @test128_11(<2 x i64> %x, <2 x i64>* %y.ptr, <2 x i64> %x1, <2
; CHECK: ret
define <4 x i32> @test128_12(<4 x i32> %x, <4 x i32>* %y.ptr, <4 x i32> %x1, <4 x i32> %y1) nounwind {
%mask1 = icmp sge <4 x i32> %x1, %y1
- %y = load <4 x i32>* %y.ptr, align 4
+ %y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask0 = icmp ule <4 x i32> %x, %y
%mask = select <4 x i1> %mask0, <4 x i1> %mask1, <4 x i1> zeroinitializer
%max = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %x1
@@ -329,7 +329,7 @@ define <4 x i32> @test128_12(<4 x i32> %x, <4 x i32>* %y.ptr, <4 x i32> %x1, <4
; CHECK: vmovdqa64
; CHECK: ret
define <2 x i64> @test128_13(<2 x i64> %x, <2 x i64> %x1, i64* %yb.ptr) nounwind {
- %yb = load i64* %yb.ptr, align 4
+ %yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <2 x i64> undef, i64 %yb, i32 0
%y = insertelement <2 x i64> %y.0, i64 %yb, i32 1
%mask = icmp eq <2 x i64> %x, %y
@@ -342,7 +342,7 @@ define <2 x i64> @test128_13(<2 x i64> %x, <2 x i64> %x1, i64* %yb.ptr) nounwind
; CHECK: vmovdqa32
; CHECK: ret
define <4 x i32> @test128_14(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1) nounwind {
- %yb = load i32* %yb.ptr, align 4
+ %yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <4 x i32> undef, i32 %yb, i32 0
%y = shufflevector <4 x i32> %y.0, <4 x i32> undef, <4 x i32> zeroinitializer
%mask = icmp sle <4 x i32> %x, %y
@@ -356,7 +356,7 @@ define <4 x i32> @test128_14(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1) nounwind
; CHECK: ret
define <4 x i32> @test128_15(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1, <4 x i32> %y1) nounwind {
%mask1 = icmp sge <4 x i32> %x1, %y1
- %yb = load i32* %yb.ptr, align 4
+ %yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <4 x i32> undef, i32 %yb, i32 0
%y = shufflevector <4 x i32> %y.0, <4 x i32> undef, <4 x i32> zeroinitializer
%mask0 = icmp sgt <4 x i32> %x, %y
@@ -371,7 +371,7 @@ define <4 x i32> @test128_15(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1, <4 x i32
; CHECK: ret
define <2 x i64> @test128_16(<2 x i64> %x, i64* %yb.ptr, <2 x i64> %x1, <2 x i64> %y1) nounwind {
%mask1 = icmp sge <2 x i64> %x1, %y1
- %yb = load i64* %yb.ptr, align 4
+ %yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <2 x i64> undef, i64 %yb, i32 0
%y = insertelement <2 x i64> %y.0, i64 %yb, i32 1
%mask0 = icmp sgt <2 x i64> %x, %y
diff --git a/llvm/test/CodeGen/X86/bitcast-mmx.ll b/llvm/test/CodeGen/X86/bitcast-mmx.ll
index de1cb5ad830..4107f3914f8 100644
--- a/llvm/test/CodeGen/X86/bitcast-mmx.ll
+++ b/llvm/test/CodeGen/X86/bitcast-mmx.ll
@@ -64,8 +64,8 @@ define i64 @t3(<1 x i64>* %y, i32* %n) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %y to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %n, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %n, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
diff --git a/llvm/test/CodeGen/X86/block-placement.ll b/llvm/test/CodeGen/X86/block-placement.ll
index eaba537709b..e0276e42d4d 100644
--- a/llvm/test/CodeGen/X86/block-placement.ll
+++ b/llvm/test/CodeGen/X86/block-placement.ll
@@ -25,7 +25,7 @@ define i32 @test_ifchains(i32 %i, i32* %a, i32 %b) {
entry:
%gep1 = getelementptr i32, i32* %a, i32 1
- %val1 = load i32* %gep1
+ %val1 = load i32, i32* %gep1
%cond1 = icmp ugt i32 %val1, 1
br i1 %cond1, label %then1, label %else1, !prof !0
@@ -35,7 +35,7 @@ then1:
else1:
%gep2 = getelementptr i32, i32* %a, i32 2
- %val2 = load i32* %gep2
+ %val2 = load i32, i32* %gep2
%cond2 = icmp ugt i32 %val2, 2
br i1 %cond2, label %then2, label %else2, !prof !0
@@ -45,7 +45,7 @@ then2:
else2:
%gep3 = getelementptr i32, i32* %a, i32 3
- %val3 = load i32* %gep3
+ %val3 = load i32, i32* %gep3
%cond3 = icmp ugt i32 %val3, 3
br i1 %cond3, label %then3, label %else3, !prof !0
@@ -55,7 +55,7 @@ then3:
else3:
%gep4 = getelementptr i32, i32* %a, i32 4
- %val4 = load i32* %gep4
+ %val4 = load i32, i32* %gep4
%cond4 = icmp ugt i32 %val4, 4
br i1 %cond4, label %then4, label %else4, !prof !0
@@ -65,7 +65,7 @@ then4:
else4:
%gep5 = getelementptr i32, i32* %a, i32 3
- %val5 = load i32* %gep5
+ %val5 = load i32, i32* %gep5
%cond5 = icmp ugt i32 %val5, 3
br i1 %cond5, label %then5, label %exit, !prof !0
@@ -114,7 +114,7 @@ unlikely2:
body3:
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
%exitcond = icmp eq i32 %next, %i
@@ -167,7 +167,7 @@ bail3:
body4:
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
%exitcond = icmp eq i32 %next, %i
@@ -198,7 +198,7 @@ body0:
body1:
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%bailcond1 = icmp eq i32 %sum, 42
br label %body0
@@ -223,7 +223,7 @@ body0:
%iv = phi i32 [ 0, %entry ], [ %next, %body1 ]
%base = phi i32 [ 0, %entry ], [ %sum, %body1 ]
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%bailcond1 = icmp eq i32 %sum, 42
br i1 %bailcond1, label %exit, label %body1
@@ -253,7 +253,7 @@ body:
%iv = phi i32 [ 0, %entry ], [ %next, %body ]
%base = phi i32 [ 0, %entry ], [ %sum, %body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
%exitcond = icmp eq i32 %next, %i
@@ -280,7 +280,7 @@ entry:
loop.body.1:
%iv = phi i32 [ 0, %entry ], [ %next, %loop.body.2 ]
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %bidx = load i32* %arrayidx
+ %bidx = load i32, i32* %arrayidx
br label %inner.loop.body
inner.loop.body:
@@ -288,7 +288,7 @@ inner.loop.body:
%base = phi i32 [ 0, %loop.body.1 ], [ %sum, %inner.loop.body ]
%scaled_idx = mul i32 %bidx, %iv
%inner.arrayidx = getelementptr inbounds i32, i32* %b, i32 %scaled_idx
- %0 = load i32* %inner.arrayidx
+ %0 = load i32, i32* %inner.arrayidx
%sum = add nsw i32 %0, %base
%inner.next = add i32 %iv, 1
%inner.exitcond = icmp eq i32 %inner.next, %i
@@ -322,13 +322,13 @@ loop.body1:
br i1 undef, label %loop.body3, label %loop.body2
loop.body2:
- %ptr = load i32** undef, align 4
+ %ptr = load i32*, i32** undef, align 4
br label %loop.body3
loop.body3:
%myptr = phi i32* [ %ptr2, %loop.body5 ], [ %ptr, %loop.body2 ], [ undef, %loop.body1 ]
%bcmyptr = bitcast i32* %myptr to i32*
- %val = load i32* %bcmyptr, align 4
+ %val = load i32, i32* %bcmyptr, align 4
%comp = icmp eq i32 %val, 48
br i1 %comp, label %loop.body4, label %loop.body5
@@ -336,7 +336,7 @@ loop.body4:
br i1 undef, label %loop.header, label %loop.body5
loop.body5:
- %ptr2 = load i32** undef, align 4
+ %ptr2 = load i32*, i32** undef, align 4
br label %loop.body3
}
@@ -366,7 +366,7 @@ loop.header:
br i1 %comp0, label %bail, label %loop.body1
loop.body1:
- %val0 = load i32** undef, align 4
+ %val0 = load i32*, i32** undef, align 4
br i1 undef, label %loop.body2, label %loop.inner1.begin
loop.body2:
@@ -375,7 +375,7 @@ loop.body2:
loop.body3:
%ptr1 = getelementptr inbounds i32, i32* %val0, i32 0
%castptr1 = bitcast i32* %ptr1 to i32**
- %val1 = load i32** %castptr1, align 4
+ %val1 = load i32*, i32** %castptr1, align 4
br label %loop.inner1.begin
loop.inner1.begin:
@@ -387,7 +387,7 @@ loop.inner1.begin:
loop.inner1.end:
%ptr2 = getelementptr inbounds i32, i32* %valphi, i32 0
%castptr2 = bitcast i32* %ptr2 to i32**
- %val2 = load i32** %castptr2, align 4
+ %val2 = load i32*, i32** %castptr2, align 4
br label %loop.inner1.begin
loop.body4.dead:
@@ -486,7 +486,7 @@ entry:
br i1 %cond, label %entry.if.then_crit_edge, label %lor.lhs.false, !prof !1
entry.if.then_crit_edge:
- %.pre14 = load i8* undef, align 1
+ %.pre14 = load i8, i8* undef, align 1
br label %if.then
lor.lhs.false:
@@ -616,7 +616,7 @@ body:
br label %loop2a
loop1:
- %next.load = load i32** undef
+ %next.load = load i32*, i32** undef
br i1 %comp.a, label %loop2a, label %loop2b
loop2a:
@@ -728,199 +728,199 @@ define void @many_unanalyzable_branches() {
entry:
br label %0
- %val0 = load volatile float* undef
+ %val0 = load volatile float, float* undef
%cmp0 = fcmp une float %val0, undef
br i1 %cmp0, label %1, label %0
- %val1 = load volatile float* undef
+ %val1 = load volatile float, float* undef
%cmp1 = fcmp une float %val1, undef
br i1 %cmp1, label %2, label %1
- %val2 = load volatile float* undef
+ %val2 = load volatile float, float* undef
%cmp2 = fcmp une float %val2, undef
br i1 %cmp2, label %3, label %2
- %val3 = load volatile float* undef
+ %val3 = load volatile float, float* undef
%cmp3 = fcmp une float %val3, undef
br i1 %cmp3, label %4, label %3
- %val4 = load volatile float* undef
+ %val4 = load volatile float, float* undef
%cmp4 = fcmp une float %val4, undef
br i1 %cmp4, label %5, label %4
- %val5 = load volatile float* undef
+ %val5 = load volatile float, float* undef
%cmp5 = fcmp une float %val5, undef
br i1 %cmp5, label %6, label %5
- %val6 = load volatile float* undef
+ %val6 = load volatile float, float* undef
%cmp6 = fcmp une float %val6, undef
br i1 %cmp6, label %7, label %6
- %val7 = load volatile float* undef
+ %val7 = load volatile float, float* undef
%cmp7 = fcmp une float %val7, undef
br i1 %cmp7, label %8, label %7
- %val8 = load volatile float* undef
+ %val8 = load volatile float, float* undef
%cmp8 = fcmp une float %val8, undef
br i1 %cmp8, label %9, label %8
- %val9 = load volatile float* undef
+ %val9 = load volatile float, float* undef
%cmp9 = fcmp une float %val9, undef
br i1 %cmp9, label %10, label %9
- %val10 = load volatile float* undef
+ %val10 = load volatile float, float* undef
%cmp10 = fcmp une float %val10, undef
br i1 %cmp10, label %11, label %10
- %val11 = load volatile float* undef
+ %val11 = load volatile float, float* undef
%cmp11 = fcmp une float %val11, undef
br i1 %cmp11, label %12, label %11
- %val12 = load volatile float* undef
+ %val12 = load volatile float, float* undef
%cmp12 = fcmp une float %val12, undef
br i1 %cmp12, label %13, label %12
- %val13 = load volatile float* undef
+ %val13 = load volatile float, float* undef
%cmp13 = fcmp une float %val13, undef
br i1 %cmp13, label %14, label %13
- %val14 = load volatile float* undef
+ %val14 = load volatile float, float* undef
%cmp14 = fcmp une float %val14, undef
br i1 %cmp14, label %15, label %14
- %val15 = load volatile float* undef
+ %val15 = load volatile float, float* undef
%cmp15 = fcmp une float %val15, undef
br i1 %cmp15, label %16, label %15
- %val16 = load volatile float* undef
+ %val16 = load volatile float, float* undef
%cmp16 = fcmp une float %val16, undef
br i1 %cmp16, label %17, label %16
- %val17 = load volatile float* undef
+ %val17 = load volatile float, float* undef
%cmp17 = fcmp une float %val17, undef
br i1 %cmp17, label %18, label %17
- %val18 = load volatile float* undef
+ %val18 = load volatile float, float* undef
%cmp18 = fcmp une float %val18, undef
br i1 %cmp18, label %19, label %18
- %val19 = load volatile float* undef
+ %val19 = load volatile float, float* undef
%cmp19 = fcmp une float %val19, undef
br i1 %cmp19, label %20, label %19
- %val20 = load volatile float* undef
+ %val20 = load volatile float, float* undef
%cmp20 = fcmp une float %val20, undef
br i1 %cmp20, label %21, label %20
- %val21 = load volatile float* undef
+ %val21 = load volatile float, float* undef
%cmp21 = fcmp une float %val21, undef
br i1 %cmp21, label %22, label %21
- %val22 = load volatile float* undef
+ %val22 = load volatile float, float* undef
%cmp22 = fcmp une float %val22, undef
br i1 %cmp22, label %23, label %22
- %val23 = load volatile float* undef
+ %val23 = load volatile float, float* undef
%cmp23 = fcmp une float %val23, undef
br i1 %cmp23, label %24, label %23
- %val24 = load volatile float* undef
+ %val24 = load volatile float, float* undef
%cmp24 = fcmp une float %val24, undef
br i1 %cmp24, label %25, label %24
- %val25 = load volatile float* undef
+ %val25 = load volatile float, float* undef
%cmp25 = fcmp une float %val25, undef
br i1 %cmp25, label %26, label %25
- %val26 = load volatile float* undef
+ %val26 = load volatile float, float* undef
%cmp26 = fcmp une float %val26, undef
br i1 %cmp26, label %27, label %26
- %val27 = load volatile float* undef
+ %val27 = load volatile float, float* undef
%cmp27 = fcmp une float %val27, undef
br i1 %cmp27, label %28, label %27
- %val28 = load volatile float* undef
+ %val28 = load volatile float, float* undef
%cmp28 = fcmp une float %val28, undef
br i1 %cmp28, label %29, label %28
- %val29 = load volatile float* undef
+ %val29 = load volatile float, float* undef
%cmp29 = fcmp une float %val29, undef
br i1 %cmp29, label %30, label %29
- %val30 = load volatile float* undef
+ %val30 = load volatile float, float* undef
%cmp30 = fcmp une float %val30, undef
br i1 %cmp30, label %31, label %30
- %val31 = load volatile float* undef
+ %val31 = load volatile float, float* undef
%cmp31 = fcmp une float %val31, undef
br i1 %cmp31, label %32, label %31
- %val32 = load volatile float* undef
+ %val32 = load volatile float, float* undef
%cmp32 = fcmp une float %val32, undef
br i1 %cmp32, label %33, label %32
- %val33 = load volatile float* undef
+ %val33 = load volatile float, float* undef
%cmp33 = fcmp une float %val33, undef
br i1 %cmp33, label %34, label %33
- %val34 = load volatile float* undef
+ %val34 = load volatile float, float* undef
%cmp34 = fcmp une float %val34, undef
br i1 %cmp34, label %35, label %34
- %val35 = load volatile float* undef
+ %val35 = load volatile float, float* undef
%cmp35 = fcmp une float %val35, undef
br i1 %cmp35, label %36, label %35
- %val36 = load volatile float* undef
+ %val36 = load volatile float, float* undef
%cmp36 = fcmp une float %val36, undef
br i1 %cmp36, label %37, label %36
- %val37 = load volatile float* undef
+ %val37 = load volatile float, float* undef
%cmp37 = fcmp une float %val37, undef
br i1 %cmp37, label %38, label %37
- %val38 = load volatile float* undef
+ %val38 = load volatile float, float* undef
%cmp38 = fcmp une float %val38, undef
br i1 %cmp38, label %39, label %38
- %val39 = load volatile float* undef
+ %val39 = load volatile float, float* undef
%cmp39 = fcmp une float %val39, undef
br i1 %cmp39, label %40, label %39
- %val40 = load volatile float* undef
+ %val40 = load volatile float, float* undef
%cmp40 = fcmp une float %val40, undef
br i1 %cmp40, label %41, label %40
- %val41 = load volatile float* undef
+ %val41 = load volatile float, float* undef
%cmp41 = fcmp une float %val41, undef
br i1 %cmp41, label %42, label %41
- %val42 = load volatile float* undef
+ %val42 = load volatile float, float* undef
%cmp42 = fcmp une float %val42, undef
br i1 %cmp42, label %43, label %42
- %val43 = load volatile float* undef
+ %val43 = load volatile float, float* undef
%cmp43 = fcmp une float %val43, undef
br i1 %cmp43, label %44, label %43
- %val44 = load volatile float* undef
+ %val44 = load volatile float, float* undef
%cmp44 = fcmp une float %val44, undef
br i1 %cmp44, label %45, label %44
- %val45 = load volatile float* undef
+ %val45 = load volatile float, float* undef
%cmp45 = fcmp une float %val45, undef
br i1 %cmp45, label %46, label %45
- %val46 = load volatile float* undef
+ %val46 = load volatile float, float* undef
%cmp46 = fcmp une float %val46, undef
br i1 %cmp46, label %47, label %46
- %val47 = load volatile float* undef
+ %val47 = load volatile float, float* undef
%cmp47 = fcmp une float %val47, undef
br i1 %cmp47, label %48, label %47
- %val48 = load volatile float* undef
+ %val48 = load volatile float, float* undef
%cmp48 = fcmp une float %val48, undef
br i1 %cmp48, label %49, label %48
- %val49 = load volatile float* undef
+ %val49 = load volatile float, float* undef
%cmp49 = fcmp une float %val49, undef
br i1 %cmp49, label %50, label %49
- %val50 = load volatile float* undef
+ %val50 = load volatile float, float* undef
%cmp50 = fcmp une float %val50, undef
br i1 %cmp50, label %51, label %50
- %val51 = load volatile float* undef
+ %val51 = load volatile float, float* undef
%cmp51 = fcmp une float %val51, undef
br i1 %cmp51, label %52, label %51
- %val52 = load volatile float* undef
+ %val52 = load volatile float, float* undef
%cmp52 = fcmp une float %val52, undef
br i1 %cmp52, label %53, label %52
- %val53 = load volatile float* undef
+ %val53 = load volatile float, float* undef
%cmp53 = fcmp une float %val53, undef
br i1 %cmp53, label %54, label %53
- %val54 = load volatile float* undef
+ %val54 = load volatile float, float* undef
%cmp54 = fcmp une float %val54, undef
br i1 %cmp54, label %55, label %54
- %val55 = load volatile float* undef
+ %val55 = load volatile float, float* undef
%cmp55 = fcmp une float %val55, undef
br i1 %cmp55, label %56, label %55
- %val56 = load volatile float* undef
+ %val56 = load volatile float, float* undef
%cmp56 = fcmp une float %val56, undef
br i1 %cmp56, label %57, label %56
- %val57 = load volatile float* undef
+ %val57 = load volatile float, float* undef
%cmp57 = fcmp une float %val57, undef
br i1 %cmp57, label %58, label %57
- %val58 = load volatile float* undef
+ %val58 = load volatile float, float* undef
%cmp58 = fcmp une float %val58, undef
br i1 %cmp58, label %59, label %58
- %val59 = load volatile float* undef
+ %val59 = load volatile float, float* undef
%cmp59 = fcmp une float %val59, undef
br i1 %cmp59, label %60, label %59
- %val60 = load volatile float* undef
+ %val60 = load volatile float, float* undef
%cmp60 = fcmp une float %val60, undef
br i1 %cmp60, label %61, label %60
- %val61 = load volatile float* undef
+ %val61 = load volatile float, float* undef
%cmp61 = fcmp une float %val61, undef
br i1 %cmp61, label %62, label %61
- %val62 = load volatile float* undef
+ %val62 = load volatile float, float* undef
%cmp62 = fcmp une float %val62, undef
br i1 %cmp62, label %63, label %62
- %val63 = load volatile float* undef
+ %val63 = load volatile float, float* undef
%cmp63 = fcmp une float %val63, undef
br i1 %cmp63, label %64, label %63
- %val64 = load volatile float* undef
+ %val64 = load volatile float, float* undef
%cmp64 = fcmp une float %val64, undef
br i1 %cmp64, label %65, label %64
@@ -979,14 +979,14 @@ if.then:
%dec = add nsw i32 %l.0, -1
%idxprom = sext i32 %dec to i64
%arrayidx = getelementptr inbounds double, double* %ra, i64 %idxprom
- %0 = load double* %arrayidx, align 8
+ %0 = load double, double* %arrayidx, align 8
br label %if.end10
if.else:
%idxprom1 = sext i32 %ir.0 to i64
%arrayidx2 = getelementptr inbounds double, double* %ra, i64 %idxprom1
- %1 = load double* %arrayidx2, align 8
- %2 = load double* %arrayidx3, align 8
+ %1 = load double, double* %arrayidx2, align 8
+ %2 = load double, double* %arrayidx3, align 8
store double %2, double* %arrayidx2, align 8
%dec6 = add nsw i32 %ir.0, -1
%cmp7 = icmp eq i32 %dec6, 1
@@ -1020,11 +1020,11 @@ while.body:
land.lhs.true:
%idxprom13 = sext i32 %j.0 to i64
%arrayidx14 = getelementptr inbounds double, double* %ra, i64 %idxprom13
- %3 = load double* %arrayidx14, align 8
+ %3 = load double, double* %arrayidx14, align 8
%add15 = add nsw i32 %j.0, 1
%idxprom16 = sext i32 %add15 to i64
%arrayidx17 = getelementptr inbounds double, double* %ra, i64 %idxprom16
- %4 = load double* %arrayidx17, align 8
+ %4 = load double, double* %arrayidx17, align 8
%cmp18 = fcmp olt double %3, %4
br i1 %cmp18, label %if.then19, label %if.end20
@@ -1035,7 +1035,7 @@ if.end20:
%j.1 = phi i32 [ %add15, %if.then19 ], [ %j.0, %land.lhs.true ], [ %j.0, %while.body ]
%idxprom21 = sext i32 %j.1 to i64
%arrayidx22 = getelementptr inbounds double, double* %ra, i64 %idxprom21
- %5 = load double* %arrayidx22, align 8
+ %5 = load double, double* %arrayidx22, align 8
%cmp23 = fcmp olt double %rra.0, %5
br i1 %cmp23, label %if.then24, label %while.cond
@@ -1066,7 +1066,7 @@ define i32 @test_cold_calls(i32* %a) {
entry:
%gep1 = getelementptr i32, i32* %a, i32 1
- %val1 = load i32* %gep1
+ %val1 = load i32, i32* %gep1
%cond1 = icmp ugt i32 %val1, 1
br i1 %cond1, label %then, label %else
@@ -1076,7 +1076,7 @@ then:
else:
%gep2 = getelementptr i32, i32* %a, i32 2
- %val2 = load i32* %gep2
+ %val2 = load i32, i32* %gep2
br label %exit
exit:
diff --git a/llvm/test/CodeGen/X86/bmi.ll b/llvm/test/CodeGen/X86/bmi.ll
index a70720926de..ccc4533a32b 100644
--- a/llvm/test/CodeGen/X86/bmi.ll
+++ b/llvm/test/CodeGen/X86/bmi.ll
@@ -27,7 +27,7 @@ define i32 @t3(i32 %x) nounwind {
}
define i32 @tzcnt32_load(i32* %x) nounwind {
- %x1 = load i32* %x
+ %x1 = load i32, i32* %x
%tmp = tail call i32 @llvm.cttz.i32(i32 %x1, i1 false )
ret i32 %tmp
; CHECK-LABEL: tzcnt32_load:
@@ -78,7 +78,7 @@ define i32 @andn32(i32 %x, i32 %y) nounwind readnone {
}
define i32 @andn32_load(i32 %x, i32* %y) nounwind readnone {
- %y1 = load i32* %y
+ %y1 = load i32, i32* %y
%tmp1 = xor i32 %x, -1
%tmp2 = and i32 %y1, %tmp1
ret i32 %tmp2
@@ -102,7 +102,7 @@ define i32 @bextr32(i32 %x, i32 %y) nounwind readnone {
}
define i32 @bextr32_load(i32* %x, i32 %y) nounwind readnone {
- %x1 = load i32* %x
+ %x1 = load i32, i32* %x
%tmp = tail call i32 @llvm.x86.bmi.bextr.32(i32 %x1, i32 %y)
ret i32 %tmp
; CHECK-LABEL: bextr32_load:
@@ -120,7 +120,7 @@ define i32 @bextr32b(i32 %x) nounwind uwtable readnone ssp {
}
define i32 @bextr32b_load(i32* %x) nounwind uwtable readnone ssp {
- %1 = load i32* %x
+ %1 = load i32, i32* %x
%2 = lshr i32 %1, 4
%3 = and i32 %2, 4095
ret i32 %3
@@ -153,7 +153,7 @@ define i32 @bzhi32(i32 %x, i32 %y) nounwind readnone {
}
define i32 @bzhi32_load(i32* %x, i32 %y) nounwind readnone {
- %x1 = load i32* %x
+ %x1 = load i32, i32* %x
%tmp = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %x1, i32 %y)
ret i32 %tmp
; CHECK-LABEL: bzhi32_load:
@@ -184,7 +184,7 @@ entry:
define i32 @bzhi32b_load(i32* %w, i8 zeroext %index) #0 {
entry:
- %x = load i32* %w
+ %x = load i32, i32* %w
%conv = zext i8 %index to i32
%shl = shl i32 1, %conv
%sub = add nsw i32 %shl, -1
@@ -242,7 +242,7 @@ define i32 @blsi32(i32 %x) nounwind readnone {
}
define i32 @blsi32_load(i32* %x) nounwind readnone {
- %x1 = load i32* %x
+ %x1 = load i32, i32* %x
%tmp = sub i32 0, %x1
%tmp2 = and i32 %x1, %tmp
ret i32 %tmp2
@@ -267,7 +267,7 @@ define i32 @blsmsk32(i32 %x) nounwind readnone {
}
define i32 @blsmsk32_load(i32* %x) nounwind readnone {
- %x1 = load i32* %x
+ %x1 = load i32, i32* %x
%tmp = sub i32 %x1, 1
%tmp2 = xor i32 %x1, %tmp
ret i32 %tmp2
@@ -292,7 +292,7 @@ define i32 @blsr32(i32 %x) nounwind readnone {
}
define i32 @blsr32_load(i32* %x) nounwind readnone {
- %x1 = load i32* %x
+ %x1 = load i32, i32* %x
%tmp = sub i32 %x1, 1
%tmp2 = and i32 %x1, %tmp
ret i32 %tmp2
@@ -316,7 +316,7 @@ define i32 @pdep32(i32 %x, i32 %y) nounwind readnone {
}
define i32 @pdep32_load(i32 %x, i32* %y) nounwind readnone {
- %y1 = load i32* %y
+ %y1 = load i32, i32* %y
%tmp = tail call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 %y1)
ret i32 %tmp
; CHECK-LABEL: pdep32_load:
@@ -342,7 +342,7 @@ define i32 @pext32(i32 %x, i32 %y) nounwind readnone {
}
define i32 @pext32_load(i32 %x, i32* %y) nounwind readnone {
- %y1 = load i32* %y
+ %y1 = load i32, i32* %y
%tmp = tail call i32 @llvm.x86.bmi.pext.32(i32 %x, i32 %y1)
ret i32 %tmp
; CHECK-LABEL: pext32_load:
diff --git a/llvm/test/CodeGen/X86/break-anti-dependencies.ll b/llvm/test/CodeGen/X86/break-anti-dependencies.ll
index 614d0adc727..c54ac108819 100644
--- a/llvm/test/CodeGen/X86/break-anti-dependencies.ll
+++ b/llvm/test/CodeGen/X86/break-anti-dependencies.ll
@@ -10,14 +10,14 @@
define void @goo(double* %r, double* %p, double* %q) nounwind {
entry:
- %0 = load double* %p, align 8
+ %0 = load double, double* %p, align 8
%1 = fadd double %0, 1.100000e+00
%2 = fmul double %1, 1.200000e+00
%3 = fadd double %2, 1.300000e+00
%4 = fmul double %3, 1.400000e+00
%5 = fadd double %4, 1.500000e+00
%6 = fptosi double %5 to i32
- %7 = load double* %r, align 8
+ %7 = load double, double* %r, align 8
%8 = fadd double %7, 7.100000e+00
%9 = fmul double %8, 7.200000e+00
%10 = fadd double %9, 7.300000e+00
diff --git a/llvm/test/CodeGen/X86/break-false-dep.ll b/llvm/test/CodeGen/X86/break-false-dep.ll
index 03a5f201ee4..699de22d5b5 100644
--- a/llvm/test/CodeGen/X86/break-false-dep.ll
+++ b/llvm/test/CodeGen/X86/break-false-dep.ll
@@ -8,7 +8,7 @@ entry:
; SSE: movss ([[A0:%rdi|%rcx]]), %xmm0
; SSE: cvtss2sd %xmm0, %xmm0
- %0 = load float* %x, align 4
+ %0 = load float, float* %x, align 4
%1 = fpext float %0 to double
ret double %1
}
@@ -17,7 +17,7 @@ define float @t2(double* nocapture %x) nounwind readonly ssp optsize {
entry:
; SSE-LABEL: t2:
; SSE: cvtsd2ss ([[A0]]), %xmm0
- %0 = load double* %x, align 8
+ %0 = load double, double* %x, align 8
%1 = fptrunc double %0 to float
ret float %1
}
@@ -27,7 +27,7 @@ entry:
; SSE-LABEL: squirtf:
; SSE: movss ([[A0]]), %xmm0
; SSE: sqrtss %xmm0, %xmm0
- %z = load float* %x
+ %z = load float, float* %x
%t = call float @llvm.sqrt.f32(float %z)
ret float %t
}
@@ -37,7 +37,7 @@ entry:
; SSE-LABEL: squirt:
; SSE: movsd ([[A0]]), %xmm0
; SSE: sqrtsd %xmm0, %xmm0
- %z = load double* %x
+ %z = load double, double* %x
%t = call double @llvm.sqrt.f64(double %z)
ret double %t
}
@@ -46,7 +46,7 @@ define float @squirtf_size(float* %x) nounwind optsize {
entry:
; SSE-LABEL: squirtf_size:
; SSE: sqrtss ([[A0]]), %xmm0
- %z = load float* %x
+ %z = load float, float* %x
%t = call float @llvm.sqrt.f32(float %z)
ret float %t
}
@@ -55,7 +55,7 @@ define double @squirt_size(double* %x) nounwind optsize {
entry:
; SSE-LABEL: squirt_size:
; SSE: sqrtsd ([[A0]]), %xmm0
- %z = load double* %x
+ %z = load double, double* %x
%t = call double @llvm.sqrt.f64(double %z)
ret double %t
}
@@ -120,13 +120,13 @@ for.end: ; preds = %for.body, %entry
; SSE: cvtsi2sdq %{{r[0-9a-x]+}}, %[[REG]]
define i64 @loopdep2(i64* nocapture %x, double* nocapture %y) nounwind {
entry:
- %vx = load i64* %x
+ %vx = load i64, i64* %x
br label %loop
loop:
%i = phi i64 [ 1, %entry ], [ %inc, %loop ]
%s1 = phi i64 [ %vx, %entry ], [ %s2, %loop ]
%fi = sitofp i64 %i to double
- %vy = load double* %y
+ %vy = load double, double* %y
%fipy = fadd double %fi, %vy
%iipy = fptosi double %fipy to i64
%s2 = add i64 %s1, %iipy
@@ -159,16 +159,16 @@ for.cond1.preheader: ; preds = %for.inc14, %entry
for.body3:
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
%arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @v, i64 0, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%conv = sitofp i32 %0 to double
%arrayidx5 = getelementptr inbounds [1024 x double], [1024 x double]* @x, i64 0, i64 %indvars.iv
- %1 = load double* %arrayidx5, align 8
+ %1 = load double, double* %arrayidx5, align 8
%mul = fmul double %conv, %1
%arrayidx7 = getelementptr inbounds [1024 x double], [1024 x double]* @y, i64 0, i64 %indvars.iv
- %2 = load double* %arrayidx7, align 8
+ %2 = load double, double* %arrayidx7, align 8
%mul8 = fmul double %mul, %2
%arrayidx10 = getelementptr inbounds [1024 x double], [1024 x double]* @z, i64 0, i64 %indvars.iv
- %3 = load double* %arrayidx10, align 8
+ %3 = load double, double* %arrayidx10, align 8
%mul11 = fmul double %mul8, %3
%arrayidx13 = getelementptr inbounds [1024 x double], [1024 x double]* @w, i64 0, i64 %indvars.iv
store double %mul11, double* %arrayidx13, align 8
diff --git a/llvm/test/CodeGen/X86/bswap.ll b/llvm/test/CodeGen/X86/bswap.ll
index e6a456c39dd..48dc18e0ac1 100644
--- a/llvm/test/CodeGen/X86/bswap.ll
+++ b/llvm/test/CodeGen/X86/bswap.ll
@@ -91,7 +91,7 @@ define i64 @not_bswap() {
; CHECK64-LABEL: not_bswap:
; CHECK64-NOT: bswapq
; CHECK64: ret
- %init = load i16* @var16
+ %init = load i16, i16* @var16
%big = zext i16 %init to i64
%hishifted = lshr i64 %big, 8
@@ -115,7 +115,7 @@ define i64 @not_useful_bswap() {
; CHECK64-NOT: bswapq
; CHECK64: ret
- %init = load i8* @var8
+ %init = load i8, i8* @var8
%big = zext i8 %init to i64
%hishifted = lshr i64 %big, 8
@@ -140,7 +140,7 @@ define i64 @finally_useful_bswap() {
; CHECK64: shrq $48, [[REG]]
; CHECK64: ret
- %init = load i16* @var16
+ %init = load i16, i16* @var16
%big = zext i16 %init to i64
%hishifted = lshr i64 %big, 8
diff --git a/llvm/test/CodeGen/X86/byval-align.ll b/llvm/test/CodeGen/X86/byval-align.ll
index 275bf2d68ea..fca53498387 100644
--- a/llvm/test/CodeGen/X86/byval-align.ll
+++ b/llvm/test/CodeGen/X86/byval-align.ll
@@ -14,14 +14,14 @@ entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%obj1 = bitcast %struct.S* %obj to i8* ; <i8*> [#uses=1]
store i8* %obj1, i8** %ptr, align 8
- %0 = load i8** %ptr, align 8 ; <i8*> [#uses=1]
+ %0 = load i8*, i8** %ptr, align 8 ; <i8*> [#uses=1]
%1 = ptrtoint i8* %0 to i64 ; <i64> [#uses=1]
store i64 %1, i64* %p, align 8
- %2 = load i8** %ptr, align 8 ; <i8*> [#uses=1]
+ %2 = load i8*, i8** %ptr, align 8 ; <i8*> [#uses=1]
%3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([10 x i8]* @.str, i64 0, i64 0), i8* %2) nounwind ; <i32> [#uses=0]
- %4 = load i64* %p, align 8 ; <i64> [#uses=1]
+ %4 = load i64, i64* %p, align 8 ; <i64> [#uses=1]
%5 = and i64 %4, 140737488355264 ; <i64> [#uses=1]
- %6 = load i64* %p, align 8 ; <i64> [#uses=1]
+ %6 = load i64, i64* %p, align 8 ; <i64> [#uses=1]
%7 = icmp ne i64 %5, %6 ; <i1> [#uses=1]
br i1 %7, label %bb, label %bb2
diff --git a/llvm/test/CodeGen/X86/byval.ll b/llvm/test/CodeGen/X86/byval.ll
index 7d85dbd9f97..f29511a54c4 100644
--- a/llvm/test/CodeGen/X86/byval.ll
+++ b/llvm/test/CodeGen/X86/byval.ll
@@ -12,6 +12,6 @@
define i64 @f(%struct.s* byval %a) {
entry:
%tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 0
- %tmp3 = load i64* %tmp2, align 8
+ %tmp3 = load i64, i64* %tmp2, align 8
ret i64 %tmp3
}
diff --git a/llvm/test/CodeGen/X86/call-push.ll b/llvm/test/CodeGen/X86/call-push.ll
index aab63c5a3e6..6bcb5d66561 100644
--- a/llvm/test/CodeGen/X86/call-push.ll
+++ b/llvm/test/CodeGen/X86/call-push.ll
@@ -12,7 +12,7 @@ define i32 @decode_byte(%struct.decode_t* %decode) nounwind {
entry:
%tmp2 = getelementptr %struct.decode_t, %struct.decode_t* %decode, i32 0, i32 4 ; <i16*> [#uses=1]
%tmp23 = bitcast i16* %tmp2 to i32* ; <i32*> [#uses=1]
- %tmp4 = load i32* %tmp23 ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* %tmp23 ; <i32> [#uses=1]
%tmp514 = lshr i32 %tmp4, 24 ; <i32> [#uses=1]
%tmp56 = trunc i32 %tmp514 to i8 ; <i8> [#uses=1]
%tmp7 = icmp eq i8 %tmp56, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/cas.ll b/llvm/test/CodeGen/X86/cas.ll
index ec519c646f6..7807bb97f5b 100644
--- a/llvm/test/CodeGen/X86/cas.ll
+++ b/llvm/test/CodeGen/X86/cas.ll
@@ -24,14 +24,14 @@ entry:
store float* %p, float** %p.addr, align 8
store float* %expected, float** %expected.addr, align 8
store float %desired, float* %desired.addr, align 4
- %0 = load float** %expected.addr, align 8
- %1 = load float** %expected.addr, align 8
- %2 = load float* %1, align 4
- %3 = load float* %desired.addr, align 4
- %4 = load float** %p.addr, align 8
+ %0 = load float*, float** %expected.addr, align 8
+ %1 = load float*, float** %expected.addr, align 8
+ %2 = load float, float* %1, align 4
+ %3 = load float, float* %desired.addr, align 4
+ %4 = load float*, float** %p.addr, align 8
%5 = call i8 asm sideeffect "lock; cmpxchg $3, $4; mov $2, $1; sete $0", "={ax},=*rm,{ax},q,*m,~{memory},~{cc},~{dirflag},~{fpsr},~{flags}"(float* %0, float %2, float %3, float* %4) nounwind
store i8 %5, i8* %success, align 1
- %6 = load i8* %success, align 1
+ %6 = load i8, i8* %success, align 1
%tobool = trunc i8 %6 to i1
ret i1 %tobool
}
@@ -52,16 +52,16 @@ entry:
store i8* %expected, i8** %expected.addr, align 8
%frombool = zext i1 %desired to i8
store i8 %frombool, i8* %desired.addr, align 1
- %0 = load i8** %expected.addr, align 8
- %1 = load i8** %expected.addr, align 8
- %2 = load i8* %1, align 1
+ %0 = load i8*, i8** %expected.addr, align 8
+ %1 = load i8*, i8** %expected.addr, align 8
+ %2 = load i8, i8* %1, align 1
%tobool = trunc i8 %2 to i1
- %3 = load i8* %desired.addr, align 1
+ %3 = load i8, i8* %desired.addr, align 1
%tobool1 = trunc i8 %3 to i1
- %4 = load i8** %p.addr, align 8
+ %4 = load i8*, i8** %p.addr, align 8
%5 = call i8 asm sideeffect "lock; cmpxchg $3, $4; mov $2, $1; sete $0", "={ax},=*rm,{ax},q,*m,~{memory},~{cc},~{dirflag},~{fpsr},~{flags}"(i8* %0, i1 %tobool, i1 %tobool1, i8* %4) nounwind
store i8 %5, i8* %success, align 1
- %6 = load i8* %success, align 1
+ %6 = load i8, i8* %success, align 1
%tobool2 = trunc i8 %6 to i1
ret i1 %tobool2
}
diff --git a/llvm/test/CodeGen/X86/chain_order.ll b/llvm/test/CodeGen/X86/chain_order.ll
index 3264c0edd72..442786a47ca 100644
--- a/llvm/test/CodeGen/X86/chain_order.ll
+++ b/llvm/test/CodeGen/X86/chain_order.ll
@@ -12,13 +12,13 @@
; A test from pifft (after SLP-vectorization) that fails when we drop the chain on newly merged loads.
define void @cftx020(double* nocapture %a) {
entry:
- %0 = load double* %a, align 8
+ %0 = load double, double* %a, align 8
%arrayidx1 = getelementptr inbounds double, double* %a, i64 2
- %1 = load double* %arrayidx1, align 8
+ %1 = load double, double* %arrayidx1, align 8
%arrayidx2 = getelementptr inbounds double, double* %a, i64 1
- %2 = load double* %arrayidx2, align 8
+ %2 = load double, double* %arrayidx2, align 8
%arrayidx3 = getelementptr inbounds double, double* %a, i64 3
- %3 = load double* %arrayidx3, align 8
+ %3 = load double, double* %arrayidx3, align 8
%4 = insertelement <2 x double> undef, double %0, i32 0
%5 = insertelement <2 x double> %4, double %3, i32 1
%6 = insertelement <2 x double> undef, double %1, i32 0
diff --git a/llvm/test/CodeGen/X86/change-compare-stride-1.ll b/llvm/test/CodeGen/X86/change-compare-stride-1.ll
index 5f0684e7097..c5480ba2b49 100644
--- a/llvm/test/CodeGen/X86/change-compare-stride-1.ll
+++ b/llvm/test/CodeGen/X86/change-compare-stride-1.ll
@@ -41,38 +41,38 @@ bb2: ; preds = %bb2, %bb2.outer
%4 = add i32 %3, -481 ; <i32> [#uses=1]
%5 = zext i32 %4 to i64 ; <i64> [#uses=1]
%6 = getelementptr i8, i8* %in, i64 %5 ; <i8*> [#uses=1]
- %7 = load i8* %6, align 1 ; <i8> [#uses=1]
+ %7 = load i8, i8* %6, align 1 ; <i8> [#uses=1]
%8 = add i32 %3, -480 ; <i32> [#uses=1]
%9 = zext i32 %8 to i64 ; <i64> [#uses=1]
%10 = getelementptr i8, i8* %in, i64 %9 ; <i8*> [#uses=1]
- %11 = load i8* %10, align 1 ; <i8> [#uses=1]
+ %11 = load i8, i8* %10, align 1 ; <i8> [#uses=1]
%12 = add i32 %3, -479 ; <i32> [#uses=1]
%13 = zext i32 %12 to i64 ; <i64> [#uses=1]
%14 = getelementptr i8, i8* %in, i64 %13 ; <i8*> [#uses=1]
- %15 = load i8* %14, align 1 ; <i8> [#uses=1]
+ %15 = load i8, i8* %14, align 1 ; <i8> [#uses=1]
%16 = add i32 %3, -1 ; <i32> [#uses=1]
%17 = zext i32 %16 to i64 ; <i64> [#uses=1]
%18 = getelementptr i8, i8* %in, i64 %17 ; <i8*> [#uses=1]
- %19 = load i8* %18, align 1 ; <i8> [#uses=1]
+ %19 = load i8, i8* %18, align 1 ; <i8> [#uses=1]
%20 = zext i32 %3 to i64 ; <i64> [#uses=1]
%21 = getelementptr i8, i8* %in, i64 %20 ; <i8*> [#uses=1]
- %22 = load i8* %21, align 1 ; <i8> [#uses=1]
+ %22 = load i8, i8* %21, align 1 ; <i8> [#uses=1]
%23 = add i32 %3, 1 ; <i32> [#uses=1]
%24 = zext i32 %23 to i64 ; <i64> [#uses=1]
%25 = getelementptr i8, i8* %in, i64 %24 ; <i8*> [#uses=1]
- %26 = load i8* %25, align 1 ; <i8> [#uses=1]
+ %26 = load i8, i8* %25, align 1 ; <i8> [#uses=1]
%27 = add i32 %3, 481 ; <i32> [#uses=1]
%28 = zext i32 %27 to i64 ; <i64> [#uses=1]
%29 = getelementptr i8, i8* %in, i64 %28 ; <i8*> [#uses=1]
- %30 = load i8* %29, align 1 ; <i8> [#uses=1]
+ %30 = load i8, i8* %29, align 1 ; <i8> [#uses=1]
%31 = add i32 %3, 480 ; <i32> [#uses=1]
%32 = zext i32 %31 to i64 ; <i64> [#uses=1]
%33 = getelementptr i8, i8* %in, i64 %32 ; <i8*> [#uses=1]
- %34 = load i8* %33, align 1 ; <i8> [#uses=1]
+ %34 = load i8, i8* %33, align 1 ; <i8> [#uses=1]
%35 = add i32 %3, 479 ; <i32> [#uses=1]
%36 = zext i32 %35 to i64 ; <i64> [#uses=1]
%37 = getelementptr i8, i8* %in, i64 %36 ; <i8*> [#uses=1]
- %38 = load i8* %37, align 1 ; <i8> [#uses=1]
+ %38 = load i8, i8* %37, align 1 ; <i8> [#uses=1]
%39 = add i8 %11, %7 ; <i8> [#uses=1]
%40 = add i8 %39, %15 ; <i8> [#uses=1]
%41 = add i8 %40, %19 ; <i8> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/clobber-fi0.ll b/llvm/test/CodeGen/X86/clobber-fi0.ll
index 4876c351a41..02f1a1616db 100644
--- a/llvm/test/CodeGen/X86/clobber-fi0.ll
+++ b/llvm/test/CodeGen/X86/clobber-fi0.ll
@@ -20,17 +20,17 @@ bb:
br label %bb4
bb4: ; preds = %bb4, %bb
- %tmp6 = load i32* %tmp2, align 4 ; [#uses=1 type=i32]
+ %tmp6 = load i32, i32* %tmp2, align 4 ; [#uses=1 type=i32]
%tmp7 = add i32 %tmp6, -1 ; [#uses=2 type=i32]
store i32 %tmp7, i32* %tmp2, align 4
%tmp8 = icmp eq i32 %tmp7, 0 ; [#uses=1 type=i1]
- %tmp9 = load i32* %tmp ; [#uses=1 type=i32]
+ %tmp9 = load i32, i32* %tmp ; [#uses=1 type=i32]
%tmp10 = add i32 %tmp9, -1 ; [#uses=1 type=i32]
store i32 %tmp10, i32* %tmp3
br i1 %tmp8, label %bb11, label %bb4
bb11: ; preds = %bb4
- %tmp12 = load i32* %tmp, align 4 ; [#uses=1 type=i32]
+ %tmp12 = load i32, i32* %tmp, align 4 ; [#uses=1 type=i32]
ret i32 %tmp12
}
diff --git a/llvm/test/CodeGen/X86/cmov-into-branch.ll b/llvm/test/CodeGen/X86/cmov-into-branch.ll
index cad8dd307b3..909440800a5 100644
--- a/llvm/test/CodeGen/X86/cmov-into-branch.ll
+++ b/llvm/test/CodeGen/X86/cmov-into-branch.ll
@@ -2,7 +2,7 @@
; cmp with single-use load, should not form cmov.
define i32 @test1(double %a, double* nocapture %b, i32 %x, i32 %y) {
- %load = load double* %b, align 8
+ %load = load double, double* %b, align 8
%cmp = fcmp olt double %load, %a
%cond = select i1 %cmp, i32 %x, i32 %y
ret i32 %cond
@@ -25,7 +25,7 @@ define i32 @test2(double %a, double %b, i32 %x, i32 %y) {
; Multiple uses of %a, should not form cmov.
define i32 @test3(i32 %a, i32* nocapture %b, i32 %x) {
- %load = load i32* %b, align 4
+ %load = load i32, i32* %b, align 4
%cmp = icmp ult i32 %load, %a
%cond = select i1 %cmp, i32 %a, i32 %x
ret i32 %cond
@@ -38,7 +38,7 @@ define i32 @test3(i32 %a, i32* nocapture %b, i32 %x) {
; Multiple uses of the load.
define i32 @test4(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
- %load = load i32* %b, align 4
+ %load = load i32, i32* %b, align 4
%cmp = icmp ult i32 %load, %a
%cond = select i1 %cmp, i32 %x, i32 %y
%add = add i32 %cond, %load
@@ -50,7 +50,7 @@ define i32 @test4(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
; Multiple uses of the cmp.
define i32 @test5(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
- %load = load i32* %b, align 4
+ %load = load i32, i32* %b, align 4
%cmp = icmp ult i32 %load, %a
%cmp1 = icmp ugt i32 %load, %a
%cond = select i1 %cmp1, i32 %a, i32 %y
diff --git a/llvm/test/CodeGen/X86/cmov.ll b/llvm/test/CodeGen/X86/cmov.ll
index 355c6b4165b..aad04fa74d4 100644
--- a/llvm/test/CodeGen/X86/cmov.ll
+++ b/llvm/test/CodeGen/X86/cmov.ll
@@ -12,7 +12,7 @@ entry:
%0 = lshr i32 %x, %n ; <i32> [#uses=1]
%1 = and i32 %0, 1 ; <i32> [#uses=1]
%toBool = icmp eq i32 %1, 0 ; <i1> [#uses=1]
- %v = load i32* %vp
+ %v = load i32, i32* %vp
%.0 = select i1 %toBool, i32 %v, i32 12 ; <i32> [#uses=1]
ret i32 %.0
}
@@ -27,7 +27,7 @@ entry:
%0 = lshr i32 %x, %n ; <i32> [#uses=1]
%1 = and i32 %0, 1 ; <i32> [#uses=1]
%toBool = icmp eq i32 %1, 0 ; <i1> [#uses=1]
- %v = load i32* %vp
+ %v = load i32, i32* %vp
%.0 = select i1 %toBool, i32 12, i32 %v ; <i32> [#uses=1]
ret i32 %.0
}
@@ -71,7 +71,7 @@ define void @test3(i64 %a, i64 %b, i1 %p) nounwind {
define i32 @test4() nounwind {
entry:
- %0 = load i8* @g_3, align 1 ; <i8> [#uses=2]
+ %0 = load i8, i8* @g_3, align 1 ; <i8> [#uses=2]
%1 = sext i8 %0 to i32 ; <i32> [#uses=1]
%.lobit.i = lshr i8 %0, 7 ; <i8> [#uses=1]
%tmp.i = zext i8 %.lobit.i to i32 ; <i32> [#uses=1]
@@ -79,12 +79,12 @@ entry:
%iftmp.17.0.i.i = ashr i32 %1, %tmp.not.i ; <i32> [#uses=1]
%retval56.i.i = trunc i32 %iftmp.17.0.i.i to i8 ; <i8> [#uses=1]
%2 = icmp eq i8 %retval56.i.i, 0 ; <i1> [#uses=2]
- %g_96.promoted.i = load i8* @g_96 ; <i8> [#uses=3]
+ %g_96.promoted.i = load i8, i8* @g_96 ; <i8> [#uses=3]
%3 = icmp eq i8 %g_96.promoted.i, 0 ; <i1> [#uses=2]
br i1 %3, label %func_4.exit.i, label %bb.i.i.i
bb.i.i.i: ; preds = %entry
- %4 = load volatile i8* @g_100, align 1 ; <i8> [#uses=0]
+ %4 = load volatile i8, i8* @g_100, align 1 ; <i8> [#uses=0]
br label %func_4.exit.i
; CHECK-LABEL: test4:
@@ -101,7 +101,7 @@ func_4.exit.i: ; preds = %bb.i.i.i, %entry
br i1 %brmerge.i, label %func_1.exit, label %bb.i.i
bb.i.i: ; preds = %func_4.exit.i
- %5 = load volatile i8* @g_100, align 1 ; <i8> [#uses=0]
+ %5 = load volatile i8, i8* @g_100, align 1 ; <i8> [#uses=0]
br label %func_1.exit
func_1.exit: ; preds = %bb.i.i, %func_4.exit.i
@@ -125,7 +125,7 @@ entry:
; CHECK: orl $-2, %eax
; CHECK: ret
- %0 = load i32* %P, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %P, align 4 ; <i32> [#uses=1]
%1 = icmp sgt i32 %0, 41 ; <i1> [#uses=1]
%iftmp.0.0 = select i1 %1, i32 -1, i32 -2 ; <i32> [#uses=1]
ret i32 %iftmp.0.0
@@ -138,7 +138,7 @@ entry:
; CHECK: movzbl %al, %eax
; CHECK: leal 4(%rax,%rax,8), %eax
; CHECK: ret
- %0 = load i32* %P, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %P, align 4 ; <i32> [#uses=1]
%1 = icmp sgt i32 %0, 41 ; <i1> [#uses=1]
%iftmp.0.0 = select i1 %1, i32 4, i32 13 ; <i32> [#uses=1]
ret i32 %iftmp.0.0
diff --git a/llvm/test/CodeGen/X86/cmp.ll b/llvm/test/CodeGen/X86/cmp.ll
index a32c4b7befb..818138a613e 100644
--- a/llvm/test/CodeGen/X86/cmp.ll
+++ b/llvm/test/CodeGen/X86/cmp.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -show-mc-encoding | FileCheck %s
define i32 @test1(i32 %X, i32* %y) nounwind {
- %tmp = load i32* %y ; <i32> [#uses=1]
+ %tmp = load i32, i32* %y ; <i32> [#uses=1]
%tmp.upgrd.1 = icmp eq i32 %tmp, 0 ; <i1> [#uses=1]
br i1 %tmp.upgrd.1, label %ReturnBlock, label %cond_true
@@ -15,7 +15,7 @@ ReturnBlock: ; preds = %0
}
define i32 @test2(i32 %X, i32* %y) nounwind {
- %tmp = load i32* %y ; <i32> [#uses=1]
+ %tmp = load i32, i32* %y ; <i32> [#uses=1]
%tmp1 = shl i32 %tmp, 3 ; <i32> [#uses=1]
%tmp1.upgrd.2 = icmp eq i32 %tmp1, 0 ; <i1> [#uses=1]
br i1 %tmp1.upgrd.2, label %ReturnBlock, label %cond_true
@@ -30,7 +30,7 @@ ReturnBlock: ; preds = %0
}
define i8 @test2b(i8 %X, i8* %y) nounwind {
- %tmp = load i8* %y ; <i8> [#uses=1]
+ %tmp = load i8, i8* %y ; <i8> [#uses=1]
%tmp1 = shl i8 %tmp, 3 ; <i8> [#uses=1]
%tmp1.upgrd.2 = icmp eq i8 %tmp1, 0 ; <i1> [#uses=1]
br i1 %tmp1.upgrd.2, label %ReturnBlock, label %cond_true
@@ -90,7 +90,7 @@ declare i32 @foo(...)
define i32 @test6() nounwind align 2 {
%A = alloca {i64, i64}, align 8
%B = getelementptr inbounds {i64, i64}, {i64, i64}* %A, i64 0, i32 1
- %C = load i64* %B
+ %C = load i64, i64* %B
%D = icmp eq i64 %C, 0
br i1 %D, label %T, label %F
T:
diff --git a/llvm/test/CodeGen/X86/cmpxchg-clobber-flags.ll b/llvm/test/CodeGen/X86/cmpxchg-clobber-flags.ll
index b7995dbdf25..61123930887 100644
--- a/llvm/test/CodeGen/X86/cmpxchg-clobber-flags.ll
+++ b/llvm/test/CodeGen/X86/cmpxchg-clobber-flags.ll
@@ -42,7 +42,7 @@ loop_start:
br label %while.condthread-pre-split.i
while.condthread-pre-split.i:
- %.pr.i = load i32* %p, align 4
+ %.pr.i = load i32, i32* %p, align 4
br label %while.cond.i
while.cond.i:
diff --git a/llvm/test/CodeGen/X86/cmpxchg-i1.ll b/llvm/test/CodeGen/X86/cmpxchg-i1.ll
index a21ab593b07..5f5869f78bb 100644
--- a/llvm/test/CodeGen/X86/cmpxchg-i1.ll
+++ b/llvm/test/CodeGen/X86/cmpxchg-i1.ll
@@ -68,7 +68,7 @@ define i32 @cmpxchg_use_eflags_and_val(i32* %addr, i32 %offset) {
; Result already in %eax
; CHECK: retq
entry:
- %init = load atomic i32* %addr seq_cst, align 4
+ %init = load atomic i32, i32* %addr seq_cst, align 4
br label %loop
loop:
diff --git a/llvm/test/CodeGen/X86/cmpxchg-i128-i1.ll b/llvm/test/CodeGen/X86/cmpxchg-i128-i1.ll
index 4dd30013eca..278e6a4ed75 100644
--- a/llvm/test/CodeGen/X86/cmpxchg-i128-i1.ll
+++ b/llvm/test/CodeGen/X86/cmpxchg-i128-i1.ll
@@ -62,7 +62,7 @@ define i128 @cmpxchg_use_eflags_and_val(i128* %addr, i128 %offset) {
; CHECK-NOT: cmpq
; CHECK: jne
entry:
- %init = load atomic i128* %addr seq_cst, align 16
+ %init = load atomic i128, i128* %addr seq_cst, align 16
br label %loop
loop:
diff --git a/llvm/test/CodeGen/X86/coalesce-esp.ll b/llvm/test/CodeGen/X86/coalesce-esp.ll
index 7b89185fdb1..e0257e68f0f 100644
--- a/llvm/test/CodeGen/X86/coalesce-esp.ll
+++ b/llvm/test/CodeGen/X86/coalesce-esp.ll
@@ -20,7 +20,7 @@ bb4: ; preds = %bb7.backedge, %entry
%scevgep24.sum = sub i32 undef, %indvar ; <i32> [#uses=2]
%scevgep25 = getelementptr i32, i32* %0, i32 %scevgep24.sum ; <i32*> [#uses=1]
%scevgep27 = getelementptr i32, i32* undef, i32 %scevgep24.sum ; <i32*> [#uses=1]
- %1 = load i32* %scevgep27, align 4 ; <i32> [#uses=0]
+ %1 = load i32, i32* %scevgep27, align 4 ; <i32> [#uses=0]
br i1 undef, label %bb7.backedge, label %bb5
bb5: ; preds = %bb4
diff --git a/llvm/test/CodeGen/X86/coalesce-implicitdef.ll b/llvm/test/CodeGen/X86/coalesce-implicitdef.ll
index 9be045271d8..a0766f99496 100644
--- a/llvm/test/CodeGen/X86/coalesce-implicitdef.ll
+++ b/llvm/test/CodeGen/X86/coalesce-implicitdef.ll
@@ -71,7 +71,7 @@ for.inc27.backedge: ; preds = %while.end, %if.then
br i1 %tobool17, label %for.inc27.if.end30.loopexit56_crit_edge, label %while.condthread-pre-split
if.then22: ; preds = %while.end
- %1 = load i16* %p2.1, align 2
+ %1 = load i16, i16* %p2.1, align 2
%tobool23 = icmp eq i16 %1, 0
br i1 %tobool23, label %for.inc27.backedge, label %label.loopexit
@@ -89,7 +89,7 @@ for.inc27.if.end30.loopexit56_crit_edge: ; preds = %for.inc27.backedge
if.end30: ; preds = %for.inc27.if.end30.loopexit56_crit_edge, %label.loopexit, %label.preheader, %for.inc
%i.0.load46 = phi i32 [ 0, %for.inc ], [ %i.0.load4669, %label.preheader ], [ %i.0.load4669, %label.loopexit ], [ %i.0.load4669, %for.inc27.if.end30.loopexit56_crit_edge ]
%pi.4 = phi i32* [ %i, %for.inc ], [ %pi.3.ph, %label.preheader ], [ %pi.3.ph, %label.loopexit ], [ %pi.3.ph, %for.inc27.if.end30.loopexit56_crit_edge ]
- %2 = load i32* %pi.4, align 4
+ %2 = load i32, i32* %pi.4, align 4
%tobool31 = icmp eq i32 %2, 0
br i1 %tobool31, label %for.inc34, label %label.preheader
@@ -100,7 +100,7 @@ for.inc34: ; preds = %if.end30
for.end36: ; preds = %for.cond
store i32 1, i32* %i, align 4
- %3 = load i32* @c, align 4
+ %3 = load i32, i32* @c, align 4
%tobool37 = icmp eq i32 %3, 0
br i1 %tobool37, label %label.preheader, label %land.rhs
@@ -111,15 +111,15 @@ land.rhs: ; preds = %for.end36
label.preheader: ; preds = %for.end36, %if.end30, %land.rhs
%i.0.load4669 = phi i32 [ 1, %land.rhs ], [ %i.0.load46, %if.end30 ], [ 1, %for.end36 ]
%pi.3.ph = phi i32* [ %pi.0, %land.rhs ], [ %pi.4, %if.end30 ], [ %pi.0, %for.end36 ]
- %4 = load i32* @b, align 4
+ %4 = load i32, i32* @b, align 4
%inc285863 = add nsw i32 %4, 1
store i32 %inc285863, i32* @b, align 4
%tobool175964 = icmp eq i32 %inc285863, 0
br i1 %tobool175964, label %if.end30, label %while.condthread-pre-split.lr.ph.lr.ph
while.condthread-pre-split.lr.ph.lr.ph: ; preds = %label.preheader
- %.pr50 = load i32* @d, align 4
+ %.pr50 = load i32, i32* @d, align 4
%tobool19 = icmp eq i32 %.pr50, 0
- %a.promoted.pre = load i32* @a, align 4
+ %a.promoted.pre = load i32, i32* @a, align 4
br label %while.condthread-pre-split
}
diff --git a/llvm/test/CodeGen/X86/coalescer-commute1.ll b/llvm/test/CodeGen/X86/coalescer-commute1.ll
index 227cd72d154..dccf3b906fd 100644
--- a/llvm/test/CodeGen/X86/coalescer-commute1.ll
+++ b/llvm/test/CodeGen/X86/coalescer-commute1.ll
@@ -6,14 +6,14 @@
define void @runcont(i32* %source) nounwind {
entry:
- %tmp10 = load i32* @NNTOT, align 4 ; <i32> [#uses=1]
+ %tmp10 = load i32, i32* @NNTOT, align 4 ; <i32> [#uses=1]
br label %bb
bb: ; preds = %bb, %entry
%neuron.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%thesum.0 = phi float [ 0.000000e+00, %entry ], [ %tmp6, %bb ] ; <float> [#uses=1]
%tmp2 = getelementptr i32, i32* %source, i32 %neuron.0 ; <i32*> [#uses=1]
- %tmp3 = load i32* %tmp2, align 4 ; <i32> [#uses=1]
+ %tmp3 = load i32, i32* %tmp2, align 4 ; <i32> [#uses=1]
%tmp34 = sitofp i32 %tmp3 to float ; <float> [#uses=1]
%tmp6 = fadd float %tmp34, %thesum.0 ; <float> [#uses=2]
%indvar.next = add i32 %neuron.0, 1 ; <i32> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/coalescer-commute4.ll b/llvm/test/CodeGen/X86/coalescer-commute4.ll
index 06e542b91aa..d4af1a62dca 100644
--- a/llvm/test/CodeGen/X86/coalescer-commute4.ll
+++ b/llvm/test/CodeGen/X86/coalescer-commute4.ll
@@ -14,10 +14,10 @@ bb: ; preds = %bb, %bb.preheader
%i.0.reg2mem.0 = phi i32 [ 0, %bb.preheader ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
%res.0.reg2mem.0 = phi float [ 0.000000e+00, %bb.preheader ], [ %tmp14, %bb ] ; <float> [#uses=1]
%tmp3 = getelementptr i32, i32* %x, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
- %tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* %tmp3, align 4 ; <i32> [#uses=1]
%tmp45 = sitofp i32 %tmp4 to float ; <float> [#uses=1]
%tmp8 = getelementptr float, float* %y, i32 %i.0.reg2mem.0 ; <float*> [#uses=1]
- %tmp9 = load float* %tmp8, align 4 ; <float> [#uses=1]
+ %tmp9 = load float, float* %tmp8, align 4 ; <float> [#uses=1]
%tmp11 = fmul float %tmp9, %tmp45 ; <float> [#uses=1]
%tmp14 = fadd float %tmp11, %res.0.reg2mem.0 ; <float> [#uses=2]
%indvar.next = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/coalescer-cross.ll b/llvm/test/CodeGen/X86/coalescer-cross.ll
index 0211d2d795c..92aedbef5dd 100644
--- a/llvm/test/CodeGen/X86/coalescer-cross.ll
+++ b/llvm/test/CodeGen/X86/coalescer-cross.ll
@@ -31,12 +31,12 @@ entry:
%1 = uitofp i32 %0 to double ; <double> [#uses=1]
%2 = fdiv double %1, 1.000000e+06 ; <double> [#uses=1]
%3 = getelementptr %struct.lua_State, %struct.lua_State* %L, i32 0, i32 4 ; <%struct.TValue**> [#uses=3]
- %4 = load %struct.TValue** %3, align 4 ; <%struct.TValue*> [#uses=2]
+ %4 = load %struct.TValue*, %struct.TValue** %3, align 4 ; <%struct.TValue*> [#uses=2]
%5 = getelementptr %struct.TValue, %struct.TValue* %4, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
store double %2, double* %5, align 4
%6 = getelementptr %struct.TValue, %struct.TValue* %4, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 3, i32* %6, align 4
- %7 = load %struct.TValue** %3, align 4 ; <%struct.TValue*> [#uses=1]
+ %7 = load %struct.TValue*, %struct.TValue** %3, align 4 ; <%struct.TValue*> [#uses=1]
%8 = getelementptr %struct.TValue, %struct.TValue* %7, i32 1 ; <%struct.TValue*> [#uses=1]
store %struct.TValue* %8, %struct.TValue** %3, align 4
ret i32 1
diff --git a/llvm/test/CodeGen/X86/coalescer-dce2.ll b/llvm/test/CodeGen/X86/coalescer-dce2.ll
index bbbf09b267b..116a7048436 100644
--- a/llvm/test/CodeGen/X86/coalescer-dce2.ll
+++ b/llvm/test/CodeGen/X86/coalescer-dce2.ll
@@ -14,19 +14,19 @@ target triple = "x86_64-apple-macosx10.7.0"
define void @fn1() nounwind uwtable ssp {
entry:
- %0 = load i32* @d, align 4
+ %0 = load i32, i32* @d, align 4
%tobool72 = icmp eq i32 %0, 0
br i1 %tobool72, label %for.end32, label %for.cond1.preheader.lr.ph
for.cond1.preheader.lr.ph: ; preds = %entry
- %1 = load i32* @c, align 4
+ %1 = load i32, i32* @c, align 4
%tobool2 = icmp eq i32 %1, 0
- %2 = load i32* @b, align 4
+ %2 = load i32, i32* @b, align 4
%cmp = icmp sgt i32 %2, 0
%conv = zext i1 %cmp to i32
- %3 = load i32* @g, align 4
+ %3 = load i32, i32* @g, align 4
%tobool4 = icmp eq i32 %3, 0
- %4 = load i16* @a, align 2
+ %4 = load i16, i16* @a, align 2
%tobool9 = icmp eq i16 %4, 0
br label %for.cond1.preheader
@@ -41,7 +41,7 @@ for.cond1.preheader.split.us: ; preds = %for.cond1.preheader
br i1 %tobool9, label %cond.end.us.us, label %cond.end.us
cond.false18.us.us: ; preds = %if.end.us.us
- %5 = load i32* @f, align 4
+ %5 = load i32, i32* @f, align 4
%sext76 = shl i32 %5, 16
%phitmp75 = ashr exact i32 %sext76, 16
br label %cond.end.us.us
@@ -74,7 +74,7 @@ land.lhs.true12.us: ; preds = %if.end6.us
br i1 %cmp14.us, label %cond.end21.us, label %cond.false18.us
if.end6.us: ; preds = %if.end.us
- %6 = load i32* @f, align 4
+ %6 = load i32, i32* @f, align 4
%conv7.us = trunc i32 %6 to i16
%tobool11.us = icmp eq i16 %conv7.us, 0
br i1 %tobool11.us, label %cond.false18.us, label %land.lhs.true12.us
@@ -95,7 +95,7 @@ for.cond1.preheader.split.for.cond1.preheader.split.split_crit_edge: ; preds = %
br i1 %tobool4, label %if.end6.us65, label %for.cond25.loopexit.us-lcssa.us-lcssa
cond.false18.us40: ; preds = %if.end.us50
- %7 = load i32* @f, align 4
+ %7 = load i32, i32* @f, align 4
%sext = shl i32 %7, 16
%phitmp = ashr exact i32 %sext, 16
br label %if.end.us50
diff --git a/llvm/test/CodeGen/X86/coalescer-identity.ll b/llvm/test/CodeGen/X86/coalescer-identity.ll
index 1aac09594c4..8d581160aa8 100644
--- a/llvm/test/CodeGen/X86/coalescer-identity.ll
+++ b/llvm/test/CodeGen/X86/coalescer-identity.ll
@@ -12,10 +12,10 @@ target triple = "x86_64-apple-macosx10.8.0"
define void @func() nounwind uwtable ssp {
for.body.lr.ph:
- %0 = load i32* @g2, align 4
+ %0 = load i32, i32* @g2, align 4
%tobool6 = icmp eq i32 %0, 0
- %s.promoted = load i16* @s, align 2
- %.pre = load i32* @g1, align 4
+ %s.promoted = load i16, i16* @s, align 2
+ %.pre = load i32, i32* @g1, align 4
br i1 %tobool6, label %for.body.us, label %for.body
for.body.us: ; preds = %for.body.lr.ph, %for.inc.us
diff --git a/llvm/test/CodeGen/X86/code_placement.ll b/llvm/test/CodeGen/X86/code_placement.ll
index ca5b75f7e0d..7d235848005 100644
--- a/llvm/test/CodeGen/X86/code_placement.ll
+++ b/llvm/test/CodeGen/X86/code_placement.ll
@@ -6,9 +6,9 @@
define void @t(i8* nocapture %in, i8* nocapture %out, i32* nocapture %rk, i32 %r) nounwind ssp {
entry:
- %0 = load i32* %rk, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %rk, align 4 ; <i32> [#uses=1]
%1 = getelementptr i32, i32* %rk, i64 1 ; <i32*> [#uses=1]
- %2 = load i32* %1, align 4 ; <i32> [#uses=1]
+ %2 = load i32, i32* %1, align 4 ; <i32> [#uses=1]
%tmp15 = add i32 %r, -1 ; <i32> [#uses=1]
%tmp.16 = zext i32 %tmp15 to i64 ; <i64> [#uses=2]
br label %bb
@@ -24,36 +24,36 @@ bb: ; preds = %bb1, %entry
%3 = lshr i32 %s0.0, 24 ; <i32> [#uses=1]
%4 = zext i32 %3 to i64 ; <i64> [#uses=1]
%5 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %4 ; <i32*> [#uses=1]
- %6 = load i32* %5, align 4 ; <i32> [#uses=1]
+ %6 = load i32, i32* %5, align 4 ; <i32> [#uses=1]
%7 = lshr i32 %s1.0, 16 ; <i32> [#uses=1]
%8 = and i32 %7, 255 ; <i32> [#uses=1]
%9 = zext i32 %8 to i64 ; <i64> [#uses=1]
%10 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %9 ; <i32*> [#uses=1]
- %11 = load i32* %10, align 4 ; <i32> [#uses=1]
+ %11 = load i32, i32* %10, align 4 ; <i32> [#uses=1]
%ctg2.sum2728 = or i64 %tmp18, 8 ; <i64> [#uses=1]
%12 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2728 ; <i8*> [#uses=1]
%13 = bitcast i8* %12 to i32* ; <i32*> [#uses=1]
- %14 = load i32* %13, align 4 ; <i32> [#uses=1]
+ %14 = load i32, i32* %13, align 4 ; <i32> [#uses=1]
%15 = xor i32 %11, %6 ; <i32> [#uses=1]
%16 = xor i32 %15, %14 ; <i32> [#uses=3]
%17 = lshr i32 %s1.0, 24 ; <i32> [#uses=1]
%18 = zext i32 %17 to i64 ; <i64> [#uses=1]
%19 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %18 ; <i32*> [#uses=1]
- %20 = load i32* %19, align 4 ; <i32> [#uses=1]
+ %20 = load i32, i32* %19, align 4 ; <i32> [#uses=1]
%21 = and i32 %s0.0, 255 ; <i32> [#uses=1]
%22 = zext i32 %21 to i64 ; <i64> [#uses=1]
%23 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %22 ; <i32*> [#uses=1]
- %24 = load i32* %23, align 4 ; <i32> [#uses=1]
+ %24 = load i32, i32* %23, align 4 ; <i32> [#uses=1]
%ctg2.sum2930 = or i64 %tmp18, 12 ; <i64> [#uses=1]
%25 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2930 ; <i8*> [#uses=1]
%26 = bitcast i8* %25 to i32* ; <i32*> [#uses=1]
- %27 = load i32* %26, align 4 ; <i32> [#uses=1]
+ %27 = load i32, i32* %26, align 4 ; <i32> [#uses=1]
%28 = xor i32 %24, %20 ; <i32> [#uses=1]
%29 = xor i32 %28, %27 ; <i32> [#uses=4]
%30 = lshr i32 %16, 24 ; <i32> [#uses=1]
%31 = zext i32 %30 to i64 ; <i64> [#uses=1]
%32 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %31 ; <i32*> [#uses=1]
- %33 = load i32* %32, align 4 ; <i32> [#uses=2]
+ %33 = load i32, i32* %32, align 4 ; <i32> [#uses=2]
%exitcond = icmp eq i64 %indvar, %tmp.16 ; <i1> [#uses=1]
br i1 %exitcond, label %bb2, label %bb1
@@ -65,22 +65,22 @@ bb1: ; preds = %bb
%37 = and i32 %36, 255 ; <i32> [#uses=1]
%38 = zext i32 %37 to i64 ; <i64> [#uses=1]
%39 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %38 ; <i32*> [#uses=1]
- %40 = load i32* %39, align 4 ; <i32> [#uses=1]
- %41 = load i32* %35, align 4 ; <i32> [#uses=1]
+ %40 = load i32, i32* %39, align 4 ; <i32> [#uses=1]
+ %41 = load i32, i32* %35, align 4 ; <i32> [#uses=1]
%42 = xor i32 %40, %33 ; <i32> [#uses=1]
%43 = xor i32 %42, %41 ; <i32> [#uses=1]
%44 = lshr i32 %29, 24 ; <i32> [#uses=1]
%45 = zext i32 %44 to i64 ; <i64> [#uses=1]
%46 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %45 ; <i32*> [#uses=1]
- %47 = load i32* %46, align 4 ; <i32> [#uses=1]
+ %47 = load i32, i32* %46, align 4 ; <i32> [#uses=1]
%48 = and i32 %16, 255 ; <i32> [#uses=1]
%49 = zext i32 %48 to i64 ; <i64> [#uses=1]
%50 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %49 ; <i32*> [#uses=1]
- %51 = load i32* %50, align 4 ; <i32> [#uses=1]
+ %51 = load i32, i32* %50, align 4 ; <i32> [#uses=1]
%ctg2.sum32 = add i64 %tmp18, 20 ; <i64> [#uses=1]
%52 = getelementptr i8, i8* %rk26, i64 %ctg2.sum32 ; <i8*> [#uses=1]
%53 = bitcast i8* %52 to i32* ; <i32*> [#uses=1]
- %54 = load i32* %53, align 4 ; <i32> [#uses=1]
+ %54 = load i32, i32* %53, align 4 ; <i32> [#uses=1]
%55 = xor i32 %51, %47 ; <i32> [#uses=1]
%56 = xor i32 %55, %54 ; <i32> [#uses=1]
%indvar.next = add i64 %indvar, 1 ; <i64> [#uses=1]
@@ -96,26 +96,26 @@ bb2: ; preds = %bb
%60 = and i32 %59, 255 ; <i32> [#uses=1]
%61 = zext i32 %60 to i64 ; <i64> [#uses=1]
%62 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %61 ; <i32*> [#uses=1]
- %63 = load i32* %62, align 4 ; <i32> [#uses=1]
+ %63 = load i32, i32* %62, align 4 ; <i32> [#uses=1]
%64 = and i32 %63, 16711680 ; <i32> [#uses=1]
%65 = or i32 %64, %58 ; <i32> [#uses=1]
- %66 = load i32* %57, align 4 ; <i32> [#uses=1]
+ %66 = load i32, i32* %57, align 4 ; <i32> [#uses=1]
%67 = xor i32 %65, %66 ; <i32> [#uses=2]
%68 = lshr i32 %29, 8 ; <i32> [#uses=1]
%69 = zext i32 %68 to i64 ; <i64> [#uses=1]
%70 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %69 ; <i32*> [#uses=1]
- %71 = load i32* %70, align 4 ; <i32> [#uses=1]
+ %71 = load i32, i32* %70, align 4 ; <i32> [#uses=1]
%72 = and i32 %71, -16777216 ; <i32> [#uses=1]
%73 = and i32 %16, 255 ; <i32> [#uses=1]
%74 = zext i32 %73 to i64 ; <i64> [#uses=1]
%75 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %74 ; <i32*> [#uses=1]
- %76 = load i32* %75, align 4 ; <i32> [#uses=1]
+ %76 = load i32, i32* %75, align 4 ; <i32> [#uses=1]
%77 = and i32 %76, 16711680 ; <i32> [#uses=1]
%78 = or i32 %77, %72 ; <i32> [#uses=1]
%ctg2.sum25 = add i64 %tmp10, 20 ; <i64> [#uses=1]
%79 = getelementptr i8, i8* %rk26, i64 %ctg2.sum25 ; <i8*> [#uses=1]
%80 = bitcast i8* %79 to i32* ; <i32*> [#uses=1]
- %81 = load i32* %80, align 4 ; <i32> [#uses=1]
+ %81 = load i32, i32* %80, align 4 ; <i32> [#uses=1]
%82 = xor i32 %78, %81 ; <i32> [#uses=2]
%83 = lshr i32 %67, 24 ; <i32> [#uses=1]
%84 = trunc i32 %83 to i8 ; <i8> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll b/llvm/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll
index 3ad994962ea..ac5cbb912aa 100644
--- a/llvm/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll
+++ b/llvm/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll
@@ -20,7 +20,7 @@ define i8 @twoArgsPromotion(i32 %arg1, i32 %arg2) {
%add = add nsw i32 %arg1, %arg2
%sextadd = sext i32 %add to i64
%base = inttoptr i64 %sextadd to i8*
- %res = load i8* %base
+ %res = load i8, i8* %base
ret i8 %res
}
@@ -36,7 +36,7 @@ define i8 @twoArgsNoPromotion(i32 %arg1, i32 %arg2, i8* %base) {
%add = add nsw i32 %arg1, %arg2
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -49,7 +49,7 @@ define i8 @noPromotion(i32 %arg1, i32 %arg2, i8* %base) {
%add = add i32 %arg1, %arg2
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -63,7 +63,7 @@ define i8 @oneArgPromotion(i32 %arg1, i8* %base) {
%add = add nsw i32 %arg1, 1
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -78,7 +78,7 @@ define i8 @oneArgPromotionZExt(i8 %arg1, i8* %base) {
%add = add nsw i32 %zext, 1
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -100,7 +100,7 @@ define i8 @oneArgPromotionCstZExt(i8* %base) {
%add = add nsw i32 %cst, 1
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -117,7 +117,7 @@ define i8 @oneArgPromotionBlockTrunc1(i32 %arg1, i8* %base) {
%add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -136,7 +136,7 @@ define i8 @oneArgPromotionBlockTrunc2(i16 %arg1, i8* %base) {
%add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -153,7 +153,7 @@ define i8 @oneArgPromotionPassTruncKeepSExt(i1 %arg1, i8* %base) {
%add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -165,14 +165,14 @@ define i8 @oneArgPromotionPassTruncKeepSExt(i1 %arg1, i8* %base) {
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
; CHECK: [[TRUNC:%[a-zA-Z_0-9-]+]] = trunc i64 [[PROMOTED]] to i8
; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
-; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8* [[GEP]]
+; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8, i8* [[GEP]]
; CHECK: add i8 [[LOAD]], [[TRUNC]]
; CHECK: ret
define i8 @oneArgPromotionTruncInsert(i8 %arg1, i8* %base) {
%add = add nsw i8 %arg1, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
%finalres = add i8 %res, %add
ret i8 %finalres
}
@@ -189,7 +189,7 @@ define i8 @oneArgPromotionLargerType(i128 %arg1, i8* %base) {
%add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
%finalres = add i8 %res, %add
ret i8 %finalres
}
@@ -203,7 +203,7 @@ define i8 @oneArgPromotionLargerType(i128 %arg1, i8* %base) {
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
; CHECK: [[TRUNC:%[a-zA-Z_0-9-]+]] = trunc i64 [[PROMOTED]] to i8
; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
-; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8* [[GEP]]
+; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8, i8* [[GEP]]
; CHECK: [[ADDRES:%[a-zA-Z_0-9-]+]] = add i8 [[LOAD]], [[TRUNC]]
; CHECK: add i8 [[ADDRES]], [[TRUNC]]
; CHECK: ret
@@ -211,7 +211,7 @@ define i8 @oneArgPromotionTruncInsertSeveralUse(i8 %arg1, i8* %base) {
%add = add nsw i8 %arg1, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
%almostfinalres = add i8 %res, %add
%finalres = add i8 %almostfinalres, %add
ret i8 %finalres
@@ -223,7 +223,7 @@ define i8 @oneArgPromotionTruncInsertSeveralUse(i8 %arg1, i8* %base) {
; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 %arg1 to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
-; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8* [[GEP]]
+; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8, i8* [[GEP]]
; CHECK: [[ADDRES:%[a-zA-Z_0-9-]+]] = zext i8 [[LOAD]] to i64
; CHECK: add i64 [[ADDRES]], [[PROMOTED]]
; CHECK: ret
@@ -231,7 +231,7 @@ define i64 @oneArgPromotionSExtSeveralUse(i8 %arg1, i8* %base) {
%add = add nsw i8 %arg1, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
%almostfinalres = zext i8 %res to i64
%finalres = add i64 %almostfinalres, %sextadd
ret i64 %finalres
@@ -264,7 +264,7 @@ define i8 @twoArgsPromotionNest(i32 %arg1, i32 %arg2, i8* %base) {
%promotableadd2 = add nsw i32 %promotableadd1, %promotableadd1
%sextadd = sext i32 %promotableadd2 to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -287,7 +287,7 @@ define i8 @twoArgsNoPromotionRemove(i1 %arg1, i8 %arg2, i8* %base) {
%add = add nsw i8 %trunc, %arg2
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -313,11 +313,11 @@ define i8 @twoArgsNoPromotionRemove(i1 %arg1, i8 %arg2, i8* %base) {
; BB then
; CHECK: [[BASE1:%[a-zA-Z_0-9-]+]] = add i64 [[SEXTADD]], 48
; CHECK: [[ADDR1:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[BASE1]] to i32*
-; CHECK: load i32* [[ADDR1]]
+; CHECK: load i32, i32* [[ADDR1]]
; BB else
; CHECK: [[BASE2:%[a-zA-Z_0-9-]+]] = add i64 [[SEXTADD]], 48
; CHECK: [[ADDR2:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[BASE2]] to i32*
-; CHECK: load i32* [[ADDR2]]
+; CHECK: load i32, i32* [[ADDR2]]
; CHECK: ret
; CHECK-GEP-LABEL: @checkProfitability
; CHECK-GEP-NOT: {{%[a-zA-Z_0-9-]+}} = sext i32 %arg1 to i64
@@ -330,13 +330,13 @@ define i8 @twoArgsNoPromotionRemove(i1 %arg1, i8 %arg2, i8* %base) {
; CHECK-GEP: [[BCC1:%[a-zA-Z_0-9-]+]] = bitcast i32* [[BASE1]] to i8*
; CHECK-GEP: [[FULL1:%[a-zA-Z_0-9-]+]] = getelementptr i8, i8* [[BCC1]], i64 48
; CHECK-GEP: [[ADDR1:%[a-zA-Z_0-9-]+]] = bitcast i8* [[FULL1]] to i32*
-; CHECK-GEP: load i32* [[ADDR1]]
+; CHECK-GEP: load i32, i32* [[ADDR1]]
; BB else
; CHECK-GEP: [[BASE2:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[SEXTADD]] to i32*
; CHECK-GEP: [[BCC2:%[a-zA-Z_0-9-]+]] = bitcast i32* [[BASE2]] to i8*
; CHECK-GEP: [[FULL2:%[a-zA-Z_0-9-]+]] = getelementptr i8, i8* [[BCC2]], i64 48
; CHECK-GEP: [[ADDR2:%[a-zA-Z_0-9-]+]] = bitcast i8* [[FULL2]] to i32*
-; CHECK-GEP: load i32* [[ADDR2]]
+; CHECK-GEP: load i32, i32* [[ADDR2]]
; CHECK-GEP: ret
define i32 @checkProfitability(i32 %arg1, i32 %arg2, i1 %test) {
%shl = shl nsw i32 %arg1, 1
@@ -346,16 +346,16 @@ define i32 @checkProfitability(i32 %arg1, i32 %arg2, i1 %test) {
%arrayidx1 = getelementptr i32, i32* %tmpptr, i64 12
br i1 %test, label %then, label %else
then:
- %res1 = load i32* %arrayidx1
+ %res1 = load i32, i32* %arrayidx1
br label %end
else:
- %res2 = load i32* %arrayidx1
+ %res2 = load i32, i32* %arrayidx1
br label %end
end:
%tmp = phi i32 [%res1, %then], [%res2, %else]
%res = add i32 %tmp, %add1
%addr = inttoptr i32 %res to i32*
- %final = load i32* %addr
+ %final = load i32, i32* %addr
ret i32 %final
}
@@ -377,7 +377,7 @@ end:
; CHECK-NEXT: [[ADD:%[a-zA-Z_0-9-]+]] = add i64 [[BASE]], [[PROMOTED_CONV]]
; CHECK-NEXT: [[ADDR:%[a-zA-Z_0-9-]+]] = add i64 [[ADD]], 7
; CHECK-NEXT: [[CAST:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[ADDR]] to i8*
-; CHECK-NEXT: load i8* [[CAST]], align 1
+; CHECK-NEXT: load i8, i8* [[CAST]], align 1
define signext i16 @fn3(%struct.dns_packet* nocapture readonly %P) {
entry:
%tmp = getelementptr inbounds %struct.dns_packet, %struct.dns_packet* %P, i64 0, i32 2
@@ -389,7 +389,7 @@ while.body.i.i: ; preds = %while.body.i.i, %en
%inc.i.i = add i16 %src.addr.0.i.i, 1
%idxprom.i.i = sext i16 %src.addr.0.i.i to i64
%arrayidx.i.i = getelementptr inbounds [0 x i8], [0 x i8]* %data.i.i, i64 0, i64 %idxprom.i.i
- %tmp1 = load i8* %arrayidx.i.i, align 1
+ %tmp1 = load i8, i8* %arrayidx.i.i, align 1
%conv2.i.i = zext i8 %tmp1 to i32
%and.i.i = and i32 %conv2.i.i, 15
store i32 %and.i.i, i32* @a, align 4
@@ -402,7 +402,7 @@ fn1.exit.i: ; preds = %while.body.i.i
%sub.i = add nsw i32 %conv.i, -1
%idxprom.i = sext i32 %sub.i to i64
%arrayidx.i = getelementptr inbounds [0 x i8], [0 x i8]* %data.i.i, i64 0, i64 %idxprom.i
- %tmp2 = load i8* %arrayidx.i, align 1
+ %tmp2 = load i8, i8* %arrayidx.i, align 1
%conv2.i = sext i8 %tmp2 to i16
store i16 %conv2.i, i16* @b, align 2
%sub4.i = sub nsw i32 0, %conv.i
@@ -412,7 +412,7 @@ fn1.exit.i: ; preds = %while.body.i.i
if.then.i: ; preds = %fn1.exit.i
%end.i = getelementptr inbounds %struct.dns_packet, %struct.dns_packet* %P, i64 0, i32 1
- %tmp3 = load i32* %end.i, align 4
+ %tmp3 = load i32, i32* %end.i, align 4
%sub7.i = add i32 %tmp3, 65535
%conv8.i = trunc i32 %sub7.i to i16
br label %fn2.exit
@@ -433,7 +433,7 @@ define i8 @noPromotionFlag(i32 %arg1, i32 %arg2) {
%add = add nsw i32 %arg1, %arg2
%zextadd = zext i32 %add to i64
%base = inttoptr i64 %zextadd to i8*
- %res = load i8* %base
+ %res = load i8, i8* %base
ret i8 %res
}
@@ -448,7 +448,7 @@ define i8 @twoArgsPromotionZExt(i32 %arg1, i32 %arg2) {
%add = add nuw i32 %arg1, %arg2
%zextadd = zext i32 %add to i64
%base = inttoptr i64 %zextadd to i8*
- %res = load i8* %base
+ %res = load i8, i8* %base
ret i8 %res
}
@@ -462,7 +462,7 @@ define i8 @oneArgPromotionNegativeCstZExt(i8 %arg1, i8* %base) {
%add = add nuw i8 %arg1, -1
%zextadd = zext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -477,7 +477,7 @@ define i8 @oneArgPromotionZExtZExt(i8 %arg1, i8* %base) {
%add = add nuw i32 %zext, 1
%zextadd = zext i32 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -496,7 +496,7 @@ define i8 @oneArgPromotionBlockTruncZExt(i1 %arg1, i8* %base) {
%add = add nuw i8 %trunc, 1
%zextadd = zext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -513,7 +513,7 @@ define i8 @oneArgPromotionPassTruncZExt(i1 %arg1, i8* %base) {
%add = add nuw i8 %trunc, 1
%zextadd = zext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
@@ -529,6 +529,6 @@ define i8 @oneArgPromotionBlockSExtZExt(i1 %arg1, i8* %base) {
%add = add nuw i8 %sextarg1, 1
%zextadd = zext i8 %add to i64
%arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
- %res = load i8* %arrayidx
+ %res = load i8, i8* %arrayidx
ret i8 %res
}
diff --git a/llvm/test/CodeGen/X86/codegen-prepare-cast.ll b/llvm/test/CodeGen/X86/codegen-prepare-cast.ll
index cd843036299..1b80a9bf897 100644
--- a/llvm/test/CodeGen/X86/codegen-prepare-cast.ll
+++ b/llvm/test/CodeGen/X86/codegen-prepare-cast.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-linux-gnu"
@.str = external constant [7 x i8] ; <[7 x i8]*> [#uses=1]
; CHECK-LABEL: @_Dmain
-; CHECK: load i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0)
+; CHECK: load i8, i8* getelementptr inbounds ([7 x i8]* @.str, i32 0, i32 0)
; CHECK ret
define fastcc i32 @_Dmain(%"char[][]" %unnamed) {
entry:
@@ -19,7 +19,7 @@ entry:
foreachbody: ; preds = %entry
%tmp4 = getelementptr i8, i8* %tmp, i32 undef ; <i8*> [#uses=1]
- %tmp5 = load i8* %tmp4 ; <i8> [#uses=0]
+ %tmp5 = load i8, i8* %tmp4 ; <i8> [#uses=0]
unreachable
foreachend: ; preds = %entry
diff --git a/llvm/test/CodeGen/X86/codegen-prepare-extload.ll b/llvm/test/CodeGen/X86/codegen-prepare-extload.ll
index 9b27c33a80f..6619ebec2f8 100644
--- a/llvm/test/CodeGen/X86/codegen-prepare-extload.ll
+++ b/llvm/test/CodeGen/X86/codegen-prepare-extload.ll
@@ -12,13 +12,13 @@
; CHECK: movsbl ({{%rdi|%rcx}}), %eax
;
; OPTALL-LABEL: @foo
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
; OPTALL-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
; OPTALL: store i32 [[ZEXT]], i32* %q
; OPTALL: ret
define void @foo(i8* %p, i32* %q) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%a = icmp slt i8 %t, 20
br i1 %a, label %true, label %false
true:
@@ -32,7 +32,7 @@ false:
; Check that we manage to form a zextload is an operation with only one
; argument to explicitly extend is in the the way.
; OPTALL-LABEL: @promoteOneArg
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
; OPT-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw i32 [[ZEXT]], 2
; Make sure the operation is not promoted when the promotion pass is disabled.
@@ -42,7 +42,7 @@ false:
; OPTALL: ret
define void @promoteOneArg(i8* %p, i32* %q) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%add = add nuw i8 %t, 2
%a = icmp slt i8 %t, 20
br i1 %a, label %true, label %false
@@ -58,7 +58,7 @@ false:
; argument to explicitly extend is in the the way.
; Version with sext.
; OPTALL-LABEL: @promoteOneArgSExt
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
; OPT-NEXT: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i8 [[LD]] to i32
; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i32 [[SEXT]], 2
; DISABLE: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i8 [[LD]], 2
@@ -67,7 +67,7 @@ false:
; OPTALL: ret
define void @promoteOneArgSExt(i8* %p, i32* %q) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%add = add nsw i8 %t, 2
%a = icmp slt i8 %t, 20
br i1 %a, label %true, label %false
@@ -90,7 +90,7 @@ false:
; transformation, the regular heuristic does not apply the optimization.
;
; OPTALL-LABEL: @promoteTwoArgZext
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
;
; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i8 %b to i32
@@ -106,7 +106,7 @@ false:
; OPTALL: ret
define void @promoteTwoArgZext(i8* %p, i32* %q, i8 %b) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%add = add nuw i8 %t, %b
%a = icmp slt i8 %t, 20
br i1 %a, label %true, label %false
@@ -122,7 +122,7 @@ false:
; arguments to explicitly extend is in the way.
; Version with sext.
; OPTALL-LABEL: @promoteTwoArgSExt
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
;
; STRESS-NEXT: [[SEXTLD:%[a-zA-Z_0-9-]+]] = sext i8 [[LD]] to i32
; STRESS-NEXT: [[SEXTB:%[a-zA-Z_0-9-]+]] = sext i8 %b to i32
@@ -137,7 +137,7 @@ false:
; OPTALL: ret
define void @promoteTwoArgSExt(i8* %p, i32* %q, i8 %b) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%add = add nsw i8 %t, %b
%a = icmp slt i8 %t, 20
br i1 %a, label %true, label %false
@@ -152,7 +152,7 @@ false:
; Check that we do not form a zextload if we need to introduce more than
; one additional extension.
; OPTALL-LABEL: @promoteThreeArgZext
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
;
; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i8 %b to i32
@@ -172,7 +172,7 @@ false:
; OPTALL: ret
define void @promoteThreeArgZext(i8* %p, i32* %q, i8 %b, i8 %c) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%tmp = add nuw i8 %t, %b
%add = add nuw i8 %tmp, %c
%a = icmp slt i8 %t, 20
@@ -188,7 +188,7 @@ false:
; Check that we manage to form a zextload after promoting and merging
; two extensions.
; OPTALL-LABEL: @promoteMergeExtArgZExt
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
;
; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = zext i16 %b to i32
@@ -206,7 +206,7 @@ false:
; OPTALL: ret
define void @promoteMergeExtArgZExt(i8* %p, i32* %q, i16 %b) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%ext = zext i8 %t to i16
%add = add nuw i16 %ext, %b
%a = icmp slt i8 %t, 20
@@ -223,7 +223,7 @@ false:
; two extensions.
; Version with sext.
; OPTALL-LABEL: @promoteMergeExtArgSExt
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %p
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
;
; STRESS-NEXT: [[ZEXTLD:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
; STRESS-NEXT: [[ZEXTB:%[a-zA-Z_0-9-]+]] = sext i16 %b to i32
@@ -240,7 +240,7 @@ false:
; OPTALL: ret
define void @promoteMergeExtArgSExt(i8* %p, i32* %q, i16 %b) {
entry:
- %t = load i8* %p
+ %t = load i8, i8* %p
%ext = zext i8 %t to i16
%add = add nsw i16 %ext, %b
%a = icmp slt i8 %t, 20
@@ -284,11 +284,11 @@ false:
; 3 identical zext of %ld. The extensions will be CSE'ed by SDag.
;
; OPTALL-LABEL: @severalPromotions
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8* %addr1
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %addr1
; OPT-NEXT: [[ZEXTLD1_1:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
; OPT-NEXT: [[ZEXTLD1_2:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
; OPT-NEXT: [[ZEXTLD1_3:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i64
-; OPT-NEXT: [[LD2:%[a-zA-Z_0-9-]+]] = load i32* %addr2
+; OPT-NEXT: [[LD2:%[a-zA-Z_0-9-]+]] = load i32, i32* %addr2
; OPT-NEXT: [[SEXTLD2:%[a-zA-Z_0-9-]+]] = sext i32 [[LD2]] to i64
; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXTLD2]], [[ZEXTLD1_1]]
; We do not combine this one: see 2.b.
@@ -308,9 +308,9 @@ false:
; OPTALL: call void @dummy(i64 [[RES]], i64 [[RESZA]], i64 [[RESB]])
; OPTALL: ret
define void @severalPromotions(i8* %addr1, i32* %addr2, i8 %a, i32 %b) {
- %ld = load i8* %addr1
+ %ld = load i8, i8* %addr1
%zextld = zext i8 %ld to i32
- %ld2 = load i32* %addr2
+ %ld2 = load i32, i32* %addr2
%add = add nsw i32 %ld2, %zextld
%sextadd = sext i32 %add to i64
%zexta = zext i8 %a to i32
@@ -345,7 +345,7 @@ entry:
; to an instruction.
; This used to cause a crash.
; OPTALL-LABEL: @promotionOfArgEndsUpInValue
-; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i16* %addr
+; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i16, i16* %addr
; OPT-NEXT: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i16 [[LD]] to i32
; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw nsw i32 [[SEXT]], zext (i1 icmp ne (i32* getelementptr inbounds ([2 x i32]* @c, i64 0, i64 1), i32* @a) to i32)
@@ -356,7 +356,7 @@ entry:
; OPTALL-NEXT: ret i32 [[RES]]
define i32 @promotionOfArgEndsUpInValue(i16* %addr) {
entry:
- %val = load i16* %addr
+ %val = load i16, i16* %addr
%add = add nuw nsw i16 %val, zext (i1 icmp ne (i32* getelementptr inbounds ([2 x i32]* @c, i64 0, i64 1), i32* @a) to i16)
%conv3 = sext i16 %add to i32
ret i32 %conv3
diff --git a/llvm/test/CodeGen/X86/codegen-prepare.ll b/llvm/test/CodeGen/X86/codegen-prepare.ll
index 0bb9f8abffb..e58bc22ef14 100644
--- a/llvm/test/CodeGen/X86/codegen-prepare.ll
+++ b/llvm/test/CodeGen/X86/codegen-prepare.ll
@@ -25,9 +25,9 @@ entry:
if.then: ; preds = %entry
%0 = getelementptr inbounds %class.D, %class.D* %address2, i64 0, i32 0, i64 0, i32 0
- %1 = load float* %0, align 4
+ %1 = load float, float* %0, align 4
%2 = getelementptr inbounds float, float* %0, i64 3
- %3 = load float* %2, align 4
+ %3 = load float, float* %2, align 4
%4 = getelementptr inbounds %class.D, %class.D* %address1, i64 0, i32 0, i64 0, i32 0
store float %1, float* %4, align 4
br label %if.end
diff --git a/llvm/test/CodeGen/X86/codemodel.ll b/llvm/test/CodeGen/X86/codemodel.ll
index 3aebc13f874..bbc96d7cb31 100644
--- a/llvm/test/CodeGen/X86/codemodel.ll
+++ b/llvm/test/CodeGen/X86/codemodel.ll
@@ -11,7 +11,7 @@ entry:
; CHECK-SMALL: movl data(%rip), %eax
; CHECK-KERNEL-LABEL: foo:
; CHECK-KERNEL: movl data, %eax
- %0 = load i32* getelementptr ([0 x i32]* @data, i64 0, i64 0), align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* getelementptr ([0 x i32]* @data, i64 0, i64 0), align 4 ; <i32> [#uses=1]
ret i32 %0
}
@@ -21,7 +21,7 @@ entry:
; CHECK-SMALL: movl data+40(%rip), %eax
; CHECK-KERNEL-LABEL: foo2:
; CHECK-KERNEL: movl data+40, %eax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 10), align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* getelementptr ([0 x i32]* @data, i32 0, i64 10), align 4 ; <i32> [#uses=1]
ret i32 %0
}
@@ -31,7 +31,7 @@ entry:
; CHECK-SMALL: movl data-40(%rip), %eax
; CHECK-KERNEL-LABEL: foo3:
; CHECK-KERNEL: movq $-40, %rax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 -10), align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* getelementptr ([0 x i32]* @data, i32 0, i64 -10), align 4 ; <i32> [#uses=1]
ret i32 %0
}
@@ -43,7 +43,7 @@ entry:
; CHECK-SMALL: movl data(%rax), %eax
; CHECK-KERNEL-LABEL: foo4:
; CHECK-KERNEL: movl data+16777216, %eax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 4194304), align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* getelementptr ([0 x i32]* @data, i32 0, i64 4194304), align 4 ; <i32> [#uses=1]
ret i32 %0
}
@@ -53,7 +53,7 @@ entry:
; CHECK-SMALL: movl data+16777212(%rip), %eax
; CHECK-KERNEL-LABEL: foo1:
; CHECK-KERNEL: movl data+16777212, %eax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 4194303), align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* getelementptr ([0 x i32]* @data, i32 0, i64 4194303), align 4 ; <i32> [#uses=1]
ret i32 %0
}
define i32 @foo5() nounwind readonly {
@@ -62,6 +62,6 @@ entry:
; CHECK-SMALL: movl data-16777216(%rip), %eax
; CHECK-KERNEL-LABEL: foo5:
; CHECK-KERNEL: movq $-16777216, %rax
- %0 = load i32* getelementptr ([0 x i32]* @data, i32 0, i64 -4194304), align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* getelementptr ([0 x i32]* @data, i32 0, i64 -4194304), align 4 ; <i32> [#uses=1]
ret i32 %0
}
diff --git a/llvm/test/CodeGen/X86/combiner-aa-0.ll b/llvm/test/CodeGen/X86/combiner-aa-0.ll
index ce87a998b01..403059d90ab 100644
--- a/llvm/test/CodeGen/X86/combiner-aa-0.ll
+++ b/llvm/test/CodeGen/X86/combiner-aa-0.ll
@@ -5,14 +5,14 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
@g_flipV_hashkey = external global %struct.Hash_Key, align 16 ; <%struct.Hash_Key*> [#uses=1]
define void @foo() nounwind {
- %t0 = load i32* undef, align 16 ; <i32> [#uses=1]
- %t1 = load i32* null, align 4 ; <i32> [#uses=1]
+ %t0 = load i32, i32* undef, align 16 ; <i32> [#uses=1]
+ %t1 = load i32, i32* null, align 4 ; <i32> [#uses=1]
%t2 = srem i32 %t0, 32 ; <i32> [#uses=1]
%t3 = shl i32 1, %t2 ; <i32> [#uses=1]
%t4 = xor i32 %t3, %t1 ; <i32> [#uses=1]
store i32 %t4, i32* null, align 4
%t5 = getelementptr %struct.Hash_Key, %struct.Hash_Key* @g_flipV_hashkey, i64 0, i32 0, i64 0 ; <i32*> [#uses=2]
- %t6 = load i32* %t5, align 4 ; <i32> [#uses=1]
+ %t6 = load i32, i32* %t5, align 4 ; <i32> [#uses=1]
%t7 = shl i32 1, undef ; <i32> [#uses=1]
%t8 = xor i32 %t7, %t6 ; <i32> [#uses=1]
store i32 %t8, i32* %t5, align 4
diff --git a/llvm/test/CodeGen/X86/combiner-aa-1.ll b/llvm/test/CodeGen/X86/combiner-aa-1.ll
index 04f3206027a..cc3e5ca1260 100644
--- a/llvm/test/CodeGen/X86/combiner-aa-1.ll
+++ b/llvm/test/CodeGen/X86/combiner-aa-1.ll
@@ -13,9 +13,9 @@ target triple = "i386-pc-linux-gnu"
define i32 @._ZN8lam_node18resolve_name_clashEP8arg_nodeP9alst_node._ZNK8lam_nodeeqERK8exp_node._ZN11arglst_nodeD0Ev(%struct.lam_node* %this.this, %struct.arg_node* %outer_arg, %struct.alst_node* %env.cmp, %struct.arglst_node* %this, i32 %functionID) {
comb_entry:
%.SV59 = alloca %struct.node* ; <%struct.node**> [#uses=1]
- %0 = load i32 (...)*** null, align 4 ; <i32 (...)**> [#uses=1]
+ %0 = load i32 (...)**, i32 (...)*** null, align 4 ; <i32 (...)**> [#uses=1]
%1 = getelementptr inbounds i32 (...)*, i32 (...)** %0, i32 3 ; <i32 (...)**> [#uses=1]
- %2 = load i32 (...)** %1, align 4 ; <i32 (...)*> [#uses=1]
+ %2 = load i32 (...)*, i32 (...)** %1, align 4 ; <i32 (...)*> [#uses=1]
store %struct.node* undef, %struct.node** %.SV59
%3 = bitcast i32 (...)* %2 to i32 (%struct.node*)* ; <i32 (%struct.node*)*> [#uses=1]
%4 = tail call i32 %3(%struct.node* undef) ; <i32> [#uses=0]
diff --git a/llvm/test/CodeGen/X86/commute-blend-avx2.ll b/llvm/test/CodeGen/X86/commute-blend-avx2.ll
index d06c6dad8db..0172f999978 100644
--- a/llvm/test/CodeGen/X86/commute-blend-avx2.ll
+++ b/llvm/test/CodeGen/X86/commute-blend-avx2.ll
@@ -1,7 +1,7 @@
; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=core-avx2 -mattr=avx2 < %s | FileCheck %s
define <8 x i16> @commute_fold_vpblendw_128(<8 x i16> %a, <8 x i16>* %b) #0 {
- %1 = load <8 x i16>* %b
+ %1 = load <8 x i16>, <8 x i16>* %b
%2 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %1, <8 x i16> %a, i8 17)
ret <8 x i16> %2
@@ -12,7 +12,7 @@ define <8 x i16> @commute_fold_vpblendw_128(<8 x i16> %a, <8 x i16>* %b) #0 {
declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind readnone
define <16 x i16> @commute_fold_vpblendw_256(<16 x i16> %a, <16 x i16>* %b) #0 {
- %1 = load <16 x i16>* %b
+ %1 = load <16 x i16>, <16 x i16>* %b
%2 = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %1, <16 x i16> %a, i8 17)
ret <16 x i16> %2
@@ -23,7 +23,7 @@ define <16 x i16> @commute_fold_vpblendw_256(<16 x i16> %a, <16 x i16>* %b) #0 {
declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i8) nounwind readnone
define <4 x i32> @commute_fold_vpblendd_128(<4 x i32> %a, <4 x i32>* %b) #0 {
- %1 = load <4 x i32>* %b
+ %1 = load <4 x i32>, <4 x i32>* %b
%2 = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %1, <4 x i32> %a, i8 1)
ret <4 x i32> %2
@@ -34,7 +34,7 @@ define <4 x i32> @commute_fold_vpblendd_128(<4 x i32> %a, <4 x i32>* %b) #0 {
declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i8) nounwind readnone
define <8 x i32> @commute_fold_vpblendd_256(<8 x i32> %a, <8 x i32>* %b) #0 {
- %1 = load <8 x i32>* %b
+ %1 = load <8 x i32>, <8 x i32>* %b
%2 = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %1, <8 x i32> %a, i8 129)
ret <8 x i32> %2
@@ -45,7 +45,7 @@ define <8 x i32> @commute_fold_vpblendd_256(<8 x i32> %a, <8 x i32>* %b) #0 {
declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i8) nounwind readnone
define <4 x float> @commute_fold_vblendps_128(<4 x float> %a, <4 x float>* %b) #0 {
- %1 = load <4 x float>* %b
+ %1 = load <4 x float>, <4 x float>* %b
%2 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %1, <4 x float> %a, i8 3)
ret <4 x float> %2
@@ -56,7 +56,7 @@ define <4 x float> @commute_fold_vblendps_128(<4 x float> %a, <4 x float>* %b) #
declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i8) nounwind readnone
define <8 x float> @commute_fold_vblendps_256(<8 x float> %a, <8 x float>* %b) #0 {
- %1 = load <8 x float>* %b
+ %1 = load <8 x float>, <8 x float>* %b
%2 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %1, <8 x float> %a, i8 7)
ret <8 x float> %2
@@ -67,7 +67,7 @@ define <8 x float> @commute_fold_vblendps_256(<8 x float> %a, <8 x float>* %b) #
declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone
define <2 x double> @commute_fold_vblendpd_128(<2 x double> %a, <2 x double>* %b) #0 {
- %1 = load <2 x double>* %b
+ %1 = load <2 x double>, <2 x double>* %b
%2 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %1, <2 x double> %a, i8 1)
ret <2 x double> %2
@@ -78,7 +78,7 @@ define <2 x double> @commute_fold_vblendpd_128(<2 x double> %a, <2 x double>* %b
declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i8) nounwind readnone
define <4 x double> @commute_fold_vblendpd_256(<4 x double> %a, <4 x double>* %b) #0 {
- %1 = load <4 x double>* %b
+ %1 = load <4 x double>, <4 x double>* %b
%2 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %1, <4 x double> %a, i8 7)
ret <4 x double> %2
diff --git a/llvm/test/CodeGen/X86/commute-blend-sse41.ll b/llvm/test/CodeGen/X86/commute-blend-sse41.ll
index 59fef8c3a29..500f6a319dd 100644
--- a/llvm/test/CodeGen/X86/commute-blend-sse41.ll
+++ b/llvm/test/CodeGen/X86/commute-blend-sse41.ll
@@ -1,7 +1,7 @@
; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=corei7 < %s | FileCheck %s
define <8 x i16> @commute_fold_pblendw(<8 x i16> %a, <8 x i16>* %b) #0 {
- %1 = load <8 x i16>* %b
+ %1 = load <8 x i16>, <8 x i16>* %b
%2 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %1, <8 x i16> %a, i8 17)
ret <8 x i16> %2
@@ -12,7 +12,7 @@ define <8 x i16> @commute_fold_pblendw(<8 x i16> %a, <8 x i16>* %b) #0 {
declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i8) nounwind readnone
define <4 x float> @commute_fold_blendps(<4 x float> %a, <4 x float>* %b) #0 {
- %1 = load <4 x float>* %b
+ %1 = load <4 x float>, <4 x float>* %b
%2 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %1, <4 x float> %a, i8 3)
ret <4 x float> %2
@@ -23,7 +23,7 @@ define <4 x float> @commute_fold_blendps(<4 x float> %a, <4 x float>* %b) #0 {
declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i8) nounwind readnone
define <2 x double> @commute_fold_blendpd(<2 x double> %a, <2 x double>* %b) #0 {
- %1 = load <2 x double>* %b
+ %1 = load <2 x double>, <2 x double>* %b
%2 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %1, <2 x double> %a, i8 1)
ret <2 x double> %2
diff --git a/llvm/test/CodeGen/X86/commute-clmul.ll b/llvm/test/CodeGen/X86/commute-clmul.ll
index fe3e5569bd2..d13911abc86 100644
--- a/llvm/test/CodeGen/X86/commute-clmul.ll
+++ b/llvm/test/CodeGen/X86/commute-clmul.ll
@@ -12,7 +12,7 @@ define <2 x i64> @commute_lq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
;AVX: vpclmulqdq $0, (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x i64>* %a0
+ %1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 0)
ret <2 x i64> %2
}
@@ -26,7 +26,7 @@ define <2 x i64> @commute_lq_hq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
;AVX: vpclmulqdq $1, (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x i64>* %a0
+ %1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 16)
ret <2 x i64> %2
}
@@ -40,7 +40,7 @@ define <2 x i64> @commute_hq_lq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
;AVX: vpclmulqdq $16, (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x i64>* %a0
+ %1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 1)
ret <2 x i64> %2
}
@@ -54,7 +54,7 @@ define <2 x i64> @commute_hq_hq(<2 x i64>* %a0, <2 x i64> %a1) #0 {
;AVX: vpclmulqdq $17, (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x i64>* %a0
+ %1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.pclmulqdq(<2 x i64> %1, <2 x i64> %a1, i8 17)
ret <2 x i64> %2
}
diff --git a/llvm/test/CodeGen/X86/commute-fcmp.ll b/llvm/test/CodeGen/X86/commute-fcmp.ll
index 0d7f2af12e1..6f43ebe1fcd 100644
--- a/llvm/test/CodeGen/X86/commute-fcmp.ll
+++ b/llvm/test/CodeGen/X86/commute-fcmp.ll
@@ -15,7 +15,7 @@ define <4 x i32> @commute_cmpps_eq(<4 x float>* %a0, <4 x float> %a1) #0 {
;AVX: vcmpeqps (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <4 x float>* %a0
+ %1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp oeq <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
ret <4 x i32> %3
@@ -30,7 +30,7 @@ define <4 x i32> @commute_cmpps_ne(<4 x float>* %a0, <4 x float> %a1) #0 {
;AVX: vcmpneqps (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <4 x float>* %a0
+ %1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp une <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
ret <4 x i32> %3
@@ -45,7 +45,7 @@ define <4 x i32> @commute_cmpps_ord(<4 x float>* %a0, <4 x float> %a1) #0 {
;AVX: vcmpordps (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <4 x float>* %a0
+ %1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp ord <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
ret <4 x i32> %3
@@ -60,7 +60,7 @@ define <4 x i32> @commute_cmpps_uno(<4 x float>* %a0, <4 x float> %a1) #0 {
;AVX: vcmpunordps (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <4 x float>* %a0
+ %1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp uno <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
ret <4 x i32> %3
@@ -78,7 +78,7 @@ define <4 x i32> @commute_cmpps_lt(<4 x float>* %a0, <4 x float> %a1) #0 {
;AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
;AVX-NEXT: retq
- %1 = load <4 x float>* %a0
+ %1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp olt <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
ret <4 x i32> %3
@@ -96,7 +96,7 @@ define <4 x i32> @commute_cmpps_le(<4 x float>* %a0, <4 x float> %a1) #0 {
;AVX-NEXT: vcmpleps %xmm0, %xmm1, %xmm0
;AVX-NEXT: retq
- %1 = load <4 x float>* %a0
+ %1 = load <4 x float>, <4 x float>* %a0
%2 = fcmp ole <4 x float> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i32>
ret <4 x i32> %3
@@ -107,7 +107,7 @@ define <8 x i32> @commute_cmpps_eq_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
;AVX: vcmpeqps (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <8 x float>* %a0
+ %1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp oeq <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
ret <8 x i32> %3
@@ -118,7 +118,7 @@ define <8 x i32> @commute_cmpps_ne_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
;AVX: vcmpneqps (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <8 x float>* %a0
+ %1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp une <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
ret <8 x i32> %3
@@ -129,7 +129,7 @@ define <8 x i32> @commute_cmpps_ord_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
;AVX: vcmpordps (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <8 x float>* %a0
+ %1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp ord <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
ret <8 x i32> %3
@@ -140,7 +140,7 @@ define <8 x i32> @commute_cmpps_uno_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
;AVX: vcmpunordps (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <8 x float>* %a0
+ %1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp uno <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
ret <8 x i32> %3
@@ -152,7 +152,7 @@ define <8 x i32> @commute_cmpps_lt_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
;AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
;AVX-NEXT: retq
- %1 = load <8 x float>* %a0
+ %1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp olt <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
ret <8 x i32> %3
@@ -164,7 +164,7 @@ define <8 x i32> @commute_cmpps_le_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
;AVX-NEXT: vcmpleps %ymm0, %ymm1, %ymm0
;AVX-NEXT: retq
- %1 = load <8 x float>* %a0
+ %1 = load <8 x float>, <8 x float>* %a0
%2 = fcmp ole <8 x float> %1, %a1
%3 = sext <8 x i1> %2 to <8 x i32>
ret <8 x i32> %3
@@ -184,7 +184,7 @@ define <2 x i64> @commute_cmppd_eq(<2 x double>* %a0, <2 x double> %a1) #0 {
;AVX: vcmpeqpd (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x double>* %a0
+ %1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp oeq <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
ret <2 x i64> %3
@@ -199,7 +199,7 @@ define <2 x i64> @commute_cmppd_ne(<2 x double>* %a0, <2 x double> %a1) #0 {
;AVX: vcmpneqpd (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x double>* %a0
+ %1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp une <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
ret <2 x i64> %3
@@ -214,7 +214,7 @@ define <2 x i64> @commute_cmppd_ord(<2 x double>* %a0, <2 x double> %a1) #0 {
;AVX: vcmpordpd (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x double>* %a0
+ %1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp ord <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
ret <2 x i64> %3
@@ -229,7 +229,7 @@ define <2 x i64> @commute_cmppd_uno(<2 x double>* %a0, <2 x double> %a1) #0 {
;AVX: vcmpunordpd (%rdi), %xmm0, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x double>* %a0
+ %1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp uno <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
ret <2 x i64> %3
@@ -247,7 +247,7 @@ define <2 x i64> @commute_cmppd_lt(<2 x double>* %a0, <2 x double> %a1) #0 {
;AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x double>* %a0
+ %1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp olt <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
ret <2 x i64> %3
@@ -265,7 +265,7 @@ define <2 x i64> @commute_cmppd_le(<2 x double>* %a0, <2 x double> %a1) #0 {
;AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
;AVX-NEXT: retq
- %1 = load <2 x double>* %a0
+ %1 = load <2 x double>, <2 x double>* %a0
%2 = fcmp ole <2 x double> %1, %a1
%3 = sext <2 x i1> %2 to <2 x i64>
ret <2 x i64> %3
@@ -276,7 +276,7 @@ define <4 x i64> @commute_cmppd_eq_ymmm(<4 x double>* %a0, <4 x double> %a1) #0
;AVX: vcmpeqpd (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <4 x double>* %a0
+ %1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp oeq <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
ret <4 x i64> %3
@@ -287,7 +287,7 @@ define <4 x i64> @commute_cmppd_ne_ymmm(<4 x double>* %a0, <4 x double> %a1) #0
;AVX: vcmpneqpd (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <4 x double>* %a0
+ %1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp une <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
ret <4 x i64> %3
@@ -298,7 +298,7 @@ define <4 x i64> @commute_cmppd_ord_ymmm(<4 x double>* %a0, <4 x double> %a1) #0
;AVX: vcmpordpd (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <4 x double>* %a0
+ %1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp ord <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
ret <4 x i64> %3
@@ -309,7 +309,7 @@ define <4 x i64> @commute_cmppd_uno_ymmm(<4 x double>* %a0, <4 x double> %a1) #0
;AVX: vcmpunordpd (%rdi), %ymm0, %ymm0
;AVX-NEXT: retq
- %1 = load <4 x double>* %a0
+ %1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp uno <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
ret <4 x i64> %3
@@ -321,7 +321,7 @@ define <4 x i64> @commute_cmppd_lt_ymmm(<4 x double>* %a0, <4 x double> %a1) #0
;AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
;AVX-NEXT: retq
- %1 = load <4 x double>* %a0
+ %1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp olt <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
ret <4 x i64> %3
@@ -333,7 +333,7 @@ define <4 x i64> @commute_cmppd_le_ymmm(<4 x double>* %a0, <4 x double> %a1) #0
;AVX-NEXT: vcmplepd %ymm0, %ymm1, %ymm0
;AVX-NEXT: retq
- %1 = load <4 x double>* %a0
+ %1 = load <4 x double>, <4 x double>* %a0
%2 = fcmp ole <4 x double> %1, %a1
%3 = sext <4 x i1> %2 to <4 x i64>
ret <4 x i64> %3
diff --git a/llvm/test/CodeGen/X86/commute-intrinsic.ll b/llvm/test/CodeGen/X86/commute-intrinsic.ll
index 7d5ca476689..ff9049cf96d 100644
--- a/llvm/test/CodeGen/X86/commute-intrinsic.ll
+++ b/llvm/test/CodeGen/X86/commute-intrinsic.ll
@@ -6,7 +6,7 @@
define <2 x i64> @madd(<2 x i64> %b) nounwind {
entry:
- %tmp2 = load <2 x i64>* @a, align 16 ; <<2 x i64>> [#uses=1]
+ %tmp2 = load <2 x i64>, <2 x i64>* @a, align 16 ; <<2 x i64>> [#uses=1]
%tmp6 = bitcast <2 x i64> %b to <8 x i16> ; <<8 x i16>> [#uses=1]
%tmp9 = bitcast <2 x i64> %tmp2 to <8 x i16> ; <<8 x i16>> [#uses=1]
%tmp11 = tail call <4 x i32> @llvm.x86.sse2.pmadd.wd( <8 x i16> %tmp9, <8 x i16> %tmp6 ) nounwind readnone ; <<4 x i32>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/commute-xop.ll b/llvm/test/CodeGen/X86/commute-xop.ll
index a3e14feddbc..e551d9bfc78 100644
--- a/llvm/test/CodeGen/X86/commute-xop.ll
+++ b/llvm/test/CodeGen/X86/commute-xop.ll
@@ -3,7 +3,7 @@
define <16 x i8> @commute_fold_vpcomb(<16 x i8>* %a0, <16 x i8> %a1) {
;CHECK-LABEL: commute_fold_vpcomb
;CHECK: vpcomgtb (%rdi), %xmm0, %xmm0
- %1 = load <16 x i8>* %a0
+ %1 = load <16 x i8>, <16 x i8>* %a0
%2 = call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %1, <16 x i8> %a1, i8 0) ; vpcomltb
ret <16 x i8> %2
}
@@ -12,7 +12,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8>, <16 x i8>, i8) nounwind readno
define <4 x i32> @commute_fold_vpcomd(<4 x i32>* %a0, <4 x i32> %a1) {
;CHECK-LABEL: commute_fold_vpcomd
;CHECK: vpcomged (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32> %1, <4 x i32> %a1, i8 1) ; vpcomled
ret <4 x i32> %2
}
@@ -21,7 +21,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomd(<4 x i32>, <4 x i32>, i8) nounwind readno
define <2 x i64> @commute_fold_vpcomq(<2 x i64>* %a0, <2 x i64> %a1) {
;CHECK-LABEL: commute_fold_vpcomq
;CHECK: vpcomltq (%rdi), %xmm0, %xmm0
- %1 = load <2 x i64>* %a0
+ %1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64> %1, <2 x i64> %a1, i8 2) ; vpcomgtq
ret <2 x i64> %2
}
@@ -30,7 +30,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomq(<2 x i64>, <2 x i64>, i8) nounwind readno
define <16 x i8> @commute_fold_vpcomub(<16 x i8>* %a0, <16 x i8> %a1) {
;CHECK-LABEL: commute_fold_vpcomub
;CHECK: vpcomleub (%rdi), %xmm0, %xmm0
- %1 = load <16 x i8>* %a0
+ %1 = load <16 x i8>, <16 x i8>* %a0
%2 = call <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8> %1, <16 x i8> %a1, i8 3) ; vpcomgeub
ret <16 x i8> %2
}
@@ -39,7 +39,7 @@ declare <16 x i8> @llvm.x86.xop.vpcomub(<16 x i8>, <16 x i8>, i8) nounwind readn
define <4 x i32> @commute_fold_vpcomud(<4 x i32>* %a0, <4 x i32> %a1) {
;CHECK-LABEL: commute_fold_vpcomud
;CHECK: vpcomequd (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32> %1, <4 x i32> %a1, i8 4) ; vpcomequd
ret <4 x i32> %2
}
@@ -48,7 +48,7 @@ declare <4 x i32> @llvm.x86.xop.vpcomud(<4 x i32>, <4 x i32>, i8) nounwind readn
define <2 x i64> @commute_fold_vpcomuq(<2 x i64>* %a0, <2 x i64> %a1) {
;CHECK-LABEL: commute_fold_vpcomuq
;CHECK: vpcomnequq (%rdi), %xmm0, %xmm0
- %1 = load <2 x i64>* %a0
+ %1 = load <2 x i64>, <2 x i64>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64> %1, <2 x i64> %a1, i8 5) ; vpcomnequq
ret <2 x i64> %2
}
@@ -57,7 +57,7 @@ declare <2 x i64> @llvm.x86.xop.vpcomuq(<2 x i64>, <2 x i64>, i8) nounwind readn
define <8 x i16> @commute_fold_vpcomuw(<8 x i16>* %a0, <8 x i16> %a1) {
;CHECK-LABEL: commute_fold_vpcomuw
;CHECK: vpcomfalseuw (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16> %1, <8 x i16> %a1, i8 6) ; vpcomfalseuw
ret <8 x i16> %2
}
@@ -66,7 +66,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomuw(<8 x i16>, <8 x i16>, i8) nounwind readn
define <8 x i16> @commute_fold_vpcomw(<8 x i16>* %a0, <8 x i16> %a1) {
;CHECK-LABEL: commute_fold_vpcomw
;CHECK: vpcomtruew (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16> %1, <8 x i16> %a1, i8 7) ; vpcomtruew
ret <8 x i16> %2
}
@@ -75,7 +75,7 @@ declare <8 x i16> @llvm.x86.xop.vpcomw(<8 x i16>, <8 x i16>, i8) nounwind readno
define <4 x i32> @commute_fold_vpmacsdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32> %a2) {
;CHECK-LABEL: commute_fold_vpmacsdd
;CHECK: vpmacsdd %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32> %1, <4 x i32> %a1, <4 x i32> %a2)
ret <4 x i32> %2
}
@@ -84,7 +84,7 @@ declare <4 x i32> @llvm.x86.xop.vpmacsdd(<4 x i32>, <4 x i32>, <4 x i32>) nounwi
define <2 x i64> @commute_fold_vpmacsdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
;CHECK-LABEL: commute_fold_vpmacsdqh
;CHECK: vpmacsdqh %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
ret <2 x i64> %2
}
@@ -93,7 +93,7 @@ declare <2 x i64> @llvm.x86.xop.vpmacsdqh(<4 x i32>, <4 x i32>, <2 x i64>) nounw
define <2 x i64> @commute_fold_vpmacsdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
;CHECK-LABEL: commute_fold_vpmacsdql
;CHECK: vpmacsdql %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
ret <2 x i64> %2
}
@@ -102,7 +102,7 @@ declare <2 x i64> @llvm.x86.xop.vpmacsdql(<4 x i32>, <4 x i32>, <2 x i64>) nounw
define <4 x i32> @commute_fold_vpmacssdd(<4 x i32>* %a0, <4 x i32> %a1, <4 x i32> %a2) {
;CHECK-LABEL: commute_fold_vpmacssdd
;CHECK: vpmacssdd %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32> %1, <4 x i32> %a1, <4 x i32> %a2)
ret <4 x i32> %2
}
@@ -111,7 +111,7 @@ declare <4 x i32> @llvm.x86.xop.vpmacssdd(<4 x i32>, <4 x i32>, <4 x i32>) nounw
define <2 x i64> @commute_fold_vpmacssdqh(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
;CHECK-LABEL: commute_fold_vpmacssdqh
;CHECK: vpmacssdqh %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
ret <2 x i64> %2
}
@@ -120,7 +120,7 @@ declare <2 x i64> @llvm.x86.xop.vpmacssdqh(<4 x i32>, <4 x i32>, <2 x i64>) noun
define <2 x i64> @commute_fold_vpmacssdql(<4 x i32>* %a0, <4 x i32> %a1, <2 x i64> %a2) {
;CHECK-LABEL: commute_fold_vpmacssdql
;CHECK: vpmacssdql %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <4 x i32>* %a0
+ %1 = load <4 x i32>, <4 x i32>* %a0
%2 = call <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32> %1, <4 x i32> %a1, <2 x i64> %a2)
ret <2 x i64> %2
}
@@ -129,7 +129,7 @@ declare <2 x i64> @llvm.x86.xop.vpmacssdql(<4 x i32>, <4 x i32>, <2 x i64>) noun
define <4 x i32> @commute_fold_vpmacsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
;CHECK-LABEL: commute_fold_vpmacsswd
;CHECK: vpmacsswd %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
ret <4 x i32> %2
}
@@ -138,7 +138,7 @@ declare <4 x i32> @llvm.x86.xop.vpmacsswd(<8 x i16>, <8 x i16>, <4 x i32>) nounw
define <8 x i16> @commute_fold_vpmacssww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16> %a2) {
;CHECK-LABEL: commute_fold_vpmacssww
;CHECK: vpmacssww %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16> %1, <8 x i16> %a1, <8 x i16> %a2)
ret <8 x i16> %2
}
@@ -147,7 +147,7 @@ declare <8 x i16> @llvm.x86.xop.vpmacssww(<8 x i16>, <8 x i16>, <8 x i16>) nounw
define <4 x i32> @commute_fold_vpmacswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
;CHECK-LABEL: commute_fold_vpmacswd
;CHECK: vpmacswd %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
ret <4 x i32> %2
}
@@ -156,7 +156,7 @@ declare <4 x i32> @llvm.x86.xop.vpmacswd(<8 x i16>, <8 x i16>, <4 x i32>) nounwi
define <8 x i16> @commute_fold_vpmacsww(<8 x i16>* %a0, <8 x i16> %a1, <8 x i16> %a2) {
;CHECK-LABEL: commute_fold_vpmacsww
;CHECK: vpmacsww %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16> %1, <8 x i16> %a1, <8 x i16> %a2)
ret <8 x i16> %2
}
@@ -165,7 +165,7 @@ declare <8 x i16> @llvm.x86.xop.vpmacsww(<8 x i16>, <8 x i16>, <8 x i16>) nounwi
define <4 x i32> @commute_fold_vpmadcsswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
;CHECK-LABEL: commute_fold_vpmadcsswd
;CHECK: vpmadcsswd %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
ret <4 x i32> %2
}
@@ -174,7 +174,7 @@ declare <4 x i32> @llvm.x86.xop.vpmadcsswd(<8 x i16>, <8 x i16>, <4 x i32>) noun
define <4 x i32> @commute_fold_vpmadcswd(<8 x i16>* %a0, <8 x i16> %a1, <4 x i32> %a2) {
;CHECK-LABEL: commute_fold_vpmadcswd
;CHECK: vpmadcswd %xmm1, (%rdi), %xmm0, %xmm0
- %1 = load <8 x i16>* %a0
+ %1 = load <8 x i16>, <8 x i16>* %a0
%2 = call <4 x i32> @llvm.x86.xop.vpmadcswd(<8 x i16> %1, <8 x i16> %a1, <4 x i32> %a2)
ret <4 x i32> %2
}
diff --git a/llvm/test/CodeGen/X86/compact-unwind.ll b/llvm/test/CodeGen/X86/compact-unwind.ll
index d004f6e858e..f8266a10cfb 100644
--- a/llvm/test/CodeGen/X86/compact-unwind.ll
+++ b/llvm/test/CodeGen/X86/compact-unwind.ll
@@ -39,12 +39,12 @@
define i8* @test0(i64 %size) {
%addr = alloca i64, align 8
- %tmp20 = load i32* @gv, align 4
+ %tmp20 = load i32, i32* @gv, align 4
%tmp21 = call i32 @bar()
- %tmp25 = load i64* %addr, align 8
+ %tmp25 = load i64, i64* %addr, align 8
%tmp26 = inttoptr i64 %tmp25 to %ty*
%tmp29 = getelementptr inbounds %ty, %ty* %tmp26, i64 0, i32 0
- %tmp34 = load i8** %tmp29, align 8
+ %tmp34 = load i8*, i8** %tmp29, align 8
%tmp35 = getelementptr inbounds i8, i8* %tmp34, i64 %size
store i8* %tmp35, i8** %tmp29, align 8
ret i8* null
@@ -85,7 +85,7 @@ for.cond1.preheader: ; preds = %for.inc10, %entry
for.body3: ; preds = %for.inc, %for.cond1.preheader
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.inc ]
%image4 = getelementptr inbounds %"struct.dyld::MappedRanges", %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 0, i64 %indvars.iv, i32 0
- %0 = load %class.ImageLoader** %image4, align 8
+ %0 = load %class.ImageLoader*, %class.ImageLoader** %image4, align 8
%cmp5 = icmp eq %class.ImageLoader* %0, %image
br i1 %cmp5, label %if.then, label %for.inc
@@ -102,7 +102,7 @@ for.inc: ; preds = %if.then, %for.body3
for.inc10: ; preds = %for.inc
%next = getelementptr inbounds %"struct.dyld::MappedRanges", %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 1
- %1 = load %"struct.dyld::MappedRanges"** %next, align 8
+ %1 = load %"struct.dyld::MappedRanges"*, %"struct.dyld::MappedRanges"** %next, align 8
%cmp = icmp eq %"struct.dyld::MappedRanges"* %1, null
br i1 %cmp, label %for.end11, label %for.cond1.preheader
diff --git a/llvm/test/CodeGen/X86/complex-asm.ll b/llvm/test/CodeGen/X86/complex-asm.ll
index 8ceb47c8aa0..d7b5879309d 100644
--- a/llvm/test/CodeGen/X86/complex-asm.ll
+++ b/llvm/test/CodeGen/X86/complex-asm.ll
@@ -8,9 +8,9 @@ entry:
%v = alloca %0, align 8
call void asm sideeffect "", "=*r,r,r,0,~{dirflag},~{fpsr},~{flags}"(%0* %v, i32 0, i32 1, i128 undef) nounwind
%0 = getelementptr inbounds %0, %0* %v, i64 0, i32 0
- %1 = load i64* %0, align 8
+ %1 = load i64, i64* %0, align 8
%2 = getelementptr inbounds %0, %0* %v, i64 0, i32 1
- %3 = load i64* %2, align 8
+ %3 = load i64, i64* %2, align 8
%mrv4 = insertvalue %0 undef, i64 %1, 0
%mrv5 = insertvalue %0 %mrv4, i64 %3, 1
ret %0 %mrv5
diff --git a/llvm/test/CodeGen/X86/computeKnownBits_urem.ll b/llvm/test/CodeGen/X86/computeKnownBits_urem.ll
index 9902e6f2597..a72740e1957 100644
--- a/llvm/test/CodeGen/X86/computeKnownBits_urem.ll
+++ b/llvm/test/CodeGen/X86/computeKnownBits_urem.ll
@@ -3,7 +3,7 @@ define i32 @main() #0 {
entry:
%a = alloca i32, align 4
store i32 1, i32* %a, align 4
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%or = or i32 1, %0
%and = and i32 1, %or
%rem = urem i32 %and, 1
diff --git a/llvm/test/CodeGen/X86/const-base-addr.ll b/llvm/test/CodeGen/X86/const-base-addr.ll
index 19a103ca58b..42647136fe3 100644
--- a/llvm/test/CodeGen/X86/const-base-addr.ll
+++ b/llvm/test/CodeGen/X86/const-base-addr.ll
@@ -12,11 +12,11 @@ define i32 @test1() nounwind {
; CHECK-NEXT: addl 8(%rcx), %eax
; CHECK-NEXT: addl 12(%rcx), %eax
%addr1 = getelementptr %T, %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 1
- %tmp1 = load i32* %addr1
+ %tmp1 = load i32, i32* %addr1
%addr2 = getelementptr %T, %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 2
- %tmp2 = load i32* %addr2
+ %tmp2 = load i32, i32* %addr2
%addr3 = getelementptr %T, %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 3
- %tmp3 = load i32* %addr3
+ %tmp3 = load i32, i32* %addr3
%tmp4 = add i32 %tmp1, %tmp2
%tmp5 = add i32 %tmp3, %tmp4
ret i32 %tmp5
diff --git a/llvm/test/CodeGen/X86/constant-combines.ll b/llvm/test/CodeGen/X86/constant-combines.ll
index 0c9c2110606..5ea736e92c7 100644
--- a/llvm/test/CodeGen/X86/constant-combines.ll
+++ b/llvm/test/CodeGen/X86/constant-combines.ll
@@ -20,7 +20,7 @@ entry:
%1 = getelementptr inbounds { float, float }, { float, float }* %arg, i64 0, i32 0
%2 = bitcast float* %1 to i64*
- %3 = load i64* %2, align 8
+ %3 = load i64, i64* %2, align 8
%4 = trunc i64 %3 to i32
%5 = lshr i64 %3, 32
%6 = trunc i64 %5 to i32
diff --git a/llvm/test/CodeGen/X86/constant-hoisting-optnone.ll b/llvm/test/CodeGen/X86/constant-hoisting-optnone.ll
index f61fe3f60f9..4d8a06c444d 100644
--- a/llvm/test/CodeGen/X86/constant-hoisting-optnone.ll
+++ b/llvm/test/CodeGen/X86/constant-hoisting-optnone.ll
@@ -12,8 +12,8 @@ define i64 @constant_hoisting_optnone() #0 {
; CHECK-DAG: movabsq {{.*#+}} imm = 0xBEEBEEBEC
; CHECK: ret
entry:
- %0 = load i64* inttoptr (i64 51250129900 to i64*)
- %1 = load i64* inttoptr (i64 51250129908 to i64*)
+ %0 = load i64, i64* inttoptr (i64 51250129900 to i64*)
+ %1 = load i64, i64* inttoptr (i64 51250129908 to i64*)
%2 = add i64 %0, %1
ret i64 %2
}
diff --git a/llvm/test/CodeGen/X86/constant-hoisting-shift-immediate.ll b/llvm/test/CodeGen/X86/constant-hoisting-shift-immediate.ll
index 883be355bd3..65c26f818a6 100644
--- a/llvm/test/CodeGen/X86/constant-hoisting-shift-immediate.ll
+++ b/llvm/test/CodeGen/X86/constant-hoisting-shift-immediate.ll
@@ -6,7 +6,7 @@ define i64 @foo(i1 %z, i192* %p, i192* %q)
; be in another basic block. As a result, very inefficient code might be
; produced. Here we check that this doesn't occur.
entry:
- %data1 = load i192* %p, align 8
+ %data1 = load i192, i192* %p, align 8
%lshr1 = lshr i192 %data1, 128
%val1 = trunc i192 %lshr1 to i64
br i1 %z, label %End, label %L_val2
@@ -14,7 +14,7 @@ entry:
; CHECK: movq 16(%rdx), %rax
; CHECK-NEXT: retq
L_val2:
- %data2 = load i192* %q, align 8
+ %data2 = load i192, i192* %q, align 8
%lshr2 = lshr i192 %data2, 128
%val2 = trunc i192 %lshr2 to i64
br label %End
diff --git a/llvm/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll b/llvm/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll
index ed55b7f7eda..7fc56f5accc 100644
--- a/llvm/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll
+++ b/llvm/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll
@@ -12,7 +12,7 @@ entry:
%0 = add i32 %i2, 1 ; <i32> [#uses=1]
%1 = sext i32 %0 to i64 ; <i64> [#uses=1]
%2 = getelementptr i8, i8* %ptr, i64 %1 ; <i8*> [#uses=1]
- %3 = load i8* %2, align 1 ; <i8> [#uses=1]
+ %3 = load i8, i8* %2, align 1 ; <i8> [#uses=1]
%4 = icmp eq i8 0, %3 ; <i1> [#uses=1]
br i1 %4, label %bb3, label %bb34
diff --git a/llvm/test/CodeGen/X86/cppeh-catch-all.ll b/llvm/test/CodeGen/X86/cppeh-catch-all.ll
index cf7b3649185..b48d5347599 100644
--- a/llvm/test/CodeGen/X86/cppeh-catch-all.ll
+++ b/llvm/test/CodeGen/X86/cppeh-catch-all.ll
@@ -39,7 +39,7 @@ lpad: ; preds = %entry
br label %catch
catch: ; preds = %lpad
- %exn = load i8** %exn.slot
+ %exn = load i8*, i8** %exn.slot
%3 = call i8* @llvm.eh.begincatch(i8* %exn) #3
call void @_Z16handle_exceptionv()
br label %invoke.cont2
@@ -57,7 +57,7 @@ try.cont: ; preds = %invoke.cont2, %invo
; CHECK: %eh.alloc = call i8* @llvm.framerecover(i8* bitcast (void ()* @_Z4testv to i8*), i8* %1)
; CHECK: %eh.data = bitcast i8* %eh.alloc to %struct._Z4testv.ehdata*
; CHECK: %eh.obj.ptr = getelementptr inbounds %struct._Z4testv.ehdata, %struct._Z4testv.ehdata* %eh.data, i32 0, i32 1
-; CHECK: %eh.obj = load i8** %eh.obj.ptr
+; CHECK: %eh.obj = load i8*, i8** %eh.obj.ptr
; CHECK: call void @_Z16handle_exceptionv()
; CHECK: ret i8* blockaddress(@_Z4testv, %try.cont)
; CHECK: }
diff --git a/llvm/test/CodeGen/X86/cppeh-catch-scalar.ll b/llvm/test/CodeGen/X86/cppeh-catch-scalar.ll
index 2b7f841155c..b5f40c3f944 100644
--- a/llvm/test/CodeGen/X86/cppeh-catch-scalar.ll
+++ b/llvm/test/CodeGen/X86/cppeh-catch-scalar.ll
@@ -55,18 +55,18 @@ lpad: ; preds = %entry
br label %catch.dispatch
catch.dispatch: ; preds = %lpad
- %sel = load i32* %ehselector.slot
+ %sel = load i32, i32* %ehselector.slot
%3 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #3
%matches = icmp eq i32 %sel, %3
br i1 %matches, label %catch, label %eh.resume
catch: ; preds = %catch.dispatch
- %exn11 = load i8** %exn.slot
+ %exn11 = load i8*, i8** %exn.slot
%4 = call i8* @llvm.eh.begincatch(i8* %exn11) #3
%5 = bitcast i8* %4 to i32*
- %6 = load i32* %5, align 4
+ %6 = load i32, i32* %5, align 4
store i32 %6, i32* %i, align 4
- %7 = load i32* %i, align 4
+ %7 = load i32, i32* %i, align 4
call void @_Z10handle_inti(i32 %7)
br label %invoke.cont2
@@ -78,8 +78,8 @@ try.cont: ; preds = %invoke.cont2, %invo
ret void
eh.resume: ; preds = %catch.dispatch
- %exn3 = load i8** %exn.slot
- %sel4 = load i32* %ehselector.slot
+ %exn3 = load i8*, i8** %exn.slot
+ %sel4 = load i32, i32* %ehselector.slot
%lpad.val = insertvalue { i8*, i32 } undef, i8* %exn3, 0
%lpad.val5 = insertvalue { i8*, i32 } %lpad.val, i32 %sel4, 1
resume { i8*, i32 } %lpad.val5
@@ -90,12 +90,12 @@ eh.resume: ; preds = %catch.dispatch
; CHECK: %eh.alloc = call i8* @llvm.framerecover(i8* bitcast (void ()* @_Z4testv to i8*), i8* %1)
; CHECK: %eh.data = bitcast i8* %eh.alloc to %struct._Z4testv.ehdata*
; CHECK: %eh.obj.ptr = getelementptr inbounds %struct._Z4testv.ehdata, %struct._Z4testv.ehdata* %eh.data, i32 0, i32 1
-; CHECK: %eh.obj = load i8** %eh.obj.ptr
+; CHECK: %eh.obj = load i8*, i8** %eh.obj.ptr
; CHECK: %i = getelementptr inbounds %struct._Z4testv.ehdata, %struct._Z4testv.ehdata* %eh.data, i32 0, i32 2
; CHECK: %2 = bitcast i8* %eh.obj to i32*
-; CHECK: %3 = load i32* %2, align 4
+; CHECK: %3 = load i32, i32* %2, align 4
; CHECK: store i32 %3, i32* %i, align 4
-; CHECK: %4 = load i32* %i, align 4
+; CHECK: %4 = load i32, i32* %i, align 4
; CHECK: call void @_Z10handle_inti(i32 %4)
; CHECK: ret i8* blockaddress(@_Z4testv, %try.cont)
; CHECK: }
diff --git a/llvm/test/CodeGen/X86/cppeh-frame-vars.ll b/llvm/test/CodeGen/X86/cppeh-frame-vars.ll
index bb4cef49bd7..8b8a849367a 100644
--- a/llvm/test/CodeGen/X86/cppeh-frame-vars.ll
+++ b/llvm/test/CodeGen/X86/cppeh-frame-vars.ll
@@ -83,7 +83,7 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %1, 10
br i1 %cmp, label %for.body, label %for.end
@@ -92,9 +92,9 @@ for.body: ; preds = %for.cond
to label %invoke.cont unwind label %lpad
invoke.cont: ; preds = %for.body
- %2 = load i32* %i, align 4
+ %2 = load i32, i32* %i, align 4
%a = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 0
- %3 = load i32* %a, align 4
+ %3 = load i32, i32* %a, align 4
%add = add nsw i32 %3, %2
store i32 %add, i32* %a, align 4
br label %try.cont
@@ -109,42 +109,42 @@ lpad: ; preds = %for.body
br label %catch.dispatch
catch.dispatch: ; preds = %lpad
- %sel = load i32* %ehselector.slot
+ %sel = load i32, i32* %ehselector.slot
%7 = call i32 @llvm.eh.typeid.for(i8* bitcast (%rtti.TypeDescriptor2* @"\01??_R0H@8" to i8*)) #1
%matches = icmp eq i32 %sel, %7
br i1 %matches, label %catch, label %eh.resume
catch: ; preds = %catch.dispatch
- %exn = load i8** %exn.slot
+ %exn = load i8*, i8** %exn.slot
%8 = call i8* @llvm.eh.begincatch(i8* %exn) #1
%9 = bitcast i8* %8 to i32*
- %10 = load i32* %9, align 4
+ %10 = load i32, i32* %9, align 4
store i32 %10, i32* %e, align 4
- %11 = load i32* %e, align 4
- %12 = load i32* %NumExceptions, align 4
+ %11 = load i32, i32* %e, align 4
+ %12 = load i32, i32* %NumExceptions, align 4
%idxprom = sext i32 %12 to i64
%arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %ExceptionVal, i32 0, i64 %idxprom
store i32 %11, i32* %arrayidx, align 4
- %13 = load i32* %NumExceptions, align 4
+ %13 = load i32, i32* %NumExceptions, align 4
%inc = add nsw i32 %13, 1
store i32 %inc, i32* %NumExceptions, align 4
- %14 = load i32* %e, align 4
- %15 = load i32* %i, align 4
+ %14 = load i32, i32* %e, align 4
+ %15 = load i32, i32* %i, align 4
%cmp1 = icmp eq i32 %14, %15
br i1 %cmp1, label %if.then, label %if.else
if.then: ; preds = %catch
- %16 = load i32* %e, align 4
+ %16 = load i32, i32* %e, align 4
%b = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 1
- %17 = load i32* %b, align 4
+ %17 = load i32, i32* %b, align 4
%add2 = add nsw i32 %17, %16
store i32 %add2, i32* %b, align 4
br label %if.end
if.else: ; preds = %catch
- %18 = load i32* %e, align 4
+ %18 = load i32, i32* %e, align 4
%a3 = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 0
- %19 = load i32* %a3, align 4
+ %19 = load i32, i32* %a3, align 4
%add4 = add nsw i32 %19, %18
store i32 %add4, i32* %a3, align 4
br label %if.end
@@ -154,25 +154,25 @@ if.end: ; preds = %if.else, %if.then
br label %try.cont
try.cont: ; preds = %if.end, %invoke.cont
- %20 = load i32* %NumExceptions, align 4
+ %20 = load i32, i32* %NumExceptions, align 4
call void @"\01?does_not_throw@@YAXH@Z"(i32 %20)
br label %for.inc
for.inc: ; preds = %try.cont
- %21 = load i32* %i, align 4
+ %21 = load i32, i32* %i, align 4
%inc5 = add nsw i32 %21, 1
store i32 %inc5, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %22 = load i32* %NumExceptions, align 4
+ %22 = load i32, i32* %NumExceptions, align 4
%arraydecay = getelementptr inbounds [10 x i32], [10 x i32]* %ExceptionVal, i32 0, i32 0
call void @"\01?dump@@YAXPEAHHAEAUSomeData@@@Z"(i32* %arraydecay, i32 %22, %struct.SomeData* dereferenceable(8) %Data)
ret void
eh.resume: ; preds = %catch.dispatch
- %exn6 = load i8** %exn.slot
- %sel7 = load i32* %ehselector.slot
+ %exn6 = load i8*, i8** %exn.slot
+ %sel7 = load i32, i32* %ehselector.slot
%lpad.val = insertvalue { i8*, i32 } undef, i8* %exn6, 0
%lpad.val8 = insertvalue { i8*, i32 } %lpad.val, i32 %sel7, 1
resume { i8*, i32 } %lpad.val8
@@ -184,40 +184,40 @@ eh.resume: ; preds = %catch.dispatch
; CHECK: %eh.alloc = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1)
; CHECK: %eh.data = bitcast i8* %eh.alloc to %"struct.\01?test@@YAXXZ.ehdata"*
; CHECK: %eh.obj.ptr = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 1
-; CHECK: %eh.obj = load i8** %eh.obj.ptr
+; CHECK: %eh.obj = load i8*, i8** %eh.obj.ptr
; CHECK: %e = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 2
; CHECK: %NumExceptions = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 3
; CHECK: %ExceptionVal = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 4
; CHECK: %i = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 5
; CHECK: %Data = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 6
; CHECK: %2 = bitcast i8* %eh.obj to i32*
-; CHECK: %3 = load i32* %2, align 4
+; CHECK: %3 = load i32, i32* %2, align 4
; CHECK: store i32 %3, i32* %e, align 4
-; CHECK: %4 = load i32* %e, align 4
-; CHECK: %5 = load i32* %NumExceptions, align 4
+; CHECK: %4 = load i32, i32* %e, align 4
+; CHECK: %5 = load i32, i32* %NumExceptions, align 4
; CHECK: %idxprom = sext i32 %5 to i64
; CHECK: %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %ExceptionVal, i32 0, i64 %idxprom
; CHECK: store i32 %4, i32* %arrayidx, align 4
-; CHECK: %6 = load i32* %NumExceptions, align 4
+; CHECK: %6 = load i32, i32* %NumExceptions, align 4
; CHECK: %inc = add nsw i32 %6, 1
; CHECK: store i32 %inc, i32* %NumExceptions, align 4
-; CHECK: %7 = load i32* %e, align 4
-; CHECK: %8 = load i32* %i, align 4
+; CHECK: %7 = load i32, i32* %e, align 4
+; CHECK: %8 = load i32, i32* %i, align 4
; CHECK: %cmp1 = icmp eq i32 %7, %8
; CHECK: br i1 %cmp1, label %if.then, label %if.else
;
; CHECK: if.then: ; preds = %catch.entry
-; CHECK: %9 = load i32* %e, align 4
+; CHECK: %9 = load i32, i32* %e, align 4
; CHECK: %b = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 1
-; CHECK: %10 = load i32* %b, align 4
+; CHECK: %10 = load i32, i32* %b, align 4
; CHECK: %add2 = add nsw i32 %10, %9
; CHECK: store i32 %add2, i32* %b, align 4
; CHECK: br label %if.end
;
; CHECK: if.else: ; preds = %catch.entry
-; CHECK: %11 = load i32* %e, align 4
+; CHECK: %11 = load i32, i32* %e, align 4
; CHECK: %a3 = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 0
-; CHECK: %12 = load i32* %a3, align 4
+; CHECK: %12 = load i32, i32* %a3, align 4
; CHECK: %add4 = add nsw i32 %12, %11
; CHECK: store i32 %add4, i32* %a3, align 4
; CHECK: br label %if.end
diff --git a/llvm/test/CodeGen/X86/crash-O0.ll b/llvm/test/CodeGen/X86/crash-O0.ll
index df978176bcb..dab15c19c69 100644
--- a/llvm/test/CodeGen/X86/crash-O0.ll
+++ b/llvm/test/CodeGen/X86/crash-O0.ll
@@ -45,7 +45,7 @@ entry:
; CHECK: retq
define i64 @addressModeWith32bitIndex(i32 %V) {
%gep = getelementptr i64, i64* null, i32 %V
- %load = load i64* %gep
+ %load = load i64, i64* %gep
%sdiv = sdiv i64 0, %load
ret i64 %sdiv
}
diff --git a/llvm/test/CodeGen/X86/crash-nosse.ll b/llvm/test/CodeGen/X86/crash-nosse.ll
index b1e01f94c9e..aff120dbb84 100644
--- a/llvm/test/CodeGen/X86/crash-nosse.ll
+++ b/llvm/test/CodeGen/X86/crash-nosse.ll
@@ -11,7 +11,7 @@ BB:
br label %CF
CF: ; preds = %CF, %BB
- %L19 = load <8 x float>* %S17
+ %L19 = load <8 x float>, <8 x float>* %S17
%BC = bitcast <32 x i32> %Shuff6 to <32 x float>
%S28 = fcmp ord double 0x3ED1A1F787BB2185, 0x3EE59DE55A8DF890
br i1 %S28, label %CF, label %CF39
diff --git a/llvm/test/CodeGen/X86/crash.ll b/llvm/test/CodeGen/X86/crash.ll
index ffab3428c3f..3acae440be1 100644
--- a/llvm/test/CodeGen/X86/crash.ll
+++ b/llvm/test/CodeGen/X86/crash.ll
@@ -7,9 +7,9 @@
; Chain and flag folding issues.
define i32 @test1() nounwind ssp {
entry:
- %tmp5.i = load volatile i32* undef ; <i32> [#uses=1]
+ %tmp5.i = load volatile i32, i32* undef ; <i32> [#uses=1]
%conv.i = zext i32 %tmp5.i to i64 ; <i64> [#uses=1]
- %tmp12.i = load volatile i32* undef ; <i32> [#uses=1]
+ %tmp12.i = load volatile i32, i32* undef ; <i32> [#uses=1]
%conv13.i = zext i32 %tmp12.i to i64 ; <i64> [#uses=1]
%shl.i = shl i64 %conv13.i, 32 ; <i64> [#uses=1]
%or.i = or i64 %shl.i, %conv.i ; <i64> [#uses=1]
@@ -40,7 +40,7 @@ if.end: ; preds = %land.end
define void @test3() {
dependentGraph243.exit:
- %subject19 = load %pair* undef ; <%1> [#uses=1]
+ %subject19 = load %pair, %pair* undef ; <%1> [#uses=1]
%0 = extractvalue %pair %subject19, 1 ; <double> [#uses=2]
%1 = select i1 undef, double %0, double undef ; <double> [#uses=1]
%2 = select i1 undef, double %1, double %0 ; <double> [#uses=1]
@@ -52,7 +52,7 @@ dependentGraph243.exit:
; PR6605
define i64 @test4(i8* %P) nounwind ssp {
entry:
- %tmp1 = load i8* %P ; <i8> [#uses=3]
+ %tmp1 = load i8, i8* %P ; <i8> [#uses=3]
%tobool = icmp eq i8 %tmp1, 0 ; <i1> [#uses=1]
%tmp58 = sext i1 %tobool to i8 ; <i8> [#uses=1]
%mul.i = and i8 %tmp58, %tmp1 ; <i8> [#uses=1]
@@ -76,7 +76,7 @@ declare i32 @safe(i32)
; PR6607
define fastcc void @test5(i32 %FUNC) nounwind {
foo:
- %0 = load i8* undef, align 1 ; <i8> [#uses=3]
+ %0 = load i8, i8* undef, align 1 ; <i8> [#uses=3]
%1 = sext i8 %0 to i32 ; <i32> [#uses=2]
%2 = zext i8 %0 to i32 ; <i32> [#uses=1]
%tmp1.i5037 = urem i32 %2, 10 ; <i32> [#uses=1]
@@ -121,7 +121,7 @@ entry:
bb14:
%tmp0 = trunc i16 undef to i1
- %tmp1 = load i8* undef, align 8
+ %tmp1 = load i8, i8* undef, align 8
%tmp2 = shl i8 %tmp1, 4
%tmp3 = lshr i8 %tmp2, 7
%tmp4 = trunc i8 %tmp3 to i1
@@ -239,7 +239,7 @@ declare i64 @llvm.objectsize.i64.p0i8(i8*, i1) nounwind readnone
define void @_ZNK4llvm17MipsFrameLowering12emitPrologueERNS_15MachineFunctionE() ssp align 2 {
bb:
- %tmp = load %t9** undef, align 4
+ %tmp = load %t9*, %t9** undef, align 4
%tmp2 = getelementptr inbounds %t9, %t9* %tmp, i32 0, i32 0
%tmp3 = getelementptr inbounds %t9, %t9* %tmp, i32 0, i32 0, i32 0, i32 0, i32 1
br label %bb4
@@ -250,25 +250,25 @@ bb4: ; preds = %bb37, %bb
br i1 undef, label %bb34, label %bb7
bb7: ; preds = %bb4
- %tmp8 = load i32* undef, align 4
+ %tmp8 = load i32, i32* undef, align 4
%tmp9 = and i96 %tmp6, 4294967040
%tmp10 = zext i32 %tmp8 to i96
%tmp11 = shl nuw nsw i96 %tmp10, 32
%tmp12 = or i96 %tmp9, %tmp11
%tmp13 = or i96 %tmp12, 1
- %tmp14 = load i32* undef, align 4
+ %tmp14 = load i32, i32* undef, align 4
%tmp15 = and i96 %tmp5, 4294967040
%tmp16 = zext i32 %tmp14 to i96
%tmp17 = shl nuw nsw i96 %tmp16, 32
%tmp18 = or i96 %tmp15, %tmp17
%tmp19 = or i96 %tmp18, 1
- %tmp20 = load i8* undef, align 1
+ %tmp20 = load i8, i8* undef, align 1
%tmp21 = and i8 %tmp20, 1
%tmp22 = icmp ne i8 %tmp21, 0
%tmp23 = select i1 %tmp22, i96 %tmp19, i96 %tmp13
%tmp24 = select i1 %tmp22, i96 %tmp13, i96 %tmp19
store i96 %tmp24, i96* undef, align 4
- %tmp25 = load %t13** %tmp3, align 4
+ %tmp25 = load %t13*, %t13** %tmp3, align 4
%tmp26 = icmp eq %t13* %tmp25, undef
br i1 %tmp26, label %bb28, label %bb27
@@ -281,7 +281,7 @@ bb28: ; preds = %bb7
bb29: ; preds = %bb28, %bb27
store i96 %tmp23, i96* undef, align 4
- %tmp30 = load %t13** %tmp3, align 4
+ %tmp30 = load %t13*, %t13** %tmp3, align 4
br i1 false, label %bb33, label %bb31
bb31: ; preds = %bb29
@@ -348,13 +348,13 @@ entry:
br label %"4"
"3":
- %0 = load <2 x i32>* null, align 8
+ %0 = load <2 x i32>, <2 x i32>* null, align 8
%1 = xor <2 x i32> zeroinitializer, %0
%2 = and <2 x i32> %1, %6
%3 = or <2 x i32> undef, %2
%4 = and <2 x i32> %3, undef
store <2 x i32> %4, <2 x i32>* undef
- %5 = load <2 x i32>* undef, align 1
+ %5 = load <2 x i32>, <2 x i32>* undef, align 1
br label %"4"
"4":
@@ -378,7 +378,7 @@ entry:
@__force_order = external hidden global i32, align 4
define void @pr11078(i32* %pgd) nounwind {
entry:
- %t0 = load i32* %pgd, align 4
+ %t0 = load i32, i32* %pgd, align 4
%and2 = and i32 %t0, 1
%tobool = icmp eq i32 %and2, 0
br i1 %tobool, label %if.then, label %if.end
@@ -405,7 +405,7 @@ while.body.preheader: ; preds = %entry
br i1 undef, label %if.then3, label %if.end7
if.then3: ; preds = %while.body.preheader
- %0 = load i32* undef, align 4
+ %0 = load i32, i32* undef, align 4
br i1 undef, label %land.lhs.true.i255, label %if.end7
land.lhs.true.i255: ; preds = %if.then3
@@ -434,7 +434,7 @@ return: ; preds = %entry
@.str = private unnamed_addr constant { [1 x i8], [63 x i8] } zeroinitializer, align 32
define void @pr13188(i64* nocapture %this) uwtable ssp sanitize_address align 2 {
entry:
- %x7 = load i64* %this, align 8
+ %x7 = load i64, i64* %this, align 8
%sub = add i64 %x7, -1
%conv = uitofp i64 %sub to float
%div = fmul float %conv, 5.000000e-01
@@ -450,12 +450,12 @@ declare void @_Z6PrintFz(...)
define void @pr13943() nounwind uwtable ssp {
entry:
- %srcval = load i576* bitcast ([9 x i32*]* @fn1.g to i576*), align 16
+ %srcval = load i576, i576* bitcast ([9 x i32*]* @fn1.g to i576*), align 16
br label %for.cond
for.cond: ; preds = %for.inc, %entry
%g.0 = phi i576 [ %srcval, %entry ], [ %ins, %for.inc ]
- %0 = load i32* @e, align 4
+ %0 = load i32, i32* @e, align 4
%1 = lshr i576 %g.0, 64
%2 = trunc i576 %1 to i64
%3 = inttoptr i64 %2 to i32*
@@ -510,9 +510,9 @@ bb4: ; preds = %bb3
unreachable
bb5: ; preds = %bb3
- %tmp = load <4 x float>* undef, align 1
+ %tmp = load <4 x float>, <4 x float>* undef, align 1
%tmp6 = bitcast <4 x float> %tmp to i128
- %tmp7 = load <4 x float>* undef, align 1
+ %tmp7 = load <4 x float>, <4 x float>* undef, align 1
%tmp8 = bitcast <4 x float> %tmp7 to i128
br label %bb10
@@ -583,7 +583,7 @@ bb29: ; preds = %bb28, %bb26, %bb25,
}
define void @pr14194() nounwind uwtable {
- %tmp = load i64* undef, align 16
+ %tmp = load i64, i64* undef, align 16
%tmp1 = trunc i64 %tmp to i32
%tmp2 = lshr i64 %tmp, 32
%tmp3 = trunc i64 %tmp2 to i32
diff --git a/llvm/test/CodeGen/X86/critical-anti-dep-breaker.ll b/llvm/test/CodeGen/X86/critical-anti-dep-breaker.ll
index 32d3f49c79c..86afc1f245a 100644
--- a/llvm/test/CodeGen/X86/critical-anti-dep-breaker.ll
+++ b/llvm/test/CodeGen/X86/critical-anti-dep-breaker.ll
@@ -16,9 +16,9 @@
define i32 @Part_Create(i64* %Anchor, i32 %TypeNum, i32 %F, i32 %Z, i32* %Status, i64* %PartTkn) {
%PartObj = alloca i64*, align 8
%Vchunk = alloca i64, align 8
- %1 = load i64* @NullToken, align 4
+ %1 = load i64, i64* @NullToken, align 4
store i64 %1, i64* %Vchunk, align 8
- %2 = load i32* @PartClass, align 4
+ %2 = load i32, i32* @PartClass, align 4
call i32 @Image(i64* %Anchor, i32 %2, i32 0, i32 0, i32* %Status, i64* %PartTkn, i64** %PartObj)
call i32 @Create(i64* %Anchor)
ret i32 %2
diff --git a/llvm/test/CodeGen/X86/cse-add-with-overflow.ll b/llvm/test/CodeGen/X86/cse-add-with-overflow.ll
index 1fcc03f117d..dc02fe91584 100644
--- a/llvm/test/CodeGen/X86/cse-add-with-overflow.ll
+++ b/llvm/test/CodeGen/X86/cse-add-with-overflow.ll
@@ -15,8 +15,8 @@
define i64 @redundantadd(i64* %a0, i64* %a1) {
entry:
- %tmp8 = load i64* %a0, align 8
- %tmp12 = load i64* %a1, align 8
+ %tmp8 = load i64, i64* %a0, align 8
+ %tmp12 = load i64, i64* %a1, align 8
%tmp13 = icmp ult i64 %tmp12, -281474976710656
br i1 %tmp13, label %exit1, label %body
diff --git a/llvm/test/CodeGen/X86/cvt16.ll b/llvm/test/CodeGen/X86/cvt16.ll
index 4d920e2d23d..9846da546e7 100644
--- a/llvm/test/CodeGen/X86/cvt16.ll
+++ b/llvm/test/CodeGen/X86/cvt16.ll
@@ -33,7 +33,7 @@ define void @test1(float %src, i16* %dest) {
define float @test2(i16* nocapture %src) {
- %1 = load i16* %src, align 2
+ %1 = load i16, i16* %src, align 2
%2 = tail call float @llvm.convert.from.fp16.f32(i16 %1)
ret float %2
}
@@ -60,7 +60,7 @@ define float @test3(float %src) nounwind uwtable readnone {
; F16C: ret
define double @test4(i16* nocapture %src) {
- %1 = load i16* %src, align 2
+ %1 = load i16, i16* %src, align 2
%2 = tail call double @llvm.convert.from.fp16.f64(i16 %1)
ret double %2
}
diff --git a/llvm/test/CodeGen/X86/dagcombine-buildvector.ll b/llvm/test/CodeGen/X86/dagcombine-buildvector.ll
index cf631c353fc..3a6231ade1a 100644
--- a/llvm/test/CodeGen/X86/dagcombine-buildvector.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-buildvector.ll
@@ -17,7 +17,7 @@ entry:
; CHECK: movdqa
define void @test2(<4 x i16>* %src, <4 x i32>* %dest) nounwind {
entry:
- %tmp1 = load <4 x i16>* %src
+ %tmp1 = load <4 x i16>, <4 x i16>* %src
%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
%0 = tail call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %tmp3)
store <4 x i32> %0, <4 x i32>* %dest
diff --git a/llvm/test/CodeGen/X86/dagcombine-cse.ll b/llvm/test/CodeGen/X86/dagcombine-cse.ll
index 7a1a693f02c..be1dcff7ae8 100644
--- a/llvm/test/CodeGen/X86/dagcombine-cse.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-cse.ll
@@ -7,12 +7,12 @@ entry:
%tmp9 = add i32 %tmp7, %idxX ; <i32> [#uses=1]
%tmp11 = getelementptr i8, i8* %ref_frame_ptr, i32 %tmp9 ; <i8*> [#uses=1]
%tmp1112 = bitcast i8* %tmp11 to i32* ; <i32*> [#uses=1]
- %tmp13 = load i32* %tmp1112, align 4 ; <i32> [#uses=1]
+ %tmp13 = load i32, i32* %tmp1112, align 4 ; <i32> [#uses=1]
%tmp18 = add i32 %idxX, 4 ; <i32> [#uses=1]
%tmp20.sum = add i32 %tmp18, %tmp7 ; <i32> [#uses=1]
%tmp21 = getelementptr i8, i8* %ref_frame_ptr, i32 %tmp20.sum ; <i8*> [#uses=1]
%tmp2122 = bitcast i8* %tmp21 to i16* ; <i16*> [#uses=1]
- %tmp23 = load i16* %tmp2122, align 2 ; <i16> [#uses=1]
+ %tmp23 = load i16, i16* %tmp2122, align 2 ; <i16> [#uses=1]
%tmp2425 = zext i16 %tmp23 to i64 ; <i64> [#uses=1]
%tmp26 = shl i64 %tmp2425, 32 ; <i64> [#uses=1]
%tmp2728 = zext i32 %tmp13 to i64 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/darwin-quote.ll b/llvm/test/CodeGen/X86/darwin-quote.ll
index 8fddc118f61..c912c92e1e8 100644
--- a/llvm/test/CodeGen/X86/darwin-quote.ll
+++ b/llvm/test/CodeGen/X86/darwin-quote.ll
@@ -2,7 +2,7 @@
define internal i64 @baz() nounwind {
- %tmp = load i64* @"+x"
+ %tmp = load i64, i64* @"+x"
ret i64 %tmp
; CHECK: _baz:
; CHECK: movl "L_+x$non_lazy_ptr", %ecx
diff --git a/llvm/test/CodeGen/X86/dbg-changes-codegen.ll b/llvm/test/CodeGen/X86/dbg-changes-codegen.ll
index de2864ad03a..d2a29cabacf 100644
--- a/llvm/test/CodeGen/X86/dbg-changes-codegen.ll
+++ b/llvm/test/CodeGen/X86/dbg-changes-codegen.ll
@@ -43,7 +43,7 @@
; Function Attrs: nounwind readonly uwtable
define zeroext i1 @_ZN3Foo3batEv(%struct.Foo* %this) #0 align 2 {
entry:
- %0 = load %struct.Foo** @pfoo, align 8
+ %0 = load %struct.Foo*, %struct.Foo** @pfoo, align 8
tail call void @llvm.dbg.value(metadata %struct.Foo* %0, i64 0, metadata !62, metadata !{!"0x102"})
%cmp.i = icmp eq %struct.Foo* %0, %this
ret i1 %cmp.i
@@ -52,9 +52,9 @@ entry:
; Function Attrs: nounwind uwtable
define void @_Z3bazv() #1 {
entry:
- %0 = load %struct.Wibble** @wibble1, align 8
+ %0 = load %struct.Wibble*, %struct.Wibble** @wibble1, align 8
tail call void @llvm.dbg.value(metadata %struct.Flibble* undef, i64 0, metadata !65, metadata !{!"0x102"})
- %1 = load %struct.Wibble** @wibble2, align 8
+ %1 = load %struct.Wibble*, %struct.Wibble** @wibble2, align 8
%cmp.i = icmp ugt %struct.Wibble* %1, %0
br i1 %cmp.i, label %if.then.i, label %_ZN7Flibble3barEP6Wibble.exit
diff --git a/llvm/test/CodeGen/X86/dbg-combine.ll b/llvm/test/CodeGen/X86/dbg-combine.ll
index dcdbfc4d44c..e443408f282 100644
--- a/llvm/test/CodeGen/X86/dbg-combine.ll
+++ b/llvm/test/CodeGen/X86/dbg-combine.ll
@@ -31,7 +31,7 @@ entry:
%cleanup.dest.slot = alloca i32
call void @llvm.dbg.declare(metadata i32* %elems, metadata !12, metadata !13), !dbg !14
store i32 3, i32* %elems, align 4, !dbg !14
- %0 = load i32* %elems, align 4, !dbg !15
+ %0 = load i32, i32* %elems, align 4, !dbg !15
%1 = zext i32 %0 to i64, !dbg !16
%2 = call i8* @llvm.stacksave(), !dbg !16
store i8* %2, i8** %saved_stack, !dbg !16
@@ -43,16 +43,16 @@ entry:
store i32 1, i32* %arrayidx1, align 4, !dbg !26
%arrayidx2 = getelementptr inbounds i32, i32* %vla, i64 2, !dbg !27
store i32 2, i32* %arrayidx2, align 4, !dbg !28
- %3 = load i32* %elems, align 4, !dbg !29
+ %3 = load i32, i32* %elems, align 4, !dbg !29
%4 = zext i32 %3 to i64, !dbg !30
%vla3 = alloca i32, i64 %4, align 16, !dbg !30
call void @llvm.dbg.declare(metadata i32* %vla3, metadata !31, metadata !21), !dbg !32
%arrayidx4 = getelementptr inbounds i32, i32* %vla3, i64 0, !dbg !33
store i32 1, i32* %arrayidx4, align 4, !dbg !34
%arrayidx5 = getelementptr inbounds i32, i32* %vla3, i64 0, !dbg !35
- %5 = load i32* %arrayidx5, align 4, !dbg !35
+ %5 = load i32, i32* %arrayidx5, align 4, !dbg !35
store i32 1, i32* %cleanup.dest.slot
- %6 = load i8** %saved_stack, !dbg !36
+ %6 = load i8*, i8** %saved_stack, !dbg !36
call void @llvm.stackrestore(i8* %6), !dbg !36
ret i32 %5, !dbg !36
}
diff --git a/llvm/test/CodeGen/X86/discontiguous-loops.ll b/llvm/test/CodeGen/X86/discontiguous-loops.ll
index 479c450ca20..edebbbe4008 100644
--- a/llvm/test/CodeGen/X86/discontiguous-loops.ll
+++ b/llvm/test/CodeGen/X86/discontiguous-loops.ll
@@ -39,7 +39,7 @@ ybb8: ; preds = %ybb1
br i1 %tmp9, label %bb10, label %ybb12
bb10: ; preds = %ybb8
- %tmp11 = load i8** undef, align 8 ; <i8*> [#uses=1]
+ %tmp11 = load i8*, i8** undef, align 8 ; <i8*> [#uses=1]
call void (i8*, ...)* @fatal(i8* getelementptr inbounds ([37 x i8]* @.str96, i64 0, i64 0), i8* %tmp11) nounwind
unreachable
diff --git a/llvm/test/CodeGen/X86/div8.ll b/llvm/test/CodeGen/X86/div8.ll
index 0825f79e324..f4f50e5a494 100644
--- a/llvm/test/CodeGen/X86/div8.ll
+++ b/llvm/test/CodeGen/X86/div8.ll
@@ -10,13 +10,13 @@ entry:
%quotient = alloca i8, align 1
store i8 %dividend, i8* %dividend.addr, align 2
store i8 %divisor, i8* %divisor.addr, align 1
- %tmp = load i8* %dividend.addr, align 2
- %tmp1 = load i8* %divisor.addr, align 1
+ %tmp = load i8, i8* %dividend.addr, align 2
+ %tmp1 = load i8, i8* %divisor.addr, align 1
; Insist on i8->i32 zero extension, even though divb demands only i16:
; CHECK: movzbl {{.*}}%eax
; CHECK: divb
%div = udiv i8 %tmp, %tmp1
store i8 %div, i8* %quotient, align 1
- %tmp4 = load i8* %quotient, align 1
+ %tmp4 = load i8, i8* %quotient, align 1
ret i8 %tmp4
}
diff --git a/llvm/test/CodeGen/X86/dllimport-x86_64.ll b/llvm/test/CodeGen/X86/dllimport-x86_64.ll
index 839bca4f3c3..af15a8618fc 100644
--- a/llvm/test/CodeGen/X86/dllimport-x86_64.ll
+++ b/llvm/test/CodeGen/X86/dllimport-x86_64.ll
@@ -35,13 +35,13 @@ define void @use() nounwind {
; available_externally uses go away
; OPT-NOT: call void @inline1()
; OPT-NOT: call void @inline2()
-; OPT-NOT: load i32* @Var2
+; OPT-NOT: load i32, i32* @Var2
; OPT: call void (...)* @dummy(i32 %1, i32 1)
; CHECK-DAG: movq __imp_Var1(%rip), [[R1:%[a-z]{3}]]
; CHECK-DAG: movq __imp_Var2(%rip), [[R2:%[a-z]{3}]]
- %1 = load i32* @Var1
- %2 = load i32* @Var2
+ %1 = load i32, i32* @Var1
+ %2 = load i32, i32* @Var2
call void(...)* @dummy(i32 %1, i32 %2)
ret void
diff --git a/llvm/test/CodeGen/X86/dllimport.ll b/llvm/test/CodeGen/X86/dllimport.ll
index 231ad65740b..eb9484cf5cf 100644
--- a/llvm/test/CodeGen/X86/dllimport.ll
+++ b/llvm/test/CodeGen/X86/dllimport.ll
@@ -46,13 +46,13 @@ define void @use() nounwind {
; available_externally uses go away
; OPT-NOT: call void @inline1()
; OPT-NOT: call void @inline2()
-; OPT-NOT: load i32* @Var2
+; OPT-NOT: load i32, i32* @Var2
; OPT: call void (...)* @dummy(i32 %1, i32 1)
; CHECK-DAG: movl __imp__Var1, [[R1:%[a-z]{3}]]
; CHECK-DAG: movl __imp__Var2, [[R2:%[a-z]{3}]]
- %1 = load i32* @Var1
- %2 = load i32* @Var2
+ %1 = load i32, i32* @Var1
+ %2 = load i32, i32* @Var2
call void(...)* @dummy(i32 %1, i32 %2)
ret void
diff --git a/llvm/test/CodeGen/X86/dollar-name.ll b/llvm/test/CodeGen/X86/dollar-name.ll
index 2ecd72909cb..a31b806c031 100644
--- a/llvm/test/CodeGen/X86/dollar-name.ll
+++ b/llvm/test/CodeGen/X86/dollar-name.ll
@@ -8,8 +8,8 @@ define i32 @"$foo"() nounwind {
; CHECK: movl ($bar),
; CHECK: addl ($qux),
; CHECK: calll ($hen)
- %m = load i32* @"$bar"
- %n = load i32* @"$qux"
+ %m = load i32, i32* @"$bar"
+ %n = load i32, i32* @"$qux"
%t = add i32 %m, %n
%u = call i32 @"$hen"(i32 %t)
ret i32 %u
diff --git a/llvm/test/CodeGen/X86/dont-trunc-store-double-to-float.ll b/llvm/test/CodeGen/X86/dont-trunc-store-double-to-float.ll
index 24d9533eba4..8a334d21631 100644
--- a/llvm/test/CodeGen/X86/dont-trunc-store-double-to-float.ll
+++ b/llvm/test/CodeGen/X86/dont-trunc-store-double-to-float.ll
@@ -10,7 +10,7 @@ entry-block:
%b = alloca float
store double 3.140000e+00, double* %a
- %0 = load double* %a
+ %0 = load double, double* %a
%1 = fptrunc double %0 to float
diff --git a/llvm/test/CodeGen/X86/dynamic-allocas-VLAs.ll b/llvm/test/CodeGen/X86/dynamic-allocas-VLAs.ll
index 9405f76cbed..2925f243b0e 100644
--- a/llvm/test/CodeGen/X86/dynamic-allocas-VLAs.ll
+++ b/llvm/test/CodeGen/X86/dynamic-allocas-VLAs.ll
@@ -7,7 +7,7 @@ define i32 @t1() nounwind uwtable ssp {
entry:
%a = alloca i32, align 4
call void @t1_helper(i32* %a) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
@@ -27,7 +27,7 @@ entry:
%a = alloca i32, align 4
%v = alloca <8 x float>, align 32
call void @t2_helper(i32* %a, <8 x float>* %v) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
@@ -53,7 +53,7 @@ entry:
%a = alloca i32, align 4
%vla = alloca i32, i64 %sz, align 16
call void @t3_helper(i32* %a, i32* %vla) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
@@ -78,7 +78,7 @@ entry:
%v = alloca <8 x float>, align 32
%vla = alloca i32, i64 %sz, align 16
call void @t4_helper(i32* %a, i32* %vla, <8 x float>* %v) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
@@ -108,10 +108,10 @@ define i32 @t5(float* nocapture %f) nounwind uwtable ssp {
entry:
%a = alloca i32, align 4
%0 = bitcast float* %f to <8 x float>*
- %1 = load <8 x float>* %0, align 32
+ %1 = load <8 x float>, <8 x float>* %0, align 32
call void @t5_helper1(i32* %a) nounwind
call void @t5_helper2(<8 x float> %1) nounwind
- %2 = load i32* %a, align 4
+ %2 = load i32, i32* %a, align 4
%add = add nsw i32 %2, 13
ret i32 %add
@@ -138,11 +138,11 @@ entry:
; CHECK: _t6
%a = alloca i32, align 4
%0 = bitcast float* %f to <8 x float>*
- %1 = load <8 x float>* %0, align 32
+ %1 = load <8 x float>, <8 x float>* %0, align 32
%vla = alloca i32, i64 %sz, align 16
call void @t6_helper1(i32* %a, i32* %vla) nounwind
call void @t6_helper2(<8 x float> %1) nounwind
- %2 = load i32* %a, align 4
+ %2 = load i32, i32* %a, align 4
%add = add nsw i32 %2, 13
ret i32 %add
}
@@ -162,7 +162,7 @@ entry:
store i32 0, i32* %x, align 32
%0 = zext i32 %size to i64
%vla = alloca i32, i64 %0, align 16
- %1 = load i32* %x, align 32
+ %1 = load i32, i32* %x, align 32
call void @bar(i32 %1, i32* %vla, %struct.struct_t* byval align 8 %arg1)
ret void
@@ -195,7 +195,7 @@ define i32 @t8() nounwind uwtable {
entry:
%a = alloca i32, align 4
call void @t1_helper(i32* %a) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
@@ -213,7 +213,7 @@ entry:
%a = alloca i32, align 4
%vla = alloca i32, i64 %sz, align 16
call void @t3_helper(i32* %a, i32* %vla) nounwind
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 13
ret i32 %add
diff --git a/llvm/test/CodeGen/X86/early-ifcvt.ll b/llvm/test/CodeGen/X86/early-ifcvt.ll
index 24c1edc71c5..6215519532c 100644
--- a/llvm/test/CodeGen/X86/early-ifcvt.ll
+++ b/llvm/test/CodeGen/X86/early-ifcvt.ll
@@ -15,7 +15,7 @@ do.body:
%n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.cond ]
%p.addr.0 = phi i32* [ %p, %entry ], [ %incdec.ptr, %do.cond ]
%incdec.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 1
- %0 = load i32* %p.addr.0, align 4
+ %0 = load i32, i32* %p.addr.0, align 4
%cmp = icmp sgt i32 %0, %max.0
br i1 %cmp, label %do.cond, label %if.else
diff --git a/llvm/test/CodeGen/X86/emit-big-cst.ll b/llvm/test/CodeGen/X86/emit-big-cst.ll
index 96c15d4a365..51852d00f82 100644
--- a/llvm/test/CodeGen/X86/emit-big-cst.ll
+++ b/llvm/test/CodeGen/X86/emit-big-cst.ll
@@ -10,7 +10,7 @@
define void @accessBig(i64* %storage) {
%addr = bitcast i64* %storage to i82*
- %bigLoadedCst = load volatile i82* @bigCst
+ %bigLoadedCst = load volatile i82, i82* @bigCst
%tmp = add i82 %bigLoadedCst, 1
store i82 %tmp, i82* %addr
ret void
diff --git a/llvm/test/CodeGen/X86/expand-opaque-const.ll b/llvm/test/CodeGen/X86/expand-opaque-const.ll
index 6e461cf8c30..1e39cd88308 100644
--- a/llvm/test/CodeGen/X86/expand-opaque-const.ll
+++ b/llvm/test/CodeGen/X86/expand-opaque-const.ll
@@ -11,11 +11,11 @@ entry:
%op2 = alloca i64
store i64 -6687208052682386272, i64* %op1
store i64 7106745059734980448, i64* %op2
- %tmp1 = load i64* %op1
- %tmp2 = load i64* %op2
+ %tmp1 = load i64, i64* %op1
+ %tmp2 = load i64, i64* %op2
%tmp = xor i64 %tmp2, 7106745059734980448
%tmp3 = lshr i64 %tmp1, %tmp
store i64 %tmp3, i64* %retval
- %tmp4 = load i64* %retval
+ %tmp4 = load i64, i64* %retval
ret i64 %tmp4
}
diff --git a/llvm/test/CodeGen/X86/extend.ll b/llvm/test/CodeGen/X86/extend.ll
index 9553b1b578b..d349e782d5d 100644
--- a/llvm/test/CodeGen/X86/extend.ll
+++ b/llvm/test/CodeGen/X86/extend.ll
@@ -5,13 +5,13 @@
@G2 = internal global i8 0 ; <i8*> [#uses=1]
define i16 @test1() {
- %tmp.0 = load i8* @G1 ; <i8> [#uses=1]
+ %tmp.0 = load i8, i8* @G1 ; <i8> [#uses=1]
%tmp.3 = zext i8 %tmp.0 to i16 ; <i16> [#uses=1]
ret i16 %tmp.3
}
define i16 @test2() {
- %tmp.0 = load i8* @G2 ; <i8> [#uses=1]
+ %tmp.0 = load i8, i8* @G2 ; <i8> [#uses=1]
%tmp.3 = sext i8 %tmp.0 to i16 ; <i16> [#uses=1]
ret i16 %tmp.3
}
diff --git a/llvm/test/CodeGen/X86/extract-extract.ll b/llvm/test/CodeGen/X86/extract-extract.ll
index bf7a38e6407..9f151635620 100644
--- a/llvm/test/CodeGen/X86/extract-extract.ll
+++ b/llvm/test/CodeGen/X86/extract-extract.ll
@@ -12,10 +12,10 @@
define fastcc void @foo(%pp* nocapture byval %p_arg) {
entry:
%tmp2 = getelementptr %pp, %pp* %p_arg, i64 0, i32 0 ; <%cc*> [#uses=
- %tmp3 = load %cc* %tmp2 ; <%cc> [#uses=1]
+ %tmp3 = load %cc, %cc* %tmp2 ; <%cc> [#uses=1]
%tmp34 = extractvalue %cc %tmp3, 0 ; <%crd> [#uses=1]
%tmp345 = extractvalue %crd %tmp34, 0 ; <i64> [#uses=1]
- %.ptr.i = load %cr** undef ; <%cr*> [#uses=0]
+ %.ptr.i = load %cr*, %cr** undef ; <%cr*> [#uses=0]
%tmp15.i = shl i64 %tmp345, 3 ; <i64> [#uses=0]
store %cr* undef, %cr** undef
ret void
diff --git a/llvm/test/CodeGen/X86/extractelement-load.ll b/llvm/test/CodeGen/X86/extractelement-load.ll
index 732f698f59f..e50d353797a 100644
--- a/llvm/test/CodeGen/X86/extractelement-load.ll
+++ b/llvm/test/CodeGen/X86/extractelement-load.ll
@@ -9,7 +9,7 @@ define i32 @t(<2 x i64>* %val) nounwind {
; CHECK-NOT: movd
; CHECK: movl 8(
; CHECK-NEXT: ret
- %tmp2 = load <2 x i64>* %val, align 16 ; <<2 x i64>> [#uses=1]
+ %tmp2 = load <2 x i64>, <2 x i64>* %val, align 16 ; <<2 x i64>> [#uses=1]
%tmp3 = bitcast <2 x i64> %tmp2 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp3, i32 2 ; <i32> [#uses=1]
ret i32 %tmp4
@@ -20,7 +20,7 @@ define i32 @t(<2 x i64>* %val) nounwind {
define i32 @t2(<8 x i32>* %xp) {
; CHECK-LABEL: t2:
; CHECK: ret
- %x = load <8 x i32>* %xp
+ %x = load <8 x i32>, <8 x i32>* %xp
%Shuff68 = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32
undef, i32 7, i32 9, i32 undef, i32 13, i32 15, i32 1, i32 3>
%y = extractelement <8 x i32> %Shuff68, i32 0
@@ -41,7 +41,7 @@ define void @t3() {
; CHECK: movhpd
bb:
- %tmp13 = load <2 x double>* undef, align 1
+ %tmp13 = load <2 x double>, <2 x double>* undef, align 1
%.sroa.3.24.vec.extract = extractelement <2 x double> %tmp13, i32 1
store double %.sroa.3.24.vec.extract, double* undef, align 8
unreachable
@@ -55,7 +55,7 @@ define i64 @t4(<2 x double>* %a) {
; CHECK-LABEL: t4:
; CHECK: mov
; CHECK: ret
- %b = load <2 x double>* %a, align 16
+ %b = load <2 x double>, <2 x double>* %a, align 16
%c = shufflevector <2 x double> %b, <2 x double> %b, <2 x i32> <i32 1, i32 0>
%d = bitcast <2 x double> %c to <2 x i64>
%e = extractelement <2 x i64> %d, i32 1
diff --git a/llvm/test/CodeGen/X86/extractps.ll b/llvm/test/CodeGen/X86/extractps.ll
index 9e1a3754d0f..fecd2faed32 100644
--- a/llvm/test/CodeGen/X86/extractps.ll
+++ b/llvm/test/CodeGen/X86/extractps.ll
@@ -7,7 +7,7 @@
external global float, align 16 ; <float*>:0 [#uses=2]
define internal void @""() nounwind {
- load float* @0, align 16 ; <float>:1 [#uses=1]
+ load float, float* @0, align 16 ; <float>:1 [#uses=1]
insertelement <4 x float> undef, float %1, i32 0 ; <<4 x float>>:2 [#uses=1]
call <4 x float> @llvm.x86.sse.rsqrt.ss( <4 x float> %2 ) ; <<4 x float>>:3 [#uses=1]
extractelement <4 x float> %3, i32 0 ; <float>:4 [#uses=1]
@@ -15,7 +15,7 @@ define internal void @""() nounwind {
ret void
}
define internal void @""() nounwind {
- load float* @0, align 16 ; <float>:1 [#uses=1]
+ load float, float* @0, align 16 ; <float>:1 [#uses=1]
insertelement <4 x float> undef, float %1, i32 1 ; <<4 x float>>:2 [#uses=1]
call <4 x float> @llvm.x86.sse.rsqrt.ss( <4 x float> %2 ) ; <<4 x float>>:3 [#uses=1]
extractelement <4 x float> %3, i32 1 ; <float>:4 [#uses=1]
diff --git a/llvm/test/CodeGen/X86/f16c-intrinsics.ll b/llvm/test/CodeGen/X86/f16c-intrinsics.ll
index 802f91708ac..02967d52048 100644
--- a/llvm/test/CodeGen/X86/f16c-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/f16c-intrinsics.ll
@@ -25,7 +25,7 @@ entry:
; CHECK-LABEL: test_x86_vcvtph2ps_256_m:
; CHECK-NOT: vmov
; CHECK: vcvtph2ps (%
- %tmp1 = load <8 x i16>* %a, align 16
+ %tmp1 = load <8 x i16>, <8 x i16>* %a, align 16
%0 = tail call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %tmp1)
ret <8 x float> %0
}
@@ -54,7 +54,7 @@ define <4 x float> @test_x86_vcvtps2ph_128_scalar(i64* %ptr) {
; CHECK-NOT: vmov
; CHECK: vcvtph2ps (%
- %load = load i64* %ptr
+ %load = load i64, i64* %ptr
%ins1 = insertelement <2 x i64> undef, i64 %load, i32 0
%ins2 = insertelement <2 x i64> %ins1, i64 0, i32 1
%bc = bitcast <2 x i64> %ins2 to <8 x i16>
diff --git a/llvm/test/CodeGen/X86/fast-isel-args-fail.ll b/llvm/test/CodeGen/X86/fast-isel-args-fail.ll
index 7e783d2891d..0026832ed28 100644
--- a/llvm/test/CodeGen/X86/fast-isel-args-fail.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-args-fail.ll
@@ -17,6 +17,6 @@ entry:
; WIN32: movl (%rcx), %eax
; WIN64: foo
; WIN64: movl (%rdi), %eax
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
ret i32 %0
}
diff --git a/llvm/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll b/llvm/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll
index 21fae4a8205..3310e6113f1 100644
--- a/llvm/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll
@@ -15,10 +15,10 @@ entry:
store i32 %x, i32* %x.addr, align 4
store i32 %y, i32* %y.addr, align 4
store i32 %z, i32* %z.addr, align 4
- %tmp = load i32* %x.addr, align 4
- %tmp1 = load i32* %y.addr, align 4
+ %tmp = load i32, i32* %x.addr, align 4
+ %tmp1 = load i32, i32* %y.addr, align 4
%add = add nsw i32 %tmp, %tmp1
- %tmp2 = load i32* %z.addr, align 4
+ %tmp2 = load i32, i32* %z.addr, align 4
%add3 = add nsw i32 %add, %tmp2
ret i32 %add3
}
diff --git a/llvm/test/CodeGen/X86/fast-isel-call-bool.ll b/llvm/test/CodeGen/X86/fast-isel-call-bool.ll
index 5cdb2c94116..aaa8ef4f644 100644
--- a/llvm/test/CodeGen/X86/fast-isel-call-bool.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-call-bool.ll
@@ -8,7 +8,7 @@ declare i64 @bar(i1)
define i64 @foo(i8* %arg) {
; CHECK-LABEL: foo:
top:
- %0 = load i8* %arg
+ %0 = load i8, i8* %arg
; CHECK: movb
%1 = trunc i8 %0 to i1
; CHECK: andb $1,
diff --git a/llvm/test/CodeGen/X86/fast-isel-fold-mem.ll b/llvm/test/CodeGen/X86/fast-isel-fold-mem.ll
index 86bf2f2315a..5686484ef93 100644
--- a/llvm/test/CodeGen/X86/fast-isel-fold-mem.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-fold-mem.ll
@@ -5,7 +5,7 @@ define i64 @fold_load(i64* %a, i64 %b) {
; CHECK-LABEL: fold_load
; CHECK: addq (%rdi), %rsi
; CHECK-NEXT: movq %rsi, %rax
- %1 = load i64* %a, align 8
+ %1 = load i64, i64* %a, align 8
%2 = add i64 %1, %b
ret i64 %2
}
diff --git a/llvm/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll b/llvm/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
index 90a0537155b..e4e9aeaa262 100644
--- a/llvm/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
@@ -47,7 +47,7 @@ define double @single_to_double_rm(float* %x) {
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: ret
entry:
- %0 = load float* %x, align 4
+ %0 = load float, float* %x, align 4
%conv = fpext float %0 to double
ret double %conv
}
@@ -59,7 +59,7 @@ define float @double_to_single_rm(double* %x) {
; AVX-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; ALL-NEXT: ret
entry:
- %0 = load double* %x, align 8
+ %0 = load double, double* %x, align 8
%conv = fptrunc double %0 to float
ret float %conv
}
diff --git a/llvm/test/CodeGen/X86/fast-isel-gep.ll b/llvm/test/CodeGen/X86/fast-isel-gep.ll
index 55de06d3156..67b30292be3 100644
--- a/llvm/test/CodeGen/X86/fast-isel-gep.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-gep.ll
@@ -7,7 +7,7 @@
; PR3181
define i32 @test1(i32 %t3, i32* %t1) nounwind {
%t9 = getelementptr i32, i32* %t1, i32 %t3 ; <i32*> [#uses=1]
- %t15 = load i32* %t9 ; <i32> [#uses=1]
+ %t15 = load i32, i32* %t9 ; <i32> [#uses=1]
ret i32 %t15
; X32-LABEL: test1:
; X32: movl (%eax,%ecx,4), %eax
@@ -21,7 +21,7 @@ define i32 @test1(i32 %t3, i32* %t1) nounwind {
}
define i32 @test2(i64 %t3, i32* %t1) nounwind {
%t9 = getelementptr i32, i32* %t1, i64 %t3 ; <i32*> [#uses=1]
- %t15 = load i32* %t9 ; <i32> [#uses=1]
+ %t15 = load i32, i32* %t9 ; <i32> [#uses=1]
ret i32 %t15
; X32-LABEL: test2:
; X32: movl (%edx,%ecx,4), %e
@@ -38,7 +38,7 @@ define i32 @test2(i64 %t3, i32* %t1) nounwind {
define i8 @test3(i8* %start) nounwind {
entry:
%A = getelementptr i8, i8* %start, i64 -2 ; <i8*> [#uses=1]
- %B = load i8* %A, align 1 ; <i8> [#uses=1]
+ %B = load i8, i8* %A, align 1 ; <i8> [#uses=1]
ret i8 %B
@@ -59,11 +59,11 @@ entry:
%p.addr = alloca double*, align 8 ; <double**> [#uses=2]
store i64 %x, i64* %x.addr
store double* %p, double** %p.addr
- %tmp = load i64* %x.addr ; <i64> [#uses=1]
+ %tmp = load i64, i64* %x.addr ; <i64> [#uses=1]
%add = add nsw i64 %tmp, 16 ; <i64> [#uses=1]
- %tmp1 = load double** %p.addr ; <double*> [#uses=1]
+ %tmp1 = load double*, double** %p.addr ; <double*> [#uses=1]
%arrayidx = getelementptr inbounds double, double* %tmp1, i64 %add ; <double*> [#uses=1]
- %tmp2 = load double* %arrayidx ; <double> [#uses=1]
+ %tmp2 = load double, double* %arrayidx ; <double> [#uses=1]
ret double %tmp2
; X32-LABEL: test4:
@@ -77,7 +77,7 @@ entry:
define i64 @test5(i8* %A, i32 %I, i64 %B) nounwind {
%v8 = getelementptr i8, i8* %A, i32 %I
%v9 = bitcast i8* %v8 to i64*
- %v10 = load i64* %v9
+ %v10 = load i64, i64* %v9
%v11 = add i64 %B, %v10
ret i64 %v11
; X64-LABEL: test5:
@@ -91,7 +91,7 @@ define i64 @test5(i8* %A, i32 %I, i64 %B) nounwind {
; of their blocks.
define void @test6() {
if.end: ; preds = %if.then, %invoke.cont
- %tmp15 = load i64* undef
+ %tmp15 = load i64, i64* undef
%dec = add i64 %tmp15, 13
store i64 %dec, i64* undef
%call17 = invoke i8* @_ZNK18G__FastAllocString4dataEv()
@@ -119,7 +119,7 @@ define i32 @test7({i32,i32,i32}* %tmp1, i32 %tmp71, i32 %tmp63) nounwind {
%tmp29 = getelementptr inbounds {i32,i32,i32}, {i32,i32,i32}* %tmp1, i32 0, i32 2
- %tmp30 = load i32* %tmp29, align 4
+ %tmp30 = load i32, i32* %tmp29, align 4
%p2 = getelementptr inbounds {i32,i32,i32}, {i32,i32,i32}* %tmp1, i32 0, i32 2
store i32 4, i32* %p2
diff --git a/llvm/test/CodeGen/X86/fast-isel-gv.ll b/llvm/test/CodeGen/X86/fast-isel-gv.ll
index de750956890..b3955d6e4f8 100644
--- a/llvm/test/CodeGen/X86/fast-isel-gv.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-gv.ll
@@ -12,15 +12,15 @@ entry:
%retval = alloca i32 ; <i32*> [#uses=2]
%0 = alloca i32 ; <i32*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %1 = load i8 (...)** @f, align 8 ; <i8 (...)*> [#uses=1]
+ %1 = load i8 (...)*, i8 (...)** @f, align 8 ; <i8 (...)*> [#uses=1]
%2 = icmp ne i8 (...)* %1, @kill ; <i1> [#uses=1]
%3 = zext i1 %2 to i32 ; <i32> [#uses=1]
store i32 %3, i32* %0, align 4
- %4 = load i32* %0, align 4 ; <i32> [#uses=1]
+ %4 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
store i32 %4, i32* %retval, align 4
br label %return
return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval1
}
diff --git a/llvm/test/CodeGen/X86/fast-isel-i1.ll b/llvm/test/CodeGen/X86/fast-isel-i1.ll
index d7cb2d49ff4..d72a31ce354 100644
--- a/llvm/test/CodeGen/X86/fast-isel-i1.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-i1.ll
@@ -27,7 +27,7 @@ entry:
; CHECK: movb {{.*}} %al
; CHECK-NEXT: xorb $1, %al
; CHECK-NEXT: testb $1
- %tmp = load i8* %a, align 1
+ %tmp = load i8, i8* %a, align 1
%tobool = trunc i8 %tmp to i1
%tobool2 = xor i1 %tobool, true
br i1 %tobool2, label %if.then, label %if.end
diff --git a/llvm/test/CodeGen/X86/fast-isel-int-float-conversion.ll b/llvm/test/CodeGen/X86/fast-isel-int-float-conversion.ll
index e231e4d1efb..d5fe833a40a 100644
--- a/llvm/test/CodeGen/X86/fast-isel-int-float-conversion.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-int-float-conversion.ll
@@ -18,7 +18,7 @@ define double @int_to_double_rm(i32* %a) {
; AVX: vcvtsi2sdl (%rdi), %xmm0, %xmm0
; ALL-NEXT: ret
entry:
- %0 = load i32* %a
+ %0 = load i32, i32* %a
%1 = sitofp i32 %0 to double
ret double %1
}
@@ -39,7 +39,7 @@ define float @int_to_float_rm(i32* %a) {
; AVX: vcvtsi2ssl (%rdi), %xmm0, %xmm0
; ALL-NEXT: ret
entry:
- %0 = load i32* %a
+ %0 = load i32, i32* %a
%1 = sitofp i32 %0 to float
ret float %1
}
diff --git a/llvm/test/CodeGen/X86/fast-isel-mem.ll b/llvm/test/CodeGen/X86/fast-isel-mem.ll
index eca1ae9f02a..717b5ec083a 100644
--- a/llvm/test/CodeGen/X86/fast-isel-mem.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-mem.ll
@@ -6,8 +6,8 @@
; rdar://6653118
define i32 @loadgv() nounwind {
entry:
- %0 = load i32* @src, align 4
- %1 = load i32* @src, align 4
+ %0 = load i32, i32* @src, align 4
+ %1 = load i32, i32* @src, align 4
%2 = add i32 %0, %1
store i32 %2, i32* @src
ret i32 %2
diff --git a/llvm/test/CodeGen/X86/fast-isel-tailcall.ll b/llvm/test/CodeGen/X86/fast-isel-tailcall.ll
index 79ff79d4bca..88ad05e8e1a 100644
--- a/llvm/test/CodeGen/X86/fast-isel-tailcall.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-tailcall.ll
@@ -7,7 +7,7 @@
define i32 @stub(i8* %t0) nounwind {
entry:
- %t1 = load i32* inttoptr (i32 139708680 to i32*) ; <i32> [#uses=1]
+ %t1 = load i32, i32* inttoptr (i32 139708680 to i32*) ; <i32> [#uses=1]
%t2 = bitcast i8* %t0 to i32 (i32)* ; <i32 (i32)*> [#uses=1]
%t3 = call fastcc i32 %t2(i32 %t1) ; <i32> [#uses=1]
ret i32 %t3
diff --git a/llvm/test/CodeGen/X86/fast-isel-tls.ll b/llvm/test/CodeGen/X86/fast-isel-tls.ll
index 686df43ac50..18bb9c13ff0 100644
--- a/llvm/test/CodeGen/X86/fast-isel-tls.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-tls.ll
@@ -4,7 +4,7 @@
@v = thread_local global i32 0
define i32 @f() nounwind {
entry:
- %t = load i32* @v
+ %t = load i32, i32* @v
%s = add i32 %t, 1
ret i32 %s
}
@@ -16,7 +16,7 @@ entry:
@alias = internal alias i32* @v
define i32 @f_alias() nounwind {
entry:
- %t = load i32* @v
+ %t = load i32, i32* @v
%s = add i32 %t, 1
ret i32 %s
}
diff --git a/llvm/test/CodeGen/X86/fast-isel-x86-64.ll b/llvm/test/CodeGen/X86/fast-isel-x86-64.ll
index 695914bff4a..d4bbb63c49a 100644
--- a/llvm/test/CodeGen/X86/fast-isel-x86-64.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-x86-64.ll
@@ -20,7 +20,7 @@ define void @test2(i64 %x) nounwind ssp {
entry:
%x.addr = alloca i64, align 8
store i64 %x, i64* %x.addr, align 8
- %tmp = load i64* %x.addr, align 8
+ %tmp = load i64, i64* %x.addr, align 8
%cmp = icmp sgt i64 %tmp, 42
br i1 %cmp, label %if.then, label %if.end
@@ -53,7 +53,7 @@ define i64 @test3() nounwind {
define i32 @test4(i64 %idxprom9) nounwind {
%arrayidx10 = getelementptr inbounds [153 x i8], [153 x i8]* @rtx_length, i32 0, i64 %idxprom9
- %tmp11 = load i8* %arrayidx10, align 1
+ %tmp11 = load i8, i8* %arrayidx10, align 1
%conv = zext i8 %tmp11 to i32
ret i32 %conv
@@ -212,7 +212,7 @@ declare void @foo() unnamed_addr ssp align 2
; w.r.t. the call.
define i32 @test17(i32 *%P) ssp nounwind {
entry:
- %tmp = load i32* %P
+ %tmp = load i32, i32* %P
%cmp = icmp ne i32 %tmp, 5
call void @foo()
br i1 %cmp, label %if.then, label %if.else
diff --git a/llvm/test/CodeGen/X86/fast-isel-x86.ll b/llvm/test/CodeGen/X86/fast-isel-x86.ll
index 61e9b98f6e7..8049c72ec01 100644
--- a/llvm/test/CodeGen/X86/fast-isel-x86.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-x86.ll
@@ -6,7 +6,7 @@
; CHECK: retl
@G = external global float
define float @test0() nounwind {
- %t = load float* @G
+ %t = load float, float* @G
ret float %t
}
@@ -28,7 +28,7 @@ define void @test1({i32, i32, i32, i32}* sret %p) nounwind {
; CHECK: retl
@HHH = external global i32
define i32 @test2() nounwind {
- %t = load i32* @HHH
+ %t = load i32, i32* @HHH
ret i32 %t
}
diff --git a/llvm/test/CodeGen/X86/fast-isel.ll b/llvm/test/CodeGen/X86/fast-isel.ll
index 154ef503105..36183e48c29 100644
--- a/llvm/test/CodeGen/X86/fast-isel.ll
+++ b/llvm/test/CodeGen/X86/fast-isel.ll
@@ -5,9 +5,9 @@
define i32* @foo(i32* %p, i32* %q, i32** %z) nounwind {
entry:
- %r = load i32* %p
- %s = load i32* %q
- %y = load i32** %z
+ %r = load i32, i32* %p
+ %s = load i32, i32* %q
+ %y = load i32*, i32** %z
br label %fast
fast:
@@ -29,8 +29,8 @@ exit:
define void @bar(double* %p, double* %q) nounwind {
entry:
- %r = load double* %p
- %s = load double* %q
+ %r = load double, double* %p
+ %s = load double, double* %q
br label %fast
fast:
@@ -94,7 +94,7 @@ define void @mul_i8(i8 %a, i8* %p) nounwind {
}
define void @load_store_i1(i1* %p, i1* %q) nounwind {
- %t = load i1* %p
+ %t = load i1, i1* %p
store i1 %t, i1* %q
ret void
}
@@ -102,7 +102,7 @@ define void @load_store_i1(i1* %p, i1* %q) nounwind {
@crash_test1x = external global <2 x i32>, align 8
define void @crash_test1() nounwind ssp {
- %tmp = load <2 x i32>* @crash_test1x, align 8
+ %tmp = load <2 x i32>, <2 x i32>* @crash_test1x, align 8
%neg = xor <2 x i32> %tmp, <i32 -1, i32 -1>
ret void
}
@@ -113,7 +113,7 @@ define i64* @life() nounwind {
%a1 = alloca i64*, align 8
%a2 = bitcast i64** %a1 to i8*
call void @llvm.lifetime.start(i64 -1, i8* %a2) nounwind
- %a3 = load i64** %a1, align 8
+ %a3 = load i64*, i64** %a1, align 8
ret i64* %a3
}
diff --git a/llvm/test/CodeGen/X86/fastcc-byval.ll b/llvm/test/CodeGen/X86/fastcc-byval.ll
index 32ba6ed8799..1706de46111 100644
--- a/llvm/test/CodeGen/X86/fastcc-byval.ll
+++ b/llvm/test/CodeGen/X86/fastcc-byval.ll
@@ -17,7 +17,7 @@ define fastcc i32 @bar() nounwind {
%a = getelementptr %struct.MVT, %struct.MVT* %V, i32 0, i32 0
store i32 1, i32* %a
call fastcc void @foo(%struct.MVT* byval %V) nounwind
- %t = load i32* %a
+ %t = load i32, i32* %a
ret i32 %t
}
diff --git a/llvm/test/CodeGen/X86/fastcc-sret.ll b/llvm/test/CodeGen/X86/fastcc-sret.ll
index 9f00970fb61..499aadda44f 100644
--- a/llvm/test/CodeGen/X86/fastcc-sret.ll
+++ b/llvm/test/CodeGen/X86/fastcc-sret.ll
@@ -19,7 +19,7 @@ define void @foo() nounwind {
call fastcc void @bar( %struct.foo* sret %memtmp ) nounwind
%tmp4 = getelementptr %struct.foo, %struct.foo* %memtmp, i32 0, i32 0
%tmp5 = getelementptr [4 x i32], [4 x i32]* %tmp4, i32 0, i32 0
- %tmp6 = load i32* %tmp5
+ %tmp6 = load i32, i32* %tmp5
store i32 %tmp6, i32* @dst
ret void
}
diff --git a/llvm/test/CodeGen/X86/fastcc.ll b/llvm/test/CodeGen/X86/fastcc.ll
index a362f8d1ca7..020e7f9d353 100644
--- a/llvm/test/CodeGen/X86/fastcc.ll
+++ b/llvm/test/CodeGen/X86/fastcc.ll
@@ -9,10 +9,10 @@
define i32 @foo() nounwind {
entry:
- %0 = load double* @d, align 8 ; <double> [#uses=1]
- %1 = load double* @c, align 8 ; <double> [#uses=1]
- %2 = load double* @b, align 8 ; <double> [#uses=1]
- %3 = load double* @a, align 8 ; <double> [#uses=1]
+ %0 = load double, double* @d, align 8 ; <double> [#uses=1]
+ %1 = load double, double* @c, align 8 ; <double> [#uses=1]
+ %2 = load double, double* @b, align 8 ; <double> [#uses=1]
+ %3 = load double, double* @a, align 8 ; <double> [#uses=1]
tail call fastcc void @bar( i32 0, i32 1, i32 2, double 1.000000e+00, double %3, double %2, double %1, double %0 ) nounwind
ret i32 0
}
diff --git a/llvm/test/CodeGen/X86/fastisel-gep-promote-before-add.ll b/llvm/test/CodeGen/X86/fastisel-gep-promote-before-add.ll
index 8e78d380f40..1f67299a804 100644
--- a/llvm/test/CodeGen/X86/fastisel-gep-promote-before-add.ll
+++ b/llvm/test/CodeGen/X86/fastisel-gep-promote-before-add.ll
@@ -6,13 +6,13 @@ define zeroext i8 @gep_promotion(i8* %ptr) nounwind uwtable ssp {
entry:
%ptr.addr = alloca i8*, align 8
%add = add i8 64, 64 ; 0x40 + 0x40
- %0 = load i8** %ptr.addr, align 8
+ %0 = load i8*, i8** %ptr.addr, align 8
; CHECK-LABEL: _gep_promotion:
; CHECK: movzbl ({{.*}})
%arrayidx = getelementptr inbounds i8, i8* %0, i8 %add
- %1 = load i8* %arrayidx, align 1
+ %1 = load i8, i8* %arrayidx, align 1
ret i8 %1
}
@@ -22,16 +22,16 @@ entry:
%ptr.addr = alloca i8*, align 8
store i8 %i, i8* %i.addr, align 4
store i8* %ptr, i8** %ptr.addr, align 8
- %0 = load i8* %i.addr, align 4
+ %0 = load i8, i8* %i.addr, align 4
; CHECK-LABEL: _gep_promotion_nonconst:
; CHECK: movzbl ({{.*}})
%xor = xor i8 %0, -128 ; %0 ^ 0x80
%add = add i8 %xor, -127 ; %xor + 0x81
- %1 = load i8** %ptr.addr, align 8
+ %1 = load i8*, i8** %ptr.addr, align 8
%arrayidx = getelementptr inbounds i8, i8* %1, i8 %add
- %2 = load i8* %arrayidx, align 1
+ %2 = load i8, i8* %arrayidx, align 1
ret i8 %2
}
diff --git a/llvm/test/CodeGen/X86/fma-do-not-commute.ll b/llvm/test/CodeGen/X86/fma-do-not-commute.ll
index 4e211721a38..1f6a19cfff8 100644
--- a/llvm/test/CodeGen/X86/fma-do-not-commute.ll
+++ b/llvm/test/CodeGen/X86/fma-do-not-commute.ll
@@ -18,8 +18,8 @@ entry:
loop:
%sum0 = phi float [ %fma, %loop ], [ %arg, %entry ]
- %addrVal = load float* %addr, align 4
- %addr2Val = load float* %addr2, align 4
+ %addrVal = load float, float* %addr, align 4
+ %addr2Val = load float, float* %addr2, align 4
%fmul = fmul float %addrVal, %addr2Val
%fma = fadd float %sum0, %fmul
br i1 true, label %exit, label %loop
diff --git a/llvm/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll b/llvm/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll
index 64a2068aea4..85de1ef5c9d 100644
--- a/llvm/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll
+++ b/llvm/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll
@@ -4,14 +4,14 @@
; VFMADD
define < 4 x float > @test_x86_fma_vfmadd_ss_load(< 4 x float > %a0, < 4 x float > %a1, float* %a2) {
; CHECK: vfmaddss (%{{.*}})
- %x = load float *%a2
+ %x = load float , float *%a2
%y = insertelement <4 x float> undef, float %x, i32 0
%res = call < 4 x float > @llvm.x86.fma.vfmadd.ss(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %y)
ret < 4 x float > %res
}
define < 4 x float > @test_x86_fma_vfmadd_ss_load2(< 4 x float > %a0, float* %a1, < 4 x float > %a2) {
; CHECK: vfmaddss %{{.*}}, (%{{.*}})
- %x = load float *%a1
+ %x = load float , float *%a1
%y = insertelement <4 x float> undef, float %x, i32 0
%res = call < 4 x float > @llvm.x86.fma.vfmadd.ss(< 4 x float > %a0, < 4 x float > %y, < 4 x float > %a2)
ret < 4 x float > %res
@@ -21,14 +21,14 @@ declare < 4 x float > @llvm.x86.fma.vfmadd.ss(< 4 x float >, < 4 x float >, < 4
define < 2 x double > @test_x86_fma_vfmadd_sd_load(< 2 x double > %a0, < 2 x double > %a1, double* %a2) {
; CHECK: vfmaddsd (%{{.*}})
- %x = load double *%a2
+ %x = load double , double *%a2
%y = insertelement <2 x double> undef, double %x, i32 0
%res = call < 2 x double > @llvm.x86.fma.vfmadd.sd(< 2 x double > %a0, < 2 x double > %a1, < 2 x double > %y)
ret < 2 x double > %res
}
define < 2 x double > @test_x86_fma_vfmadd_sd_load2(< 2 x double > %a0, double* %a1, < 2 x double > %a2) {
; CHECK: vfmaddsd %{{.*}}, (%{{.*}})
- %x = load double *%a1
+ %x = load double , double *%a1
%y = insertelement <2 x double> undef, double %x, i32 0
%res = call < 2 x double > @llvm.x86.fma.vfmadd.sd(< 2 x double > %a0, < 2 x double > %y, < 2 x double > %a2)
ret < 2 x double > %res
@@ -36,13 +36,13 @@ define < 2 x double > @test_x86_fma_vfmadd_sd_load2(< 2 x double > %a0, double*
declare < 2 x double > @llvm.x86.fma.vfmadd.sd(< 2 x double >, < 2 x double >, < 2 x double >) nounwind readnone
define < 4 x float > @test_x86_fma_vfmadd_ps_load(< 4 x float > %a0, < 4 x float > %a1, < 4 x float >* %a2) {
; CHECK: vfmaddps (%{{.*}})
- %x = load <4 x float>* %a2
+ %x = load <4 x float>, <4 x float>* %a2
%res = call < 4 x float > @llvm.x86.fma.vfmadd.ps(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %x)
ret < 4 x float > %res
}
define < 4 x float > @test_x86_fma_vfmadd_ps_load2(< 4 x float > %a0, < 4 x float >* %a1, < 4 x float > %a2) {
; CHECK: vfmaddps %{{.*}}, (%{{.*}})
- %x = load <4 x float>* %a1
+ %x = load <4 x float>, <4 x float>* %a1
%res = call < 4 x float > @llvm.x86.fma.vfmadd.ps(< 4 x float > %a0, < 4 x float > %x, < 4 x float > %a2)
ret < 4 x float > %res
}
@@ -52,21 +52,21 @@ declare < 4 x float > @llvm.x86.fma.vfmadd.ps(< 4 x float >, < 4 x float >, < 4
define < 4 x float > @test_x86_fma_vfmadd_ps_load3(< 4 x float >* %a0, < 4 x float >* %a1, < 4 x float > %a2) {
; CHECK: vmovaps
; CHECK: vfmaddps %{{.*}}, (%{{.*}})
- %x = load <4 x float>* %a0
- %y = load <4 x float>* %a1
+ %x = load <4 x float>, <4 x float>* %a0
+ %y = load <4 x float>, <4 x float>* %a1
%res = call < 4 x float > @llvm.x86.fma.vfmadd.ps(< 4 x float > %x, < 4 x float > %y, < 4 x float > %a2)
ret < 4 x float > %res
}
define < 2 x double > @test_x86_fma_vfmadd_pd_load(< 2 x double > %a0, < 2 x double > %a1, < 2 x double >* %a2) {
; CHECK: vfmaddpd (%{{.*}})
- %x = load <2 x double>* %a2
+ %x = load <2 x double>, <2 x double>* %a2
%res = call < 2 x double > @llvm.x86.fma.vfmadd.pd(< 2 x double > %a0, < 2 x double > %a1, < 2 x double > %x)
ret < 2 x double > %res
}
define < 2 x double > @test_x86_fma_vfmadd_pd_load2(< 2 x double > %a0, < 2 x double >* %a1, < 2 x double > %a2) {
; CHECK: vfmaddpd %{{.*}}, (%{{.*}})
- %x = load <2 x double>* %a1
+ %x = load <2 x double>, <2 x double>* %a1
%res = call < 2 x double > @llvm.x86.fma.vfmadd.pd(< 2 x double > %a0, < 2 x double > %x, < 2 x double > %a2)
ret < 2 x double > %res
}
@@ -76,8 +76,8 @@ declare < 2 x double > @llvm.x86.fma.vfmadd.pd(< 2 x double >, < 2 x double >, <
define < 2 x double > @test_x86_fma_vfmadd_pd_load3(< 2 x double >* %a0, < 2 x double >* %a1, < 2 x double > %a2) {
; CHECK: vmovapd
; CHECK: vfmaddpd %{{.*}}, (%{{.*}})
- %x = load <2 x double>* %a0
- %y = load <2 x double>* %a1
+ %x = load <2 x double>, <2 x double>* %a0
+ %y = load <2 x double>, <2 x double>* %a1
%res = call < 2 x double > @llvm.x86.fma.vfmadd.pd(< 2 x double > %x, < 2 x double > %y, < 2 x double > %a2)
ret < 2 x double > %res
}
diff --git a/llvm/test/CodeGen/X86/fma_patterns.ll b/llvm/test/CodeGen/X86/fma_patterns.ll
index 9b52db9f14e..a27b760face 100644
--- a/llvm/test/CodeGen/X86/fma_patterns.ll
+++ b/llvm/test/CodeGen/X86/fma_patterns.ll
@@ -190,7 +190,7 @@ define float @test_x86_fnmsub_ss(float %a0, float %a1, float %a2) {
; CHECK_FMA4: vfmaddps %xmm1, (%rdi), %xmm0, %xmm0
; CHECK_FMA4: ret
define <4 x float> @test_x86_fmadd_ps_load(<4 x float>* %a0, <4 x float> %a1, <4 x float> %a2) {
- %x = load <4 x float>* %a0
+ %x = load <4 x float>, <4 x float>* %a0
%y = fmul <4 x float> %x, %a1
%res = fadd <4 x float> %y, %a2
ret <4 x float> %res
@@ -204,7 +204,7 @@ define <4 x float> @test_x86_fmadd_ps_load(<4 x float>* %a0, <4 x float> %a1, <4
; CHECK_FMA4: vfmsubps %xmm1, (%rdi), %xmm0, %xmm0
; CHECK_FMA4: ret
define <4 x float> @test_x86_fmsub_ps_load(<4 x float>* %a0, <4 x float> %a1, <4 x float> %a2) {
- %x = load <4 x float>* %a0
+ %x = load <4 x float>, <4 x float>* %a0
%y = fmul <4 x float> %x, %a1
%res = fsub <4 x float> %y, %a2
ret <4 x float> %res
diff --git a/llvm/test/CodeGen/X86/fmul-zero.ll b/llvm/test/CodeGen/X86/fmul-zero.ll
index 03bad659412..bc139f88534 100644
--- a/llvm/test/CodeGen/X86/fmul-zero.ll
+++ b/llvm/test/CodeGen/X86/fmul-zero.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=x86-64 | grep mulps
define void @test14(<4 x float>*) nounwind {
- load <4 x float>* %0, align 1
+ load <4 x float>, <4 x float>* %0, align 1
fmul <4 x float> %2, zeroinitializer
store <4 x float> %3, <4 x float>* %0, align 1
ret void
diff --git a/llvm/test/CodeGen/X86/fold-add.ll b/llvm/test/CodeGen/X86/fold-add.ll
index 18c0ec1b5dd..7d274007408 100644
--- a/llvm/test/CodeGen/X86/fold-add.ll
+++ b/llvm/test/CodeGen/X86/fold-add.ll
@@ -13,12 +13,12 @@ define fastcc i32 @longest_match(i32 %cur_match) nounwind {
; CHECK: ret
entry:
- %0 = load i32* @prev_length, align 4 ; <i32> [#uses=3]
+ %0 = load i32, i32* @prev_length, align 4 ; <i32> [#uses=3]
%1 = zext i32 %cur_match to i64 ; <i64> [#uses=1]
%2 = sext i32 %0 to i64 ; <i64> [#uses=1]
%.sum3 = add i64 %1, %2 ; <i64> [#uses=1]
%3 = getelementptr [65536 x i8], [65536 x i8]* @window, i64 0, i64 %.sum3 ; <i8*> [#uses=1]
- %4 = load i8* %3, align 1 ; <i8> [#uses=1]
+ %4 = load i8, i8* %3, align 1 ; <i8> [#uses=1]
%5 = icmp eq i8 %4, 0 ; <i1> [#uses=1]
br i1 %5, label %bb5, label %bb23
diff --git a/llvm/test/CodeGen/X86/fold-and-shift.ll b/llvm/test/CodeGen/X86/fold-and-shift.ll
index 8432a706f82..00173efff69 100644
--- a/llvm/test/CodeGen/X86/fold-and-shift.ll
+++ b/llvm/test/CodeGen/X86/fold-and-shift.ll
@@ -12,7 +12,7 @@ entry:
%tmp4 = and i32 %tmp2, 1020
%tmp7 = getelementptr i8, i8* %X, i32 %tmp4
%tmp78 = bitcast i8* %tmp7 to i32*
- %tmp9 = load i32* %tmp78
+ %tmp9 = load i32, i32* %tmp78
ret i32 %tmp9
}
@@ -28,7 +28,7 @@ entry:
%tmp4 = and i32 %tmp2, 131070
%tmp7 = getelementptr i16, i16* %X, i32 %tmp4
%tmp78 = bitcast i16* %tmp7 to i32*
- %tmp9 = load i32* %tmp78
+ %tmp9 = load i32, i32* %tmp78
ret i32 %tmp9
}
@@ -46,11 +46,11 @@ define i32 @t3(i16* %i.ptr, i32* %arr) {
; CHECK: ret
entry:
- %i = load i16* %i.ptr
+ %i = load i16, i16* %i.ptr
%i.zext = zext i16 %i to i32
%index = lshr i32 %i.zext, 11
%val.ptr = getelementptr inbounds i32, i32* %arr, i32 %index
- %val = load i32* %val.ptr
+ %val = load i32, i32* %val.ptr
%sum = add i32 %val, %i.zext
ret i32 %sum
}
@@ -65,12 +65,12 @@ define i32 @t4(i16* %i.ptr, i32* %arr) {
; CHECK: ret
entry:
- %i = load i16* %i.ptr
+ %i = load i16, i16* %i.ptr
%i.zext = zext i16 %i to i32
%index = lshr i32 %i.zext, 11
%index.zext = zext i32 %index to i64
%val.ptr = getelementptr inbounds i32, i32* %arr, i64 %index.zext
- %val = load i32* %val.ptr
+ %val = load i32, i32* %val.ptr
%sum.1 = add i32 %val, %i.zext
%sum.2 = add i32 %sum.1, %index
ret i32 %sum.2
diff --git a/llvm/test/CodeGen/X86/fold-call-2.ll b/llvm/test/CodeGen/X86/fold-call-2.ll
index 7a2b03833ae..b5e2606410f 100644
--- a/llvm/test/CodeGen/X86/fold-call-2.ll
+++ b/llvm/test/CodeGen/X86/fold-call-2.ll
@@ -4,7 +4,7 @@
define i32 @main() nounwind {
entry:
- load void ()** @f, align 8 ; <void ()*>:0 [#uses=1]
+ load void ()*, void ()** @f, align 8 ; <void ()*>:0 [#uses=1]
tail call void %0( ) nounwind
ret i32 0
}
diff --git a/llvm/test/CodeGen/X86/fold-call-3.ll b/llvm/test/CodeGen/X86/fold-call-3.ll
index 69cd198dedf..e7e81b9422a 100644
--- a/llvm/test/CodeGen/X86/fold-call-3.ll
+++ b/llvm/test/CodeGen/X86/fold-call-3.ll
@@ -10,7 +10,7 @@
define void @_Z25RawPointerPerformanceTestPvRN5clang6ActionE(i8* %Val, %"struct.clang::Action"* %Actions) nounwind {
entry:
%0 = alloca %"struct.clang::ActionBase::ActionResult<0u>", align 8 ; <%"struct.clang::ActionBase::ActionResult<0u>"*> [#uses=3]
- %1 = load i32* @NumTrials, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* @NumTrials, align 4 ; <i32> [#uses=1]
%2 = icmp eq i32 %1, 0 ; <i1> [#uses=1]
br i1 %2, label %return, label %bb.nph
@@ -25,18 +25,18 @@ bb.nph: ; preds = %entry
bb: ; preds = %bb, %bb.nph
%Trial.01 = phi i32 [ 0, %bb.nph ], [ %12, %bb ] ; <i32> [#uses=1]
%Val_addr.02 = phi i8* [ %Val, %bb.nph ], [ %11, %bb ] ; <i8*> [#uses=1]
- %6 = load i32 (...)*** %3, align 8 ; <i32 (...)**> [#uses=1]
+ %6 = load i32 (...)**, i32 (...)*** %3, align 8 ; <i32 (...)**> [#uses=1]
%7 = getelementptr i32 (...)*, i32 (...)** %6, i64 70 ; <i32 (...)**> [#uses=1]
- %8 = load i32 (...)** %7, align 8 ; <i32 (...)*> [#uses=1]
+ %8 = load i32 (...)*, i32 (...)** %7, align 8 ; <i32 (...)*> [#uses=1]
%9 = bitcast i32 (...)* %8 to { i64, i64 } (%"struct.clang::Action"*, i8*)* ; <{ i64, i64 } (%"struct.clang::Action"*, i8*)*> [#uses=1]
%10 = call { i64, i64 } %9(%"struct.clang::Action"* %Actions, i8* %Val_addr.02) nounwind ; <{ i64, i64 }> [#uses=2]
%mrv_gr = extractvalue { i64, i64 } %10, 0 ; <i64> [#uses=1]
store i64 %mrv_gr, i64* %mrv_gep
%mrv_gr2 = extractvalue { i64, i64 } %10, 1 ; <i64> [#uses=1]
store i64 %mrv_gr2, i64* %4
- %11 = load i8** %5, align 8 ; <i8*> [#uses=1]
+ %11 = load i8*, i8** %5, align 8 ; <i8*> [#uses=1]
%12 = add i32 %Trial.01, 1 ; <i32> [#uses=2]
- %13 = load i32* @NumTrials, align 4 ; <i32> [#uses=1]
+ %13 = load i32, i32* @NumTrials, align 4 ; <i32> [#uses=1]
%14 = icmp ult i32 %12, %13 ; <i1> [#uses=1]
br i1 %14, label %bb, label %return
diff --git a/llvm/test/CodeGen/X86/fold-call-oper.ll b/llvm/test/CodeGen/X86/fold-call-oper.ll
index 94383d403ad..1e3e58ddc6c 100644
--- a/llvm/test/CodeGen/X86/fold-call-oper.ll
+++ b/llvm/test/CodeGen/X86/fold-call-oper.ll
@@ -14,7 +14,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; CHECK: callq *{{.*}}(%rbp)
define void @foldCallOper(i32 (i32*, i32, i32**)* nocapture %p1) #0 {
entry:
- %0 = load i32*** @a, align 8
+ %0 = load i32**, i32*** @a, align 8
br label %for.body.i
for.body.i: ; preds = %for.body.i, %entry
@@ -33,14 +33,14 @@ for.body3.i: ; preds = %for.inc8.i, %for.bo
br i1 %tobool.i, label %for.inc8.i, label %if.then.i
if.then.i: ; preds = %for.body3.i
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
store i32 %2, i32* @b, align 4
br label %for.inc8.i
for.inc8.i: ; preds = %if.then.i, %for.body3.i
%lftr.wideiv.i = trunc i64 %indvars.iv.i to i32
%arrayidx4.phi.trans.insert.i = getelementptr inbounds [0 x i32*], [0 x i32*]* undef, i64 0, i64 %indvars.iv.i
- %.pre.i = load i32** %arrayidx4.phi.trans.insert.i, align 8
+ %.pre.i = load i32*, i32** %arrayidx4.phi.trans.insert.i, align 8
%phitmp.i = add i64 %indvars.iv.i, 1
br label %for.body3.i
}
diff --git a/llvm/test/CodeGen/X86/fold-call.ll b/llvm/test/CodeGen/X86/fold-call.ll
index 4bc52438a2a..00839943f67 100644
--- a/llvm/test/CodeGen/X86/fold-call.ll
+++ b/llvm/test/CodeGen/X86/fold-call.ll
@@ -20,7 +20,7 @@ define void @test1(i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, void()*
define void @test2(%struct.X* nocapture %x) {
entry:
%f = getelementptr inbounds %struct.X, %struct.X* %x, i64 0, i32 0
- %0 = load void ()** %f
+ %0 = load void ()*, void ()** %f
store void ()* null, void ()** %f
tail call void %0()
ret void
diff --git a/llvm/test/CodeGen/X86/fold-load-unops.ll b/llvm/test/CodeGen/X86/fold-load-unops.ll
index 0b2e6c78211..b03e80be799 100644
--- a/llvm/test/CodeGen/X86/fold-load-unops.ll
+++ b/llvm/test/CodeGen/X86/fold-load-unops.ll
@@ -9,7 +9,7 @@ define float @rcpss(float* %a) {
; CHECK-LABEL: rcpss:
; CHECK: vrcpss (%rdi), %xmm0, %xmm0
- %ld = load float* %a
+ %ld = load float, float* %a
%ins = insertelement <4 x float> undef, float %ld, i32 0
%res = tail call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %ins)
%ext = extractelement <4 x float> %res, i32 0
@@ -20,7 +20,7 @@ define float @rsqrtss(float* %a) {
; CHECK-LABEL: rsqrtss:
; CHECK: vrsqrtss (%rdi), %xmm0, %xmm0
- %ld = load float* %a
+ %ld = load float, float* %a
%ins = insertelement <4 x float> undef, float %ld, i32 0
%res = tail call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %ins)
%ext = extractelement <4 x float> %res, i32 0
@@ -31,7 +31,7 @@ define float @sqrtss(float* %a) {
; CHECK-LABEL: sqrtss:
; CHECK: vsqrtss (%rdi), %xmm0, %xmm0
- %ld = load float* %a
+ %ld = load float, float* %a
%ins = insertelement <4 x float> undef, float %ld, i32 0
%res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ins)
%ext = extractelement <4 x float> %res, i32 0
@@ -42,7 +42,7 @@ define double @sqrtsd(double* %a) {
; CHECK-LABEL: sqrtsd:
; CHECK: vsqrtsd (%rdi), %xmm0, %xmm0
- %ld = load double* %a
+ %ld = load double, double* %a
%ins = insertelement <2 x double> undef, double %ld, i32 0
%res = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %ins)
%ext = extractelement <2 x double> %res, i32 0
diff --git a/llvm/test/CodeGen/X86/fold-load-vec.ll b/llvm/test/CodeGen/X86/fold-load-vec.ll
index 27149234871..657b7bdd24f 100644
--- a/llvm/test/CodeGen/X86/fold-load-vec.ll
+++ b/llvm/test/CodeGen/X86/fold-load-vec.ll
@@ -14,24 +14,24 @@ entry:
store <4 x float>* %source, <4 x float>** %source.addr, align 8
store <2 x float>* %dest, <2 x float>** %dest.addr, align 8
store <2 x float> zeroinitializer, <2 x float>* %tmp, align 8
- %0 = load <4 x float>** %source.addr, align 8
+ %0 = load <4 x float>*, <4 x float>** %source.addr, align 8
%arrayidx = getelementptr inbounds <4 x float>, <4 x float>* %0, i64 0
- %1 = load <4 x float>* %arrayidx, align 16
+ %1 = load <4 x float>, <4 x float>* %arrayidx, align 16
%2 = extractelement <4 x float> %1, i32 0
- %3 = load <2 x float>* %tmp, align 8
+ %3 = load <2 x float>, <2 x float>* %tmp, align 8
%4 = insertelement <2 x float> %3, float %2, i32 1
store <2 x float> %4, <2 x float>* %tmp, align 8
- %5 = load <2 x float>* %tmp, align 8
- %6 = load <2 x float>** %dest.addr, align 8
+ %5 = load <2 x float>, <2 x float>* %tmp, align 8
+ %6 = load <2 x float>*, <2 x float>** %dest.addr, align 8
%arrayidx1 = getelementptr inbounds <2 x float>, <2 x float>* %6, i64 0
store <2 x float> %5, <2 x float>* %arrayidx1, align 8
- %7 = load <2 x float>** %dest.addr, align 8
+ %7 = load <2 x float>*, <2 x float>** %dest.addr, align 8
%arrayidx2 = getelementptr inbounds <2 x float>, <2 x float>* %7, i64 0
- %8 = load <2 x float>* %arrayidx2, align 8
+ %8 = load <2 x float>, <2 x float>* %arrayidx2, align 8
%vecext = extractelement <2 x float> %8, i32 0
- %9 = load <2 x float>** %dest.addr, align 8
+ %9 = load <2 x float>*, <2 x float>** %dest.addr, align 8
%arrayidx3 = getelementptr inbounds <2 x float>, <2 x float>* %9, i64 0
- %10 = load <2 x float>* %arrayidx3, align 8
+ %10 = load <2 x float>, <2 x float>* %arrayidx3, align 8
%vecext4 = extractelement <2 x float> %10, i32 1
call void @ext(float %vecext, float %vecext4)
ret void
diff --git a/llvm/test/CodeGen/X86/fold-load.ll b/llvm/test/CodeGen/X86/fold-load.ll
index dde0a2d1c5d..49eeb6b41d1 100644
--- a/llvm/test/CodeGen/X86/fold-load.ll
+++ b/llvm/test/CodeGen/X86/fold-load.ll
@@ -10,7 +10,7 @@ entry:
cond_true: ; preds = %entry
%new_size.0.i = select i1 false, i32 0, i32 0 ; <i32> [#uses=1]
- %tmp.i = load i32* bitcast (i8* getelementptr (%struct.obstack* @stmt_obstack, i32 0, i32 10) to i32*) ; <i32> [#uses=1]
+ %tmp.i = load i32, i32* bitcast (i8* getelementptr (%struct.obstack* @stmt_obstack, i32 0, i32 10) to i32*) ; <i32> [#uses=1]
%tmp.i.upgrd.1 = trunc i32 %tmp.i to i8 ; <i8> [#uses=1]
%tmp21.i = and i8 %tmp.i.upgrd.1, 1 ; <i8> [#uses=1]
%tmp22.i = icmp eq i8 %tmp21.i, 0 ; <i1> [#uses=1]
@@ -30,7 +30,7 @@ cond_next: ; preds = %entry
define i32 @test2(i16* %P, i16* %Q) nounwind {
- %A = load i16* %P, align 4 ; <i16> [#uses=11]
+ %A = load i16, i16* %P, align 4 ; <i16> [#uses=11]
%C = zext i16 %A to i32 ; <i32> [#uses=1]
%D = and i32 %C, 255 ; <i32> [#uses=1]
br label %L
@@ -54,8 +54,8 @@ define i1 @test3(i32* %P, i32* %Q) nounwind {
; CHECK: xorl (%e
; CHECK: j
entry:
- %0 = load i32* %P, align 4
- %1 = load i32* %Q, align 4
+ %0 = load i32, i32* %P, align 4
+ %1 = load i32, i32* %Q, align 4
%2 = xor i32 %0, %1
%3 = and i32 %2, 89947
%4 = icmp eq i32 %3, 0
diff --git a/llvm/test/CodeGen/X86/fold-mul-lohi.ll b/llvm/test/CodeGen/X86/fold-mul-lohi.ll
index 18ec0a2ebdd..8d4c5ef9eb2 100644
--- a/llvm/test/CodeGen/X86/fold-mul-lohi.ll
+++ b/llvm/test/CodeGen/X86/fold-mul-lohi.ll
@@ -14,7 +14,7 @@ entry:
bb:
%i.019.0 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ]
%tmp2 = getelementptr [1000 x i8], [1000 x i8]* @B, i32 0, i32 %i.019.0
- %tmp3 = load i8* %tmp2, align 4
+ %tmp3 = load i8, i8* %tmp2, align 4
%tmp4 = mul i8 %tmp3, 2
%tmp5 = getelementptr [1000 x i8], [1000 x i8]* @A, i32 0, i32 %i.019.0
store i8 %tmp4, i8* %tmp5, align 4
diff --git a/llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll b/llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll
index 60a6844b39b..d95c6323de4 100644
--- a/llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll
+++ b/llvm/test/CodeGen/X86/fold-pcmpeqd-2.ll
@@ -20,7 +20,7 @@
define void @program_1(%struct._image2d_t* %dest, %struct._image2d_t* %t0, <4 x float> %p0, <4 x float> %p1, <4 x float> %p4, <4 x float> %p5, <4 x float> %p6) nounwind {
entry:
- %tmp3.i = load i32* null ; <i32> [#uses=1]
+ %tmp3.i = load i32, i32* null ; <i32> [#uses=1]
%cmp = icmp slt i32 0, %tmp3.i ; <i1> [#uses=1]
br i1 %cmp, label %forcond, label %ifthen
@@ -28,7 +28,7 @@ ifthen: ; preds = %entry
ret void
forcond: ; preds = %entry
- %tmp3.i536 = load i32* null ; <i32> [#uses=1]
+ %tmp3.i536 = load i32, i32* null ; <i32> [#uses=1]
%cmp12 = icmp slt i32 0, %tmp3.i536 ; <i1> [#uses=1]
br i1 %cmp12, label %forbody, label %afterfor
diff --git a/llvm/test/CodeGen/X86/fold-sext-trunc.ll b/llvm/test/CodeGen/X86/fold-sext-trunc.ll
index b453310608e..ab28888a108 100644
--- a/llvm/test/CodeGen/X86/fold-sext-trunc.ll
+++ b/llvm/test/CodeGen/X86/fold-sext-trunc.ll
@@ -9,8 +9,8 @@ declare void @func_28(i64, i64)
define void @int322(i32 %foo) nounwind {
entry:
- %val = load i64* getelementptr (%0* bitcast (%struct.S1* @g_10 to %0*), i32 0, i32 0) ; <i64> [#uses=1]
- %0 = load i32* getelementptr (%struct.S1* @g_10, i32 0, i32 1), align 4 ; <i32> [#uses=1]
+ %val = load i64, i64* getelementptr (%0* bitcast (%struct.S1* @g_10 to %0*), i32 0, i32 0) ; <i64> [#uses=1]
+ %0 = load i32, i32* getelementptr (%struct.S1* @g_10, i32 0, i32 1), align 4 ; <i32> [#uses=1]
%1 = sext i32 %0 to i64 ; <i64> [#uses=1]
%tmp4.i = lshr i64 %val, 32 ; <i64> [#uses=1]
%tmp5.i = trunc i64 %tmp4.i to i32 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/fold-tied-op.ll b/llvm/test/CodeGen/X86/fold-tied-op.ll
index 1695d121a7b..62fed421938 100644
--- a/llvm/test/CodeGen/X86/fold-tied-op.ll
+++ b/llvm/test/CodeGen/X86/fold-tied-op.ll
@@ -23,20 +23,20 @@ target triple = "i386--netbsd"
; Function Attrs: nounwind uwtable
define i64 @fn1() #0 {
entry:
- %0 = load i32* @a, align 4, !tbaa !1
+ %0 = load i32, i32* @a, align 4, !tbaa !1
%1 = inttoptr i32 %0 to %struct.XXH_state64_t*
%total_len = getelementptr inbounds %struct.XXH_state64_t, %struct.XXH_state64_t* %1, i32 0, i32 0
- %2 = load i32* %total_len, align 4, !tbaa !5
+ %2 = load i32, i32* %total_len, align 4, !tbaa !5
%tobool = icmp eq i32 %2, 0
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
%v3 = getelementptr inbounds %struct.XXH_state64_t, %struct.XXH_state64_t* %1, i32 0, i32 3
- %3 = load i64* %v3, align 4, !tbaa !8
+ %3 = load i64, i64* %v3, align 4, !tbaa !8
%v4 = getelementptr inbounds %struct.XXH_state64_t, %struct.XXH_state64_t* %1, i32 0, i32 4
- %4 = load i64* %v4, align 4, !tbaa !9
+ %4 = load i64, i64* %v4, align 4, !tbaa !9
%v2 = getelementptr inbounds %struct.XXH_state64_t, %struct.XXH_state64_t* %1, i32 0, i32 2
- %5 = load i64* %v2, align 4, !tbaa !10
+ %5 = load i64, i64* %v2, align 4, !tbaa !10
%shl = shl i64 %5, 1
%or = or i64 %shl, %5
%shl2 = shl i64 %3, 2
@@ -54,7 +54,7 @@ if.then: ; preds = %entry
br label %if.end
if.else: ; preds = %entry
- %6 = load i64* @b, align 8, !tbaa !11
+ %6 = load i64, i64* @b, align 8, !tbaa !11
%xor10 = xor i64 %6, -4417276706812531889
%mul11 = mul nsw i64 %xor10, 400714785074694791
br label %if.end
diff --git a/llvm/test/CodeGen/X86/fold-vex.ll b/llvm/test/CodeGen/X86/fold-vex.ll
index 5a8b1d8cbfd..006db6effdf 100644
--- a/llvm/test/CodeGen/X86/fold-vex.ll
+++ b/llvm/test/CodeGen/X86/fold-vex.ll
@@ -14,7 +14,7 @@
; unless specially configured on some CPUs such as AMD Family 10H.
define <4 x i32> @test1(<4 x i32>* %p0, <4 x i32> %in1) nounwind {
- %in0 = load <4 x i32>* %p0, align 2
+ %in0 = load <4 x i32>, <4 x i32>* %p0, align 2
%a = and <4 x i32> %in0, %in1
ret <4 x i32> %a
diff --git a/llvm/test/CodeGen/X86/fold-zext-trunc.ll b/llvm/test/CodeGen/X86/fold-zext-trunc.ll
index f901ad280b5..1e4944a7f31 100644
--- a/llvm/test/CodeGen/X86/fold-zext-trunc.ll
+++ b/llvm/test/CodeGen/X86/fold-zext-trunc.ll
@@ -12,9 +12,9 @@ define void @foo() nounwind {
; CHECK-NOT: movzbl
; CHECK: calll
entry:
- %tmp17 = load i8* getelementptr inbounds (%struct.S0* @g_98, i32 0, i32 1, i32 0), align 4
+ %tmp17 = load i8, i8* getelementptr inbounds (%struct.S0* @g_98, i32 0, i32 1, i32 0), align 4
%tmp54 = zext i8 %tmp17 to i32
- %foo = load i32* bitcast (i8* getelementptr inbounds (%struct.S0* @g_98, i32 0, i32 1, i32 0) to i32*), align 4
+ %foo = load i32, i32* bitcast (i8* getelementptr inbounds (%struct.S0* @g_98, i32 0, i32 1, i32 0) to i32*), align 4
%conv.i = trunc i32 %foo to i8
tail call void @func_12(i32 %tmp54, i8 zeroext %conv.i) nounwind
ret void
diff --git a/llvm/test/CodeGen/X86/force-align-stack-alloca.ll b/llvm/test/CodeGen/X86/force-align-stack-alloca.ll
index bd980694392..a9ba20f45e8 100644
--- a/llvm/test/CodeGen/X86/force-align-stack-alloca.ll
+++ b/llvm/test/CodeGen/X86/force-align-stack-alloca.ll
@@ -10,7 +10,7 @@ target triple = "i386-unknown-linux-gnu"
define i32 @f(i8* %p) nounwind {
entry:
- %0 = load i8* %p
+ %0 = load i8, i8* %p
%conv = sext i8 %0 to i32
ret i32 %conv
}
diff --git a/llvm/test/CodeGen/X86/fp-double-rounding.ll b/llvm/test/CodeGen/X86/fp-double-rounding.ll
index 030cb9a3c01..c7578acbec2 100644
--- a/llvm/test/CodeGen/X86/fp-double-rounding.ll
+++ b/llvm/test/CodeGen/X86/fp-double-rounding.ll
@@ -11,7 +11,7 @@ target triple = "x86_64--"
; UNSAFE-NOT: cvt
define void @double_rounding(fp128* %x, float* %f) {
entry:
- %0 = load fp128* %x, align 16
+ %0 = load fp128, fp128* %x, align 16
%1 = fptrunc fp128 %0 to double
%2 = fptrunc double %1 to float
store float %2, float* %f, align 4
diff --git a/llvm/test/CodeGen/X86/fp-load-trunc.ll b/llvm/test/CodeGen/X86/fp-load-trunc.ll
index e6c1e1adb59..3896913857d 100644
--- a/llvm/test/CodeGen/X86/fp-load-trunc.ll
+++ b/llvm/test/CodeGen/X86/fp-load-trunc.ll
@@ -23,7 +23,7 @@ define <1 x float> @test1(<1 x double>* %p) nounwind {
; AVX-NEXT: flds (%esp)
; AVX-NEXT: popl %eax
; AVX-NEXT: retl
- %x = load <1 x double>* %p
+ %x = load <1 x double>, <1 x double>* %p
%y = fptrunc <1 x double> %x to <1 x float>
ret <1 x float> %y
}
@@ -40,7 +40,7 @@ define <2 x float> @test2(<2 x double>* %p) nounwind {
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: vcvtpd2psx (%eax), %xmm0
; AVX-NEXT: retl
- %x = load <2 x double>* %p
+ %x = load <2 x double>, <2 x double>* %p
%y = fptrunc <2 x double> %x to <2 x float>
ret <2 x float> %y
}
@@ -59,7 +59,7 @@ define <4 x float> @test3(<4 x double>* %p) nounwind {
; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-NEXT: vcvtpd2psy (%eax), %xmm0
; AVX-NEXT: retl
- %x = load <4 x double>* %p
+ %x = load <4 x double>, <4 x double>* %p
%y = fptrunc <4 x double> %x to <4 x float>
ret <4 x float> %y
}
@@ -83,7 +83,7 @@ define <8 x float> @test4(<8 x double>* %p) nounwind {
; AVX-NEXT: vcvtpd2psy 32(%eax), %xmm1
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retl
- %x = load <8 x double>* %p
+ %x = load <8 x double>, <8 x double>* %p
%y = fptrunc <8 x double> %x to <8 x float>
ret <8 x float> %y
}
diff --git a/llvm/test/CodeGen/X86/fp-stack-O0-crash.ll b/llvm/test/CodeGen/X86/fp-stack-O0-crash.ll
index ae83a02c674..5acfd5d3b61 100644
--- a/llvm/test/CodeGen/X86/fp-stack-O0-crash.ll
+++ b/llvm/test/CodeGen/X86/fp-stack-O0-crash.ll
@@ -11,14 +11,14 @@ entry:
br i1 false, label %cond.true, label %cond.false
cond.true: ; preds = %entry
- %tmp = load x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
- %tmp1 = load x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
+ %tmp = load x86_fp80, x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
+ %tmp1 = load x86_fp80, x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
%cmp = fcmp oeq x86_fp80 %tmp, %tmp1 ; <i1> [#uses=1]
br i1 %cmp, label %if.then, label %if.end
cond.false: ; preds = %entry
- %tmp2 = load x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
- %tmp3 = load x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
+ %tmp2 = load x86_fp80, x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
+ %tmp3 = load x86_fp80, x86_fp80* %x.addr ; <x86_fp80> [#uses=1]
%cmp4 = fcmp une x86_fp80 %tmp2, %tmp3 ; <i1> [#uses=1]
br i1 %cmp4, label %if.then, label %if.end
diff --git a/llvm/test/CodeGen/X86/fp-stack-compare-cmov.ll b/llvm/test/CodeGen/X86/fp-stack-compare-cmov.ll
index b457fbc1a33..1d3548816b7 100644
--- a/llvm/test/CodeGen/X86/fp-stack-compare-cmov.ll
+++ b/llvm/test/CodeGen/X86/fp-stack-compare-cmov.ll
@@ -4,7 +4,7 @@
define float @foo(float* %col.2.0) {
; CHECK: fucompi
; CHECK: fcmov
- %tmp = load float* %col.2.0
+ %tmp = load float, float* %col.2.0
%tmp16 = fcmp olt float %tmp, 0.000000e+00
%tmp20 = fsub float -0.000000e+00, %tmp
%iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
diff --git a/llvm/test/CodeGen/X86/fp-stack-compare.ll b/llvm/test/CodeGen/X86/fp-stack-compare.ll
index a8557adeaf7..96088d75923 100644
--- a/llvm/test/CodeGen/X86/fp-stack-compare.ll
+++ b/llvm/test/CodeGen/X86/fp-stack-compare.ll
@@ -6,7 +6,7 @@ define float @foo(float* %col.2.0) {
; CHECK-NOT: fucompi
; CHECK: j
; CHECK-NOT: fcmov
- %tmp = load float* %col.2.0
+ %tmp = load float, float* %col.2.0
%tmp16 = fcmp olt float %tmp, 0.000000e+00
%tmp20 = fsub float -0.000000e+00, %tmp
%iftmp.2.0 = select i1 %tmp16, float %tmp20, float %tmp
diff --git a/llvm/test/CodeGen/X86/fp-stack-ret.ll b/llvm/test/CodeGen/X86/fp-stack-ret.ll
index 2733117a1f0..9635e2d2511 100644
--- a/llvm/test/CodeGen/X86/fp-stack-ret.ll
+++ b/llvm/test/CodeGen/X86/fp-stack-ret.ll
@@ -7,7 +7,7 @@
; CHECK: fldl
; CHECK-NEXT: ret
define double @test1(double *%P) {
- %A = load double* %P
+ %A = load double, double* %P
ret double %A
}
diff --git a/llvm/test/CodeGen/X86/fp-stack.ll b/llvm/test/CodeGen/X86/fp-stack.ll
index dca644de667..44c03963388 100644
--- a/llvm/test/CodeGen/X86/fp-stack.ll
+++ b/llvm/test/CodeGen/X86/fp-stack.ll
@@ -5,9 +5,9 @@ target triple = "i386-pc-linux-gnu"
define void @foo() nounwind {
entry:
- %tmp6 = load x86_fp80* undef ; <x86_fp80> [#uses=2]
- %tmp15 = load x86_fp80* undef ; <x86_fp80> [#uses=2]
- %tmp24 = load x86_fp80* undef ; <x86_fp80> [#uses=1]
+ %tmp6 = load x86_fp80, x86_fp80* undef ; <x86_fp80> [#uses=2]
+ %tmp15 = load x86_fp80, x86_fp80* undef ; <x86_fp80> [#uses=2]
+ %tmp24 = load x86_fp80, x86_fp80* undef ; <x86_fp80> [#uses=1]
br i1 undef, label %return, label %bb.nph
bb.nph: ; preds = %entry
diff --git a/llvm/test/CodeGen/X86/fp2sint.ll b/llvm/test/CodeGen/X86/fp2sint.ll
index 16754448871..b41f56f9f41 100644
--- a/llvm/test/CodeGen/X86/fp2sint.ll
+++ b/llvm/test/CodeGen/X86/fp2sint.ll
@@ -4,10 +4,10 @@
define i32 @main(i32 %argc, i8** %argv) {
cond_false.i.i.i: ; preds = %bb.i5
- %tmp35.i = load double* null, align 8 ; <double> [#uses=1]
+ %tmp35.i = load double, double* null, align 8 ; <double> [#uses=1]
%tmp3536.i = fptosi double %tmp35.i to i32 ; <i32> [#uses=1]
%tmp3536140.i = zext i32 %tmp3536.i to i64 ; <i64> [#uses=1]
- %tmp39.i = load double* null, align 4 ; <double> [#uses=1]
+ %tmp39.i = load double, double* null, align 4 ; <double> [#uses=1]
%tmp3940.i = fptosi double %tmp39.i to i32 ; <i32> [#uses=1]
%tmp3940137.i = zext i32 %tmp3940.i to i64 ; <i64> [#uses=1]
%tmp3940137138.i = shl i64 %tmp3940137.i, 32 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/fp_load_cast_fold.ll b/llvm/test/CodeGen/X86/fp_load_cast_fold.ll
index 72ea12f9430..5fd22e3fa6e 100644
--- a/llvm/test/CodeGen/X86/fp_load_cast_fold.ll
+++ b/llvm/test/CodeGen/X86/fp_load_cast_fold.ll
@@ -1,19 +1,19 @@
; RUN: llc < %s -march=x86 | FileCheck %s
define double @short(i16* %P) {
- %V = load i16* %P ; <i16> [#uses=1]
+ %V = load i16, i16* %P ; <i16> [#uses=1]
%V2 = sitofp i16 %V to double ; <double> [#uses=1]
ret double %V2
}
define double @int(i32* %P) {
- %V = load i32* %P ; <i32> [#uses=1]
+ %V = load i32, i32* %P ; <i32> [#uses=1]
%V2 = sitofp i32 %V to double ; <double> [#uses=1]
ret double %V2
}
define double @long(i64* %P) {
- %V = load i64* %P ; <i64> [#uses=1]
+ %V = load i64, i64* %P ; <i64> [#uses=1]
%V2 = sitofp i64 %V to double ; <double> [#uses=1]
ret double %V2
}
diff --git a/llvm/test/CodeGen/X86/fp_load_fold.ll b/llvm/test/CodeGen/X86/fp_load_fold.ll
index a2cea5e57f6..57497454792 100644
--- a/llvm/test/CodeGen/X86/fp_load_fold.ll
+++ b/llvm/test/CodeGen/X86/fp_load_fold.ll
@@ -4,37 +4,37 @@
; Test that the load of the memory location is folded into the operation.
define double @test_add(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
+ %Y = load double, double* %P ; <double> [#uses=1]
%R = fadd double %X, %Y ; <double> [#uses=1]
ret double %R
}
define double @test_mul(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
+ %Y = load double, double* %P ; <double> [#uses=1]
%R = fmul double %X, %Y ; <double> [#uses=1]
ret double %R
}
define double @test_sub(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
+ %Y = load double, double* %P ; <double> [#uses=1]
%R = fsub double %X, %Y ; <double> [#uses=1]
ret double %R
}
define double @test_subr(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
+ %Y = load double, double* %P ; <double> [#uses=1]
%R = fsub double %Y, %X ; <double> [#uses=1]
ret double %R
}
define double @test_div(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
+ %Y = load double, double* %P ; <double> [#uses=1]
%R = fdiv double %X, %Y ; <double> [#uses=1]
ret double %R
}
define double @test_divr(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
+ %Y = load double, double* %P ; <double> [#uses=1]
%R = fdiv double %Y, %X ; <double> [#uses=1]
ret double %R
}
diff --git a/llvm/test/CodeGen/X86/frameallocate.ll b/llvm/test/CodeGen/X86/frameallocate.ll
index 7a2f9e3eb74..2172ac02420 100644
--- a/llvm/test/CodeGen/X86/frameallocate.ll
+++ b/llvm/test/CodeGen/X86/frameallocate.ll
@@ -10,7 +10,7 @@ declare i32 @printf(i8*, ...)
define void @print_framealloc_from_fp(i8* %fp) {
%alloc = call i8* @llvm.framerecover(i8* bitcast (void(i32*, i32*)* @alloc_func to i8*), i8* %fp)
%alloc_i32 = bitcast i8* %alloc to i32*
- %r = load i32* %alloc_i32
+ %r = load i32, i32* %alloc_i32
call i32 (i8*, ...)* @printf(i8* getelementptr ([10 x i8]* @str, i32 0, i32 0), i32 %r)
ret void
}
diff --git a/llvm/test/CodeGen/X86/full-lsr.ll b/llvm/test/CodeGen/X86/full-lsr.ll
index 03eadc09539..85b2b41fa19 100644
--- a/llvm/test/CodeGen/X86/full-lsr.ll
+++ b/llvm/test/CodeGen/X86/full-lsr.ll
@@ -19,17 +19,17 @@ entry:
bb: ; preds = %bb, %entry
%i.03 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=5]
%1 = getelementptr float, float* %A, i32 %i.03 ; <float*> [#uses=1]
- %2 = load float* %1, align 4 ; <float> [#uses=1]
+ %2 = load float, float* %1, align 4 ; <float> [#uses=1]
%3 = getelementptr float, float* %B, i32 %i.03 ; <float*> [#uses=1]
- %4 = load float* %3, align 4 ; <float> [#uses=1]
+ %4 = load float, float* %3, align 4 ; <float> [#uses=1]
%5 = fadd float %2, %4 ; <float> [#uses=1]
%6 = getelementptr float, float* %C, i32 %i.03 ; <float*> [#uses=1]
store float %5, float* %6, align 4
%7 = add i32 %i.03, 10 ; <i32> [#uses=3]
%8 = getelementptr float, float* %A, i32 %7 ; <float*> [#uses=1]
- %9 = load float* %8, align 4 ; <float> [#uses=1]
+ %9 = load float, float* %8, align 4 ; <float> [#uses=1]
%10 = getelementptr float, float* %B, i32 %7 ; <float*> [#uses=1]
- %11 = load float* %10, align 4 ; <float> [#uses=1]
+ %11 = load float, float* %10, align 4 ; <float> [#uses=1]
%12 = fadd float %9, %11 ; <float> [#uses=1]
%13 = getelementptr float, float* %C, i32 %7 ; <float*> [#uses=1]
store float %12, float* %13, align 4
diff --git a/llvm/test/CodeGen/X86/gather-addresses.ll b/llvm/test/CodeGen/X86/gather-addresses.ll
index c30896023f3..f7d4eb380d5 100644
--- a/llvm/test/CodeGen/X86/gather-addresses.ll
+++ b/llvm/test/CodeGen/X86/gather-addresses.ll
@@ -35,8 +35,8 @@
; WIN: movhpd (%rcx,%r[[REG4]],8), %xmm1
define <4 x double> @foo(double* %p, <4 x i32>* %i, <4 x i32>* %h) nounwind {
- %a = load <4 x i32>* %i
- %b = load <4 x i32>* %h
+ %a = load <4 x i32>, <4 x i32>* %i
+ %b = load <4 x i32>, <4 x i32>* %h
%j = and <4 x i32> %a, %b
%d0 = extractelement <4 x i32> %j, i32 0
%d1 = extractelement <4 x i32> %j, i32 1
@@ -46,10 +46,10 @@ define <4 x double> @foo(double* %p, <4 x i32>* %i, <4 x i32>* %h) nounwind {
%q1 = getelementptr double, double* %p, i32 %d1
%q2 = getelementptr double, double* %p, i32 %d2
%q3 = getelementptr double, double* %p, i32 %d3
- %r0 = load double* %q0
- %r1 = load double* %q1
- %r2 = load double* %q2
- %r3 = load double* %q3
+ %r0 = load double, double* %q0
+ %r1 = load double, double* %q1
+ %r2 = load double, double* %q2
+ %r3 = load double, double* %q3
%v0 = insertelement <4 x double> undef, double %r0, i32 0
%v1 = insertelement <4 x double> %v0, double %r1, i32 1
%v2 = insertelement <4 x double> %v1, double %r2, i32 2
@@ -67,8 +67,8 @@ define <4 x double> @foo(double* %p, <4 x i32>* %i, <4 x i32>* %h) nounwind {
; LIN32-DAG: {{(mov|and)}}l 8(%esp),
; LIN32-DAG: {{(mov|and)}}l 12(%esp),
define <4 x i64> @old(double* %p, <4 x i32>* %i, <4 x i32>* %h, i64 %f) nounwind {
- %a = load <4 x i32>* %i
- %b = load <4 x i32>* %h
+ %a = load <4 x i32>, <4 x i32>* %i
+ %b = load <4 x i32>, <4 x i32>* %h
%j = and <4 x i32> %a, %b
%d0 = extractelement <4 x i32> %j, i32 0
%d1 = extractelement <4 x i32> %j, i32 1
diff --git a/llvm/test/CodeGen/X86/ghc-cc.ll b/llvm/test/CodeGen/X86/ghc-cc.ll
index 3ada8c8ce98..16e4db60502 100644
--- a/llvm/test/CodeGen/X86/ghc-cc.ll
+++ b/llvm/test/CodeGen/X86/ghc-cc.ll
@@ -32,10 +32,10 @@ entry:
; CHECK-NEXT: movl hp, %edi
; CHECK-NEXT: movl sp, %ebp
; CHECK-NEXT: movl base, %ebx
- %0 = load i32* @r1
- %1 = load i32* @hp
- %2 = load i32* @sp
- %3 = load i32* @base
+ %0 = load i32, i32* @r1
+ %1 = load i32, i32* @hp
+ %2 = load i32, i32* @sp
+ %3 = load i32, i32* @base
; CHECK: jmp bar
tail call ghccc void @bar( i32 %3, i32 %2, i32 %1, i32 %0 ) nounwind
ret void
diff --git a/llvm/test/CodeGen/X86/ghc-cc64.ll b/llvm/test/CodeGen/X86/ghc-cc64.ll
index 7251dd673b3..c4ce8cfdef1 100644
--- a/llvm/test/CodeGen/X86/ghc-cc64.ll
+++ b/llvm/test/CodeGen/X86/ghc-cc64.ll
@@ -57,22 +57,22 @@ entry:
; CHECK-NEXT: movq hp(%rip), %r12
; CHECK-NEXT: movq sp(%rip), %rbp
; CHECK-NEXT: movq base(%rip), %r13
- %0 = load double* @d2
- %1 = load double* @d1
- %2 = load float* @f4
- %3 = load float* @f3
- %4 = load float* @f2
- %5 = load float* @f1
- %6 = load i64* @splim
- %7 = load i64* @r6
- %8 = load i64* @r5
- %9 = load i64* @r4
- %10 = load i64* @r3
- %11 = load i64* @r2
- %12 = load i64* @r1
- %13 = load i64* @hp
- %14 = load i64* @sp
- %15 = load i64* @base
+ %0 = load double, double* @d2
+ %1 = load double, double* @d1
+ %2 = load float, float* @f4
+ %3 = load float, float* @f3
+ %4 = load float, float* @f2
+ %5 = load float, float* @f1
+ %6 = load i64, i64* @splim
+ %7 = load i64, i64* @r6
+ %8 = load i64, i64* @r5
+ %9 = load i64, i64* @r4
+ %10 = load i64, i64* @r3
+ %11 = load i64, i64* @r2
+ %12 = load i64, i64* @r1
+ %13 = load i64, i64* @hp
+ %14 = load i64, i64* @sp
+ %15 = load i64, i64* @base
; CHECK: jmp bar
tail call ghccc void @bar( i64 %15, i64 %14, i64 %13, i64 %12, i64 %11,
i64 %10, i64 %9, i64 %8, i64 %7, i64 %6,
diff --git a/llvm/test/CodeGen/X86/gs-fold.ll b/llvm/test/CodeGen/X86/gs-fold.ll
index 6470d2b2581..bbdd0339f36 100644
--- a/llvm/test/CodeGen/X86/gs-fold.ll
+++ b/llvm/test/CodeGen/X86/gs-fold.ll
@@ -6,9 +6,9 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
define i32 @test() nounwind uwtable {
entry:
- %0 = load volatile %struct.thread* addrspace(256)* null
+ %0 = load volatile %struct.thread*, %struct.thread* addrspace(256)* null
%c = getelementptr inbounds %struct.thread, %struct.thread* %0, i64 0, i32 2
- %1 = load i32* %c, align 4
+ %1 = load i32, i32* %c, align 4
ret i32 %1
}
diff --git a/llvm/test/CodeGen/X86/h-register-addressing-32.ll b/llvm/test/CodeGen/X86/h-register-addressing-32.ll
index f79b7f2f52d..d0214137b0e 100644
--- a/llvm/test/CodeGen/X86/h-register-addressing-32.ll
+++ b/llvm/test/CodeGen/X86/h-register-addressing-32.ll
@@ -6,7 +6,7 @@ define double @foo8(double* nocapture inreg %p, i32 inreg %x) nounwind readonly
%t0 = lshr i32 %x, 8
%t1 = and i32 %t0, 255
%t2 = getelementptr double, double* %p, i32 %t1
- %t3 = load double* %t2, align 8
+ %t3 = load double, double* %t2, align 8
ret double %t3
}
; CHECK: foo8:
@@ -16,7 +16,7 @@ define float @foo4(float* nocapture inreg %p, i32 inreg %x) nounwind readonly {
%t0 = lshr i32 %x, 8
%t1 = and i32 %t0, 255
%t2 = getelementptr float, float* %p, i32 %t1
- %t3 = load float* %t2, align 8
+ %t3 = load float, float* %t2, align 8
ret float %t3
}
; CHECK: foo4:
@@ -26,7 +26,7 @@ define i16 @foo2(i16* nocapture inreg %p, i32 inreg %x) nounwind readonly {
%t0 = lshr i32 %x, 8
%t1 = and i32 %t0, 255
%t2 = getelementptr i16, i16* %p, i32 %t1
- %t3 = load i16* %t2, align 8
+ %t3 = load i16, i16* %t2, align 8
ret i16 %t3
}
; CHECK: foo2:
@@ -36,7 +36,7 @@ define i8 @foo1(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
%t0 = lshr i32 %x, 8
%t1 = and i32 %t0, 255
%t2 = getelementptr i8, i8* %p, i32 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: foo1:
@@ -46,7 +46,7 @@ define i8 @bar8(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
%t0 = lshr i32 %x, 5
%t1 = and i32 %t0, 2040
%t2 = getelementptr i8, i8* %p, i32 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: bar8:
@@ -56,7 +56,7 @@ define i8 @bar4(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
%t0 = lshr i32 %x, 6
%t1 = and i32 %t0, 1020
%t2 = getelementptr i8, i8* %p, i32 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: bar4:
@@ -66,7 +66,7 @@ define i8 @bar2(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
%t0 = lshr i32 %x, 7
%t1 = and i32 %t0, 510
%t2 = getelementptr i8, i8* %p, i32 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: bar2:
diff --git a/llvm/test/CodeGen/X86/h-register-addressing-64.ll b/llvm/test/CodeGen/X86/h-register-addressing-64.ll
index c9bd097c943..b3159f4896a 100644
--- a/llvm/test/CodeGen/X86/h-register-addressing-64.ll
+++ b/llvm/test/CodeGen/X86/h-register-addressing-64.ll
@@ -6,7 +6,7 @@ define double @foo8(double* nocapture inreg %p, i64 inreg %x) nounwind readonly
%t0 = lshr i64 %x, 8
%t1 = and i64 %t0, 255
%t2 = getelementptr double, double* %p, i64 %t1
- %t3 = load double* %t2, align 8
+ %t3 = load double, double* %t2, align 8
ret double %t3
}
; CHECK: foo8:
@@ -16,7 +16,7 @@ define float @foo4(float* nocapture inreg %p, i64 inreg %x) nounwind readonly {
%t0 = lshr i64 %x, 8
%t1 = and i64 %t0, 255
%t2 = getelementptr float, float* %p, i64 %t1
- %t3 = load float* %t2, align 8
+ %t3 = load float, float* %t2, align 8
ret float %t3
}
; CHECK: foo4:
@@ -26,7 +26,7 @@ define i16 @foo2(i16* nocapture inreg %p, i64 inreg %x) nounwind readonly {
%t0 = lshr i64 %x, 8
%t1 = and i64 %t0, 255
%t2 = getelementptr i16, i16* %p, i64 %t1
- %t3 = load i16* %t2, align 8
+ %t3 = load i16, i16* %t2, align 8
ret i16 %t3
}
; CHECK: foo2:
@@ -36,7 +36,7 @@ define i8 @foo1(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
%t0 = lshr i64 %x, 8
%t1 = and i64 %t0, 255
%t2 = getelementptr i8, i8* %p, i64 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: foo1:
@@ -46,7 +46,7 @@ define i8 @bar8(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
%t0 = lshr i64 %x, 5
%t1 = and i64 %t0, 2040
%t2 = getelementptr i8, i8* %p, i64 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: bar8:
@@ -56,7 +56,7 @@ define i8 @bar4(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
%t0 = lshr i64 %x, 6
%t1 = and i64 %t0, 1020
%t2 = getelementptr i8, i8* %p, i64 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: bar4:
@@ -66,7 +66,7 @@ define i8 @bar2(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
%t0 = lshr i64 %x, 7
%t1 = and i64 %t0, 510
%t2 = getelementptr i8, i8* %p, i64 %t1
- %t3 = load i8* %t2, align 8
+ %t3 = load i8, i8* %t2, align 8
ret i8 %t3
}
; CHECK: bar2:
diff --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll
index 1dcf93939b8..f4331bab143 100644
--- a/llvm/test/CodeGen/X86/half.ll
+++ b/llvm/test/CodeGen/X86/half.ll
@@ -5,7 +5,7 @@ define void @test_load_store(half* %in, half* %out) {
; CHECK-LABEL: test_load_store:
; CHECK: movw (%rdi), [[TMP:%[a-z0-9]+]]
; CHECK: movw [[TMP]], (%rsi)
- %val = load half* %in
+ %val = load half, half* %in
store half %val, half* %out
ret void
}
@@ -13,7 +13,7 @@ define void @test_load_store(half* %in, half* %out) {
define i16 @test_bitcast_from_half(half* %addr) {
; CHECK-LABEL: test_bitcast_from_half:
; CHECK: movzwl (%rdi), %eax
- %val = load half* %addr
+ %val = load half, half* %addr
%val_int = bitcast half %val to i16
ret i16 %val_int
}
@@ -31,7 +31,7 @@ define float @test_extend32(half* %addr) {
; CHECK-LIBCALL: jmp __gnu_h2f_ieee
; CHECK-FP16: vcvtph2ps
- %val16 = load half* %addr
+ %val16 = load half, half* %addr
%val32 = fpext half %val16 to float
ret float %val32
}
@@ -43,7 +43,7 @@ define double @test_extend64(half* %addr) {
; CHECK-LIBCALL: cvtss2sd
; CHECK-FP16: vcvtph2ps
; CHECK-FP16: vcvtss2sd
- %val16 = load half* %addr
+ %val16 = load half, half* %addr
%val32 = fpext half %val16 to double
ret double %val32
}
diff --git a/llvm/test/CodeGen/X86/hidden-vis-2.ll b/llvm/test/CodeGen/X86/hidden-vis-2.ll
index 74554d15e2f..62e143d61c6 100644
--- a/llvm/test/CodeGen/X86/hidden-vis-2.ll
+++ b/llvm/test/CodeGen/X86/hidden-vis-2.ll
@@ -5,6 +5,6 @@
define i32 @t() nounwind readonly {
entry:
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @x, align 4 ; <i32> [#uses=1]
ret i32 %0
}
diff --git a/llvm/test/CodeGen/X86/hidden-vis-3.ll b/llvm/test/CodeGen/X86/hidden-vis-3.ll
index 4be881e84d6..5d9ef44a4d4 100644
--- a/llvm/test/CodeGen/X86/hidden-vis-3.ll
+++ b/llvm/test/CodeGen/X86/hidden-vis-3.ll
@@ -12,8 +12,8 @@ entry:
; X64: _t:
; X64: movl _y(%rip), %eax
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
- %1 = load i32* @y, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @x, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* @y, align 4 ; <i32> [#uses=1]
%2 = add i32 %1, %0 ; <i32> [#uses=1]
ret i32 %2
}
diff --git a/llvm/test/CodeGen/X86/hidden-vis-4.ll b/llvm/test/CodeGen/X86/hidden-vis-4.ll
index 25a87b905bc..17d44d0e42f 100644
--- a/llvm/test/CodeGen/X86/hidden-vis-4.ll
+++ b/llvm/test/CodeGen/X86/hidden-vis-4.ll
@@ -7,6 +7,6 @@ entry:
; CHECK-LABEL: t:
; CHECK: movl _x, %eax
; CHECK: .comm _x,4
- %0 = load i32* @x, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @x, align 4 ; <i32> [#uses=1]
ret i32 %0
}
diff --git a/llvm/test/CodeGen/X86/hidden-vis-pic.ll b/llvm/test/CodeGen/X86/hidden-vis-pic.ll
index 1caab7a6a00..e49bb48ac67 100644
--- a/llvm/test/CodeGen/X86/hidden-vis-pic.ll
+++ b/llvm/test/CodeGen/X86/hidden-vis-pic.ll
@@ -45,6 +45,6 @@ entry:
br label %return
return: ; preds = %entry
- %retval1 = load i32* %retval ; <i32> [#uses=1]
+ %retval1 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval1
}
diff --git a/llvm/test/CodeGen/X86/hipe-cc.ll b/llvm/test/CodeGen/X86/hipe-cc.ll
index b34417ebf69..e3808e75422 100644
--- a/llvm/test/CodeGen/X86/hipe-cc.ll
+++ b/llvm/test/CodeGen/X86/hipe-cc.ll
@@ -53,18 +53,18 @@ entry:
; CHECK-NEXT: movl 12(%esp), %ebp
; CHECK-NEXT: movl 8(%esp), %eax
; CHECK-NEXT: movl 4(%esp), %edx
- %0 = load i32* %hp_var
- %1 = load i32* %p_var
- %2 = load i32* %arg0_var
- %3 = load i32* %arg1_var
- %4 = load i32* %arg2_var
+ %0 = load i32, i32* %hp_var
+ %1 = load i32, i32* %p_var
+ %2 = load i32, i32* %arg0_var
+ %3 = load i32, i32* %arg1_var
+ %4 = load i32, i32* %arg2_var
; CHECK: jmp bar
tail call cc 11 void @bar(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4) nounwind
ret void
}
define cc 11 void @baz() nounwind {
- %tmp_clos = load i32* @clos
+ %tmp_clos = load i32, i32* @clos
%tmp_clos2 = inttoptr i32 %tmp_clos to i32*
%indirect_call = bitcast i32* %tmp_clos2 to void (i32, i32, i32)*
; CHECK: movl $42, %eax
diff --git a/llvm/test/CodeGen/X86/hipe-cc64.ll b/llvm/test/CodeGen/X86/hipe-cc64.ll
index 27e1c723a8f..28d90399d85 100644
--- a/llvm/test/CodeGen/X86/hipe-cc64.ll
+++ b/llvm/test/CodeGen/X86/hipe-cc64.ll
@@ -62,19 +62,19 @@ entry:
; CHECK-NEXT: movq 24(%rsp), %rsi
; CHECK-NEXT: movq 16(%rsp), %rdx
; CHECK-NEXT: movq 8(%rsp), %rcx
- %0 = load i64* %hp_var
- %1 = load i64* %p_var
- %2 = load i64* %arg0_var
- %3 = load i64* %arg1_var
- %4 = load i64* %arg2_var
- %5 = load i64* %arg3_var
+ %0 = load i64, i64* %hp_var
+ %1 = load i64, i64* %p_var
+ %2 = load i64, i64* %arg0_var
+ %3 = load i64, i64* %arg1_var
+ %4 = load i64, i64* %arg2_var
+ %5 = load i64, i64* %arg3_var
; CHECK: jmp bar
tail call cc 11 void @bar(i64 %0, i64 %1, i64 %2, i64 %3, i64 %4, i64 %5) nounwind
ret void
}
define cc 11 void @baz() nounwind {
- %tmp_clos = load i64* @clos
+ %tmp_clos = load i64, i64* @clos
%tmp_clos2 = inttoptr i64 %tmp_clos to i64*
%indirect_call = bitcast i64* %tmp_clos2 to void (i64, i64, i64)*
; CHECK: movl $42, %esi
diff --git a/llvm/test/CodeGen/X86/hoist-invariant-load.ll b/llvm/test/CodeGen/X86/hoist-invariant-load.ll
index c9e52903c79..2d3d99f5ba2 100644
--- a/llvm/test/CodeGen/X86/hoist-invariant-load.ll
+++ b/llvm/test/CodeGen/X86/hoist-invariant-load.ll
@@ -15,7 +15,7 @@ entry:
for.body: ; preds = %for.body, %entry
%i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %0 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8, !invariant.load !0
+ %0 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8, !invariant.load !0
%call = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %x, i8* %0)
%inc = add i32 %i.01, 1
%exitcond = icmp eq i32 %inc, 10000
diff --git a/llvm/test/CodeGen/X86/i128-mul.ll b/llvm/test/CodeGen/X86/i128-mul.ll
index 5fe11646867..21bca028888 100644
--- a/llvm/test/CodeGen/X86/i128-mul.ll
+++ b/llvm/test/CodeGen/X86/i128-mul.ll
@@ -27,7 +27,7 @@ for.body: ; preds = %entry, %for.body
%carry.013 = phi i64 [ %conv6, %for.body ], [ 0, %entry ]
%i.012 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i64, i64* %x, i64 %i.012
- %0 = load i64* %arrayidx, align 8
+ %0 = load i64, i64* %arrayidx, align 8
%conv2 = zext i64 %0 to i128
%mul = mul i128 %conv2, %conv
%conv3 = zext i64 %carry.013 to i128
diff --git a/llvm/test/CodeGen/X86/i128-ret.ll b/llvm/test/CodeGen/X86/i128-ret.ll
index 264f07ceb4c..1d76471225e 100644
--- a/llvm/test/CodeGen/X86/i128-ret.ll
+++ b/llvm/test/CodeGen/X86/i128-ret.ll
@@ -4,7 +4,7 @@
; CHECK: movq 8([[A0]]), %rdx
define i128 @test(i128 *%P) {
- %A = load i128* %P
+ %A = load i128, i128* %P
ret i128 %A
}
diff --git a/llvm/test/CodeGen/X86/i1narrowfail.ll b/llvm/test/CodeGen/X86/i1narrowfail.ll
index e280f3cff51..4f9a75672bf 100644
--- a/llvm/test/CodeGen/X86/i1narrowfail.ll
+++ b/llvm/test/CodeGen/X86/i1narrowfail.ll
@@ -3,7 +3,7 @@
; CHECK-LABEL: @foo
; CHECK: orb $16
define void @foo(i64* %ptr) {
- %r11 = load i64* %ptr, align 8
+ %r11 = load i64, i64* %ptr, align 8
%r12 = or i64 16, %r11
store i64 %r12, i64* %ptr, align 8
ret void
diff --git a/llvm/test/CodeGen/X86/i256-add.ll b/llvm/test/CodeGen/X86/i256-add.ll
index 5a7a7a7fe84..6164d898ca1 100644
--- a/llvm/test/CodeGen/X86/i256-add.ll
+++ b/llvm/test/CodeGen/X86/i256-add.ll
@@ -3,15 +3,15 @@
; RUN: grep sbbl %t | count 7
define void @add(i256* %p, i256* %q) nounwind {
- %a = load i256* %p
- %b = load i256* %q
+ %a = load i256, i256* %p
+ %b = load i256, i256* %q
%c = add i256 %a, %b
store i256 %c, i256* %p
ret void
}
define void @sub(i256* %p, i256* %q) nounwind {
- %a = load i256* %p
- %b = load i256* %q
+ %a = load i256, i256* %p
+ %b = load i256, i256* %q
%c = sub i256 %a, %b
store i256 %c, i256* %p
ret void
diff --git a/llvm/test/CodeGen/X86/i2k.ll b/llvm/test/CodeGen/X86/i2k.ll
index 6116c2e7165..83c10a58a3a 100644
--- a/llvm/test/CodeGen/X86/i2k.ll
+++ b/llvm/test/CodeGen/X86/i2k.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -march=x86
define void @foo(i2011* %x, i2011* %y, i2011* %p) nounwind {
- %a = load i2011* %x
- %b = load i2011* %y
+ %a = load i2011, i2011* %x
+ %b = load i2011, i2011* %y
%c = add i2011 %a, %b
store i2011 %c, i2011* %p
ret void
diff --git a/llvm/test/CodeGen/X86/i486-fence-loop.ll b/llvm/test/CodeGen/X86/i486-fence-loop.ll
index d8096197b0d..96ed0569cf0 100644
--- a/llvm/test/CodeGen/X86/i486-fence-loop.ll
+++ b/llvm/test/CodeGen/X86/i486-fence-loop.ll
@@ -16,9 +16,9 @@ entry:
br label %while.body
while.body:
- %0 = load volatile i32* %addr, align 4
+ %0 = load volatile i32, i32* %addr, align 4
fence seq_cst
- %1 = load volatile i32* %addr, align 4
+ %1 = load volatile i32, i32* %addr, align 4
%cmp = icmp sgt i32 %1, %0
br i1 %cmp, label %while.body, label %if.then
diff --git a/llvm/test/CodeGen/X86/i64-mem-copy.ll b/llvm/test/CodeGen/X86/i64-mem-copy.ll
index bf778968c89..21f88779cf5 100644
--- a/llvm/test/CodeGen/X86/i64-mem-copy.ll
+++ b/llvm/test/CodeGen/X86/i64-mem-copy.ll
@@ -11,7 +11,7 @@
define void @foo(i64* %x, i64* %y) nounwind {
entry:
- %tmp1 = load i64* %y, align 8 ; <i64> [#uses=1]
+ %tmp1 = load i64, i64* %y, align 8 ; <i64> [#uses=1]
store i64 %tmp1, i64* %x, align 8
ret void
}
diff --git a/llvm/test/CodeGen/X86/inline-asm-fpstack.ll b/llvm/test/CodeGen/X86/inline-asm-fpstack.ll
index bb3778a2811..220eb72d691 100644
--- a/llvm/test/CodeGen/X86/inline-asm-fpstack.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-fpstack.ll
@@ -169,11 +169,11 @@ entry:
; CHECK: testPR4485
define void @testPR4485(x86_fp80* %a) {
entry:
- %0 = load x86_fp80* %a, align 16
+ %0 = load x86_fp80, x86_fp80* %a, align 16
%1 = fmul x86_fp80 %0, 0xK4006B400000000000000
%2 = fmul x86_fp80 %1, 0xK4012F424000000000000
tail call void asm sideeffect "fistpl $0", "{st},~{st}"(x86_fp80 %2)
- %3 = load x86_fp80* %a, align 16
+ %3 = load x86_fp80, x86_fp80* %a, align 16
%4 = fmul x86_fp80 %3, 0xK4006B400000000000000
%5 = fmul x86_fp80 %4, 0xK4012F424000000000000
tail call void asm sideeffect "fistpl $0", "{st},~{st}"(x86_fp80 %5)
@@ -367,7 +367,7 @@ entry:
; Function Attrs: ssp
define void @test_live_st(i32 %a1) {
entry:
- %0 = load x86_fp80* undef, align 16
+ %0 = load x86_fp80, x86_fp80* undef, align 16
%cond = icmp eq i32 %a1, 1
br i1 %cond, label %sw.bb4.i, label %_Z5tointRKe.exit
diff --git a/llvm/test/CodeGen/X86/inline-asm-out-regs.ll b/llvm/test/CodeGen/X86/inline-asm-out-regs.ll
index 46966f5370d..8e47f81a5e0 100644
--- a/llvm/test/CodeGen/X86/inline-asm-out-regs.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-out-regs.ll
@@ -9,7 +9,7 @@ entry:
br label %bb1.i
bb1.i: ; preds = %bb6.i.i, %bb1.i, %entry
- %0 = load i32* null, align 8 ; <i32> [#uses=1]
+ %0 = load i32, i32* null, align 8 ; <i32> [#uses=1]
%1 = icmp ugt i32 %0, 1048575 ; <i1> [#uses=1]
br i1 %1, label %bb2.i, label %bb1.i
@@ -19,7 +19,7 @@ bb2.i: ; preds = %bb1.i
; <i32> [#uses=1]
%2 = lshr i32 %asmresult2.i.i, 8 ; <i32> [#uses=1]
%3 = trunc i32 %2 to i8 ; <i8> [#uses=1]
- %4 = load i32* @pcibios_last_bus, align 4 ; <i32> [#uses=1]
+ %4 = load i32, i32* @pcibios_last_bus, align 4 ; <i32> [#uses=1]
%5 = icmp slt i32 %4, 0 ; <i1> [#uses=1]
br i1 %5, label %bb5.i.i, label %bb6.i.i
diff --git a/llvm/test/CodeGen/X86/inline-asm-ptr-cast.ll b/llvm/test/CodeGen/X86/inline-asm-ptr-cast.ll
index 50e30210181..21353468b1f 100644
--- a/llvm/test/CodeGen/X86/inline-asm-ptr-cast.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-ptr-cast.ll
@@ -16,12 +16,12 @@ entry:
store i64 1, i64* %flags, align 8
store i64 -1, i64* %newflags, align 8
%0 = bitcast i32* %dst to i8*
- %tmp = load i64* %flags, align 8
+ %tmp = load i64, i64* %flags, align 8
%and = and i64 %tmp, 1
%1 = bitcast i32* %src to i8*
- %tmp1 = load i8* %1
+ %tmp1 = load i8, i8* %1
%2 = bitcast i32* %dst to i8*
- %tmp2 = load i8* %2
+ %tmp2 = load i8, i8* %2
call void asm "pushfq \0Aandq $2, (%rsp) \0Aorq $3, (%rsp) \0Apopfq \0Aaddb $4, $1 \0Apushfq \0Apopq $0 \0A", "=*&rm,=*&rm,i,r,r,1,~{cc},~{dirflag},~{fpsr},~{flags}"(i64* %newflags, i8* %0, i64 -2, i64 %and, i8 %tmp1, i8 %tmp2) nounwind
ret void
}
diff --git a/llvm/test/CodeGen/X86/inline-asm-stack-realign.ll b/llvm/test/CodeGen/X86/inline-asm-stack-realign.ll
index f2ac0f451bb..cfbe260a33a 100644
--- a/llvm/test/CodeGen/X86/inline-asm-stack-realign.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-stack-realign.ll
@@ -11,6 +11,6 @@ entry:
%r = alloca i32, align 16
store i32 -1, i32* %r, align 16
call void asm sideeffect inteldialect "push esi\0A\09xor esi, esi\0A\09mov dword ptr $0, esi\0A\09pop esi", "=*m,~{flags},~{esi},~{esp},~{dirflag},~{fpsr},~{flags}"(i32* %r)
- %0 = load i32* %r, align 16
+ %0 = load i32, i32* %r, align 16
ret i32 %0
}
diff --git a/llvm/test/CodeGen/X86/inline-asm-stack-realign2.ll b/llvm/test/CodeGen/X86/inline-asm-stack-realign2.ll
index 0e4e7e1a677..3dfae113cf6 100644
--- a/llvm/test/CodeGen/X86/inline-asm-stack-realign2.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-stack-realign2.ll
@@ -11,6 +11,6 @@ entry:
%r = alloca i32, align 16
store i32 -1, i32* %r, align 16
call void asm sideeffect "push %esi\0A\09xor %esi, %esi\0A\09mov %esi, $0\0A\09pop %esi", "=*m,~{flags},~{esi},~{esp},~{dirflag},~{fpsr},~{flags}"(i32* %r)
- %0 = load i32* %r, align 16
+ %0 = load i32, i32* %r, align 16
ret i32 %0
}
diff --git a/llvm/test/CodeGen/X86/inline-asm-stack-realign3.ll b/llvm/test/CodeGen/X86/inline-asm-stack-realign3.ll
index 3baaaaa7d93..be0c6f51112 100644
--- a/llvm/test/CodeGen/X86/inline-asm-stack-realign3.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-stack-realign3.ll
@@ -15,7 +15,7 @@ doit:
br label %skip
skip:
- %0 = load i32* %r, align 128
+ %0 = load i32, i32* %r, align 128
ret i32 %0
}
diff --git a/llvm/test/CodeGen/X86/inline-asm-tied.ll b/llvm/test/CodeGen/X86/inline-asm-tied.ll
index fb5896b0ad6..9ceb0e8b4b7 100644
--- a/llvm/test/CodeGen/X86/inline-asm-tied.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-tied.ll
@@ -11,12 +11,12 @@ entry:
%retval = alloca i64 ; <i64*> [#uses=2]
%_data.addr = alloca i64 ; <i64*> [#uses=4]
store i64 %_data, i64* %_data.addr
- %tmp = load i64* %_data.addr ; <i64> [#uses=1]
+ %tmp = load i64, i64* %_data.addr ; <i64> [#uses=1]
%0 = call i64 asm "bswap %eax\0A\09bswap %edx\0A\09xchgl %eax, %edx", "=A,0,~{dirflag},~{fpsr},~{flags}"(i64 %tmp) nounwind ; <i64> [#uses=1]
store i64 %0, i64* %_data.addr
- %tmp1 = load i64* %_data.addr ; <i64> [#uses=1]
+ %tmp1 = load i64, i64* %_data.addr ; <i64> [#uses=1]
store i64 %tmp1, i64* %retval
- %1 = load i64* %retval ; <i64> [#uses=1]
+ %1 = load i64, i64* %retval ; <i64> [#uses=1]
ret i64 %1
}
diff --git a/llvm/test/CodeGen/X86/ins_split_regalloc.ll b/llvm/test/CodeGen/X86/ins_split_regalloc.ll
index f5c5254fcec..f04d088ce68 100644
--- a/llvm/test/CodeGen/X86/ins_split_regalloc.ll
+++ b/llvm/test/CodeGen/X86/ins_split_regalloc.ll
@@ -25,7 +25,7 @@ target datalayout = "e-i64:64-f80:128-s:64-n8:16:32:64-S128"
; CHECK: jmpq *[[F_ADDR_TC]]
define void @test(i32 %a, i32 %b, i32 %c) {
entry:
- %fct_f = load void (i32)** @f, align 8
+ %fct_f = load void (i32)*, void (i32)** @f, align 8
tail call void %fct_f(i32 %a)
tail call void %fct_f(i32 %b)
tail call void %fct_f(i32 %c)
diff --git a/llvm/test/CodeGen/X86/ins_subreg_coalesce-1.ll b/llvm/test/CodeGen/X86/ins_subreg_coalesce-1.ll
index a74e3f20c41..4a5d8dfaf68 100644
--- a/llvm/test/CodeGen/X86/ins_subreg_coalesce-1.ll
+++ b/llvm/test/CodeGen/X86/ins_subreg_coalesce-1.ll
@@ -18,7 +18,7 @@ bb22: ; preds = %bb4
bb4.i: ; preds = %bb22
ret i32 0
walkExprTree.exit: ; preds = %bb22
- %tmp83 = load i16* null, align 4 ; <i16> [#uses=1]
+ %tmp83 = load i16, i16* null, align 4 ; <i16> [#uses=1]
%tmp84 = or i16 %tmp83, 2 ; <i16> [#uses=2]
store i16 %tmp84, i16* null, align 4
%tmp98993 = zext i16 %tmp84 to i32 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll b/llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll
index 48df724bb6b..71890bc23b6 100644
--- a/llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll
+++ b/llvm/test/CodeGen/X86/ins_subreg_coalesce-3.ll
@@ -35,7 +35,7 @@ bb428: ; preds = %bb366, %bb304
bb433: ; preds = %bb428
ret void
bb650: ; preds = %bb650, %bb428
- %tmp658 = load i8* null, align 8 ; <i8> [#uses=1]
+ %tmp658 = load i8, i8* null, align 8 ; <i8> [#uses=1]
%tmp659 = icmp eq i8 %tmp658, 0 ; <i1> [#uses=1]
br i1 %tmp659, label %bb650, label %bb662
bb662: ; preds = %bb650
@@ -43,7 +43,7 @@ bb662: ; preds = %bb650
bb688: ; preds = %bb662
ret void
bb761: ; preds = %bb662
- %tmp487248736542 = load i32* null, align 4 ; <i32> [#uses=2]
+ %tmp487248736542 = load i32, i32* null, align 4 ; <i32> [#uses=2]
%tmp487648776541 = and i32 %tmp487248736542, 57344 ; <i32> [#uses=1]
%tmp4881 = icmp eq i32 %tmp487648776541, 8192 ; <i1> [#uses=1]
br i1 %tmp4881, label %bb4884, label %bb4897
@@ -54,10 +54,10 @@ bb4884: ; preds = %bb761
bb4897: ; preds = %bb4884, %bb761
ret void
bb4932: ; preds = %bb4884
- %tmp4933 = load i32* null, align 4 ; <i32> [#uses=1]
+ %tmp4933 = load i32, i32* null, align 4 ; <i32> [#uses=1]
br i1 %foo, label %bb5054, label %bb4940
bb4940: ; preds = %bb4932
- %tmp4943 = load i32* null, align 4 ; <i32> [#uses=2]
+ %tmp4943 = load i32, i32* null, align 4 ; <i32> [#uses=2]
switch i32 %tmp4933, label %bb5054 [
i32 159, label %bb4970
i32 160, label %bb5002
@@ -67,10 +67,10 @@ bb4970: ; preds = %bb4940
%tmp49764977 = and i16 %tmp49746536, 4095 ; <i16> [#uses=1]
%mask498049814982 = zext i16 %tmp49764977 to i64 ; <i64> [#uses=1]
%tmp4984 = getelementptr %struct.FONT_INFO, %struct.FONT_INFO* null, i64 %mask498049814982, i32 5 ; <%struct.rec**> [#uses=1]
- %tmp4985 = load %struct.rec** %tmp4984, align 8 ; <%struct.rec*> [#uses=1]
+ %tmp4985 = load %struct.rec*, %struct.rec** %tmp4984, align 8 ; <%struct.rec*> [#uses=1]
%tmp4988 = getelementptr %struct.rec, %struct.rec* %tmp4985, i64 0, i32 0, i32 3 ; <%struct.THIRD_UNION*> [#uses=1]
%tmp4991 = bitcast %struct.THIRD_UNION* %tmp4988 to i32* ; <i32*> [#uses=1]
- %tmp4992 = load i32* %tmp4991, align 8 ; <i32> [#uses=1]
+ %tmp4992 = load i32, i32* %tmp4991, align 8 ; <i32> [#uses=1]
%tmp49924993 = trunc i32 %tmp4992 to i16 ; <i16> [#uses=1]
%tmp4996 = add i16 %tmp49924993, 0 ; <i16> [#uses=1]
br label %bb5054
@@ -79,10 +79,10 @@ bb5002: ; preds = %bb4940
%tmp50085009 = and i16 %tmp50066537, 4095 ; <i16> [#uses=1]
%mask501250135014 = zext i16 %tmp50085009 to i64 ; <i64> [#uses=1]
%tmp5016 = getelementptr %struct.FONT_INFO, %struct.FONT_INFO* null, i64 %mask501250135014, i32 5 ; <%struct.rec**> [#uses=1]
- %tmp5017 = load %struct.rec** %tmp5016, align 8 ; <%struct.rec*> [#uses=1]
+ %tmp5017 = load %struct.rec*, %struct.rec** %tmp5016, align 8 ; <%struct.rec*> [#uses=1]
%tmp5020 = getelementptr %struct.rec, %struct.rec* %tmp5017, i64 0, i32 0, i32 3 ; <%struct.THIRD_UNION*> [#uses=1]
%tmp5023 = bitcast %struct.THIRD_UNION* %tmp5020 to i32* ; <i32*> [#uses=1]
- %tmp5024 = load i32* %tmp5023, align 8 ; <i32> [#uses=1]
+ %tmp5024 = load i32, i32* %tmp5023, align 8 ; <i32> [#uses=1]
%tmp50245025 = trunc i32 %tmp5024 to i16 ; <i16> [#uses=1]
%tmp5028 = sub i16 %tmp50245025, 0 ; <i16> [#uses=1]
br label %bb5054
diff --git a/llvm/test/CodeGen/X86/insertps-O0-bug.ll b/llvm/test/CodeGen/X86/insertps-O0-bug.ll
index e89ac26ea07..73748ee7e52 100644
--- a/llvm/test/CodeGen/X86/insertps-O0-bug.ll
+++ b/llvm/test/CodeGen/X86/insertps-O0-bug.ll
@@ -40,11 +40,11 @@ define <4 x float> @test(<4 x float> %a, <4 x float>* %b) {
; CHECK: insertps $64, [[REG]],
; CHECK: ret
entry:
- %0 = load <4 x float>* %b, align 16
+ %0 = load <4 x float>, <4 x float>* %b, align 16
%1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %0, i32 64)
%2 = alloca <4 x float>, align 16
store <4 x float> %1, <4 x float>* %2, align 16
- %3 = load <4 x float>* %2, align 16
+ %3 = load <4 x float>, <4 x float>* %2, align 16
ret <4 x float> %3
}
diff --git a/llvm/test/CodeGen/X86/invalid-shift-immediate.ll b/llvm/test/CodeGen/X86/invalid-shift-immediate.ll
index 77a9f7eda78..21ad6e8f1a0 100644
--- a/llvm/test/CodeGen/X86/invalid-shift-immediate.ll
+++ b/llvm/test/CodeGen/X86/invalid-shift-immediate.ll
@@ -9,7 +9,7 @@ entry:
%x_addr = alloca i32 ; <i32*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store i32 %x, i32* %x_addr
- %tmp = load i32* %x_addr, align 4 ; <i32> [#uses=1]
+ %tmp = load i32, i32* %x_addr, align 4 ; <i32> [#uses=1]
%tmp1 = ashr i32 %tmp, -2 ; <i32> [#uses=1]
%tmp2 = and i32 %tmp1, 1 ; <i32> [#uses=1]
%tmp23 = trunc i32 %tmp2 to i8 ; <i8> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/isel-optnone.ll b/llvm/test/CodeGen/X86/isel-optnone.ll
index 7ca62e4936c..831ad3837d9 100644
--- a/llvm/test/CodeGen/X86/isel-optnone.ll
+++ b/llvm/test/CodeGen/X86/isel-optnone.ll
@@ -2,9 +2,9 @@
define i32* @fooOptnone(i32* %p, i32* %q, i32** %z) #0 {
entry:
- %r = load i32* %p
- %s = load i32* %q
- %y = load i32** %z
+ %r = load i32, i32* %p
+ %s = load i32, i32* %q
+ %y = load i32*, i32** %z
%t0 = add i32 %r, %s
%t1 = add i32 %t0, 1
@@ -21,9 +21,9 @@ entry:
define i32* @fooNormal(i32* %p, i32* %q, i32** %z) #1 {
entry:
- %r = load i32* %p
- %s = load i32* %q
- %y = load i32** %z
+ %r = load i32, i32* %p
+ %s = load i32, i32* %q
+ %y = load i32*, i32** %z
%t0 = add i32 %r, %s
%t1 = add i32 %t0, 1
diff --git a/llvm/test/CodeGen/X86/isel-sink.ll b/llvm/test/CodeGen/X86/isel-sink.ll
index 40d60a4efe0..27abe051a9b 100644
--- a/llvm/test/CodeGen/X86/isel-sink.ll
+++ b/llvm/test/CodeGen/X86/isel-sink.ll
@@ -18,6 +18,6 @@ T:
store i32 4, i32* %P
ret i32 141
F:
- %V = load i32* %P
+ %V = load i32, i32* %P
ret i32 %V
}
diff --git a/llvm/test/CodeGen/X86/isel-sink2.ll b/llvm/test/CodeGen/X86/isel-sink2.ll
index 7cfd73a0951..65f1994b9fe 100644
--- a/llvm/test/CodeGen/X86/isel-sink2.ll
+++ b/llvm/test/CodeGen/X86/isel-sink2.ll
@@ -5,13 +5,13 @@
define i8 @test(i32 *%P) nounwind {
%Q = getelementptr i32, i32* %P, i32 1
%R = bitcast i32* %Q to i8*
- %S = load i8* %R
+ %S = load i8, i8* %R
%T = icmp eq i8 %S, 0
br i1 %T, label %TB, label %F
TB:
ret i8 4
F:
%U = getelementptr i8, i8* %R, i32 3
- %V = load i8* %U
+ %V = load i8, i8* %U
ret i8 %V
}
diff --git a/llvm/test/CodeGen/X86/isel-sink3.ll b/llvm/test/CodeGen/X86/isel-sink3.ll
index 15af197f74f..fa633dc25ae 100644
--- a/llvm/test/CodeGen/X86/isel-sink3.ll
+++ b/llvm/test/CodeGen/X86/isel-sink3.ll
@@ -11,7 +11,7 @@ target triple = "i386-apple-darwin7"
define i32 @bar(i32** %P) nounwind {
entry:
- %0 = load i32** %P, align 4 ; <i32*> [#uses=2]
+ %0 = load i32*, i32** %P, align 4 ; <i32*> [#uses=2]
%1 = getelementptr i32, i32* %0, i32 1 ; <i32*> [#uses=1]
%2 = icmp ugt i32* %1, inttoptr (i64 1233 to i32*) ; <i1> [#uses=1]
br i1 %2, label %bb1, label %bb
@@ -22,6 +22,6 @@ bb: ; preds = %entry
bb1: ; preds = %entry, %bb
%3 = getelementptr i32, i32* %1, i32 1 ; <i32*> [#uses=1]
- %4 = load i32* %3, align 4 ; <i32> [#uses=1]
+ %4 = load i32, i32* %3, align 4 ; <i32> [#uses=1]
ret i32 %4
}
diff --git a/llvm/test/CodeGen/X86/jump_sign.ll b/llvm/test/CodeGen/X86/jump_sign.ll
index 7bdbb7b60b7..440f1cc9b49 100644
--- a/llvm/test/CodeGen/X86/jump_sign.ll
+++ b/llvm/test/CodeGen/X86/jump_sign.ll
@@ -164,7 +164,7 @@ entry:
; PR13046
define void @func_o() nounwind uwtable {
entry:
- %0 = load i16* undef, align 2
+ %0 = load i16, i16* undef, align 2
br i1 undef, label %if.then.i, label %if.end.i
if.then.i: ; preds = %entry
@@ -238,7 +238,7 @@ entry:
; CHECK: j
; CHECK-NOT: sub
; CHECK: ret
- %0 = load i32* %offset, align 8
+ %0 = load i32, i32* %offset, align 8
%cmp = icmp slt i32 %0, %size
br i1 %cmp, label %return, label %if.end
@@ -287,10 +287,10 @@ entry:
; CHECK: andb
; CHECK: j
; CHECK: ret
- %0 = load i32* @b, align 4
+ %0 = load i32, i32* @b, align 4
%cmp = icmp ult i32 %0, %p1
%conv = zext i1 %cmp to i32
- %1 = load i32* @a, align 4
+ %1 = load i32, i32* @a, align 4
%and = and i32 %conv, %1
%conv1 = trunc i32 %and to i8
%2 = urem i8 %conv1, 3
diff --git a/llvm/test/CodeGen/X86/large-constants.ll b/llvm/test/CodeGen/X86/large-constants.ll
index 157ecc4af66..945d633eec1 100644
--- a/llvm/test/CodeGen/X86/large-constants.ll
+++ b/llvm/test/CodeGen/X86/large-constants.ll
@@ -40,10 +40,10 @@ fail:
define void @constant_expressions() {
entry:
- %0 = load i64* inttoptr (i64 add (i64 51250129900, i64 0) to i64*)
- %1 = load i64* inttoptr (i64 add (i64 51250129900, i64 8) to i64*)
- %2 = load i64* inttoptr (i64 add (i64 51250129900, i64 16) to i64*)
- %3 = load i64* inttoptr (i64 add (i64 51250129900, i64 24) to i64*)
+ %0 = load i64, i64* inttoptr (i64 add (i64 51250129900, i64 0) to i64*)
+ %1 = load i64, i64* inttoptr (i64 add (i64 51250129900, i64 8) to i64*)
+ %2 = load i64, i64* inttoptr (i64 add (i64 51250129900, i64 16) to i64*)
+ %3 = load i64, i64* inttoptr (i64 add (i64 51250129900, i64 24) to i64*)
%4 = add i64 %0, %1
%5 = add i64 %2, %3
%6 = add i64 %4, %5
@@ -54,10 +54,10 @@ entry:
define void @constant_expressions2() {
entry:
- %0 = load i64* inttoptr (i64 51250129900 to i64*)
- %1 = load i64* inttoptr (i64 51250129908 to i64*)
- %2 = load i64* inttoptr (i64 51250129916 to i64*)
- %3 = load i64* inttoptr (i64 51250129924 to i64*)
+ %0 = load i64, i64* inttoptr (i64 51250129900 to i64*)
+ %1 = load i64, i64* inttoptr (i64 51250129908 to i64*)
+ %2 = load i64, i64* inttoptr (i64 51250129916 to i64*)
+ %3 = load i64, i64* inttoptr (i64 51250129924 to i64*)
%4 = add i64 %0, %1
%5 = add i64 %2, %3
%6 = add i64 %4, %5
diff --git a/llvm/test/CodeGen/X86/ldzero.ll b/llvm/test/CodeGen/X86/ldzero.ll
index dab04bc353c..3befa8a99b3 100644
--- a/llvm/test/CodeGen/X86/ldzero.ll
+++ b/llvm/test/CodeGen/X86/ldzero.ll
@@ -11,15 +11,15 @@ entry:
%d = alloca double, align 8 ; <double*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store double 0.000000e+00, double* %d, align 8
- %tmp1 = load double* %d, align 8 ; <double> [#uses=1]
+ %tmp1 = load double, double* %d, align 8 ; <double> [#uses=1]
%tmp12 = fpext double %tmp1 to x86_fp80 ; <x86_fp80> [#uses=1]
store x86_fp80 %tmp12, x86_fp80* %tmp, align 16
- %tmp3 = load x86_fp80* %tmp, align 16 ; <x86_fp80> [#uses=1]
+ %tmp3 = load x86_fp80, x86_fp80* %tmp, align 16 ; <x86_fp80> [#uses=1]
store x86_fp80 %tmp3, x86_fp80* %retval, align 16
br label %return
return: ; preds = %entry
- %retval4 = load x86_fp80* %retval ; <x86_fp80> [#uses=1]
+ %retval4 = load x86_fp80, x86_fp80* %retval ; <x86_fp80> [#uses=1]
ret x86_fp80 %retval4
}
@@ -30,14 +30,14 @@ entry:
%ld = alloca x86_fp80, align 16 ; <x86_fp80*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store x86_fp80 0xK00000000000000000000, x86_fp80* %ld, align 16
- %tmp1 = load x86_fp80* %ld, align 16 ; <x86_fp80> [#uses=1]
+ %tmp1 = load x86_fp80, x86_fp80* %ld, align 16 ; <x86_fp80> [#uses=1]
%tmp12 = fptrunc x86_fp80 %tmp1 to double ; <double> [#uses=1]
store double %tmp12, double* %tmp, align 8
- %tmp3 = load double* %tmp, align 8 ; <double> [#uses=1]
+ %tmp3 = load double, double* %tmp, align 8 ; <double> [#uses=1]
store double %tmp3, double* %retval, align 8
br label %return
return: ; preds = %entry
- %retval4 = load double* %retval ; <double> [#uses=1]
+ %retval4 = load double, double* %retval ; <double> [#uses=1]
ret double %retval4
}
diff --git a/llvm/test/CodeGen/X86/lea-5.ll b/llvm/test/CodeGen/X86/lea-5.ll
index 3df65c6f2a7..b89c199e719 100644
--- a/llvm/test/CodeGen/X86/lea-5.ll
+++ b/llvm/test/CodeGen/X86/lea-5.ll
@@ -18,7 +18,7 @@ while.cond: ; preds = %while.cond, %entry
; CHECK: leaq -40(%rsp,%r{{[^,]*}},4), %rax
; X32: leal -40(%rsp,%r{{[^,]*}},4), %eax
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp eq i32 %0, 0
%inc = add nsw i32 %d.addr.0, 1
@@ -45,7 +45,7 @@ while.cond: ; preds = %while.cond, %entry
; CHECK: leaq (%rsp,%r{{[^,]*}},4), %rax
; X32: leal (%rsp,%r{{[^,]*}},4), %eax
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp eq i32 %0, 0
%inc = add nsw i32 %d.addr.0, 1
diff --git a/llvm/test/CodeGen/X86/lea-recursion.ll b/llvm/test/CodeGen/X86/lea-recursion.ll
index 9480600312c..5d939597ec5 100644
--- a/llvm/test/CodeGen/X86/lea-recursion.ll
+++ b/llvm/test/CodeGen/X86/lea-recursion.ll
@@ -13,32 +13,32 @@
define void @foo() {
entry:
- %tmp4 = load i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 0) ; <i32> [#uses=1]
- %tmp8 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 0) ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 0) ; <i32> [#uses=1]
+ %tmp8 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 0) ; <i32> [#uses=1]
%tmp9 = add i32 %tmp4, 1 ; <i32> [#uses=1]
%tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=2]
store i32 %tmp10, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 1)
- %tmp8.1 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 1) ; <i32> [#uses=1]
+ %tmp8.1 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 1) ; <i32> [#uses=1]
%tmp9.1 = add i32 %tmp10, 1 ; <i32> [#uses=1]
%tmp10.1 = add i32 %tmp9.1, %tmp8.1 ; <i32> [#uses=2]
store i32 %tmp10.1, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 2)
- %tmp8.2 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 2) ; <i32> [#uses=1]
+ %tmp8.2 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 2) ; <i32> [#uses=1]
%tmp9.2 = add i32 %tmp10.1, 1 ; <i32> [#uses=1]
%tmp10.2 = add i32 %tmp9.2, %tmp8.2 ; <i32> [#uses=2]
store i32 %tmp10.2, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 3)
- %tmp8.3 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 3) ; <i32> [#uses=1]
+ %tmp8.3 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 3) ; <i32> [#uses=1]
%tmp9.3 = add i32 %tmp10.2, 1 ; <i32> [#uses=1]
%tmp10.3 = add i32 %tmp9.3, %tmp8.3 ; <i32> [#uses=2]
store i32 %tmp10.3, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 4)
- %tmp8.4 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 4) ; <i32> [#uses=1]
+ %tmp8.4 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 4) ; <i32> [#uses=1]
%tmp9.4 = add i32 %tmp10.3, 1 ; <i32> [#uses=1]
%tmp10.4 = add i32 %tmp9.4, %tmp8.4 ; <i32> [#uses=2]
store i32 %tmp10.4, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 5)
- %tmp8.5 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 5) ; <i32> [#uses=1]
+ %tmp8.5 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 5) ; <i32> [#uses=1]
%tmp9.5 = add i32 %tmp10.4, 1 ; <i32> [#uses=1]
%tmp10.5 = add i32 %tmp9.5, %tmp8.5 ; <i32> [#uses=2]
store i32 %tmp10.5, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 6)
- %tmp8.6 = load i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 6) ; <i32> [#uses=1]
+ %tmp8.6 = load i32, i32* getelementptr ([1000 x i32]* @g1, i32 0, i32 6) ; <i32> [#uses=1]
%tmp9.6 = add i32 %tmp10.5, 1 ; <i32> [#uses=1]
%tmp10.6 = add i32 %tmp9.6, %tmp8.6 ; <i32> [#uses=1]
store i32 %tmp10.6, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 7)
diff --git a/llvm/test/CodeGen/X86/legalize-shift-64.ll b/llvm/test/CodeGen/X86/legalize-shift-64.ll
index 64460bb9118..fb8f7b6a602 100644
--- a/llvm/test/CodeGen/X86/legalize-shift-64.ll
+++ b/llvm/test/CodeGen/X86/legalize-shift-64.ll
@@ -71,7 +71,7 @@ define i32 @test6() {
%t = alloca i64, align 8
store i32 1, i32* %x, align 4
store i64 1, i64* %t, align 8 ;; DEAD
- %load = load i32* %x, align 4
+ %load = load i32, i32* %x, align 4
%shl = shl i32 %load, 8
%add = add i32 %shl, -224
%sh_prom = zext i32 %add to i64
diff --git a/llvm/test/CodeGen/X86/licm-nested.ll b/llvm/test/CodeGen/X86/licm-nested.ll
index 2c71cbbd0e7..d8af64f1a6b 100644
--- a/llvm/test/CodeGen/X86/licm-nested.ll
+++ b/llvm/test/CodeGen/X86/licm-nested.ll
@@ -14,7 +14,7 @@ entry:
while.cond.preheader: ; preds = %entry
%arrayidx = getelementptr inbounds i8*, i8** %argv, i64 1 ; <i8**> [#uses=1]
- %tmp2 = load i8** %arrayidx ; <i8*> [#uses=1]
+ %tmp2 = load i8*, i8** %arrayidx ; <i8*> [#uses=1]
%call = tail call i32 @atoi(i8* %tmp2) nounwind ; <i32> [#uses=2]
%tobool51 = icmp eq i32 %call, 0 ; <i1> [#uses=1]
br i1 %tobool51, label %while.end, label %bb.nph53
@@ -50,7 +50,7 @@ for.body15: ; preds = %for.body, %for.inc3
%tmp73 = shl i64 %indvar57, 1 ; <i64> [#uses=1]
%add = add i64 %tmp73, 4 ; <i64> [#uses=2]
%arrayidx17 = getelementptr [8193 x i8], [8193 x i8]* @main.flags, i64 0, i64 %tmp68 ; <i8*> [#uses=1]
- %tmp18 = load i8* %arrayidx17 ; <i8> [#uses=1]
+ %tmp18 = load i8, i8* %arrayidx17 ; <i8> [#uses=1]
%tobool19 = icmp eq i8 %tmp18, 0 ; <i1> [#uses=1]
br i1 %tobool19, label %for.inc35, label %if.then
diff --git a/llvm/test/CodeGen/X86/liveness-local-regalloc.ll b/llvm/test/CodeGen/X86/liveness-local-regalloc.ll
index 36050f7eafa..0954f9d5dd4 100644
--- a/llvm/test/CodeGen/X86/liveness-local-regalloc.ll
+++ b/llvm/test/CodeGen/X86/liveness-local-regalloc.ll
@@ -71,7 +71,7 @@ BB:
%A2 = alloca <2 x i8>
%A1 = alloca i1
%A = alloca i32
- %L = load i8* %0
+ %L = load i8, i8* %0
store i8 -37, i8* %0
%E = extractelement <4 x i64> zeroinitializer, i32 2
%Shuff = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
diff --git a/llvm/test/CodeGen/X86/load-slice.ll b/llvm/test/CodeGen/X86/load-slice.ll
index 464b1917ae0..7f3dd63d2ea 100644
--- a/llvm/test/CodeGen/X86/load-slice.ll
+++ b/llvm/test/CodeGen/X86/load-slice.ll
@@ -48,7 +48,7 @@ define void @t1(%class.Complex* nocapture %out, i64 %out_start) {
entry:
%arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
%tmp = bitcast %class.Complex* %arrayidx to i64*
- %tmp1 = load i64* %tmp, align 8
+ %tmp1 = load i64, i64* %tmp, align 8
%t0.sroa.0.0.extract.trunc = trunc i64 %tmp1 to i32
%tmp2 = bitcast i32 %t0.sroa.0.0.extract.trunc to float
%t0.sroa.2.0.extract.shift = lshr i64 %tmp1, 32
@@ -57,11 +57,11 @@ entry:
%add = add i64 %out_start, 8
%arrayidx2 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add
%i.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 0
- %tmp4 = load float* %i.i, align 4
+ %tmp4 = load float, float* %i.i, align 4
%add.i = fadd float %tmp4, %tmp2
%retval.sroa.0.0.vec.insert.i = insertelement <2 x float> undef, float %add.i, i32 0
%r.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 1
- %tmp5 = load float* %r.i, align 4
+ %tmp5 = load float, float* %r.i, align 4
%add5.i = fadd float %tmp5, %tmp3
%retval.sroa.0.4.vec.insert.i = insertelement <2 x float> %retval.sroa.0.0.vec.insert.i, float %add5.i, i32 1
%ref.tmp.sroa.0.0.cast = bitcast %class.Complex* %arrayidx to <2 x float>*
@@ -102,7 +102,7 @@ declare void @llvm.lifetime.end(i64, i8* nocapture)
define i32 @t2(%class.Complex* nocapture %out, i64 %out_start) {
%arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
%bitcast = bitcast %class.Complex* %arrayidx to i64*
- %chunk64 = load i64* %bitcast, align 8
+ %chunk64 = load i64, i64* %bitcast, align 8
%slice32_low = trunc i64 %chunk64 to i32
%shift48 = lshr i64 %chunk64, 48
%slice32_high = trunc i64 %shift48 to i32
@@ -127,7 +127,7 @@ define i32 @t2(%class.Complex* nocapture %out, i64 %out_start) {
define i32 @t3(%class.Complex* nocapture %out, i64 %out_start) {
%arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
%bitcast = bitcast %class.Complex* %arrayidx to i64*
- %chunk64 = load i64* %bitcast, align 8
+ %chunk64 = load i64, i64* %bitcast, align 8
%slice32_low = trunc i64 %chunk64 to i32
%shift48 = lshr i64 %chunk64, 48
%slice32_high = trunc i64 %shift48 to i32
diff --git a/llvm/test/CodeGen/X86/longlong-deadload.ll b/llvm/test/CodeGen/X86/longlong-deadload.ll
index 73e10127c06..3adaf49e372 100644
--- a/llvm/test/CodeGen/X86/longlong-deadload.ll
+++ b/llvm/test/CodeGen/X86/longlong-deadload.ll
@@ -6,7 +6,7 @@ define void @test(i64* %P) nounwind {
; CHECK: movl 4(%esp), %[[REGISTER:.*]]
; CHECK-NOT: 4(%[[REGISTER]])
; CHECK: ret
- %tmp1 = load i64* %P, align 8 ; <i64> [#uses=1]
+ %tmp1 = load i64, i64* %P, align 8 ; <i64> [#uses=1]
%tmp2 = xor i64 %tmp1, 1 ; <i64> [#uses=1]
store i64 %tmp2, i64* %P, align 8
ret void
diff --git a/llvm/test/CodeGen/X86/loop-strength-reduce4.ll b/llvm/test/CodeGen/X86/loop-strength-reduce4.ll
index 681051eef05..786534b00d3 100644
--- a/llvm/test/CodeGen/X86/loop-strength-reduce4.ll
+++ b/llvm/test/CodeGen/X86/loop-strength-reduce4.ll
@@ -27,30 +27,30 @@ bb: ; preds = %bb, %entry
%t.063.0 = phi i32 [ 0, %entry ], [ %tmp47, %bb ] ; <i32> [#uses=1]
%j.065.0 = shl i32 %indvar, 2 ; <i32> [#uses=4]
%tmp3 = getelementptr [0 x i32], [0 x i32]* @state, i32 0, i32 %j.065.0 ; <i32*> [#uses=2]
- %tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* %tmp3, align 4 ; <i32> [#uses=1]
%tmp6 = getelementptr [0 x i32], [0 x i32]* @S, i32 0, i32 %t.063.0 ; <i32*> [#uses=1]
- %tmp7 = load i32* %tmp6, align 4 ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* %tmp6, align 4 ; <i32> [#uses=1]
%tmp8 = xor i32 %tmp7, %tmp4 ; <i32> [#uses=2]
store i32 %tmp8, i32* %tmp3, align 4
%tmp1378 = or i32 %j.065.0, 1 ; <i32> [#uses=1]
%tmp16 = getelementptr [0 x i32], [0 x i32]* @state, i32 0, i32 %tmp1378 ; <i32*> [#uses=2]
- %tmp17 = load i32* %tmp16, align 4 ; <i32> [#uses=1]
+ %tmp17 = load i32, i32* %tmp16, align 4 ; <i32> [#uses=1]
%tmp19 = getelementptr [0 x i32], [0 x i32]* @S, i32 0, i32 %tmp8 ; <i32*> [#uses=1]
- %tmp20 = load i32* %tmp19, align 4 ; <i32> [#uses=1]
+ %tmp20 = load i32, i32* %tmp19, align 4 ; <i32> [#uses=1]
%tmp21 = xor i32 %tmp20, %tmp17 ; <i32> [#uses=2]
store i32 %tmp21, i32* %tmp16, align 4
%tmp2680 = or i32 %j.065.0, 2 ; <i32> [#uses=1]
%tmp29 = getelementptr [0 x i32], [0 x i32]* @state, i32 0, i32 %tmp2680 ; <i32*> [#uses=2]
- %tmp30 = load i32* %tmp29, align 4 ; <i32> [#uses=1]
+ %tmp30 = load i32, i32* %tmp29, align 4 ; <i32> [#uses=1]
%tmp32 = getelementptr [0 x i32], [0 x i32]* @S, i32 0, i32 %tmp21 ; <i32*> [#uses=1]
- %tmp33 = load i32* %tmp32, align 4 ; <i32> [#uses=1]
+ %tmp33 = load i32, i32* %tmp32, align 4 ; <i32> [#uses=1]
%tmp34 = xor i32 %tmp33, %tmp30 ; <i32> [#uses=2]
store i32 %tmp34, i32* %tmp29, align 4
%tmp3982 = or i32 %j.065.0, 3 ; <i32> [#uses=1]
%tmp42 = getelementptr [0 x i32], [0 x i32]* @state, i32 0, i32 %tmp3982 ; <i32*> [#uses=2]
- %tmp43 = load i32* %tmp42, align 4 ; <i32> [#uses=1]
+ %tmp43 = load i32, i32* %tmp42, align 4 ; <i32> [#uses=1]
%tmp45 = getelementptr [0 x i32], [0 x i32]* @S, i32 0, i32 %tmp34 ; <i32*> [#uses=1]
- %tmp46 = load i32* %tmp45, align 4 ; <i32> [#uses=1]
+ %tmp46 = load i32, i32* %tmp45, align 4 ; <i32> [#uses=1]
%tmp47 = xor i32 %tmp46, %tmp43 ; <i32> [#uses=3]
store i32 %tmp47, i32* %tmp42, align 4
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/loop-strength-reduce7.ll b/llvm/test/CodeGen/X86/loop-strength-reduce7.ll
index eb5a4b37bd2..92ec485e775 100644
--- a/llvm/test/CodeGen/X86/loop-strength-reduce7.ll
+++ b/llvm/test/CodeGen/X86/loop-strength-reduce7.ll
@@ -28,7 +28,7 @@ bb29.i38: ; preds = %bb33.i47, %bb28.i37
%indvar32.i = phi i32 [ %indvar.next33.i, %bb33.i47 ], [ 0, %bb28.i37 ] ; <i32> [#uses=2]
%sfb.314.i = add i32 %indvar32.i, 0 ; <i32> [#uses=3]
%1 = getelementptr [4 x [21 x double]], [4 x [21 x double]]* null, i32 0, i32 %0, i32 %sfb.314.i ; <double*> [#uses=1]
- %2 = load double* %1, align 8 ; <double> [#uses=0]
+ %2 = load double, double* %1, align 8 ; <double> [#uses=0]
br i1 false, label %bb30.i41, label %bb33.i47
bb30.i41: ; preds = %bb29.i38
diff --git a/llvm/test/CodeGen/X86/loop-strength-reduce8.ll b/llvm/test/CodeGen/X86/loop-strength-reduce8.ll
index 8e393aca9b1..716e1478c9a 100644
--- a/llvm/test/CodeGen/X86/loop-strength-reduce8.ll
+++ b/llvm/test/CodeGen/X86/loop-strength-reduce8.ll
@@ -54,8 +54,8 @@ entry:
call void @llvm.va_start(i8* %p1)
%0 = call fastcc %struct.tree_node* @make_node(i32 %code) nounwind ; <%struct.tree_node*> [#uses=2]
%1 = getelementptr [256 x i32], [256 x i32]* @tree_code_length, i32 0, i32 %code ; <i32*> [#uses=1]
- %2 = load i32* %1, align 4 ; <i32> [#uses=2]
- %3 = load i32* @lineno, align 4 ; <i32> [#uses=1]
+ %2 = load i32, i32* %1, align 4 ; <i32> [#uses=2]
+ %3 = load i32, i32* @lineno, align 4 ; <i32> [#uses=1]
%4 = bitcast %struct.tree_node* %0 to %struct.tree_exp* ; <%struct.tree_exp*> [#uses=2]
%5 = getelementptr %struct.tree_exp, %struct.tree_exp* %4, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 %3, i32* %5, align 4
@@ -64,11 +64,11 @@ entry:
bb: ; preds = %bb, %entry
%i.01 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ] ; <i32> [#uses=2]
- %7 = load i8** %p, align 4 ; <i8*> [#uses=2]
+ %7 = load i8*, i8** %p, align 4 ; <i8*> [#uses=2]
%8 = getelementptr i8, i8* %7, i32 4 ; <i8*> [#uses=1]
store i8* %8, i8** %p, align 4
%9 = bitcast i8* %7 to %struct.tree_node** ; <%struct.tree_node**> [#uses=1]
- %10 = load %struct.tree_node** %9, align 4 ; <%struct.tree_node*> [#uses=1]
+ %10 = load %struct.tree_node*, %struct.tree_node** %9, align 4 ; <%struct.tree_node*> [#uses=1]
%11 = getelementptr %struct.tree_exp, %struct.tree_exp* %4, i32 0, i32 2, i32 %i.01 ; <%struct.tree_node**> [#uses=1]
store %struct.tree_node* %10, %struct.tree_node** %11, align 4
%indvar.next = add i32 %i.01, 1 ; <i32> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/lsr-delayed-fold.ll b/llvm/test/CodeGen/X86/lsr-delayed-fold.ll
index 813591419cb..eaa52dec283 100644
--- a/llvm/test/CodeGen/X86/lsr-delayed-fold.ll
+++ b/llvm/test/CodeGen/X86/lsr-delayed-fold.ll
@@ -42,7 +42,7 @@ for.cond: ; preds = %lbl_264, %for.inc,
lbl_264: ; preds = %if.end, %lbl_264.preheader
%g_263.tmp.0 = phi i8 [ %g_263.tmp.1, %for.cond ] ; <i8> [#uses=1]
- %tmp7 = load i16* undef ; <i16> [#uses=1]
+ %tmp7 = load i16, i16* undef ; <i16> [#uses=1]
%conv8 = trunc i16 %tmp7 to i8 ; <i8> [#uses=1]
%mul.i = mul i8 %p_95.addr.0, %p_95.addr.0 ; <i8> [#uses=1]
%mul.i18 = mul i8 %mul.i, %conv8 ; <i8> [#uses=1]
@@ -99,7 +99,7 @@ lor.lhs.false: ; preds = %for.body
%add112 = trunc i64 %tmp45 to i32 ; <i32> [#uses=1]
%add118 = trunc i64 %tmp47 to i32 ; <i32> [#uses=1]
%tmp10 = getelementptr %struct.Bu, %struct.Bu* %bu, i64 %indvar, i32 2 ; <i32*> [#uses=1]
- %tmp11 = load i32* %tmp10 ; <i32> [#uses=0]
+ %tmp11 = load i32, i32* %tmp10 ; <i32> [#uses=0]
tail call void undef(i32 %add22)
tail call void undef(i32 %add28)
tail call void undef(i32 %add34)
diff --git a/llvm/test/CodeGen/X86/lsr-i386.ll b/llvm/test/CodeGen/X86/lsr-i386.ll
index 5829fe239c1..85e3d3a1983 100644
--- a/llvm/test/CodeGen/X86/lsr-i386.ll
+++ b/llvm/test/CodeGen/X86/lsr-i386.ll
@@ -22,7 +22,7 @@ entry:
bb1: ; preds = %bb6, %bb
%indvar11 = phi i32 [ %indvar.next12, %bb6 ], [ 0, %entry ] ; <i32> [#uses=2]
%tmp21 = add i32 %indvar11, 1 ; <i32> [#uses=1]
- %t = load i32* getelementptr inbounds (%struct.anon* @mp2grad_, i32 0, i32 1)
+ %t = load i32, i32* getelementptr inbounds (%struct.anon* @mp2grad_, i32 0, i32 1)
%tmp15 = mul i32 %n, %t ; <i32> [#uses=1]
%tmp16 = add i32 %tmp21, %tmp15 ; <i32> [#uses=1]
%tmp17 = shl i32 %tmp16, 3 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll b/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
index d23414e3f91..2e3929be31c 100644
--- a/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
+++ b/llvm/test/CodeGen/X86/lsr-loop-exit-cond.ll
@@ -17,9 +17,9 @@
define void @t(i8* nocapture %in, i8* nocapture %out, i32* nocapture %rk, i32 %r) nounwind {
entry:
- %0 = load i32* %rk, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %rk, align 4 ; <i32> [#uses=1]
%1 = getelementptr i32, i32* %rk, i64 1 ; <i32*> [#uses=1]
- %2 = load i32* %1, align 4 ; <i32> [#uses=1]
+ %2 = load i32, i32* %1, align 4 ; <i32> [#uses=1]
%tmp15 = add i32 %r, -1 ; <i32> [#uses=1]
%tmp.16 = zext i32 %tmp15 to i64 ; <i64> [#uses=2]
br label %bb
@@ -33,36 +33,36 @@ bb: ; preds = %bb1, %entry
%3 = lshr i32 %s0.0, 24 ; <i32> [#uses=1]
%4 = zext i32 %3 to i64 ; <i64> [#uses=1]
%5 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %4 ; <i32*> [#uses=1]
- %6 = load i32* %5, align 4 ; <i32> [#uses=1]
+ %6 = load i32, i32* %5, align 4 ; <i32> [#uses=1]
%7 = lshr i32 %s1.0, 16 ; <i32> [#uses=1]
%8 = and i32 %7, 255 ; <i32> [#uses=1]
%9 = zext i32 %8 to i64 ; <i64> [#uses=1]
%10 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %9 ; <i32*> [#uses=1]
- %11 = load i32* %10, align 4 ; <i32> [#uses=1]
+ %11 = load i32, i32* %10, align 4 ; <i32> [#uses=1]
%ctg2.sum2728 = or i64 %tmp18, 8 ; <i64> [#uses=1]
%12 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2728 ; <i8*> [#uses=1]
%13 = bitcast i8* %12 to i32* ; <i32*> [#uses=1]
- %14 = load i32* %13, align 4 ; <i32> [#uses=1]
+ %14 = load i32, i32* %13, align 4 ; <i32> [#uses=1]
%15 = xor i32 %11, %6 ; <i32> [#uses=1]
%16 = xor i32 %15, %14 ; <i32> [#uses=3]
%17 = lshr i32 %s1.0, 24 ; <i32> [#uses=1]
%18 = zext i32 %17 to i64 ; <i64> [#uses=1]
%19 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %18 ; <i32*> [#uses=1]
- %20 = load i32* %19, align 4 ; <i32> [#uses=1]
+ %20 = load i32, i32* %19, align 4 ; <i32> [#uses=1]
%21 = and i32 %s0.0, 255 ; <i32> [#uses=1]
%22 = zext i32 %21 to i64 ; <i64> [#uses=1]
%23 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %22 ; <i32*> [#uses=1]
- %24 = load i32* %23, align 4 ; <i32> [#uses=1]
+ %24 = load i32, i32* %23, align 4 ; <i32> [#uses=1]
%ctg2.sum2930 = or i64 %tmp18, 12 ; <i64> [#uses=1]
%25 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2930 ; <i8*> [#uses=1]
%26 = bitcast i8* %25 to i32* ; <i32*> [#uses=1]
- %27 = load i32* %26, align 4 ; <i32> [#uses=1]
+ %27 = load i32, i32* %26, align 4 ; <i32> [#uses=1]
%28 = xor i32 %24, %20 ; <i32> [#uses=1]
%29 = xor i32 %28, %27 ; <i32> [#uses=4]
%30 = lshr i32 %16, 24 ; <i32> [#uses=1]
%31 = zext i32 %30 to i64 ; <i64> [#uses=1]
%32 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %31 ; <i32*> [#uses=1]
- %33 = load i32* %32, align 4 ; <i32> [#uses=2]
+ %33 = load i32, i32* %32, align 4 ; <i32> [#uses=2]
%exitcond = icmp eq i64 %indvar, %tmp.16 ; <i1> [#uses=1]
br i1 %exitcond, label %bb2, label %bb1
@@ -74,22 +74,22 @@ bb1: ; preds = %bb
%37 = and i32 %36, 255 ; <i32> [#uses=1]
%38 = zext i32 %37 to i64 ; <i64> [#uses=1]
%39 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %38 ; <i32*> [#uses=1]
- %40 = load i32* %39, align 4 ; <i32> [#uses=1]
- %41 = load i32* %35, align 4 ; <i32> [#uses=1]
+ %40 = load i32, i32* %39, align 4 ; <i32> [#uses=1]
+ %41 = load i32, i32* %35, align 4 ; <i32> [#uses=1]
%42 = xor i32 %40, %33 ; <i32> [#uses=1]
%43 = xor i32 %42, %41 ; <i32> [#uses=1]
%44 = lshr i32 %29, 24 ; <i32> [#uses=1]
%45 = zext i32 %44 to i64 ; <i64> [#uses=1]
%46 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %45 ; <i32*> [#uses=1]
- %47 = load i32* %46, align 4 ; <i32> [#uses=1]
+ %47 = load i32, i32* %46, align 4 ; <i32> [#uses=1]
%48 = and i32 %16, 255 ; <i32> [#uses=1]
%49 = zext i32 %48 to i64 ; <i64> [#uses=1]
%50 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %49 ; <i32*> [#uses=1]
- %51 = load i32* %50, align 4 ; <i32> [#uses=1]
+ %51 = load i32, i32* %50, align 4 ; <i32> [#uses=1]
%ctg2.sum32 = add i64 %tmp18, 20 ; <i64> [#uses=1]
%52 = getelementptr i8, i8* %rk26, i64 %ctg2.sum32 ; <i8*> [#uses=1]
%53 = bitcast i8* %52 to i32* ; <i32*> [#uses=1]
- %54 = load i32* %53, align 4 ; <i32> [#uses=1]
+ %54 = load i32, i32* %53, align 4 ; <i32> [#uses=1]
%55 = xor i32 %51, %47 ; <i32> [#uses=1]
%56 = xor i32 %55, %54 ; <i32> [#uses=1]
%indvar.next = add i64 %indvar, 1 ; <i64> [#uses=1]
@@ -105,26 +105,26 @@ bb2: ; preds = %bb
%60 = and i32 %59, 255 ; <i32> [#uses=1]
%61 = zext i32 %60 to i64 ; <i64> [#uses=1]
%62 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %61 ; <i32*> [#uses=1]
- %63 = load i32* %62, align 4 ; <i32> [#uses=1]
+ %63 = load i32, i32* %62, align 4 ; <i32> [#uses=1]
%64 = and i32 %63, 16711680 ; <i32> [#uses=1]
%65 = or i32 %64, %58 ; <i32> [#uses=1]
- %66 = load i32* %57, align 4 ; <i32> [#uses=1]
+ %66 = load i32, i32* %57, align 4 ; <i32> [#uses=1]
%67 = xor i32 %65, %66 ; <i32> [#uses=2]
%68 = lshr i32 %29, 8 ; <i32> [#uses=1]
%69 = zext i32 %68 to i64 ; <i64> [#uses=1]
%70 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %69 ; <i32*> [#uses=1]
- %71 = load i32* %70, align 4 ; <i32> [#uses=1]
+ %71 = load i32, i32* %70, align 4 ; <i32> [#uses=1]
%72 = and i32 %71, -16777216 ; <i32> [#uses=1]
%73 = and i32 %16, 255 ; <i32> [#uses=1]
%74 = zext i32 %73 to i64 ; <i64> [#uses=1]
%75 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %74 ; <i32*> [#uses=1]
- %76 = load i32* %75, align 4 ; <i32> [#uses=1]
+ %76 = load i32, i32* %75, align 4 ; <i32> [#uses=1]
%77 = and i32 %76, 16711680 ; <i32> [#uses=1]
%78 = or i32 %77, %72 ; <i32> [#uses=1]
%ctg2.sum25 = add i64 %tmp10, 20 ; <i64> [#uses=1]
%79 = getelementptr i8, i8* %rk26, i64 %ctg2.sum25 ; <i8*> [#uses=1]
%80 = bitcast i8* %79 to i32* ; <i32*> [#uses=1]
- %81 = load i32* %80, align 4 ; <i32> [#uses=1]
+ %81 = load i32, i32* %80, align 4 ; <i32> [#uses=1]
%82 = xor i32 %78, %81 ; <i32> [#uses=2]
%83 = lshr i32 %67, 24 ; <i32> [#uses=1]
%84 = trunc i32 %83 to i8 ; <i8> [#uses=1]
@@ -176,7 +176,7 @@ for.body: ; preds = %for.body.lr.ph, %fo
%bi.06 = phi i32 [ 0, %for.body.lr.ph ], [ %i.addr.0.bi.0, %for.body ]
%b.05 = phi i32 [ 0, %for.body.lr.ph ], [ %.b.0, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %1 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx, align 4
%cmp1 = icmp ugt i32 %1, %b.05
%.b.0 = select i1 %cmp1, i32 %1, i32 %b.05
%2 = trunc i64 %indvars.iv to i32
diff --git a/llvm/test/CodeGen/X86/lsr-normalization.ll b/llvm/test/CodeGen/X86/lsr-normalization.ll
index 4e7cebe350d..a883338d7f3 100644
--- a/llvm/test/CodeGen/X86/lsr-normalization.ll
+++ b/llvm/test/CodeGen/X86/lsr-normalization.ll
@@ -39,7 +39,7 @@ bb8: ; preds = %bb
bb10: ; preds = %bb8, %bb
%tmp11 = bitcast i8* %tmp5 to %0* ; <%0*> [#uses=1]
call void @_ZNSt15_List_node_base4hookEPS_(%0* %tmp11, %0* %tmp) nounwind
- %tmp12 = load %0** %tmp3 ; <%0*> [#uses=3]
+ %tmp12 = load %0*, %0** %tmp3 ; <%0*> [#uses=3]
%tmp13 = icmp eq %0* %tmp12, %tmp ; <i1> [#uses=1]
br i1 %tmp13, label %bb14, label %bb16
@@ -51,7 +51,7 @@ bb16: ; preds = %bb16, %bb10
%tmp17 = phi i64 [ %tmp22, %bb16 ], [ 0, %bb10 ] ; <i64> [#uses=1]
%tmp18 = phi %0* [ %tmp20, %bb16 ], [ %tmp12, %bb10 ] ; <%0*> [#uses=1]
%tmp19 = getelementptr inbounds %0, %0* %tmp18, i64 0, i32 0 ; <%0**> [#uses=1]
- %tmp20 = load %0** %tmp19 ; <%0*> [#uses=2]
+ %tmp20 = load %0*, %0** %tmp19 ; <%0*> [#uses=2]
%tmp21 = icmp eq %0* %tmp20, %tmp ; <i1> [#uses=1]
%tmp22 = add i64 %tmp17, 1 ; <i64> [#uses=2]
br i1 %tmp21, label %bb23, label %bb16
@@ -64,7 +64,7 @@ bb25: ; preds = %bb25, %bb23
%tmp26 = phi i64 [ %tmp31, %bb25 ], [ 0, %bb23 ] ; <i64> [#uses=1]
%tmp27 = phi %0* [ %tmp29, %bb25 ], [ %tmp12, %bb23 ] ; <%0*> [#uses=1]
%tmp28 = getelementptr inbounds %0, %0* %tmp27, i64 0, i32 0 ; <%0**> [#uses=1]
- %tmp29 = load %0** %tmp28 ; <%0*> [#uses=2]
+ %tmp29 = load %0*, %0** %tmp28 ; <%0*> [#uses=2]
%tmp30 = icmp eq %0* %tmp29, %tmp ; <i1> [#uses=1]
%tmp31 = add i64 %tmp26, 1 ; <i64> [#uses=2]
br i1 %tmp30, label %bb32, label %bb25
@@ -75,14 +75,14 @@ bb32: ; preds = %bb25
br label %bb35
bb35: ; preds = %bb32, %bb14
- %tmp36 = load %0** %tmp3 ; <%0*> [#uses=2]
+ %tmp36 = load %0*, %0** %tmp3 ; <%0*> [#uses=2]
%tmp37 = icmp eq %0* %tmp36, %tmp ; <i1> [#uses=1]
br i1 %tmp37, label %bb44, label %bb38
bb38: ; preds = %bb38, %bb35
%tmp39 = phi %0* [ %tmp41, %bb38 ], [ %tmp36, %bb35 ] ; <%0*> [#uses=2]
%tmp40 = getelementptr inbounds %0, %0* %tmp39, i64 0, i32 0 ; <%0**> [#uses=1]
- %tmp41 = load %0** %tmp40 ; <%0*> [#uses=2]
+ %tmp41 = load %0*, %0** %tmp40 ; <%0*> [#uses=2]
%tmp42 = bitcast %0* %tmp39 to i8* ; <i8*> [#uses=1]
call void @_ZdlPv(i8* %tmp42) nounwind
%tmp43 = icmp eq %0* %tmp41, %tmp ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/lsr-redundant-addressing.ll b/llvm/test/CodeGen/X86/lsr-redundant-addressing.ll
index bd83efdf2d9..31a1859e3b2 100644
--- a/llvm/test/CodeGen/X86/lsr-redundant-addressing.ll
+++ b/llvm/test/CodeGen/X86/lsr-redundant-addressing.ll
@@ -23,7 +23,7 @@ bb38: ; preds = %bb200, %bb
%tmp39 = phi i64 [ %tmp201, %bb200 ], [ 0, %bb ]
%tmp40 = sub i64 0, %tmp39
%tmp47 = getelementptr [5 x %0], [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 0
- %tmp34 = load i32* %tmp47, align 16
+ %tmp34 = load i32, i32* %tmp47, align 16
%tmp203 = icmp slt i32 %tmp34, 12
br i1 %tmp203, label %bb215, label %bb200
@@ -39,13 +39,13 @@ bb215: ; preds = %bb38
store i32 %tmp216, i32* %tmp47, align 16
%tmp217 = sext i32 %tmp216 to i64
%tmp218 = getelementptr inbounds [13 x %1], [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 0
- %tmp219 = load i32* %tmp218, align 8
+ %tmp219 = load i32, i32* %tmp218, align 8
store i32 %tmp219, i32* %tmp48, align 4
%tmp220 = getelementptr inbounds [13 x %1], [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 1
- %tmp221 = load i32* %tmp220, align 4
+ %tmp221 = load i32, i32* %tmp220, align 4
store i32 %tmp221, i32* %tmp49, align 4
%tmp222 = getelementptr inbounds [13 x %1], [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 2
- %tmp223 = load i32* %tmp222, align 8
+ %tmp223 = load i32, i32* %tmp222, align 8
store i32 %tmp223, i32* %tmp50, align 4
ret void
}
diff --git a/llvm/test/CodeGen/X86/lsr-reuse-trunc.ll b/llvm/test/CodeGen/X86/lsr-reuse-trunc.ll
index c3dbd602f1f..7f73b6b9d1e 100644
--- a/llvm/test/CodeGen/X86/lsr-reuse-trunc.ll
+++ b/llvm/test/CodeGen/X86/lsr-reuse-trunc.ll
@@ -14,7 +14,7 @@
define void @vvfloorf(float* nocapture %y, float* nocapture %x, i32* nocapture %n) nounwind {
entry:
- %0 = load i32* %n, align 4
+ %0 = load i32, i32* %n, align 4
%1 = icmp sgt i32 %0, 0
br i1 %1, label %bb, label %return
@@ -25,7 +25,7 @@ bb:
%scevgep9 = bitcast float* %scevgep to <4 x float>*
%scevgep10 = getelementptr float, float* %x, i64 %tmp
%scevgep1011 = bitcast float* %scevgep10 to <4 x float>*
- %2 = load <4 x float>* %scevgep1011, align 16
+ %2 = load <4 x float>, <4 x float>* %scevgep1011, align 16
%3 = bitcast <4 x float> %2 to <4 x i32>
%4 = and <4 x i32> %3, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
%5 = bitcast <4 x i32> %4 to <4 x float>
@@ -48,7 +48,7 @@ bb:
store <4 x float> %19, <4 x float>* %scevgep9, align 16
%tmp12 = add i64 %tmp, 4
%tmp13 = trunc i64 %tmp12 to i32
- %20 = load i32* %n, align 4
+ %20 = load i32, i32* %n, align 4
%21 = icmp sgt i32 %20, %tmp13
%indvar.next = add i64 %indvar, 1
br i1 %21, label %bb, label %return
diff --git a/llvm/test/CodeGen/X86/lsr-reuse.ll b/llvm/test/CodeGen/X86/lsr-reuse.ll
index a8a18f1d551..dd1e40f6a1e 100644
--- a/llvm/test/CodeGen/X86/lsr-reuse.ll
+++ b/llvm/test/CodeGen/X86/lsr-reuse.ll
@@ -28,8 +28,8 @@ loop:
%Ai = getelementptr inbounds double, double* %A, i64 %i
%Bi = getelementptr inbounds double, double* %B, i64 %i
%Ci = getelementptr inbounds double, double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
+ %t1 = load double, double* %Bi
+ %t2 = load double, double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%i.next = add nsw i64 %i, 1
@@ -73,16 +73,16 @@ loop:
%Ai = getelementptr inbounds double, double* %A, i64 %i
%Bi = getelementptr inbounds double, double* %B, i64 %i
%Ci = getelementptr inbounds double, double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
+ %t1 = load double, double* %Bi
+ %t2 = load double, double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%j = add i64 %i, 256
%Aj = getelementptr inbounds double, double* %A, i64 %j
%Bj = getelementptr inbounds double, double* %B, i64 %j
%Cj = getelementptr inbounds double, double* %C, i64 %j
- %t3 = load double* %Bj
- %t4 = load double* %Cj
+ %t3 = load double, double* %Bj
+ %t4 = load double, double* %Cj
%o = fdiv double %t3, %t4
store double %o, double* %Aj
%i.next = add nsw i64 %i, 1
@@ -119,16 +119,16 @@ loop:
%Ai = getelementptr inbounds double, double* %A, i64 %i
%Bi = getelementptr inbounds double, double* %B, i64 %i
%Ci = getelementptr inbounds double, double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
+ %t1 = load double, double* %Bi
+ %t2 = load double, double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%j = sub i64 %i, 256
%Aj = getelementptr inbounds double, double* %A, i64 %j
%Bj = getelementptr inbounds double, double* %B, i64 %j
%Cj = getelementptr inbounds double, double* %C, i64 %j
- %t3 = load double* %Bj
- %t4 = load double* %Cj
+ %t3 = load double, double* %Bj
+ %t4 = load double, double* %Cj
%o = fdiv double %t3, %t4
store double %o, double* %Aj
%i.next = add nsw i64 %i, 1
@@ -165,16 +165,16 @@ loop:
%Ak = getelementptr inbounds double, double* %A, i64 %k
%Bk = getelementptr inbounds double, double* %B, i64 %k
%Ck = getelementptr inbounds double, double* %C, i64 %k
- %t1 = load double* %Bk
- %t2 = load double* %Ck
+ %t1 = load double, double* %Bk
+ %t2 = load double, double* %Ck
%m = fmul double %t1, %t2
store double %m, double* %Ak
%j = sub i64 %i, 256
%Aj = getelementptr inbounds double, double* %A, i64 %j
%Bj = getelementptr inbounds double, double* %B, i64 %j
%Cj = getelementptr inbounds double, double* %C, i64 %j
- %t3 = load double* %Bj
- %t4 = load double* %Cj
+ %t3 = load double, double* %Bj
+ %t4 = load double, double* %Cj
%o = fdiv double %t3, %t4
store double %o, double* %Aj
%i.next = add nsw i64 %i, 1
@@ -208,8 +208,8 @@ loop:
%Ai = getelementptr inbounds double, double* %A, i64 %i
%Bi = getelementptr inbounds double, double* %B, i64 %i
%Ci = getelementptr inbounds double, double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
+ %t1 = load double, double* %Bi
+ %t2 = load double, double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%i.next = add nsw i64 %i, 1
@@ -243,8 +243,8 @@ loop:
%Ai = getelementptr inbounds double, double* %A, i64 %i
%Bi = getelementptr inbounds double, double* %B, i64 %i
%Ci = getelementptr inbounds double, double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
+ %t1 = load double, double* %Bi
+ %t2 = load double, double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%i.next = add nsw i64 %i, 1
@@ -281,17 +281,17 @@ loop:
%i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
%i5 = add i64 %i, 5
%Ai = getelementptr double, double* %A, i64 %i5
- %t2 = load double* %Ai
+ %t2 = load double, double* %Ai
%Bi = getelementptr double, double* %B, i64 %i5
- %t4 = load double* %Bi
+ %t4 = load double, double* %Bi
%t5 = fadd double %t2, %t4
%Ci = getelementptr double, double* %C, i64 %i5
store double %t5, double* %Ci
%i10 = add i64 %i, 10
%Ai10 = getelementptr double, double* %A, i64 %i10
- %t9 = load double* %Ai10
+ %t9 = load double, double* %Ai10
%Bi10 = getelementptr double, double* %B, i64 %i10
- %t11 = load double* %Bi10
+ %t11 = load double, double* %Bi10
%t12 = fsub double %t9, %t11
%Ci10 = getelementptr double, double* %C, i64 %i10
store double %t12, double* %Ci10
@@ -328,17 +328,17 @@ loop:
%i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
%i5 = add i64 %i, 5
%Ai = getelementptr double, double* %A, i64 %i5
- %t2 = load double* %Ai
+ %t2 = load double, double* %Ai
%Bi = getelementptr double, double* %B, i64 %i5
- %t4 = load double* %Bi
+ %t4 = load double, double* %Bi
%t5 = fadd double %t2, %t4
%Ci = getelementptr double, double* %C, i64 %i5
store double %t5, double* %Ci
%i10 = add i64 %i, 10
%Ai10 = getelementptr double, double* %A, i64 %i10
- %t9 = load double* %Ai10
+ %t9 = load double, double* %Ai10
%Bi10 = getelementptr double, double* %B, i64 %i10
- %t11 = load double* %Bi10
+ %t11 = load double, double* %Bi10
%t12 = fsub double %t9, %t11
%Ci10 = getelementptr double, double* %C, i64 %i10
store double %t12, double* %Ci10
@@ -375,8 +375,8 @@ loop:
%Ai = getelementptr inbounds double, double* %A, i64 %i
%Bi = getelementptr inbounds double, double* %B, i64 %i
%Ci = getelementptr inbounds double, double* %C, i64 %i
- %t1 = load double* %Bi
- %t2 = load double* %Ci
+ %t1 = load double, double* %Bi
+ %t2 = load double, double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%i.next = add nsw i64 %i, 1
@@ -414,7 +414,7 @@ bb: ; preds = %bb3, %bb.nph14
%indvar16 = phi i64 [ 0, %bb.nph14 ], [ %indvar.next17, %bb3 ] ; <i64> [#uses=3]
%s.113 = phi i32 [ 0, %bb.nph14 ], [ %s.0.lcssa, %bb3 ] ; <i32> [#uses=2]
%scevgep2526 = getelementptr [123123 x %struct.anon], [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 0 ; <i32*> [#uses=1]
- %1 = load i32* %scevgep2526, align 4 ; <i32> [#uses=2]
+ %1 = load i32, i32* %scevgep2526, align 4 ; <i32> [#uses=2]
%2 = icmp sgt i32 %1, 0 ; <i1> [#uses=1]
br i1 %2, label %bb.nph, label %bb3
@@ -426,7 +426,7 @@ bb1: ; preds = %bb.nph, %bb1
%indvar = phi i64 [ 0, %bb.nph ], [ %tmp19, %bb1 ] ; <i64> [#uses=2]
%s.07 = phi i32 [ %s.113, %bb.nph ], [ %4, %bb1 ] ; <i32> [#uses=1]
%c.08 = getelementptr [123123 x %struct.anon], [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 1, i64 %indvar ; <i32*> [#uses=1]
- %3 = load i32* %c.08, align 4 ; <i32> [#uses=1]
+ %3 = load i32, i32* %c.08, align 4 ; <i32> [#uses=1]
%4 = add nsw i32 %3, %s.07 ; <i32> [#uses=2]
%tmp19 = add i64 %indvar, 1 ; <i64> [#uses=2]
%5 = icmp sgt i64 %tmp23, %tmp19 ; <i1> [#uses=1]
@@ -493,7 +493,7 @@ define void @test(float* %arg, i64 %arg1, float* nocapture %arg2, float* nocaptu
bb:
%t = alloca float, align 4 ; <float*> [#uses=3]
%t7 = alloca float, align 4 ; <float*> [#uses=2]
- %t8 = load float* %arg3 ; <float> [#uses=8]
+ %t8 = load float, float* %arg3 ; <float> [#uses=8]
%t9 = ptrtoint float* %arg to i64 ; <i64> [#uses=1]
%t10 = ptrtoint float* %arg4 to i64 ; <i64> [#uses=1]
%t11 = xor i64 %t10, %t9 ; <i64> [#uses=1]
@@ -507,7 +507,7 @@ bb:
br i1 %t18, label %bb19, label %bb213
bb19: ; preds = %bb
- %t20 = load float* %arg2 ; <float> [#uses=1]
+ %t20 = load float, float* %arg2 ; <float> [#uses=1]
br label %bb21
bb21: ; preds = %bb32, %bb19
@@ -526,7 +526,7 @@ bb28: ; preds = %bb21
br i1 %t31, label %bb37, label %bb32
bb32: ; preds = %bb28
- %t33 = load float* %t26 ; <float> [#uses=1]
+ %t33 = load float, float* %t26 ; <float> [#uses=1]
%t34 = fmul float %t23, %t33 ; <float> [#uses=1]
store float %t34, float* %t25
%t35 = fadd float %t23, %t8 ; <float> [#uses=1]
@@ -604,10 +604,10 @@ bb68: ; preds = %bb68, %bb61
%t95 = bitcast float* %t94 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t96 = mul i64 %t69, -16 ; <i64> [#uses=1]
%t97 = add i64 %t67, %t96 ; <i64> [#uses=2]
- %t98 = load <4 x float>* %t77 ; <<4 x float>> [#uses=1]
- %t99 = load <4 x float>* %t81 ; <<4 x float>> [#uses=1]
- %t100 = load <4 x float>* %t84 ; <<4 x float>> [#uses=1]
- %t101 = load <4 x float>* %t87 ; <<4 x float>> [#uses=1]
+ %t98 = load <4 x float>, <4 x float>* %t77 ; <<4 x float>> [#uses=1]
+ %t99 = load <4 x float>, <4 x float>* %t81 ; <<4 x float>> [#uses=1]
+ %t100 = load <4 x float>, <4 x float>* %t84 ; <<4 x float>> [#uses=1]
+ %t101 = load <4 x float>, <4 x float>* %t87 ; <<4 x float>> [#uses=1]
%t102 = fmul <4 x float> %t98, %t71 ; <<4 x float>> [#uses=1]
%t103 = fadd <4 x float> %t71, %t55 ; <<4 x float>> [#uses=2]
%t104 = fmul <4 x float> %t99, %t73 ; <<4 x float>> [#uses=1]
@@ -644,7 +644,7 @@ bb122: ; preds = %bb118
%t123 = add i64 %t22, -1 ; <i64> [#uses=1]
%t124 = getelementptr inbounds float, float* %arg, i64 %t123 ; <float*> [#uses=1]
%t125 = bitcast float* %t124 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %t126 = load <4 x float>* %t125 ; <<4 x float>> [#uses=1]
+ %t126 = load <4 x float>, <4 x float>* %t125 ; <<4 x float>> [#uses=1]
%t127 = add i64 %t22, 16 ; <i64> [#uses=1]
%t128 = add i64 %t22, 3 ; <i64> [#uses=1]
%t129 = add i64 %t22, 7 ; <i64> [#uses=1]
@@ -692,10 +692,10 @@ bb137: ; preds = %bb137, %bb122
%t169 = bitcast float* %t168 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t170 = mul i64 %t138, -16 ; <i64> [#uses=1]
%t171 = add i64 %t136, %t170 ; <i64> [#uses=2]
- %t172 = load <4 x float>* %t148 ; <<4 x float>> [#uses=2]
- %t173 = load <4 x float>* %t151 ; <<4 x float>> [#uses=2]
- %t174 = load <4 x float>* %t154 ; <<4 x float>> [#uses=2]
- %t175 = load <4 x float>* %t157 ; <<4 x float>> [#uses=2]
+ %t172 = load <4 x float>, <4 x float>* %t148 ; <<4 x float>> [#uses=2]
+ %t173 = load <4 x float>, <4 x float>* %t151 ; <<4 x float>> [#uses=2]
+ %t174 = load <4 x float>, <4 x float>* %t154 ; <<4 x float>> [#uses=2]
+ %t175 = load <4 x float>, <4 x float>* %t157 ; <<4 x float>> [#uses=2]
%t176 = shufflevector <4 x float> %t143, <4 x float> %t172, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
%t177 = shufflevector <4 x float> %t176, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1]
%t178 = shufflevector <4 x float> %t172, <4 x float> %t173, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
@@ -734,7 +734,7 @@ bb201: ; preds = %bb201, %bb194
%t203 = phi float [ %t208, %bb201 ], [ %t199, %bb194 ] ; <float> [#uses=2]
%t204 = getelementptr float, float* %t198, i64 %t202 ; <float*> [#uses=1]
%t205 = getelementptr float, float* %t197, i64 %t202 ; <float*> [#uses=1]
- %t206 = load float* %t204 ; <float> [#uses=1]
+ %t206 = load float, float* %t204 ; <float> [#uses=1]
%t207 = fmul float %t203, %t206 ; <float> [#uses=1]
store float %t207, float* %t205
%t208 = fadd float %t203, %t8 ; <float> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/lsr-static-addr.ll b/llvm/test/CodeGen/X86/lsr-static-addr.ll
index 8b73a032b6d..1765ed7871d 100644
--- a/llvm/test/CodeGen/X86/lsr-static-addr.ll
+++ b/llvm/test/CodeGen/X86/lsr-static-addr.ll
@@ -30,7 +30,7 @@ entry:
for.body:
%i.06 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr [0 x double], [0 x double]* @A, i64 0, i64 %i.06
- %tmp3 = load double* %arrayidx, align 8
+ %tmp3 = load double, double* %arrayidx, align 8
%mul = fmul double %tmp3, 2.300000e+00
store double %mul, double* %arrayidx, align 8
%inc = add nsw i64 %i.06, 1
diff --git a/llvm/test/CodeGen/X86/lsr-wrap.ll b/llvm/test/CodeGen/X86/lsr-wrap.ll
index d605e4f14fe..adf95447779 100644
--- a/llvm/test/CodeGen/X86/lsr-wrap.ll
+++ b/llvm/test/CodeGen/X86/lsr-wrap.ll
@@ -20,7 +20,7 @@ bb: ; preds = %bb, %entry
%indvar = phi i16 [ 0, %entry ], [ %indvar.next, %bb ] ; <i16> [#uses=2]
%tmp = sub i16 0, %indvar ; <i16> [#uses=1]
%tmp27 = trunc i16 %tmp to i8 ; <i8> [#uses=1]
- %tmp1 = load i32* @g_19, align 4 ; <i32> [#uses=2]
+ %tmp1 = load i32, i32* @g_19, align 4 ; <i32> [#uses=2]
%tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
store i32 %tmp2, i32* @g_19, align 4
%tmp3 = trunc i32 %tmp1 to i8 ; <i8> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/lzcnt-tzcnt.ll b/llvm/test/CodeGen/X86/lzcnt-tzcnt.ll
index e98764a0d78..aa9ae2b7b10 100644
--- a/llvm/test/CodeGen/X86/lzcnt-tzcnt.ll
+++ b/llvm/test/CodeGen/X86/lzcnt-tzcnt.ll
@@ -106,7 +106,7 @@ define i64 @test9_ctlz(i64 %v) {
define i16 @test10_ctlz(i16* %ptr) {
- %v = load i16* %ptr
+ %v = load i16, i16* %ptr
%cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
%tobool = icmp eq i16 %v, 0
%cond = select i1 %tobool, i16 16, i16 %cnt
@@ -119,7 +119,7 @@ define i16 @test10_ctlz(i16* %ptr) {
define i32 @test11_ctlz(i32* %ptr) {
- %v = load i32* %ptr
+ %v = load i32, i32* %ptr
%cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
%tobool = icmp eq i32 %v, 0
%cond = select i1 %tobool, i32 32, i32 %cnt
@@ -132,7 +132,7 @@ define i32 @test11_ctlz(i32* %ptr) {
define i64 @test12_ctlz(i64* %ptr) {
- %v = load i64* %ptr
+ %v = load i64, i64* %ptr
%cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
%tobool = icmp eq i64 %v, 0
%cond = select i1 %tobool, i64 64, i64 %cnt
@@ -145,7 +145,7 @@ define i64 @test12_ctlz(i64* %ptr) {
define i16 @test13_ctlz(i16* %ptr) {
- %v = load i16* %ptr
+ %v = load i16, i16* %ptr
%cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
%tobool = icmp eq i16 0, %v
%cond = select i1 %tobool, i16 16, i16 %cnt
@@ -158,7 +158,7 @@ define i16 @test13_ctlz(i16* %ptr) {
define i32 @test14_ctlz(i32* %ptr) {
- %v = load i32* %ptr
+ %v = load i32, i32* %ptr
%cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
%tobool = icmp eq i32 0, %v
%cond = select i1 %tobool, i32 32, i32 %cnt
@@ -171,7 +171,7 @@ define i32 @test14_ctlz(i32* %ptr) {
define i64 @test15_ctlz(i64* %ptr) {
- %v = load i64* %ptr
+ %v = load i64, i64* %ptr
%cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
%tobool = icmp eq i64 0, %v
%cond = select i1 %tobool, i64 64, i64 %cnt
@@ -184,7 +184,7 @@ define i64 @test15_ctlz(i64* %ptr) {
define i16 @test16_ctlz(i16* %ptr) {
- %v = load i16* %ptr
+ %v = load i16, i16* %ptr
%cnt = tail call i16 @llvm.ctlz.i16(i16 %v, i1 true)
%tobool = icmp eq i16 0, %v
%cond = select i1 %tobool, i16 %cnt, i16 16
@@ -197,7 +197,7 @@ define i16 @test16_ctlz(i16* %ptr) {
define i32 @test17_ctlz(i32* %ptr) {
- %v = load i32* %ptr
+ %v = load i32, i32* %ptr
%cnt = tail call i32 @llvm.ctlz.i32(i32 %v, i1 true)
%tobool = icmp eq i32 0, %v
%cond = select i1 %tobool, i32 %cnt, i32 32
@@ -210,7 +210,7 @@ define i32 @test17_ctlz(i32* %ptr) {
define i64 @test18_ctlz(i64* %ptr) {
- %v = load i64* %ptr
+ %v = load i64, i64* %ptr
%cnt = tail call i64 @llvm.ctlz.i64(i64 %v, i1 true)
%tobool = icmp eq i64 0, %v
%cond = select i1 %tobool, i64 %cnt, i64 64
@@ -322,7 +322,7 @@ define i64 @test9_cttz(i64 %v) {
define i16 @test10_cttz(i16* %ptr) {
- %v = load i16* %ptr
+ %v = load i16, i16* %ptr
%cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
%tobool = icmp eq i16 %v, 0
%cond = select i1 %tobool, i16 16, i16 %cnt
@@ -335,7 +335,7 @@ define i16 @test10_cttz(i16* %ptr) {
define i32 @test11_cttz(i32* %ptr) {
- %v = load i32* %ptr
+ %v = load i32, i32* %ptr
%cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
%tobool = icmp eq i32 %v, 0
%cond = select i1 %tobool, i32 32, i32 %cnt
@@ -348,7 +348,7 @@ define i32 @test11_cttz(i32* %ptr) {
define i64 @test12_cttz(i64* %ptr) {
- %v = load i64* %ptr
+ %v = load i64, i64* %ptr
%cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
%tobool = icmp eq i64 %v, 0
%cond = select i1 %tobool, i64 64, i64 %cnt
@@ -361,7 +361,7 @@ define i64 @test12_cttz(i64* %ptr) {
define i16 @test13_cttz(i16* %ptr) {
- %v = load i16* %ptr
+ %v = load i16, i16* %ptr
%cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
%tobool = icmp eq i16 0, %v
%cond = select i1 %tobool, i16 16, i16 %cnt
@@ -374,7 +374,7 @@ define i16 @test13_cttz(i16* %ptr) {
define i32 @test14_cttz(i32* %ptr) {
- %v = load i32* %ptr
+ %v = load i32, i32* %ptr
%cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
%tobool = icmp eq i32 0, %v
%cond = select i1 %tobool, i32 32, i32 %cnt
@@ -387,7 +387,7 @@ define i32 @test14_cttz(i32* %ptr) {
define i64 @test15_cttz(i64* %ptr) {
- %v = load i64* %ptr
+ %v = load i64, i64* %ptr
%cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
%tobool = icmp eq i64 0, %v
%cond = select i1 %tobool, i64 64, i64 %cnt
@@ -400,7 +400,7 @@ define i64 @test15_cttz(i64* %ptr) {
define i16 @test16_cttz(i16* %ptr) {
- %v = load i16* %ptr
+ %v = load i16, i16* %ptr
%cnt = tail call i16 @llvm.cttz.i16(i16 %v, i1 true)
%tobool = icmp eq i16 0, %v
%cond = select i1 %tobool, i16 %cnt, i16 16
@@ -413,7 +413,7 @@ define i16 @test16_cttz(i16* %ptr) {
define i32 @test17_cttz(i32* %ptr) {
- %v = load i32* %ptr
+ %v = load i32, i32* %ptr
%cnt = tail call i32 @llvm.cttz.i32(i32 %v, i1 true)
%tobool = icmp eq i32 0, %v
%cond = select i1 %tobool, i32 %cnt, i32 32
@@ -426,7 +426,7 @@ define i32 @test17_cttz(i32* %ptr) {
define i64 @test18_cttz(i64* %ptr) {
- %v = load i64* %ptr
+ %v = load i64, i64* %ptr
%cnt = tail call i64 @llvm.cttz.i64(i64 %v, i1 true)
%tobool = icmp eq i64 0, %v
%cond = select i1 %tobool, i64 %cnt, i64 64
diff --git a/llvm/test/CodeGen/X86/machine-cse.ll b/llvm/test/CodeGen/X86/machine-cse.ll
index e5df214719e..ce3ab4c972f 100644
--- a/llvm/test/CodeGen/X86/machine-cse.ll
+++ b/llvm/test/CodeGen/X86/machine-cse.ll
@@ -147,7 +147,7 @@ define i32 @t2() {
br i1 %c, label %a, label %b
a:
- %l = load i32* @t2_global
+ %l = load i32, i32* @t2_global
ret i32 %l
b:
diff --git a/llvm/test/CodeGen/X86/masked-iv-safe.ll b/llvm/test/CodeGen/X86/masked-iv-safe.ll
index dcf2e1d3e9c..8c0a4d4f175 100644
--- a/llvm/test/CodeGen/X86/masked-iv-safe.ll
+++ b/llvm/test/CodeGen/X86/masked-iv-safe.ll
@@ -16,16 +16,16 @@ loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -49,16 +49,16 @@ loop:
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -83,17 +83,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -118,17 +118,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -152,16 +152,16 @@ loop:
%indvar = phi i64 [ 18446744073709551615, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -185,16 +185,16 @@ loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fdiv double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -219,17 +219,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fdiv double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -254,17 +254,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fdiv double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
diff --git a/llvm/test/CodeGen/X86/masked-iv-unsafe.ll b/llvm/test/CodeGen/X86/masked-iv-unsafe.ll
index 28c2444e090..974a1cfb90d 100644
--- a/llvm/test/CodeGen/X86/masked-iv-unsafe.ll
+++ b/llvm/test/CodeGen/X86/masked-iv-unsafe.ll
@@ -14,16 +14,16 @@ loop:
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -42,16 +42,16 @@ loop:
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -71,17 +71,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -101,17 +101,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -130,16 +130,16 @@ loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -158,16 +158,16 @@ loop:
%indvar = phi i64 [ %n, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -187,17 +187,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 1
@@ -217,17 +217,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -246,16 +246,16 @@ loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 1
@@ -274,16 +274,16 @@ loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 3
@@ -302,16 +302,16 @@ loop:
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 3
@@ -331,17 +331,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = add i64 %indvar, 3
@@ -361,17 +361,17 @@ loop:
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
%t0 = getelementptr double, double* %d, i64 %indvar.i8
- %t1 = load double* %t0
+ %t1 = load double, double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double, double* %d, i64 %indvar.i24
- %t4 = load double* %t3
+ %t4 = load double, double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double, double* %d, i64 %indvar
- %t7 = load double* %t6
+ %t7 = load double, double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar.next = sub i64 %indvar, 3
diff --git a/llvm/test/CodeGen/X86/mcinst-lowering.ll b/llvm/test/CodeGen/X86/mcinst-lowering.ll
index a82cfc431ba..51b2895f1c7 100644
--- a/llvm/test/CodeGen/X86/mcinst-lowering.ll
+++ b/llvm/test/CodeGen/X86/mcinst-lowering.ll
@@ -5,7 +5,7 @@ target triple = "x86_64-apple-darwin10.0.0"
define i32 @f0(i32* nocapture %x) nounwind readonly ssp {
entry:
- %tmp1 = load i32* %x ; <i32> [#uses=2]
+ %tmp1 = load i32, i32* %x ; <i32> [#uses=2]
%tobool = icmp eq i32 %tmp1, 0 ; <i1> [#uses=1]
br i1 %tobool, label %if.end, label %return
diff --git a/llvm/test/CodeGen/X86/mem-intrin-base-reg.ll b/llvm/test/CodeGen/X86/mem-intrin-base-reg.ll
index bf90d605524..9bace29e185 100644
--- a/llvm/test/CodeGen/X86/mem-intrin-base-reg.ll
+++ b/llvm/test/CodeGen/X86/mem-intrin-base-reg.ll
@@ -25,8 +25,8 @@ no_vectors:
spill_vectors:
%vp1 = getelementptr <4 x i32>, <4 x i32>* %vp0, i32 1
- %v0 = load <4 x i32>* %vp0
- %v1 = load <4 x i32>* %vp1
+ %v0 = load <4 x i32>, <4 x i32>* %vp0
+ %v1 = load <4 x i32>, <4 x i32>* %vp1
%vicmp = icmp slt <4 x i32> %v0, %v1
%icmp = extractelement <4 x i1> %vicmp, i32 0
call void @escape_vla_and_icmp(i8* null, i1 zeroext %icmp)
@@ -50,8 +50,8 @@ no_vectors:
spill_vectors:
%vp1 = getelementptr <4 x i32>, <4 x i32>* %vp0, i32 1
- %v0 = load <4 x i32>* %vp0
- %v1 = load <4 x i32>* %vp1
+ %v0 = load <4 x i32>, <4 x i32>* %vp0
+ %v1 = load <4 x i32>, <4 x i32>* %vp1
%vicmp = icmp slt <4 x i32> %v0, %v1
%icmp = extractelement <4 x i1> %vicmp, i32 0
%vla = alloca i8, i32 %n
@@ -78,8 +78,8 @@ no_vectors:
spill_vectors:
%vp1 = getelementptr <4 x i32>, <4 x i32>* %vp0, i32 1
- %v0 = load <4 x i32>* %vp0
- %v1 = load <4 x i32>* %vp1
+ %v0 = load <4 x i32>, <4 x i32>* %vp0
+ %v1 = load <4 x i32>, <4 x i32>* %vp1
%vicmp = icmp slt <4 x i32> %v0, %v1
%icmp = extractelement <4 x i1> %vicmp, i32 0
%vla = alloca i8, i32 %n
diff --git a/llvm/test/CodeGen/X86/mem-promote-integers.ll b/llvm/test/CodeGen/X86/mem-promote-integers.ll
index ea38b95a864..3023cf2e900 100644
--- a/llvm/test/CodeGen/X86/mem-promote-integers.ll
+++ b/llvm/test/CodeGen/X86/mem-promote-integers.ll
@@ -5,7 +5,7 @@
; RUN: llc -march=x86-64 < %s > /dev/null
define <1 x i8> @test_1xi8(<1 x i8> %x, <1 x i8>* %b) {
- %bb = load <1 x i8>* %b
+ %bb = load <1 x i8>, <1 x i8>* %b
%tt = xor <1 x i8> %x, %bb
store <1 x i8> %tt, <1 x i8>* %b
br label %next
@@ -16,7 +16,7 @@ next:
define <1 x i16> @test_1xi16(<1 x i16> %x, <1 x i16>* %b) {
- %bb = load <1 x i16>* %b
+ %bb = load <1 x i16>, <1 x i16>* %b
%tt = xor <1 x i16> %x, %bb
store <1 x i16> %tt, <1 x i16>* %b
br label %next
@@ -27,7 +27,7 @@ next:
define <1 x i32> @test_1xi32(<1 x i32> %x, <1 x i32>* %b) {
- %bb = load <1 x i32>* %b
+ %bb = load <1 x i32>, <1 x i32>* %b
%tt = xor <1 x i32> %x, %bb
store <1 x i32> %tt, <1 x i32>* %b
br label %next
@@ -38,7 +38,7 @@ next:
define <1 x i64> @test_1xi64(<1 x i64> %x, <1 x i64>* %b) {
- %bb = load <1 x i64>* %b
+ %bb = load <1 x i64>, <1 x i64>* %b
%tt = xor <1 x i64> %x, %bb
store <1 x i64> %tt, <1 x i64>* %b
br label %next
@@ -49,7 +49,7 @@ next:
define <1 x i128> @test_1xi128(<1 x i128> %x, <1 x i128>* %b) {
- %bb = load <1 x i128>* %b
+ %bb = load <1 x i128>, <1 x i128>* %b
%tt = xor <1 x i128> %x, %bb
store <1 x i128> %tt, <1 x i128>* %b
br label %next
@@ -60,7 +60,7 @@ next:
define <1 x i256> @test_1xi256(<1 x i256> %x, <1 x i256>* %b) {
- %bb = load <1 x i256>* %b
+ %bb = load <1 x i256>, <1 x i256>* %b
%tt = xor <1 x i256> %x, %bb
store <1 x i256> %tt, <1 x i256>* %b
br label %next
@@ -71,7 +71,7 @@ next:
define <1 x i512> @test_1xi512(<1 x i512> %x, <1 x i512>* %b) {
- %bb = load <1 x i512>* %b
+ %bb = load <1 x i512>, <1 x i512>* %b
%tt = xor <1 x i512> %x, %bb
store <1 x i512> %tt, <1 x i512>* %b
br label %next
@@ -82,7 +82,7 @@ next:
define <2 x i8> @test_2xi8(<2 x i8> %x, <2 x i8>* %b) {
- %bb = load <2 x i8>* %b
+ %bb = load <2 x i8>, <2 x i8>* %b
%tt = xor <2 x i8> %x, %bb
store <2 x i8> %tt, <2 x i8>* %b
br label %next
@@ -93,7 +93,7 @@ next:
define <2 x i16> @test_2xi16(<2 x i16> %x, <2 x i16>* %b) {
- %bb = load <2 x i16>* %b
+ %bb = load <2 x i16>, <2 x i16>* %b
%tt = xor <2 x i16> %x, %bb
store <2 x i16> %tt, <2 x i16>* %b
br label %next
@@ -104,7 +104,7 @@ next:
define <2 x i32> @test_2xi32(<2 x i32> %x, <2 x i32>* %b) {
- %bb = load <2 x i32>* %b
+ %bb = load <2 x i32>, <2 x i32>* %b
%tt = xor <2 x i32> %x, %bb
store <2 x i32> %tt, <2 x i32>* %b
br label %next
@@ -115,7 +115,7 @@ next:
define <2 x i64> @test_2xi64(<2 x i64> %x, <2 x i64>* %b) {
- %bb = load <2 x i64>* %b
+ %bb = load <2 x i64>, <2 x i64>* %b
%tt = xor <2 x i64> %x, %bb
store <2 x i64> %tt, <2 x i64>* %b
br label %next
@@ -126,7 +126,7 @@ next:
define <2 x i128> @test_2xi128(<2 x i128> %x, <2 x i128>* %b) {
- %bb = load <2 x i128>* %b
+ %bb = load <2 x i128>, <2 x i128>* %b
%tt = xor <2 x i128> %x, %bb
store <2 x i128> %tt, <2 x i128>* %b
br label %next
@@ -137,7 +137,7 @@ next:
define <2 x i256> @test_2xi256(<2 x i256> %x, <2 x i256>* %b) {
- %bb = load <2 x i256>* %b
+ %bb = load <2 x i256>, <2 x i256>* %b
%tt = xor <2 x i256> %x, %bb
store <2 x i256> %tt, <2 x i256>* %b
br label %next
@@ -148,7 +148,7 @@ next:
define <2 x i512> @test_2xi512(<2 x i512> %x, <2 x i512>* %b) {
- %bb = load <2 x i512>* %b
+ %bb = load <2 x i512>, <2 x i512>* %b
%tt = xor <2 x i512> %x, %bb
store <2 x i512> %tt, <2 x i512>* %b
br label %next
@@ -159,7 +159,7 @@ next:
define <3 x i8> @test_3xi8(<3 x i8> %x, <3 x i8>* %b) {
- %bb = load <3 x i8>* %b
+ %bb = load <3 x i8>, <3 x i8>* %b
%tt = xor <3 x i8> %x, %bb
store <3 x i8> %tt, <3 x i8>* %b
br label %next
@@ -170,7 +170,7 @@ next:
define <3 x i16> @test_3xi16(<3 x i16> %x, <3 x i16>* %b) {
- %bb = load <3 x i16>* %b
+ %bb = load <3 x i16>, <3 x i16>* %b
%tt = xor <3 x i16> %x, %bb
store <3 x i16> %tt, <3 x i16>* %b
br label %next
@@ -181,7 +181,7 @@ next:
define <3 x i32> @test_3xi32(<3 x i32> %x, <3 x i32>* %b) {
- %bb = load <3 x i32>* %b
+ %bb = load <3 x i32>, <3 x i32>* %b
%tt = xor <3 x i32> %x, %bb
store <3 x i32> %tt, <3 x i32>* %b
br label %next
@@ -192,7 +192,7 @@ next:
define <3 x i64> @test_3xi64(<3 x i64> %x, <3 x i64>* %b) {
- %bb = load <3 x i64>* %b
+ %bb = load <3 x i64>, <3 x i64>* %b
%tt = xor <3 x i64> %x, %bb
store <3 x i64> %tt, <3 x i64>* %b
br label %next
@@ -203,7 +203,7 @@ next:
define <3 x i128> @test_3xi128(<3 x i128> %x, <3 x i128>* %b) {
- %bb = load <3 x i128>* %b
+ %bb = load <3 x i128>, <3 x i128>* %b
%tt = xor <3 x i128> %x, %bb
store <3 x i128> %tt, <3 x i128>* %b
br label %next
@@ -214,7 +214,7 @@ next:
define <3 x i256> @test_3xi256(<3 x i256> %x, <3 x i256>* %b) {
- %bb = load <3 x i256>* %b
+ %bb = load <3 x i256>, <3 x i256>* %b
%tt = xor <3 x i256> %x, %bb
store <3 x i256> %tt, <3 x i256>* %b
br label %next
@@ -225,7 +225,7 @@ next:
define <3 x i512> @test_3xi512(<3 x i512> %x, <3 x i512>* %b) {
- %bb = load <3 x i512>* %b
+ %bb = load <3 x i512>, <3 x i512>* %b
%tt = xor <3 x i512> %x, %bb
store <3 x i512> %tt, <3 x i512>* %b
br label %next
@@ -236,7 +236,7 @@ next:
define <4 x i8> @test_4xi8(<4 x i8> %x, <4 x i8>* %b) {
- %bb = load <4 x i8>* %b
+ %bb = load <4 x i8>, <4 x i8>* %b
%tt = xor <4 x i8> %x, %bb
store <4 x i8> %tt, <4 x i8>* %b
br label %next
@@ -247,7 +247,7 @@ next:
define <4 x i16> @test_4xi16(<4 x i16> %x, <4 x i16>* %b) {
- %bb = load <4 x i16>* %b
+ %bb = load <4 x i16>, <4 x i16>* %b
%tt = xor <4 x i16> %x, %bb
store <4 x i16> %tt, <4 x i16>* %b
br label %next
@@ -258,7 +258,7 @@ next:
define <4 x i32> @test_4xi32(<4 x i32> %x, <4 x i32>* %b) {
- %bb = load <4 x i32>* %b
+ %bb = load <4 x i32>, <4 x i32>* %b
%tt = xor <4 x i32> %x, %bb
store <4 x i32> %tt, <4 x i32>* %b
br label %next
@@ -269,7 +269,7 @@ next:
define <4 x i64> @test_4xi64(<4 x i64> %x, <4 x i64>* %b) {
- %bb = load <4 x i64>* %b
+ %bb = load <4 x i64>, <4 x i64>* %b
%tt = xor <4 x i64> %x, %bb
store <4 x i64> %tt, <4 x i64>* %b
br label %next
@@ -280,7 +280,7 @@ next:
define <4 x i128> @test_4xi128(<4 x i128> %x, <4 x i128>* %b) {
- %bb = load <4 x i128>* %b
+ %bb = load <4 x i128>, <4 x i128>* %b
%tt = xor <4 x i128> %x, %bb
store <4 x i128> %tt, <4 x i128>* %b
br label %next
@@ -291,7 +291,7 @@ next:
define <4 x i256> @test_4xi256(<4 x i256> %x, <4 x i256>* %b) {
- %bb = load <4 x i256>* %b
+ %bb = load <4 x i256>, <4 x i256>* %b
%tt = xor <4 x i256> %x, %bb
store <4 x i256> %tt, <4 x i256>* %b
br label %next
@@ -302,7 +302,7 @@ next:
define <4 x i512> @test_4xi512(<4 x i512> %x, <4 x i512>* %b) {
- %bb = load <4 x i512>* %b
+ %bb = load <4 x i512>, <4 x i512>* %b
%tt = xor <4 x i512> %x, %bb
store <4 x i512> %tt, <4 x i512>* %b
br label %next
@@ -313,7 +313,7 @@ next:
define <5 x i8> @test_5xi8(<5 x i8> %x, <5 x i8>* %b) {
- %bb = load <5 x i8>* %b
+ %bb = load <5 x i8>, <5 x i8>* %b
%tt = xor <5 x i8> %x, %bb
store <5 x i8> %tt, <5 x i8>* %b
br label %next
@@ -324,7 +324,7 @@ next:
define <5 x i16> @test_5xi16(<5 x i16> %x, <5 x i16>* %b) {
- %bb = load <5 x i16>* %b
+ %bb = load <5 x i16>, <5 x i16>* %b
%tt = xor <5 x i16> %x, %bb
store <5 x i16> %tt, <5 x i16>* %b
br label %next
@@ -335,7 +335,7 @@ next:
define <5 x i32> @test_5xi32(<5 x i32> %x, <5 x i32>* %b) {
- %bb = load <5 x i32>* %b
+ %bb = load <5 x i32>, <5 x i32>* %b
%tt = xor <5 x i32> %x, %bb
store <5 x i32> %tt, <5 x i32>* %b
br label %next
@@ -346,7 +346,7 @@ next:
define <5 x i64> @test_5xi64(<5 x i64> %x, <5 x i64>* %b) {
- %bb = load <5 x i64>* %b
+ %bb = load <5 x i64>, <5 x i64>* %b
%tt = xor <5 x i64> %x, %bb
store <5 x i64> %tt, <5 x i64>* %b
br label %next
@@ -357,7 +357,7 @@ next:
define <5 x i128> @test_5xi128(<5 x i128> %x, <5 x i128>* %b) {
- %bb = load <5 x i128>* %b
+ %bb = load <5 x i128>, <5 x i128>* %b
%tt = xor <5 x i128> %x, %bb
store <5 x i128> %tt, <5 x i128>* %b
br label %next
@@ -368,7 +368,7 @@ next:
define <5 x i256> @test_5xi256(<5 x i256> %x, <5 x i256>* %b) {
- %bb = load <5 x i256>* %b
+ %bb = load <5 x i256>, <5 x i256>* %b
%tt = xor <5 x i256> %x, %bb
store <5 x i256> %tt, <5 x i256>* %b
br label %next
@@ -379,7 +379,7 @@ next:
define <5 x i512> @test_5xi512(<5 x i512> %x, <5 x i512>* %b) {
- %bb = load <5 x i512>* %b
+ %bb = load <5 x i512>, <5 x i512>* %b
%tt = xor <5 x i512> %x, %bb
store <5 x i512> %tt, <5 x i512>* %b
br label %next
diff --git a/llvm/test/CodeGen/X86/misaligned-memset.ll b/llvm/test/CodeGen/X86/misaligned-memset.ll
index 21f8bf2bf29..6e22e2ceed9 100644
--- a/llvm/test/CodeGen/X86/misaligned-memset.ll
+++ b/llvm/test/CodeGen/X86/misaligned-memset.ll
@@ -8,7 +8,7 @@ entry:
%retval = alloca i32, align 4
store i32 0, i32* %retval
call void @llvm.memset.p0i8.i64(i8* bitcast (i64* getelementptr inbounds ([3 x i64]* @a, i32 0, i64 1) to i8*), i8 0, i64 16, i32 1, i1 false)
- %0 = load i32* %retval
+ %0 = load i32, i32* %retval
ret i32 %0
}
diff --git a/llvm/test/CodeGen/X86/misched-aa-colored.ll b/llvm/test/CodeGen/X86/misched-aa-colored.ll
index 440b6614d79..ef7b98ac9c6 100644
--- a/llvm/test/CodeGen/X86/misched-aa-colored.ll
+++ b/llvm/test/CodeGen/X86/misched-aa-colored.ll
@@ -156,9 +156,9 @@ entry:
%Op.i = alloca %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083", align 8
%0 = bitcast %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199"* %ref.tmp.i to i8*
%retval.sroa.0.0.idx.i36 = getelementptr inbounds %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199", %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199"* %ref.tmp.i, i64 0, i32 1, i32 0, i32 0
- %retval.sroa.0.0.copyload.i37 = load i32* %retval.sroa.0.0.idx.i36, align 8
+ %retval.sroa.0.0.copyload.i37 = load i32, i32* %retval.sroa.0.0.idx.i36, align 8
call void @llvm.lifetime.end(i64 24, i8* %0) #1
- %agg.tmp8.sroa.2.0.copyload = load i32* undef, align 8
+ %agg.tmp8.sroa.2.0.copyload = load i32, i32* undef, align 8
%1 = bitcast %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* %Op.i to i8*
call void @llvm.lifetime.start(i64 16, i8* %1) #1
%2 = getelementptr %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083", %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* %Op.i, i64 0, i32 1
diff --git a/llvm/test/CodeGen/X86/misched-aa-mmos.ll b/llvm/test/CodeGen/X86/misched-aa-mmos.ll
index 5d51c28788e..c457a5eb413 100644
--- a/llvm/test/CodeGen/X86/misched-aa-mmos.ll
+++ b/llvm/test/CodeGen/X86/misched-aa-mmos.ll
@@ -20,11 +20,11 @@ entry:
cond.end.i:
%significand.i18.i = getelementptr inbounds %c1, %c1* %temp_rhs, i64 0, i32 1
%exponent.i = getelementptr inbounds %c1, %c1* %temp_rhs, i64 0, i32 2
- %0 = load i16* %exponent.i, align 8
+ %0 = load i16, i16* %exponent.i, align 8
%sub.i = add i16 %0, -1
store i16 %sub.i, i16* %exponent.i, align 8
%parts.i.i = bitcast %u1* %significand.i18.i to i64**
- %1 = load i64** %parts.i.i, align 8
+ %1 = load i64*, i64** %parts.i.i, align 8
%call5.i = call zeroext i1 @bar(i64* %1, i32 undef) #1
unreachable
diff --git a/llvm/test/CodeGen/X86/misched-balance.ll b/llvm/test/CodeGen/X86/misched-balance.ll
index 954575ebd8b..ca3b57992a2 100644
--- a/llvm/test/CodeGen/X86/misched-balance.ll
+++ b/llvm/test/CodeGen/X86/misched-balance.ll
@@ -48,62 +48,62 @@ entry:
; CHECK-LABEL: %end
for.body:
%indvars.iv42.i = phi i64 [ %indvars.iv.next43.i, %for.body ], [ 0, %entry ]
- %tmp57 = load i32* %tmp56, align 4
+ %tmp57 = load i32, i32* %tmp56, align 4
%arrayidx12.us.i61 = getelementptr inbounds i32, i32* %pre, i64 %indvars.iv42.i
- %tmp58 = load i32* %arrayidx12.us.i61, align 4
+ %tmp58 = load i32, i32* %arrayidx12.us.i61, align 4
%mul.us.i = mul nsw i32 %tmp58, %tmp57
%arrayidx8.us.i.1 = getelementptr inbounds i32, i32* %tmp56, i64 1
- %tmp59 = load i32* %arrayidx8.us.i.1, align 4
+ %tmp59 = load i32, i32* %arrayidx8.us.i.1, align 4
%arrayidx12.us.i61.1 = getelementptr inbounds i32, i32* %pre94, i64 %indvars.iv42.i
- %tmp60 = load i32* %arrayidx12.us.i61.1, align 4
+ %tmp60 = load i32, i32* %arrayidx12.us.i61.1, align 4
%mul.us.i.1 = mul nsw i32 %tmp60, %tmp59
%add.us.i.1 = add nsw i32 %mul.us.i.1, %mul.us.i
%arrayidx8.us.i.2 = getelementptr inbounds i32, i32* %tmp56, i64 2
- %tmp61 = load i32* %arrayidx8.us.i.2, align 4
+ %tmp61 = load i32, i32* %arrayidx8.us.i.2, align 4
%arrayidx12.us.i61.2 = getelementptr inbounds i32, i32* %pre95, i64 %indvars.iv42.i
- %tmp62 = load i32* %arrayidx12.us.i61.2, align 4
+ %tmp62 = load i32, i32* %arrayidx12.us.i61.2, align 4
%mul.us.i.2 = mul nsw i32 %tmp62, %tmp61
%add.us.i.2 = add nsw i32 %mul.us.i.2, %add.us.i.1
%arrayidx8.us.i.3 = getelementptr inbounds i32, i32* %tmp56, i64 3
- %tmp63 = load i32* %arrayidx8.us.i.3, align 4
+ %tmp63 = load i32, i32* %arrayidx8.us.i.3, align 4
%arrayidx12.us.i61.3 = getelementptr inbounds i32, i32* %pre96, i64 %indvars.iv42.i
- %tmp64 = load i32* %arrayidx12.us.i61.3, align 4
+ %tmp64 = load i32, i32* %arrayidx12.us.i61.3, align 4
%mul.us.i.3 = mul nsw i32 %tmp64, %tmp63
%add.us.i.3 = add nsw i32 %mul.us.i.3, %add.us.i.2
%arrayidx8.us.i.4 = getelementptr inbounds i32, i32* %tmp56, i64 4
- %tmp65 = load i32* %arrayidx8.us.i.4, align 4
+ %tmp65 = load i32, i32* %arrayidx8.us.i.4, align 4
%arrayidx12.us.i61.4 = getelementptr inbounds i32, i32* %pre97, i64 %indvars.iv42.i
- %tmp66 = load i32* %arrayidx12.us.i61.4, align 4
+ %tmp66 = load i32, i32* %arrayidx12.us.i61.4, align 4
%mul.us.i.4 = mul nsw i32 %tmp66, %tmp65
%add.us.i.4 = add nsw i32 %mul.us.i.4, %add.us.i.3
%arrayidx8.us.i.5 = getelementptr inbounds i32, i32* %tmp56, i64 5
- %tmp67 = load i32* %arrayidx8.us.i.5, align 4
+ %tmp67 = load i32, i32* %arrayidx8.us.i.5, align 4
%arrayidx12.us.i61.5 = getelementptr inbounds i32, i32* %pre98, i64 %indvars.iv42.i
- %tmp68 = load i32* %arrayidx12.us.i61.5, align 4
+ %tmp68 = load i32, i32* %arrayidx12.us.i61.5, align 4
%mul.us.i.5 = mul nsw i32 %tmp68, %tmp67
%add.us.i.5 = add nsw i32 %mul.us.i.5, %add.us.i.4
%arrayidx8.us.i.6 = getelementptr inbounds i32, i32* %tmp56, i64 6
- %tmp69 = load i32* %arrayidx8.us.i.6, align 4
+ %tmp69 = load i32, i32* %arrayidx8.us.i.6, align 4
%arrayidx12.us.i61.6 = getelementptr inbounds i32, i32* %pre99, i64 %indvars.iv42.i
- %tmp70 = load i32* %arrayidx12.us.i61.6, align 4
+ %tmp70 = load i32, i32* %arrayidx12.us.i61.6, align 4
%mul.us.i.6 = mul nsw i32 %tmp70, %tmp69
%add.us.i.6 = add nsw i32 %mul.us.i.6, %add.us.i.5
%arrayidx8.us.i.7 = getelementptr inbounds i32, i32* %tmp56, i64 7
- %tmp71 = load i32* %arrayidx8.us.i.7, align 4
+ %tmp71 = load i32, i32* %arrayidx8.us.i.7, align 4
%arrayidx12.us.i61.7 = getelementptr inbounds i32, i32* %pre100, i64 %indvars.iv42.i
- %tmp72 = load i32* %arrayidx12.us.i61.7, align 4
+ %tmp72 = load i32, i32* %arrayidx12.us.i61.7, align 4
%mul.us.i.7 = mul nsw i32 %tmp72, %tmp71
%add.us.i.7 = add nsw i32 %mul.us.i.7, %add.us.i.6
%arrayidx8.us.i.8 = getelementptr inbounds i32, i32* %tmp56, i64 8
- %tmp73 = load i32* %arrayidx8.us.i.8, align 4
+ %tmp73 = load i32, i32* %arrayidx8.us.i.8, align 4
%arrayidx12.us.i61.8 = getelementptr inbounds i32, i32* %pre101, i64 %indvars.iv42.i
- %tmp74 = load i32* %arrayidx12.us.i61.8, align 4
+ %tmp74 = load i32, i32* %arrayidx12.us.i61.8, align 4
%mul.us.i.8 = mul nsw i32 %tmp74, %tmp73
%add.us.i.8 = add nsw i32 %mul.us.i.8, %add.us.i.7
%arrayidx8.us.i.9 = getelementptr inbounds i32, i32* %tmp56, i64 9
- %tmp75 = load i32* %arrayidx8.us.i.9, align 4
+ %tmp75 = load i32, i32* %arrayidx8.us.i.9, align 4
%arrayidx12.us.i61.9 = getelementptr inbounds i32, i32* %pre102, i64 %indvars.iv42.i
- %tmp76 = load i32* %arrayidx12.us.i61.9, align 4
+ %tmp76 = load i32, i32* %arrayidx12.us.i61.9, align 4
%mul.us.i.9 = mul nsw i32 %tmp76, %tmp75
%add.us.i.9 = add nsw i32 %mul.us.i.9, %add.us.i.8
%arrayidx16.us.i = getelementptr inbounds i32, i32* %tmp55, i64 %indvars.iv42.i
@@ -159,46 +159,46 @@ entry:
br label %for.body
for.body:
%indvars.iv42.i = phi i64 [ %indvars.iv.next43.i, %for.body ], [ 0, %entry ]
- %tmp57 = load i32* %tmp56, align 4
+ %tmp57 = load i32, i32* %tmp56, align 4
%arrayidx12.us.i61 = getelementptr inbounds i32, i32* %pre, i64 %indvars.iv42.i
- %tmp58 = load i32* %arrayidx12.us.i61, align 4
+ %tmp58 = load i32, i32* %arrayidx12.us.i61, align 4
%arrayidx8.us.i.1 = getelementptr inbounds i32, i32* %tmp56, i64 1
- %tmp59 = load i32* %arrayidx8.us.i.1, align 4
+ %tmp59 = load i32, i32* %arrayidx8.us.i.1, align 4
%arrayidx12.us.i61.1 = getelementptr inbounds i32, i32* %pre94, i64 %indvars.iv42.i
- %tmp60 = load i32* %arrayidx12.us.i61.1, align 4
+ %tmp60 = load i32, i32* %arrayidx12.us.i61.1, align 4
%arrayidx8.us.i.2 = getelementptr inbounds i32, i32* %tmp56, i64 2
- %tmp61 = load i32* %arrayidx8.us.i.2, align 4
+ %tmp61 = load i32, i32* %arrayidx8.us.i.2, align 4
%arrayidx12.us.i61.2 = getelementptr inbounds i32, i32* %pre95, i64 %indvars.iv42.i
- %tmp62 = load i32* %arrayidx12.us.i61.2, align 4
+ %tmp62 = load i32, i32* %arrayidx12.us.i61.2, align 4
%arrayidx8.us.i.3 = getelementptr inbounds i32, i32* %tmp56, i64 3
- %tmp63 = load i32* %arrayidx8.us.i.3, align 4
+ %tmp63 = load i32, i32* %arrayidx8.us.i.3, align 4
%arrayidx12.us.i61.3 = getelementptr inbounds i32, i32* %pre96, i64 %indvars.iv42.i
- %tmp64 = load i32* %arrayidx12.us.i61.3, align 4
+ %tmp64 = load i32, i32* %arrayidx12.us.i61.3, align 4
%arrayidx8.us.i.4 = getelementptr inbounds i32, i32* %tmp56, i64 4
- %tmp65 = load i32* %arrayidx8.us.i.4, align 4
+ %tmp65 = load i32, i32* %arrayidx8.us.i.4, align 4
%arrayidx12.us.i61.4 = getelementptr inbounds i32, i32* %pre97, i64 %indvars.iv42.i
- %tmp66 = load i32* %arrayidx12.us.i61.4, align 4
+ %tmp66 = load i32, i32* %arrayidx12.us.i61.4, align 4
%arrayidx8.us.i.5 = getelementptr inbounds i32, i32* %tmp56, i64 5
- %tmp67 = load i32* %arrayidx8.us.i.5, align 4
+ %tmp67 = load i32, i32* %arrayidx8.us.i.5, align 4
%arrayidx12.us.i61.5 = getelementptr inbounds i32, i32* %pre98, i64 %indvars.iv42.i
- %tmp68 = load i32* %arrayidx12.us.i61.5, align 4
+ %tmp68 = load i32, i32* %arrayidx12.us.i61.5, align 4
%arrayidx8.us.i.6 = getelementptr inbounds i32, i32* %tmp56, i64 6
- %tmp69 = load i32* %arrayidx8.us.i.6, align 4
+ %tmp69 = load i32, i32* %arrayidx8.us.i.6, align 4
%arrayidx12.us.i61.6 = getelementptr inbounds i32, i32* %pre99, i64 %indvars.iv42.i
- %tmp70 = load i32* %arrayidx12.us.i61.6, align 4
+ %tmp70 = load i32, i32* %arrayidx12.us.i61.6, align 4
%mul.us.i = mul nsw i32 %tmp58, %tmp57
%arrayidx8.us.i.7 = getelementptr inbounds i32, i32* %tmp56, i64 7
- %tmp71 = load i32* %arrayidx8.us.i.7, align 4
+ %tmp71 = load i32, i32* %arrayidx8.us.i.7, align 4
%arrayidx12.us.i61.7 = getelementptr inbounds i32, i32* %pre100, i64 %indvars.iv42.i
- %tmp72 = load i32* %arrayidx12.us.i61.7, align 4
+ %tmp72 = load i32, i32* %arrayidx12.us.i61.7, align 4
%arrayidx8.us.i.8 = getelementptr inbounds i32, i32* %tmp56, i64 8
- %tmp73 = load i32* %arrayidx8.us.i.8, align 4
+ %tmp73 = load i32, i32* %arrayidx8.us.i.8, align 4
%arrayidx12.us.i61.8 = getelementptr inbounds i32, i32* %pre101, i64 %indvars.iv42.i
- %tmp74 = load i32* %arrayidx12.us.i61.8, align 4
+ %tmp74 = load i32, i32* %arrayidx12.us.i61.8, align 4
%arrayidx8.us.i.9 = getelementptr inbounds i32, i32* %tmp56, i64 9
- %tmp75 = load i32* %arrayidx8.us.i.9, align 4
+ %tmp75 = load i32, i32* %arrayidx8.us.i.9, align 4
%arrayidx12.us.i61.9 = getelementptr inbounds i32, i32* %pre102, i64 %indvars.iv42.i
- %tmp76 = load i32* %arrayidx12.us.i61.9, align 4
+ %tmp76 = load i32, i32* %arrayidx12.us.i61.9, align 4
%mul.us.i.1 = mul nsw i32 %tmp60, %tmp59
%add.us.i.1 = add nsw i32 %mul.us.i.1, %mul.us.i
%mul.us.i.2 = mul nsw i32 %tmp62, %tmp61
@@ -243,20 +243,20 @@ end:
@d = external global i32, align 4
define i32 @encpc1() nounwind {
entry:
- %l1 = load i32* @a, align 16
+ %l1 = load i32, i32* @a, align 16
%conv = shl i32 %l1, 8
%s5 = lshr i32 %l1, 8
%add = or i32 %conv, %s5
store i32 %add, i32* @b
- %l6 = load i32* @a
- %l7 = load i32* @c
+ %l6 = load i32, i32* @a
+ %l7 = load i32, i32* @c
%add.i = add i32 %l7, %l6
%idxprom.i = zext i32 %l7 to i64
%arrayidx.i = getelementptr inbounds i32, i32* @d, i64 %idxprom.i
- %l8 = load i32* %arrayidx.i
+ %l8 = load i32, i32* %arrayidx.i
store i32 346, i32* @c
store i32 20021, i32* @d
- %l9 = load i32* @a
+ %l9 = load i32, i32* @a
store i32 %l8, i32* @a
store i32 %l9, i32* @b
store i32 %add.i, i32* @c
diff --git a/llvm/test/CodeGen/X86/misched-code-difference-with-debug.ll b/llvm/test/CodeGen/X86/misched-code-difference-with-debug.ll
index fb2a986e561..0fc7d002236 100644
--- a/llvm/test/CodeGen/X86/misched-code-difference-with-debug.ll
+++ b/llvm/test/CodeGen/X86/misched-code-difference-with-debug.ll
@@ -32,10 +32,10 @@ declare i32 @test_function(%class.C*, i8 signext, i8 signext, i8 signext, ...)
define void @test_without_debug() {
entry:
%c = alloca %class.C, align 1
- %0 = load i8* @argc, align 1
+ %0 = load i8, i8* @argc, align 1
%conv = sext i8 %0 to i32
%call = call i32 (%class.C*, i8, i8, i8, ...)* @test_function(%class.C* %c, i8 signext 0, i8 signext %0, i8 signext 0, i32 %conv)
- %1 = load i8* @argc, align 1
+ %1 = load i8, i8* @argc, align 1
%call2 = call i32 (%class.C*, i8, i8, i8, ...)* @test_function(%class.C* %c, i8 signext 0, i8 signext %1, i8 signext 0, i32 %conv)
ret void
}
@@ -46,12 +46,12 @@ entry:
define void @test_with_debug() {
entry:
%c = alloca %class.C, align 1
- %0 = load i8* @argc, align 1
+ %0 = load i8, i8* @argc, align 1
tail call void @llvm.dbg.value(metadata i8 %0, i64 0, metadata !19, metadata !29)
%conv = sext i8 %0 to i32
tail call void @llvm.dbg.value(metadata %class.C* %c, i64 0, metadata !18, metadata !29)
%call = call i32 (%class.C*, i8, i8, i8, ...)* @test_function(%class.C* %c, i8 signext 0, i8 signext %0, i8 signext 0, i32 %conv)
- %1 = load i8* @argc, align 1
+ %1 = load i8, i8* @argc, align 1
call void @llvm.dbg.value(metadata %class.C* %c, i64 0, metadata !18, metadata !29)
%call2 = call i32 (%class.C*, i8, i8, i8, ...)* @test_function(%class.C* %c, i8 signext 0, i8 signext %1, i8 signext 0, i32 %conv)
ret void
diff --git a/llvm/test/CodeGen/X86/misched-crash.ll b/llvm/test/CodeGen/X86/misched-crash.ll
index 7ebfee85274..fa7de1a843e 100644
--- a/llvm/test/CodeGen/X86/misched-crash.ll
+++ b/llvm/test/CodeGen/X86/misched-crash.ll
@@ -9,7 +9,7 @@ entry:
%cmp = icmp ult i64 %_x1, %_x2
%cond = select i1 %cmp, i64 %_x1, i64 %_x2
%cond10 = select i1 %cmp, i64 %_x2, i64 %_x1
- %0 = load i64* null, align 8
+ %0 = load i64, i64* null, align 8
%cmp16 = icmp ult i64 %cond, %0
%cmp23 = icmp ugt i64 %cond10, 0
br i1 %cmp16, label %land.lhs.true21, label %return
@@ -27,7 +27,7 @@ if.then24: ; preds = %land.lhs.true21
for.body34.i: ; preds = %for.inc39.i, %if.then24
%index.178.i = phi i64 [ %add21.i, %if.then24 ], [ %inc41.i, %for.inc39.i ]
%arrayidx35.i = getelementptr inbounds i8, i8* %plane, i64 %index.178.i
- %1 = load i8* %arrayidx35.i, align 1
+ %1 = load i8, i8* %arrayidx35.i, align 1
%tobool36.i = icmp eq i8 %1, 0
br i1 %tobool36.i, label %for.inc39.i, label %return
diff --git a/llvm/test/CodeGen/X86/misched-fusion.ll b/llvm/test/CodeGen/X86/misched-fusion.ll
index 1f2ad3f3394..0975faacb9e 100644
--- a/llvm/test/CodeGen/X86/misched-fusion.ll
+++ b/llvm/test/CodeGen/X86/misched-fusion.ll
@@ -16,7 +16,7 @@ loop:
loop1:
%cond = icmp eq i32* %var, null
- %next.load = load i32** %next.ptr
+ %next.load = load i32*, i32** %next.ptr
br i1 %cond, label %loop, label %loop2
loop2: ; preds = %loop1
@@ -42,8 +42,8 @@ loop:
loop1:
%var2 = sub i32 %var, 1
%cond = icmp eq i32 %var2, 0
- %next.load = load i32** %next.ptr
- %next.var = load i32* %next.load
+ %next.load = load i32*, i32** %next.ptr
+ %next.var = load i32, i32* %next.load
br i1 %cond, label %loop, label %loop2
loop2:
@@ -70,8 +70,8 @@ loop2a: ; preds = %loop1, %body, %entr
loop1: ; preds = %loop2a, %loop2b
%var2 = sub i32 %var, 1
%cond = icmp slt i32 %var2, 0
- %next.load = load i32** %next.ptr
- %next.var = load i32* %next.load
+ %next.load = load i32*, i32** %next.ptr
+ %next.var = load i32, i32* %next.load
br i1 %cond, label %loop2a, label %loop2b
loop2b: ; preds = %loop1
@@ -97,8 +97,8 @@ loop2a: ; preds = %loop1, %body, %entr
loop1: ; preds = %loop2a, %loop2b
%var2 = sub i32 %var, 1
%cond = icmp ult i32 %var2, %n
- %next.load = load i32** %next.ptr
- %next.var = load i32* %next.load
+ %next.load = load i32*, i32** %next.ptr
+ %next.var = load i32, i32* %next.load
br i1 %cond, label %loop2a, label %loop2b
loop2b: ; preds = %loop1
diff --git a/llvm/test/CodeGen/X86/misched-matmul.ll b/llvm/test/CodeGen/X86/misched-matmul.ll
index 01720f2e45b..384344691f9 100644
--- a/llvm/test/CodeGen/X86/misched-matmul.ll
+++ b/llvm/test/CodeGen/X86/misched-matmul.ll
@@ -15,86 +15,86 @@
define void @wrap_mul4(double* nocapture %Out, [4 x double]* nocapture %A, [4 x double]* nocapture %B) #0 {
entry:
%arrayidx1.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 0
- %0 = load double* %arrayidx1.i, align 8
+ %0 = load double, double* %arrayidx1.i, align 8
%arrayidx3.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 0
- %1 = load double* %arrayidx3.i, align 8
+ %1 = load double, double* %arrayidx3.i, align 8
%mul.i = fmul double %0, %1
%arrayidx5.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 1
- %2 = load double* %arrayidx5.i, align 8
+ %2 = load double, double* %arrayidx5.i, align 8
%arrayidx7.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 0
- %3 = load double* %arrayidx7.i, align 8
+ %3 = load double, double* %arrayidx7.i, align 8
%mul8.i = fmul double %2, %3
%add.i = fadd double %mul.i, %mul8.i
%arrayidx10.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 2
- %4 = load double* %arrayidx10.i, align 8
+ %4 = load double, double* %arrayidx10.i, align 8
%arrayidx12.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 2, i64 0
- %5 = load double* %arrayidx12.i, align 8
+ %5 = load double, double* %arrayidx12.i, align 8
%mul13.i = fmul double %4, %5
%add14.i = fadd double %add.i, %mul13.i
%arrayidx16.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 3
- %6 = load double* %arrayidx16.i, align 8
+ %6 = load double, double* %arrayidx16.i, align 8
%arrayidx18.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 3, i64 0
- %7 = load double* %arrayidx18.i, align 8
+ %7 = load double, double* %arrayidx18.i, align 8
%mul19.i = fmul double %6, %7
%add20.i = fadd double %add14.i, %mul19.i
%arrayidx25.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 1
- %8 = load double* %arrayidx25.i, align 8
+ %8 = load double, double* %arrayidx25.i, align 8
%mul26.i = fmul double %0, %8
%arrayidx30.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 1
- %9 = load double* %arrayidx30.i, align 8
+ %9 = load double, double* %arrayidx30.i, align 8
%mul31.i = fmul double %2, %9
%add32.i = fadd double %mul26.i, %mul31.i
%arrayidx36.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 2, i64 1
- %10 = load double* %arrayidx36.i, align 8
+ %10 = load double, double* %arrayidx36.i, align 8
%mul37.i = fmul double %4, %10
%add38.i = fadd double %add32.i, %mul37.i
%arrayidx42.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 3, i64 1
- %11 = load double* %arrayidx42.i, align 8
+ %11 = load double, double* %arrayidx42.i, align 8
%mul43.i = fmul double %6, %11
%add44.i = fadd double %add38.i, %mul43.i
%arrayidx49.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 2
- %12 = load double* %arrayidx49.i, align 8
+ %12 = load double, double* %arrayidx49.i, align 8
%mul50.i = fmul double %0, %12
%arrayidx54.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 2
- %13 = load double* %arrayidx54.i, align 8
+ %13 = load double, double* %arrayidx54.i, align 8
%mul55.i = fmul double %2, %13
%add56.i = fadd double %mul50.i, %mul55.i
%arrayidx60.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 2, i64 2
- %14 = load double* %arrayidx60.i, align 8
+ %14 = load double, double* %arrayidx60.i, align 8
%mul61.i = fmul double %4, %14
%add62.i = fadd double %add56.i, %mul61.i
%arrayidx66.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 3, i64 2
- %15 = load double* %arrayidx66.i, align 8
+ %15 = load double, double* %arrayidx66.i, align 8
%mul67.i = fmul double %6, %15
%add68.i = fadd double %add62.i, %mul67.i
%arrayidx73.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 3
- %16 = load double* %arrayidx73.i, align 8
+ %16 = load double, double* %arrayidx73.i, align 8
%mul74.i = fmul double %0, %16
%arrayidx78.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 3
- %17 = load double* %arrayidx78.i, align 8
+ %17 = load double, double* %arrayidx78.i, align 8
%mul79.i = fmul double %2, %17
%add80.i = fadd double %mul74.i, %mul79.i
%arrayidx84.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 2, i64 3
- %18 = load double* %arrayidx84.i, align 8
+ %18 = load double, double* %arrayidx84.i, align 8
%mul85.i = fmul double %4, %18
%add86.i = fadd double %add80.i, %mul85.i
%arrayidx90.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 3, i64 3
- %19 = load double* %arrayidx90.i, align 8
+ %19 = load double, double* %arrayidx90.i, align 8
%mul91.i = fmul double %6, %19
%add92.i = fadd double %add86.i, %mul91.i
%arrayidx95.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 1, i64 0
- %20 = load double* %arrayidx95.i, align 8
+ %20 = load double, double* %arrayidx95.i, align 8
%mul98.i = fmul double %1, %20
%arrayidx100.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 1, i64 1
- %21 = load double* %arrayidx100.i, align 8
+ %21 = load double, double* %arrayidx100.i, align 8
%mul103.i = fmul double %3, %21
%add104.i = fadd double %mul98.i, %mul103.i
%arrayidx106.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 1, i64 2
- %22 = load double* %arrayidx106.i, align 8
+ %22 = load double, double* %arrayidx106.i, align 8
%mul109.i = fmul double %5, %22
%add110.i = fadd double %add104.i, %mul109.i
%arrayidx112.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 1, i64 3
- %23 = load double* %arrayidx112.i, align 8
+ %23 = load double, double* %arrayidx112.i, align 8
%mul115.i = fmul double %7, %23
%add116.i = fadd double %add110.i, %mul115.i
%mul122.i = fmul double %8, %20
@@ -119,18 +119,18 @@ entry:
%mul187.i = fmul double %19, %23
%add188.i = fadd double %add182.i, %mul187.i
%arrayidx191.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 2, i64 0
- %24 = load double* %arrayidx191.i, align 8
+ %24 = load double, double* %arrayidx191.i, align 8
%mul194.i = fmul double %1, %24
%arrayidx196.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 2, i64 1
- %25 = load double* %arrayidx196.i, align 8
+ %25 = load double, double* %arrayidx196.i, align 8
%mul199.i = fmul double %3, %25
%add200.i = fadd double %mul194.i, %mul199.i
%arrayidx202.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 2, i64 2
- %26 = load double* %arrayidx202.i, align 8
+ %26 = load double, double* %arrayidx202.i, align 8
%mul205.i = fmul double %5, %26
%add206.i = fadd double %add200.i, %mul205.i
%arrayidx208.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 2, i64 3
- %27 = load double* %arrayidx208.i, align 8
+ %27 = load double, double* %arrayidx208.i, align 8
%mul211.i = fmul double %7, %27
%add212.i = fadd double %add206.i, %mul211.i
%mul218.i = fmul double %8, %24
@@ -155,18 +155,18 @@ entry:
%mul283.i = fmul double %19, %27
%add284.i = fadd double %add278.i, %mul283.i
%arrayidx287.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 3, i64 0
- %28 = load double* %arrayidx287.i, align 8
+ %28 = load double, double* %arrayidx287.i, align 8
%mul290.i = fmul double %1, %28
%arrayidx292.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 3, i64 1
- %29 = load double* %arrayidx292.i, align 8
+ %29 = load double, double* %arrayidx292.i, align 8
%mul295.i = fmul double %3, %29
%add296.i = fadd double %mul290.i, %mul295.i
%arrayidx298.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 3, i64 2
- %30 = load double* %arrayidx298.i, align 8
+ %30 = load double, double* %arrayidx298.i, align 8
%mul301.i = fmul double %5, %30
%add302.i = fadd double %add296.i, %mul301.i
%arrayidx304.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 3, i64 3
- %31 = load double* %arrayidx304.i, align 8
+ %31 = load double, double* %arrayidx304.i, align 8
%mul307.i = fmul double %7, %31
%add308.i = fadd double %add302.i, %mul307.i
%mul314.i = fmul double %8, %28
diff --git a/llvm/test/CodeGen/X86/misched-matrix.ll b/llvm/test/CodeGen/X86/misched-matrix.ll
index ea632ba3da5..e62a1d04dad 100644
--- a/llvm/test/CodeGen/X86/misched-matrix.ll
+++ b/llvm/test/CodeGen/X86/misched-matrix.ll
@@ -94,57 +94,57 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx8 = getelementptr inbounds [4 x i32], [4 x i32]* %m1, i64 %indvars.iv, i64 0
- %tmp = load i32* %arrayidx8, align 4
+ %tmp = load i32, i32* %arrayidx8, align 4
%arrayidx12 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 0, i64 0
- %tmp1 = load i32* %arrayidx12, align 4
+ %tmp1 = load i32, i32* %arrayidx12, align 4
%arrayidx8.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m1, i64 %indvars.iv, i64 1
- %tmp2 = load i32* %arrayidx8.1, align 4
+ %tmp2 = load i32, i32* %arrayidx8.1, align 4
%arrayidx12.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 1, i64 0
- %tmp3 = load i32* %arrayidx12.1, align 4
+ %tmp3 = load i32, i32* %arrayidx12.1, align 4
%arrayidx8.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m1, i64 %indvars.iv, i64 2
- %tmp4 = load i32* %arrayidx8.2, align 4
+ %tmp4 = load i32, i32* %arrayidx8.2, align 4
%arrayidx12.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 2, i64 0
- %tmp5 = load i32* %arrayidx12.2, align 4
+ %tmp5 = load i32, i32* %arrayidx12.2, align 4
%arrayidx8.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m1, i64 %indvars.iv, i64 3
- %tmp6 = load i32* %arrayidx8.3, align 4
+ %tmp6 = load i32, i32* %arrayidx8.3, align 4
%arrayidx12.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 3, i64 0
- %tmp8 = load i32* %arrayidx8, align 4
+ %tmp8 = load i32, i32* %arrayidx8, align 4
%arrayidx12.137 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 0, i64 1
- %tmp9 = load i32* %arrayidx12.137, align 4
- %tmp10 = load i32* %arrayidx8.1, align 4
+ %tmp9 = load i32, i32* %arrayidx12.137, align 4
+ %tmp10 = load i32, i32* %arrayidx8.1, align 4
%arrayidx12.1.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 1, i64 1
- %tmp11 = load i32* %arrayidx12.1.1, align 4
- %tmp12 = load i32* %arrayidx8.2, align 4
+ %tmp11 = load i32, i32* %arrayidx12.1.1, align 4
+ %tmp12 = load i32, i32* %arrayidx8.2, align 4
%arrayidx12.2.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 2, i64 1
- %tmp13 = load i32* %arrayidx12.2.1, align 4
- %tmp14 = load i32* %arrayidx8.3, align 4
+ %tmp13 = load i32, i32* %arrayidx12.2.1, align 4
+ %tmp14 = load i32, i32* %arrayidx8.3, align 4
%arrayidx12.3.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 3, i64 1
- %tmp15 = load i32* %arrayidx12.3.1, align 4
- %tmp16 = load i32* %arrayidx8, align 4
+ %tmp15 = load i32, i32* %arrayidx12.3.1, align 4
+ %tmp16 = load i32, i32* %arrayidx8, align 4
%arrayidx12.239 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 0, i64 2
- %tmp17 = load i32* %arrayidx12.239, align 4
- %tmp18 = load i32* %arrayidx8.1, align 4
+ %tmp17 = load i32, i32* %arrayidx12.239, align 4
+ %tmp18 = load i32, i32* %arrayidx8.1, align 4
%arrayidx12.1.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 1, i64 2
- %tmp19 = load i32* %arrayidx12.1.2, align 4
- %tmp20 = load i32* %arrayidx8.2, align 4
+ %tmp19 = load i32, i32* %arrayidx12.1.2, align 4
+ %tmp20 = load i32, i32* %arrayidx8.2, align 4
%arrayidx12.2.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 2, i64 2
- %tmp21 = load i32* %arrayidx12.2.2, align 4
- %tmp22 = load i32* %arrayidx8.3, align 4
+ %tmp21 = load i32, i32* %arrayidx12.2.2, align 4
+ %tmp22 = load i32, i32* %arrayidx8.3, align 4
%arrayidx12.3.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 3, i64 2
- %tmp23 = load i32* %arrayidx12.3.2, align 4
- %tmp24 = load i32* %arrayidx8, align 4
+ %tmp23 = load i32, i32* %arrayidx12.3.2, align 4
+ %tmp24 = load i32, i32* %arrayidx8, align 4
%arrayidx12.341 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 0, i64 3
- %tmp25 = load i32* %arrayidx12.341, align 4
- %tmp26 = load i32* %arrayidx8.1, align 4
+ %tmp25 = load i32, i32* %arrayidx12.341, align 4
+ %tmp26 = load i32, i32* %arrayidx8.1, align 4
%arrayidx12.1.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 1, i64 3
- %tmp27 = load i32* %arrayidx12.1.3, align 4
- %tmp28 = load i32* %arrayidx8.2, align 4
+ %tmp27 = load i32, i32* %arrayidx12.1.3, align 4
+ %tmp28 = load i32, i32* %arrayidx8.2, align 4
%arrayidx12.2.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 2, i64 3
- %tmp29 = load i32* %arrayidx12.2.3, align 4
- %tmp30 = load i32* %arrayidx8.3, align 4
+ %tmp29 = load i32, i32* %arrayidx12.2.3, align 4
+ %tmp30 = load i32, i32* %arrayidx8.3, align 4
%arrayidx12.3.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 3, i64 3
- %tmp31 = load i32* %arrayidx12.3.3, align 4
- %tmp7 = load i32* %arrayidx12.3, align 4
+ %tmp31 = load i32, i32* %arrayidx12.3.3, align 4
+ %tmp7 = load i32, i32* %arrayidx12.3, align 4
%mul = mul nsw i32 %tmp1, %tmp
%mul.1 = mul nsw i32 %tmp3, %tmp2
%mul.2 = mul nsw i32 %tmp5, %tmp4
diff --git a/llvm/test/CodeGen/X86/misched-new.ll b/llvm/test/CodeGen/X86/misched-new.ll
index 89e45b7cfc2..410a7f32064 100644
--- a/llvm/test/CodeGen/X86/misched-new.ll
+++ b/llvm/test/CodeGen/X86/misched-new.ll
@@ -90,12 +90,12 @@ define void @hasundef() unnamed_addr uwtable ssp align 2 {
; TOPDOWN: movzbl %al
; TOPDOWN: ret
define void @testSubregTracking() nounwind uwtable ssp align 2 {
- %tmp = load i8* undef, align 1
+ %tmp = load i8, i8* undef, align 1
%tmp6 = sub i8 0, %tmp
- %tmp7 = load i8* undef, align 1
+ %tmp7 = load i8, i8* undef, align 1
%tmp8 = udiv i8 %tmp6, %tmp7
%tmp9 = zext i8 %tmp8 to i64
- %tmp10 = load i8* undef, align 1
+ %tmp10 = load i8, i8* undef, align 1
%tmp11 = zext i8 %tmp10 to i64
%tmp12 = mul i64 %tmp11, %tmp9
%tmp13 = urem i8 %tmp6, %tmp7
diff --git a/llvm/test/CodeGen/X86/mmx-arg-passing-x86-64.ll b/llvm/test/CodeGen/X86/mmx-arg-passing-x86-64.ll
index c536a39f7e8..89eb33e741a 100644
--- a/llvm/test/CodeGen/X86/mmx-arg-passing-x86-64.ll
+++ b/llvm/test/CodeGen/X86/mmx-arg-passing-x86-64.ll
@@ -12,7 +12,7 @@ define void @t3() nounwind {
; X86-64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-64-NEXT: movb $1, %al
; X86-64-NEXT: jmp _pass_v8qi ## TAILCALL
- %tmp3 = load <8 x i8>* @g_v8qi, align 8
+ %tmp3 = load <8 x i8>, <8 x i8>* @g_v8qi, align 8
%tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
%tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind
ret void
diff --git a/llvm/test/CodeGen/X86/mmx-arith.ll b/llvm/test/CodeGen/X86/mmx-arith.ll
index 0e46a297a7f..114d2535d60 100644
--- a/llvm/test/CodeGen/X86/mmx-arith.ll
+++ b/llvm/test/CodeGen/X86/mmx-arith.ll
@@ -8,48 +8,48 @@
; X64-LABEL: test0
define void @test0(x86_mmx* %A, x86_mmx* %B) {
entry:
- %tmp1 = load x86_mmx* %A
- %tmp3 = load x86_mmx* %B
+ %tmp1 = load x86_mmx, x86_mmx* %A
+ %tmp3 = load x86_mmx, x86_mmx* %B
%tmp1a = bitcast x86_mmx %tmp1 to <8 x i8>
%tmp3a = bitcast x86_mmx %tmp3 to <8 x i8>
%tmp4 = add <8 x i8> %tmp1a, %tmp3a
%tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx
store x86_mmx %tmp4a, x86_mmx* %A
- %tmp7 = load x86_mmx* %B
+ %tmp7 = load x86_mmx, x86_mmx* %B
%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b(x86_mmx %tmp4a, x86_mmx %tmp7)
store x86_mmx %tmp12, x86_mmx* %A
- %tmp16 = load x86_mmx* %B
+ %tmp16 = load x86_mmx, x86_mmx* %B
%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %tmp12, x86_mmx %tmp16)
store x86_mmx %tmp21, x86_mmx* %A
- %tmp27 = load x86_mmx* %B
+ %tmp27 = load x86_mmx, x86_mmx* %B
%tmp21a = bitcast x86_mmx %tmp21 to <8 x i8>
%tmp27a = bitcast x86_mmx %tmp27 to <8 x i8>
%tmp28 = sub <8 x i8> %tmp21a, %tmp27a
%tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx
store x86_mmx %tmp28a, x86_mmx* %A
- %tmp31 = load x86_mmx* %B
+ %tmp31 = load x86_mmx, x86_mmx* %B
%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx %tmp28a, x86_mmx %tmp31)
store x86_mmx %tmp36, x86_mmx* %A
- %tmp40 = load x86_mmx* %B
+ %tmp40 = load x86_mmx, x86_mmx* %B
%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx %tmp36, x86_mmx %tmp40)
store x86_mmx %tmp45, x86_mmx* %A
- %tmp51 = load x86_mmx* %B
+ %tmp51 = load x86_mmx, x86_mmx* %B
%tmp45a = bitcast x86_mmx %tmp45 to <8 x i8>
%tmp51a = bitcast x86_mmx %tmp51 to <8 x i8>
%tmp52 = mul <8 x i8> %tmp45a, %tmp51a
%tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx
store x86_mmx %tmp52a, x86_mmx* %A
- %tmp57 = load x86_mmx* %B
+ %tmp57 = load x86_mmx, x86_mmx* %B
%tmp57a = bitcast x86_mmx %tmp57 to <8 x i8>
%tmp58 = and <8 x i8> %tmp52, %tmp57a
%tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx
store x86_mmx %tmp58a, x86_mmx* %A
- %tmp63 = load x86_mmx* %B
+ %tmp63 = load x86_mmx, x86_mmx* %B
%tmp63a = bitcast x86_mmx %tmp63 to <8 x i8>
%tmp64 = or <8 x i8> %tmp58, %tmp63a
%tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
store x86_mmx %tmp64a, x86_mmx* %A
- %tmp69 = load x86_mmx* %B
+ %tmp69 = load x86_mmx, x86_mmx* %B
%tmp69a = bitcast x86_mmx %tmp69 to <8 x i8>
%tmp64b = bitcast x86_mmx %tmp64a to <8 x i8>
%tmp70 = xor <8 x i8> %tmp64b, %tmp69a
@@ -63,37 +63,37 @@ entry:
; X64-LABEL: test1
define void @test1(x86_mmx* %A, x86_mmx* %B) {
entry:
- %tmp1 = load x86_mmx* %A
- %tmp3 = load x86_mmx* %B
+ %tmp1 = load x86_mmx, x86_mmx* %A
+ %tmp3 = load x86_mmx, x86_mmx* %B
%tmp1a = bitcast x86_mmx %tmp1 to <2 x i32>
%tmp3a = bitcast x86_mmx %tmp3 to <2 x i32>
%tmp4 = add <2 x i32> %tmp1a, %tmp3a
%tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx
store x86_mmx %tmp4a, x86_mmx* %A
- %tmp9 = load x86_mmx* %B
+ %tmp9 = load x86_mmx, x86_mmx* %B
%tmp9a = bitcast x86_mmx %tmp9 to <2 x i32>
%tmp10 = sub <2 x i32> %tmp4, %tmp9a
%tmp10a = bitcast <2 x i32> %tmp4 to x86_mmx
store x86_mmx %tmp10a, x86_mmx* %A
- %tmp15 = load x86_mmx* %B
+ %tmp15 = load x86_mmx, x86_mmx* %B
%tmp10b = bitcast x86_mmx %tmp10a to <2 x i32>
%tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
%tmp16 = mul <2 x i32> %tmp10b, %tmp15a
%tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
store x86_mmx %tmp16a, x86_mmx* %A
- %tmp21 = load x86_mmx* %B
+ %tmp21 = load x86_mmx, x86_mmx* %B
%tmp16b = bitcast x86_mmx %tmp16a to <2 x i32>
%tmp21a = bitcast x86_mmx %tmp21 to <2 x i32>
%tmp22 = and <2 x i32> %tmp16b, %tmp21a
%tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx
store x86_mmx %tmp22a, x86_mmx* %A
- %tmp27 = load x86_mmx* %B
+ %tmp27 = load x86_mmx, x86_mmx* %B
%tmp22b = bitcast x86_mmx %tmp22a to <2 x i32>
%tmp27a = bitcast x86_mmx %tmp27 to <2 x i32>
%tmp28 = or <2 x i32> %tmp22b, %tmp27a
%tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx
store x86_mmx %tmp28a, x86_mmx* %A
- %tmp33 = load x86_mmx* %B
+ %tmp33 = load x86_mmx, x86_mmx* %B
%tmp28b = bitcast x86_mmx %tmp28a to <2 x i32>
%tmp33a = bitcast x86_mmx %tmp33 to <2 x i32>
%tmp34 = xor <2 x i32> %tmp28b, %tmp33a
@@ -107,57 +107,57 @@ entry:
; X64-LABEL: test2
define void @test2(x86_mmx* %A, x86_mmx* %B) {
entry:
- %tmp1 = load x86_mmx* %A
- %tmp3 = load x86_mmx* %B
+ %tmp1 = load x86_mmx, x86_mmx* %A
+ %tmp3 = load x86_mmx, x86_mmx* %B
%tmp1a = bitcast x86_mmx %tmp1 to <4 x i16>
%tmp3a = bitcast x86_mmx %tmp3 to <4 x i16>
%tmp4 = add <4 x i16> %tmp1a, %tmp3a
%tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx
store x86_mmx %tmp4a, x86_mmx* %A
- %tmp7 = load x86_mmx* %B
+ %tmp7 = load x86_mmx, x86_mmx* %B
%tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w(x86_mmx %tmp4a, x86_mmx %tmp7)
store x86_mmx %tmp12, x86_mmx* %A
- %tmp16 = load x86_mmx* %B
+ %tmp16 = load x86_mmx, x86_mmx* %B
%tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %tmp12, x86_mmx %tmp16)
store x86_mmx %tmp21, x86_mmx* %A
- %tmp27 = load x86_mmx* %B
+ %tmp27 = load x86_mmx, x86_mmx* %B
%tmp21a = bitcast x86_mmx %tmp21 to <4 x i16>
%tmp27a = bitcast x86_mmx %tmp27 to <4 x i16>
%tmp28 = sub <4 x i16> %tmp21a, %tmp27a
%tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx
store x86_mmx %tmp28a, x86_mmx* %A
- %tmp31 = load x86_mmx* %B
+ %tmp31 = load x86_mmx, x86_mmx* %B
%tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx %tmp28a, x86_mmx %tmp31)
store x86_mmx %tmp36, x86_mmx* %A
- %tmp40 = load x86_mmx* %B
+ %tmp40 = load x86_mmx, x86_mmx* %B
%tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx %tmp36, x86_mmx %tmp40)
store x86_mmx %tmp45, x86_mmx* %A
- %tmp51 = load x86_mmx* %B
+ %tmp51 = load x86_mmx, x86_mmx* %B
%tmp45a = bitcast x86_mmx %tmp45 to <4 x i16>
%tmp51a = bitcast x86_mmx %tmp51 to <4 x i16>
%tmp52 = mul <4 x i16> %tmp45a, %tmp51a
%tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx
store x86_mmx %tmp52a, x86_mmx* %A
- %tmp55 = load x86_mmx* %B
+ %tmp55 = load x86_mmx, x86_mmx* %B
%tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx %tmp52a, x86_mmx %tmp55)
store x86_mmx %tmp60, x86_mmx* %A
- %tmp64 = load x86_mmx* %B
+ %tmp64 = load x86_mmx, x86_mmx* %B
%tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx %tmp60, x86_mmx %tmp64)
%tmp70 = bitcast x86_mmx %tmp69 to x86_mmx
store x86_mmx %tmp70, x86_mmx* %A
- %tmp75 = load x86_mmx* %B
+ %tmp75 = load x86_mmx, x86_mmx* %B
%tmp70a = bitcast x86_mmx %tmp70 to <4 x i16>
%tmp75a = bitcast x86_mmx %tmp75 to <4 x i16>
%tmp76 = and <4 x i16> %tmp70a, %tmp75a
%tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx
store x86_mmx %tmp76a, x86_mmx* %A
- %tmp81 = load x86_mmx* %B
+ %tmp81 = load x86_mmx, x86_mmx* %B
%tmp76b = bitcast x86_mmx %tmp76a to <4 x i16>
%tmp81a = bitcast x86_mmx %tmp81 to <4 x i16>
%tmp82 = or <4 x i16> %tmp76b, %tmp81a
%tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx
store x86_mmx %tmp82a, x86_mmx* %A
- %tmp87 = load x86_mmx* %B
+ %tmp87 = load x86_mmx, x86_mmx* %B
%tmp82b = bitcast x86_mmx %tmp82a to <4 x i16>
%tmp87a = bitcast x86_mmx %tmp87 to <4 x i16>
%tmp88 = xor <4 x i16> %tmp82b, %tmp87a
@@ -179,9 +179,9 @@ bb26:
%i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ]
%sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
%tmp13 = getelementptr <1 x i64>, <1 x i64>* %b, i32 %i.037.0
- %tmp14 = load <1 x i64>* %tmp13
+ %tmp14 = load <1 x i64>, <1 x i64>* %tmp13
%tmp18 = getelementptr <1 x i64>, <1 x i64>* %a, i32 %i.037.0
- %tmp19 = load <1 x i64>* %tmp18
+ %tmp19 = load <1 x i64>, <1 x i64>* %tmp18
%tmp21 = add <1 x i64> %tmp19, %tmp14
%tmp22 = add <1 x i64> %tmp21, %sum.035.0
%tmp25 = add i32 %i.037.0, 1
diff --git a/llvm/test/CodeGen/X86/mmx-bitcast.ll b/llvm/test/CodeGen/X86/mmx-bitcast.ll
index a2eb96a3429..4aa10a98d45 100644
--- a/llvm/test/CodeGen/X86/mmx-bitcast.ll
+++ b/llvm/test/CodeGen/X86/mmx-bitcast.ll
@@ -7,7 +7,7 @@ define i64 @t0(x86_mmx* %p) {
; CHECK-NEXT: paddq %mm0, %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
- %t = load x86_mmx* %p
+ %t = load x86_mmx, x86_mmx* %p
%u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
%s = bitcast x86_mmx %u to i64
ret i64 %s
@@ -20,7 +20,7 @@ define i64 @t1(x86_mmx* %p) {
; CHECK-NEXT: paddd %mm0, %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
- %t = load x86_mmx* %p
+ %t = load x86_mmx, x86_mmx* %p
%u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t)
%s = bitcast x86_mmx %u to i64
ret i64 %s
@@ -33,7 +33,7 @@ define i64 @t2(x86_mmx* %p) {
; CHECK-NEXT: paddw %mm0, %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
- %t = load x86_mmx* %p
+ %t = load x86_mmx, x86_mmx* %p
%u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %t)
%s = bitcast x86_mmx %u to i64
ret i64 %s
@@ -46,7 +46,7 @@ define i64 @t3(x86_mmx* %p) {
; CHECK-NEXT: paddb %mm0, %mm0
; CHECK-NEXT: movd %mm0, %rax
; CHECK-NEXT: retq
- %t = load x86_mmx* %p
+ %t = load x86_mmx, x86_mmx* %p
%u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %t)
%s = bitcast x86_mmx %u to i64
ret i64 %s
diff --git a/llvm/test/CodeGen/X86/mmx-copy-gprs.ll b/llvm/test/CodeGen/X86/mmx-copy-gprs.ll
index 377875565bf..6d39713833e 100644
--- a/llvm/test/CodeGen/X86/mmx-copy-gprs.ll
+++ b/llvm/test/CodeGen/X86/mmx-copy-gprs.ll
@@ -11,7 +11,7 @@
define void @foo(<1 x i64>* %x, <1 x i64>* %y) nounwind {
entry:
- %tmp1 = load <1 x i64>* %y, align 8 ; <<1 x i64>> [#uses=1]
+ %tmp1 = load <1 x i64>, <1 x i64>* %y, align 8 ; <<1 x i64>> [#uses=1]
store <1 x i64> %tmp1, <1 x i64>* %x, align 8
ret void
}
diff --git a/llvm/test/CodeGen/X86/mmx-fold-load.ll b/llvm/test/CodeGen/X86/mmx-fold-load.ll
index d49edac8c5f..2b9d30f59fd 100644
--- a/llvm/test/CodeGen/X86/mmx-fold-load.ll
+++ b/llvm/test/CodeGen/X86/mmx-fold-load.ll
@@ -9,8 +9,8 @@ define i64 @t0(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -26,8 +26,8 @@ define i64 @t1(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -43,8 +43,8 @@ define i64 @t2(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -60,8 +60,8 @@ define i64 @t3(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -77,8 +77,8 @@ define i64 @t4(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -94,8 +94,8 @@ define i64 @t5(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -111,8 +111,8 @@ define i64 @t6(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -128,8 +128,8 @@ define i64 @t7(<1 x i64>* %a, i32* %b) {
; CHECK-NEXT: retq
entry:
%0 = bitcast <1 x i64>* %a to x86_mmx*
- %1 = load x86_mmx* %0, align 8
- %2 = load i32* %b, align 4
+ %1 = load x86_mmx, x86_mmx* %0, align 8
+ %2 = load i32, i32* %b, align 4
%3 = tail call x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx %1, i32 %2)
%4 = bitcast x86_mmx %3 to i64
ret i64 %4
@@ -144,7 +144,7 @@ define i64 @tt0(x86_mmx %t, x86_mmx* %q) {
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -161,7 +161,7 @@ define i64 @tt1(x86_mmx %t, x86_mmx* %q) {
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -177,7 +177,7 @@ define i64 @tt2(x86_mmx %t, x86_mmx* %q) {
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -193,7 +193,7 @@ define i64 @tt3(x86_mmx %t, x86_mmx* %q) {
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -209,7 +209,7 @@ define i64 @tt4(x86_mmx %t, x86_mmx* %q) {
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -225,7 +225,7 @@ define i64 @tt5(x86_mmx %t, x86_mmx* %q) {
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -241,7 +241,7 @@ define i64 @tt6(x86_mmx %t, x86_mmx* %q) {
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -257,7 +257,7 @@ define i64 @tt7(x86_mmx %t, x86_mmx* %q) {
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
@@ -273,7 +273,7 @@ define i64 @tt8(x86_mmx %t, x86_mmx* %q) {
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %v = load x86_mmx* %q
+ %v = load x86_mmx, x86_mmx* %q
%u = tail call x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx %t, x86_mmx %v)
%s = bitcast x86_mmx %u to i64
call void @llvm.x86.mmx.emms()
diff --git a/llvm/test/CodeGen/X86/movbe.ll b/llvm/test/CodeGen/X86/movbe.ll
index e248410b202..49e765de2e7 100644
--- a/llvm/test/CodeGen/X86/movbe.ll
+++ b/llvm/test/CodeGen/X86/movbe.ll
@@ -16,7 +16,7 @@ define void @test1(i16* nocapture %x, i16 %y) nounwind {
}
define i16 @test2(i16* %x) nounwind {
- %load = load i16* %x, align 2
+ %load = load i16, i16* %x, align 2
%bswap = call i16 @llvm.bswap.i16(i16 %load)
ret i16 %bswap
; CHECK-LABEL: test2:
@@ -36,7 +36,7 @@ define void @test3(i32* nocapture %x, i32 %y) nounwind {
}
define i32 @test4(i32* %x) nounwind {
- %load = load i32* %x, align 4
+ %load = load i32, i32* %x, align 4
%bswap = call i32 @llvm.bswap.i32(i32 %load)
ret i32 %bswap
; CHECK-LABEL: test4:
@@ -56,7 +56,7 @@ define void @test5(i64* %x, i64 %y) nounwind {
}
define i64 @test6(i64* %x) nounwind {
- %load = load i64* %x, align 8
+ %load = load i64, i64* %x, align 8
%bswap = call i64 @llvm.bswap.i64(i64 %load)
ret i64 %bswap
; CHECK-LABEL: test6:
diff --git a/llvm/test/CodeGen/X86/movfs.ll b/llvm/test/CodeGen/X86/movfs.ll
index 823e98689e7..a337927b713 100644
--- a/llvm/test/CodeGen/X86/movfs.ll
+++ b/llvm/test/CodeGen/X86/movfs.ll
@@ -2,7 +2,7 @@
define i32 @foo() nounwind readonly {
entry:
- %tmp = load i32* addrspace(257)* getelementptr (i32* addrspace(257)* inttoptr (i32 72 to i32* addrspace(257)*), i32 31) ; <i32*> [#uses=1]
- %tmp1 = load i32* %tmp ; <i32> [#uses=1]
+ %tmp = load i32*, i32* addrspace(257)* getelementptr (i32* addrspace(257)* inttoptr (i32 72 to i32* addrspace(257)*), i32 31) ; <i32*> [#uses=1]
+ %tmp1 = load i32, i32* %tmp ; <i32> [#uses=1]
ret i32 %tmp1
}
diff --git a/llvm/test/CodeGen/X86/movgs.ll b/llvm/test/CodeGen/X86/movgs.ll
index 96c5dbb8ea9..da4c9b7f1ea 100644
--- a/llvm/test/CodeGen/X86/movgs.ll
+++ b/llvm/test/CodeGen/X86/movgs.ll
@@ -15,8 +15,8 @@ define i32 @test1() nounwind readonly {
; X64-NEXT: movl (%rax), %eax
; X64-NEXT: retq
entry:
- %tmp = load i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31) ; <i32*> [#uses=1]
- %tmp1 = load i32* %tmp ; <i32> [#uses=1]
+ %tmp = load i32*, i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31) ; <i32*> [#uses=1]
+ %tmp1 = load i32, i32* %tmp ; <i32> [#uses=1]
ret i32 %tmp1
}
@@ -39,7 +39,7 @@ define i64 @test2(void (i8*)* addrspace(256)* %tmp8) nounwind {
; X64-NEXT: {{(addq.*%rsp|popq)}}
; X64-NEXT: retq
entry:
- %tmp9 = load void (i8*)* addrspace(256)* %tmp8, align 8
+ %tmp9 = load void (i8*)*, void (i8*)* addrspace(256)* %tmp8, align 8
tail call void %tmp9(i8* undef) nounwind optsize
ret i64 0
}
@@ -56,7 +56,7 @@ define <2 x i64> @pmovsxwd_1(i64 addrspace(256)* %p) nounwind readonly {
; X64-NEXT: pmovsxwd %gs:(%{{(rcx|rdi)}}), %xmm0
; X64-NEXT: retq
entry:
- %0 = load i64 addrspace(256)* %p
+ %0 = load i64, i64 addrspace(256)* %p
%tmp2 = insertelement <2 x i64> zeroinitializer, i64 %0, i32 0
%1 = bitcast <2 x i64> %tmp2 to <8 x i16>
%2 = tail call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1) nounwind readnone
@@ -83,10 +83,10 @@ define i32 @test_no_cse() nounwind readonly {
; X64-NEXT: addl (%rcx), %eax
; X64-NEXT: retq
entry:
- %tmp = load i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31) ; <i32*> [#uses=1]
- %tmp1 = load i32* %tmp ; <i32> [#uses=1]
- %tmp2 = load i32* addrspace(257)* getelementptr (i32* addrspace(257)* inttoptr (i32 72 to i32* addrspace(257)*), i32 31) ; <i32*> [#uses=1]
- %tmp3 = load i32* %tmp2 ; <i32> [#uses=1]
+ %tmp = load i32*, i32* addrspace(256)* getelementptr (i32* addrspace(256)* inttoptr (i32 72 to i32* addrspace(256)*), i32 31) ; <i32*> [#uses=1]
+ %tmp1 = load i32, i32* %tmp ; <i32> [#uses=1]
+ %tmp2 = load i32*, i32* addrspace(257)* getelementptr (i32* addrspace(257)* inttoptr (i32 72 to i32* addrspace(257)*), i32 31) ; <i32*> [#uses=1]
+ %tmp3 = load i32, i32* %tmp2 ; <i32> [#uses=1]
%tmp4 = add i32 %tmp1, %tmp3
ret i32 %tmp4
}
diff --git a/llvm/test/CodeGen/X86/movmsk.ll b/llvm/test/CodeGen/X86/movmsk.ll
index a474d1a8179..a7ebebca4b7 100644
--- a/llvm/test/CodeGen/X86/movmsk.ll
+++ b/llvm/test/CodeGen/X86/movmsk.ll
@@ -105,7 +105,7 @@ entry:
%0 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %x) nounwind
%idxprom = sext i32 %0 to i64
%arrayidx = getelementptr inbounds i32, i32* %indexTable, i64 %idxprom
- %1 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx, align 4
ret i32 %1
}
@@ -118,7 +118,7 @@ entry:
%1 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %0) nounwind
%idxprom = sext i32 %1 to i64
%arrayidx = getelementptr inbounds i32, i32* %indexTable, i64 %idxprom
- %2 = load i32* %arrayidx, align 4
+ %2 = load i32, i32* %arrayidx, align 4
ret i32 %2
}
diff --git a/llvm/test/CodeGen/X86/movtopush.ll b/llvm/test/CodeGen/X86/movtopush.ll
index 4a5d9037394..4278910253a 100644
--- a/llvm/test/CodeGen/X86/movtopush.ll
+++ b/llvm/test/CodeGen/X86/movtopush.ll
@@ -196,7 +196,7 @@ bb:
; NORMAL-NEXT: addl $16, %esp
define void @test7(i32* %ptr) optsize {
entry:
- %val = load i32* %ptr
+ %val = load i32, i32* %ptr
call void @good(i32 1, i32 2, i32 %val, i32 4)
ret void
}
@@ -263,7 +263,7 @@ entry:
define void @test10() optsize {
%stack_fptr = alloca void (i32, i32, i32, i32)*
store void (i32, i32, i32, i32)* @good, void (i32, i32, i32, i32)** %stack_fptr
- %good_ptr = load volatile void (i32, i32, i32, i32)** %stack_fptr
+ %good_ptr = load volatile void (i32, i32, i32, i32)*, void (i32, i32, i32, i32)** %stack_fptr
call void asm sideeffect "nop", "~{ax},~{bx},~{cx},~{dx},~{bp},~{si},~{di}"()
call void (i32, i32, i32, i32)* %good_ptr(i32 1, i32 2, i32 3, i32 4)
ret void
@@ -282,7 +282,7 @@ define void @test10() optsize {
; NORMAL-NEXT: addl $16, %esp
@the_global = external global i32
define void @test11() optsize {
- %myload = load i32* @the_global
+ %myload = load i32, i32* @the_global
store i32 42, i32* @the_global
call void @good(i32 %myload, i32 2, i32 3, i32 4)
ret void
diff --git a/llvm/test/CodeGen/X86/ms-inline-asm.ll b/llvm/test/CodeGen/X86/ms-inline-asm.ll
index 65b897cc6c8..428eb1b7190 100644
--- a/llvm/test/CodeGen/X86/ms-inline-asm.ll
+++ b/llvm/test/CodeGen/X86/ms-inline-asm.ll
@@ -50,7 +50,7 @@ entry:
store i32 2, i32* %b, align 4
call void asm sideeffect inteldialect "lea ebx, foo\0A\09mov eax, [ebx].0\0A\09mov [ebx].4, ecx", "~{eax},~{dirflag},~{fpsr},~{flags}"() nounwind
%b1 = getelementptr inbounds %struct.t18_type, %struct.t18_type* %foo, i32 0, i32 1
- %0 = load i32* %b1, align 4
+ %0 = load i32, i32* %b1, align 4
ret i32 %0
; CHECK: t18
; CHECK: {{## InlineAsm Start|#APP}}
@@ -87,7 +87,7 @@ entry:
%res = alloca i32*, align 4
call void asm sideeffect inteldialect "lea edi, dword ptr $0", "*m,~{edi},~{dirflag},~{fpsr},~{flags}"([2 x i32]* @results) nounwind
call void asm sideeffect inteldialect "mov dword ptr $0, edi", "=*m,~{dirflag},~{fpsr},~{flags}"(i32** %res) nounwind
- %0 = load i32** %res, align 4
+ %0 = load i32*, i32** %res, align 4
ret i32* %0
; CHECK-LABEL: t30:
; CHECK: {{## InlineAsm Start|#APP}}
@@ -111,7 +111,7 @@ entry:
%val = alloca i32, align 64
store i32 -1, i32* %val, align 64
call void asm sideeffect inteldialect "mov dword ptr $0, esp", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* %val)
- %sp = load i32* %val, align 64
+ %sp = load i32, i32* %val, align 64
ret i32 %sp
; CHECK-LABEL: t31:
; CHECK: pushl %ebp
diff --git a/llvm/test/CodeGen/X86/mul128_sext_loop.ll b/llvm/test/CodeGen/X86/mul128_sext_loop.ll
index 88c5156405a..efb7e02720b 100644
--- a/llvm/test/CodeGen/X86/mul128_sext_loop.ll
+++ b/llvm/test/CodeGen/X86/mul128_sext_loop.ll
@@ -15,7 +15,7 @@ define void @test(i64* nocapture %arr, i64 %arrsize, i64 %factor) nounwind uwtab
%carry.02 = phi i128 [ 0, %.lr.ph ], [ %10, %3 ]
%i.01 = phi i64 [ 0, %.lr.ph ], [ %11, %3 ]
%4 = getelementptr inbounds i64, i64* %arr, i64 %i.01
- %5 = load i64* %4, align 8
+ %5 = load i64, i64* %4, align 8
%6 = sext i64 %5 to i128
%7 = mul nsw i128 %6, %2
%8 = add nsw i128 %7, %carry.02
diff --git a/llvm/test/CodeGen/X86/muloti.ll b/llvm/test/CodeGen/X86/muloti.ll
index 523b9703a40..6c6198e400f 100644
--- a/llvm/test/CodeGen/X86/muloti.ll
+++ b/llvm/test/CodeGen/X86/muloti.ll
@@ -45,17 +45,17 @@ entry:
store i64 %a.coerce0, i64* %1
%2 = getelementptr %0, %0* %0, i32 0, i32 1
store i64 %a.coerce1, i64* %2
- %a = load i128* %coerce, align 16
+ %a = load i128, i128* %coerce, align 16
store i128 %a, i128* %a.addr, align 16
%3 = bitcast i128* %coerce1 to %0*
%4 = getelementptr %0, %0* %3, i32 0, i32 0
store i64 %b.coerce0, i64* %4
%5 = getelementptr %0, %0* %3, i32 0, i32 1
store i64 %b.coerce1, i64* %5
- %b = load i128* %coerce1, align 16
+ %b = load i128, i128* %coerce1, align 16
store i128 %b, i128* %b.addr, align 16
- %tmp = load i128* %a.addr, align 16
- %tmp2 = load i128* %b.addr, align 16
+ %tmp = load i128, i128* %a.addr, align 16
+ %tmp2 = load i128, i128* %b.addr, align 16
%6 = call %1 @llvm.umul.with.overflow.i128(i128 %tmp, i128 %tmp2)
; CHECK: cmov
; CHECK: divti3
@@ -70,7 +70,7 @@ overflow: ; preds = %entry
nooverflow: ; preds = %entry
store i128 %7, i128* %retval
%9 = bitcast i128* %retval to %0*
- %10 = load %0* %9, align 1
+ %10 = load %0, %0* %9, align 1
ret %0 %10
}
diff --git a/llvm/test/CodeGen/X86/mult-alt-generic-i686.ll b/llvm/test/CodeGen/X86/mult-alt-generic-i686.ll
index 54bc3a42f03..3472e94b1d0 100644
--- a/llvm/test/CodeGen/X86/mult-alt-generic-i686.ll
+++ b/llvm/test/CodeGen/X86/mult-alt-generic-i686.ll
@@ -33,10 +33,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,<r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r<,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -48,10 +48,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,>r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r>,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -63,7 +63,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -120,10 +120,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,imr,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
@@ -137,10 +137,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,X,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
@@ -165,7 +165,7 @@ entry:
define void @multi_m() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*m|r,m|r,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
@@ -190,10 +190,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|<r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r<,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -205,10 +205,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|>r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r>,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -220,7 +220,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|m,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -277,10 +277,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|imr,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
@@ -294,10 +294,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|X,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
diff --git a/llvm/test/CodeGen/X86/mult-alt-generic-x86_64.ll b/llvm/test/CodeGen/X86/mult-alt-generic-x86_64.ll
index 84a9c814094..7f92a0de0d8 100644
--- a/llvm/test/CodeGen/X86/mult-alt-generic-x86_64.ll
+++ b/llvm/test/CodeGen/X86/mult-alt-generic-x86_64.ll
@@ -33,10 +33,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,<r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r<,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -48,10 +48,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,>r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r,r>,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -63,7 +63,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -120,10 +120,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,imr,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
@@ -137,10 +137,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r,X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r,X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r,X,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
@@ -165,7 +165,7 @@ entry:
define void @multi_m() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*m|r,m|r,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
@@ -190,10 +190,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|<r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r<,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -205,10 +205,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|>r,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* %in1, align 4
+ %tmp1 = load i32, i32* %in1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|r>,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
ret void
@@ -220,7 +220,7 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|m,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
ret void
@@ -277,10 +277,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|imr,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|imr,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
@@ -294,10 +294,10 @@ entry:
%in1 = alloca i32, align 4
store i32 0, i32* %out0, align 4
store i32 1, i32* %in1, align 4
- %tmp = load i32* %in1, align 4
+ %tmp = load i32, i32* %in1, align 4
%0 = call i32 asm "foo $1,$0", "=r|r,r|X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* %out0, align 4
- %tmp1 = load i32* @min1, align 4
+ %tmp1 = load i32, i32* @min1, align 4
%1 = call i32 asm "foo $1,$0", "=r|r,r|X,~{dirflag},~{fpsr},~{flags}"(i32 %tmp1) nounwind
store i32 %1, i32* %out0, align 4
%2 = call i32 asm "foo $1,$0", "=r|r,r|X,~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
diff --git a/llvm/test/CodeGen/X86/mult-alt-x86.ll b/llvm/test/CodeGen/X86/mult-alt-x86.ll
index cb2219a6ed7..5174f85adb9 100644
--- a/llvm/test/CodeGen/X86/mult-alt-x86.ll
+++ b/llvm/test/CodeGen/X86/mult-alt-x86.ll
@@ -11,7 +11,7 @@ target triple = "i686-pc-win32"
define void @single_R() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "=R,R,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -19,7 +19,7 @@ entry:
define void @single_q() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "=q,q,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -27,7 +27,7 @@ entry:
define void @single_Q() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "=Q,Q,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -35,7 +35,7 @@ entry:
define void @single_a() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "={ax},{ax},~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -43,7 +43,7 @@ entry:
define void @single_b() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "={bx},{bx},~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -51,7 +51,7 @@ entry:
define void @single_c() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "={cx},{cx},~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -59,7 +59,7 @@ entry:
define void @single_d() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "={dx},{dx},~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -67,7 +67,7 @@ entry:
define void @single_S() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "={si},{si},~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -75,7 +75,7 @@ entry:
define void @single_D() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "={di},{di},~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -83,7 +83,7 @@ entry:
define void @single_A() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
%0 = call i32 asm "foo $1,$0", "=A,A,~{dirflag},~{fpsr},~{flags}"(i32 %tmp) nounwind
store i32 %0, i32* @mout0, align 4
ret void
@@ -106,7 +106,7 @@ entry:
define void @single_y() nounwind {
entry:
- %tmp = load double* @din1, align 8
+ %tmp = load double, double* @din1, align 8
%0 = call double asm "foo $1,$0", "=y,y,~{dirflag},~{fpsr},~{flags}"(double %tmp) nounwind
store double %0, double* @dout0, align 8
ret void
@@ -114,7 +114,7 @@ entry:
define void @single_x() nounwind {
entry:
- %tmp = load double* @din1, align 8
+ %tmp = load double, double* @din1, align 8
%0 = call double asm "foo $1,$0", "=x,x,~{dirflag},~{fpsr},~{flags}"(double %tmp) nounwind
store double %0, double* @dout0, align 8
ret void
@@ -191,70 +191,70 @@ entry:
define void @multi_R() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|R|m,r|R|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_q() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|q|m,r|q|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_Q() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|Q|m,r|Q|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_a() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|{ax}|m,r|{ax}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_b() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|{bx}|m,r|{bx}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_c() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|{cx}|m,r|{cx}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_d() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|{dx}|m,r|{dx},~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_S() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|{si}|m,r|{si}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_D() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|{di}|m,r|{di}|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
define void @multi_A() nounwind {
entry:
- %tmp = load i32* @min1, align 4
+ %tmp = load i32, i32* @min1, align 4
call void asm "foo $1,$0", "=*r|A|m,r|A|m,~{dirflag},~{fpsr},~{flags}"(i32* @mout0, i32 %tmp) nounwind
ret void
}
@@ -276,14 +276,14 @@ entry:
define void @multi_y() nounwind {
entry:
- %tmp = load double* @din1, align 8
+ %tmp = load double, double* @din1, align 8
call void asm "foo $1,$0", "=*r|y|m,r|y|m,~{dirflag},~{fpsr},~{flags}"(double* @dout0, double %tmp) nounwind
ret void
}
define void @multi_x() nounwind {
entry:
- %tmp = load double* @din1, align 8
+ %tmp = load double, double* @din1, align 8
call void asm "foo $1,$0", "=*r|x|m,r|x|m,~{dirflag},~{fpsr},~{flags}"(double* @dout0, double %tmp) nounwind
ret void
}
diff --git a/llvm/test/CodeGen/X86/multiple-loop-post-inc.ll b/llvm/test/CodeGen/X86/multiple-loop-post-inc.ll
index ab2420b3324..be778da5733 100644
--- a/llvm/test/CodeGen/X86/multiple-loop-post-inc.ll
+++ b/llvm/test/CodeGen/X86/multiple-loop-post-inc.ll
@@ -19,7 +19,7 @@ define void @foo(float* %I, i64 %IS, float* nocapture %Start, float* nocapture %
entry:
%times4 = alloca float, align 4 ; <float*> [#uses=3]
%timesN = alloca float, align 4 ; <float*> [#uses=2]
- %0 = load float* %Step, align 4 ; <float> [#uses=8]
+ %0 = load float, float* %Step, align 4 ; <float> [#uses=8]
%1 = ptrtoint float* %I to i64 ; <i64> [#uses=1]
%2 = ptrtoint float* %O to i64 ; <i64> [#uses=1]
%tmp = xor i64 %2, %1 ; <i64> [#uses=1]
@@ -34,11 +34,11 @@ entry:
br i1 %9, label %bb, label %return
bb: ; preds = %entry
- %10 = load float* %Start, align 4 ; <float> [#uses=1]
+ %10 = load float, float* %Start, align 4 ; <float> [#uses=1]
br label %bb2
bb1: ; preds = %bb3
- %11 = load float* %I_addr.0, align 4 ; <float> [#uses=1]
+ %11 = load float, float* %I_addr.0, align 4 ; <float> [#uses=1]
%12 = fmul float %11, %x.0 ; <float> [#uses=1]
store float %12, float* %O_addr.0, align 4
%13 = fadd float %x.0, %0 ; <float> [#uses=1]
@@ -127,10 +127,10 @@ bb5: ; preds = %bb.nph43, %bb5
%scevgep130131 = bitcast float* %scevgep130 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp132 = mul i64 %indvar102, -16 ; <i64> [#uses=1]
%tmp136 = add i64 %tmp135, %tmp132 ; <i64> [#uses=2]
- %36 = load <4 x float>* %scevgep106107, align 16 ; <<4 x float>> [#uses=1]
- %37 = load <4 x float>* %scevgep113114, align 16 ; <<4 x float>> [#uses=1]
- %38 = load <4 x float>* %scevgep117118, align 16 ; <<4 x float>> [#uses=1]
- %39 = load <4 x float>* %scevgep121122, align 16 ; <<4 x float>> [#uses=1]
+ %36 = load <4 x float>, <4 x float>* %scevgep106107, align 16 ; <<4 x float>> [#uses=1]
+ %37 = load <4 x float>, <4 x float>* %scevgep113114, align 16 ; <<4 x float>> [#uses=1]
+ %38 = load <4 x float>, <4 x float>* %scevgep117118, align 16 ; <<4 x float>> [#uses=1]
+ %39 = load <4 x float>, <4 x float>* %scevgep121122, align 16 ; <<4 x float>> [#uses=1]
%40 = fmul <4 x float> %36, %vX0.039 ; <<4 x float>> [#uses=1]
%41 = fadd <4 x float> %vX0.039, %asmtmp.i18 ; <<4 x float>> [#uses=2]
%42 = fmul <4 x float> %37, %vX1.036 ; <<4 x float>> [#uses=1]
@@ -168,7 +168,7 @@ bb.nph: ; preds = %bb8
%I_addr.0.sum = add i64 %14, -1 ; <i64> [#uses=1]
%49 = getelementptr inbounds float, float* %I, i64 %I_addr.0.sum ; <float*> [#uses=1]
%50 = bitcast float* %49 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %51 = load <4 x float>* %50, align 16 ; <<4 x float>> [#uses=1]
+ %51 = load <4 x float>, <4 x float>* %50, align 16 ; <<4 x float>> [#uses=1]
%tmp54 = add i64 %14, 16 ; <i64> [#uses=1]
%tmp56 = add i64 %14, 3 ; <i64> [#uses=1]
%tmp60 = add i64 %14, 7 ; <i64> [#uses=1]
@@ -216,10 +216,10 @@ bb9: ; preds = %bb.nph, %bb9
%scevgep8687 = bitcast float* %scevgep86 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp88 = mul i64 %indvar, -16 ; <i64> [#uses=1]
%tmp92 = add i64 %tmp91, %tmp88 ; <i64> [#uses=2]
- %52 = load <4 x float>* %scevgep5859, align 16 ; <<4 x float>> [#uses=2]
- %53 = load <4 x float>* %scevgep6263, align 16 ; <<4 x float>> [#uses=2]
- %54 = load <4 x float>* %scevgep6667, align 16 ; <<4 x float>> [#uses=2]
- %55 = load <4 x float>* %scevgep7071, align 16 ; <<4 x float>> [#uses=2]
+ %52 = load <4 x float>, <4 x float>* %scevgep5859, align 16 ; <<4 x float>> [#uses=2]
+ %53 = load <4 x float>, <4 x float>* %scevgep6263, align 16 ; <<4 x float>> [#uses=2]
+ %54 = load <4 x float>, <4 x float>* %scevgep6667, align 16 ; <<4 x float>> [#uses=2]
+ %55 = load <4 x float>, <4 x float>* %scevgep7071, align 16 ; <<4 x float>> [#uses=2]
%56 = shufflevector <4 x float> %vI0.019, <4 x float> %52, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
%57 = shufflevector <4 x float> %56, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1]
%58 = shufflevector <4 x float> %52, <4 x float> %53, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
@@ -263,7 +263,7 @@ bb12: ; preds = %bb11, %bb12
%x.130 = phi float [ %77, %bb12 ], [ %73, %bb11 ] ; <float> [#uses=2]
%I_addr.433 = getelementptr float, float* %I_addr.2, i64 %indvar94 ; <float*> [#uses=1]
%O_addr.432 = getelementptr float, float* %O_addr.2, i64 %indvar94 ; <float*> [#uses=1]
- %75 = load float* %I_addr.433, align 4 ; <float> [#uses=1]
+ %75 = load float, float* %I_addr.433, align 4 ; <float> [#uses=1]
%76 = fmul float %75, %x.130 ; <float> [#uses=1]
store float %76, float* %O_addr.432, align 4
%77 = fadd float %x.130, %0 ; <float> [#uses=2]
@@ -293,7 +293,7 @@ outer: ; preds = %bb1, %entry
inner: ; preds = %bb0, %if.end275
%i8 = phi i32 [ %a, %outer ], [ %indvar.next159, %bb0 ] ; <i32> [#uses=2]
- %t338 = load i32* undef ; <i32> [#uses=1]
+ %t338 = load i32, i32* undef ; <i32> [#uses=1]
%t191 = mul i32 %i8, %t338 ; <i32> [#uses=1]
%t179 = add i32 %i6, %t191 ; <i32> [#uses=1]
br label %bb0
diff --git a/llvm/test/CodeGen/X86/mulx32.ll b/llvm/test/CodeGen/X86/mulx32.ll
index b75ac009e76..42ef2eb6f64 100644
--- a/llvm/test/CodeGen/X86/mulx32.ll
+++ b/llvm/test/CodeGen/X86/mulx32.ll
@@ -11,7 +11,7 @@ define i64 @f1(i32 %a, i32 %b) {
}
define i64 @f2(i32 %a, i32* %p) {
- %b = load i32* %p
+ %b = load i32, i32* %p
%x = zext i32 %a to i64
%y = zext i32 %b to i64
%r = mul i64 %x, %y
diff --git a/llvm/test/CodeGen/X86/mulx64.ll b/llvm/test/CodeGen/X86/mulx64.ll
index d5730282a13..808c02290b7 100644
--- a/llvm/test/CodeGen/X86/mulx64.ll
+++ b/llvm/test/CodeGen/X86/mulx64.ll
@@ -11,7 +11,7 @@ define i128 @f1(i64 %a, i64 %b) {
}
define i128 @f2(i64 %a, i64* %p) {
- %b = load i64* %p
+ %b = load i64, i64* %p
%x = zext i64 %a to i128
%y = zext i64 %b to i128
%r = mul i128 %x, %y
diff --git a/llvm/test/CodeGen/X86/musttail-indirect.ll b/llvm/test/CodeGen/X86/musttail-indirect.ll
index 3156090d67b..7bb71c3fb03 100644
--- a/llvm/test/CodeGen/X86/musttail-indirect.ll
+++ b/llvm/test/CodeGen/X86/musttail-indirect.ll
@@ -31,8 +31,8 @@
define x86_thiscallcc i32 @f_thunk(%struct.B* %this, i32) {
entry:
%1 = bitcast %struct.B* %this to i32 (%struct.B*, i32)***
- %vtable = load i32 (%struct.B*, i32)*** %1
- %2 = load i32 (%struct.B*, i32)** %vtable
+ %vtable = load i32 (%struct.B*, i32)**, i32 (%struct.B*, i32)*** %1
+ %2 = load i32 (%struct.B*, i32)*, i32 (%struct.B*, i32)** %vtable
%3 = musttail call x86_thiscallcc i32 %2(%struct.B* %this, i32 %0)
ret i32 %3
}
@@ -45,9 +45,9 @@ entry:
define x86_thiscallcc i32 @g_thunk(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca) {
entry:
%1 = bitcast %struct.B* %this to i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)***
- %vtable = load i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*** %1
+ %vtable = load i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)**, i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*** %1
%vfn = getelementptr inbounds i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*, i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vtable, i32 1
- %2 = load i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vfn
+ %2 = load i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*, i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vfn
%3 = musttail call x86_thiscallcc i32 %2(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca %0)
ret i32 %3
}
@@ -59,9 +59,9 @@ entry:
define x86_thiscallcc void @h_thunk(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca) {
entry:
%1 = bitcast %struct.B* %this to void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)***
- %vtable = load void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*** %1
+ %vtable = load void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)**, void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*** %1
%vfn = getelementptr inbounds void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*, void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vtable, i32 2
- %2 = load void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vfn
+ %2 = load void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*, void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vfn
musttail call x86_thiscallcc void %2(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca %0)
ret void
}
@@ -73,9 +73,9 @@ entry:
define x86_thiscallcc %struct.A* @i_thunk(%struct.B* %this, <{ %struct.A*, %struct.A, i32, %struct.A }>* inalloca) {
entry:
%1 = bitcast %struct.B* %this to %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)***
- %vtable = load %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)*** %1
+ %vtable = load %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)**, %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)*** %1
%vfn = getelementptr inbounds %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)*, %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)** %vtable, i32 3
- %2 = load %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)** %vfn
+ %2 = load %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)*, %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)** %vfn
%3 = musttail call x86_thiscallcc %struct.A* %2(%struct.B* %this, <{ %struct.A*, %struct.A, i32, %struct.A }>* inalloca %0)
ret %struct.A* %3
}
@@ -86,9 +86,9 @@ entry:
define x86_thiscallcc void @j_thunk(%struct.A* noalias sret %agg.result, %struct.B* %this, i32) {
entry:
%1 = bitcast %struct.B* %this to void (%struct.A*, %struct.B*, i32)***
- %vtable = load void (%struct.A*, %struct.B*, i32)*** %1
+ %vtable = load void (%struct.A*, %struct.B*, i32)**, void (%struct.A*, %struct.B*, i32)*** %1
%vfn = getelementptr inbounds void (%struct.A*, %struct.B*, i32)*, void (%struct.A*, %struct.B*, i32)** %vtable, i32 4
- %2 = load void (%struct.A*, %struct.B*, i32)** %vfn
+ %2 = load void (%struct.A*, %struct.B*, i32)*, void (%struct.A*, %struct.B*, i32)** %vfn
musttail call x86_thiscallcc void %2(%struct.A* sret %agg.result, %struct.B* %this, i32 %0)
ret void
}
@@ -100,11 +100,11 @@ entry:
define x86_stdcallcc i32 @stdcall_thunk(<{ %struct.B*, %struct.A }>* inalloca) {
entry:
%this_ptr = getelementptr inbounds <{ %struct.B*, %struct.A }>, <{ %struct.B*, %struct.A }>* %0, i32 0, i32 0
- %this = load %struct.B** %this_ptr
+ %this = load %struct.B*, %struct.B** %this_ptr
%1 = bitcast %struct.B* %this to i32 (<{ %struct.B*, %struct.A }>*)***
- %vtable = load i32 (<{ %struct.B*, %struct.A }>*)*** %1
+ %vtable = load i32 (<{ %struct.B*, %struct.A }>*)**, i32 (<{ %struct.B*, %struct.A }>*)*** %1
%vfn = getelementptr inbounds i32 (<{ %struct.B*, %struct.A }>*)*, i32 (<{ %struct.B*, %struct.A }>*)** %vtable, i32 1
- %2 = load i32 (<{ %struct.B*, %struct.A }>*)** %vfn
+ %2 = load i32 (<{ %struct.B*, %struct.A }>*)*, i32 (<{ %struct.B*, %struct.A }>*)** %vfn
%3 = musttail call x86_stdcallcc i32 %2(<{ %struct.B*, %struct.A }>* inalloca %0)
ret i32 %3
}
@@ -116,9 +116,9 @@ entry:
define x86_fastcallcc i32 @fastcall_thunk(%struct.B* inreg %this, <{ %struct.A }>* inalloca) {
entry:
%1 = bitcast %struct.B* %this to i32 (%struct.B*, <{ %struct.A }>*)***
- %vtable = load i32 (%struct.B*, <{ %struct.A }>*)*** %1
+ %vtable = load i32 (%struct.B*, <{ %struct.A }>*)**, i32 (%struct.B*, <{ %struct.A }>*)*** %1
%vfn = getelementptr inbounds i32 (%struct.B*, <{ %struct.A }>*)*, i32 (%struct.B*, <{ %struct.A }>*)** %vtable, i32 1
- %2 = load i32 (%struct.B*, <{ %struct.A }>*)** %vfn
+ %2 = load i32 (%struct.B*, <{ %struct.A }>*)*, i32 (%struct.B*, <{ %struct.A }>*)** %vfn
%3 = musttail call x86_fastcallcc i32 %2(%struct.B* inreg %this, <{ %struct.A }>* inalloca %0)
ret i32 %3
}
diff --git a/llvm/test/CodeGen/X86/musttail-varargs.ll b/llvm/test/CodeGen/X86/musttail-varargs.ll
index f72160ff924..52115b28199 100644
--- a/llvm/test/CodeGen/X86/musttail-varargs.ll
+++ b/llvm/test/CodeGen/X86/musttail-varargs.ll
@@ -107,19 +107,19 @@ define void @g_thunk(i8* %fptr_i8, ...) {
define void @h_thunk(%struct.Foo* %this, ...) {
%cond_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 0
- %cond = load i1* %cond_p
+ %cond = load i1, i1* %cond_p
br i1 %cond, label %then, label %else
then:
%a_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 1
- %a_i8 = load i8** %a_p
+ %a_i8 = load i8*, i8** %a_p
%a = bitcast i8* %a_i8 to void (%struct.Foo*, ...)*
musttail call void (%struct.Foo*, ...)* %a(%struct.Foo* %this, ...)
ret void
else:
%b_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 2
- %b_i8 = load i8** %b_p
+ %b_i8 = load i8*, i8** %b_p
%b = bitcast i8* %b_i8 to void (%struct.Foo*, ...)*
store i32 42, i32* @g
musttail call void (%struct.Foo*, ...)* %b(%struct.Foo* %this, ...)
diff --git a/llvm/test/CodeGen/X86/nancvt.ll b/llvm/test/CodeGen/X86/nancvt.ll
index 0c1f8b9d75e..9222f6b7682 100644
--- a/llvm/test/CodeGen/X86/nancvt.ll
+++ b/llvm/test/CodeGen/X86/nancvt.ll
@@ -29,38 +29,38 @@ entry:
br label %bb23
bb: ; preds = %bb23
- %tmp = load i32* %i, align 4 ; <i32> [#uses=1]
+ %tmp = load i32, i32* %i, align 4 ; <i32> [#uses=1]
%tmp1 = getelementptr [3 x i32], [3 x i32]* @fnan, i32 0, i32 %tmp ; <i32*> [#uses=1]
- %tmp2 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
+ %tmp2 = load i32, i32* %tmp1, align 4 ; <i32> [#uses=1]
%tmp3 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
%tmp34 = bitcast float* %tmp3 to i32* ; <i32*> [#uses=1]
store i32 %tmp2, i32* %tmp34, align 4
%tmp5 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
- %tmp6 = load float* %tmp5, align 4 ; <float> [#uses=1]
+ %tmp6 = load float, float* %tmp5, align 4 ; <float> [#uses=1]
%tmp67 = fpext float %tmp6 to double ; <double> [#uses=1]
%tmp8 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
store double %tmp67, double* %tmp8, align 8
%tmp9 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp910 = bitcast double* %tmp9 to i64* ; <i64*> [#uses=1]
- %tmp11 = load i64* %tmp910, align 8 ; <i64> [#uses=1]
+ %tmp11 = load i64, i64* %tmp910, align 8 ; <i64> [#uses=1]
%tmp1112 = trunc i64 %tmp11 to i32 ; <i32> [#uses=1]
%tmp13 = and i32 %tmp1112, -1 ; <i32> [#uses=1]
%tmp14 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp1415 = bitcast double* %tmp14 to i64* ; <i64*> [#uses=1]
- %tmp16 = load i64* %tmp1415, align 8 ; <i64> [#uses=1]
+ %tmp16 = load i64, i64* %tmp1415, align 8 ; <i64> [#uses=1]
%.cast = zext i32 32 to i64 ; <i64> [#uses=1]
%tmp17 = ashr i64 %tmp16, %.cast ; <i64> [#uses=1]
%tmp1718 = trunc i64 %tmp17 to i32 ; <i32> [#uses=1]
%tmp19 = getelementptr [10 x i8], [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
store volatile i32 %tmp1718, i32* @var
store volatile i32 %tmp13, i32* @var
- %tmp21 = load i32* %i, align 4 ; <i32> [#uses=1]
+ %tmp21 = load i32, i32* %i, align 4 ; <i32> [#uses=1]
%tmp22 = add i32 %tmp21, 1 ; <i32> [#uses=1]
store i32 %tmp22, i32* %i, align 4
br label %bb23
bb23: ; preds = %bb, %entry
- %tmp24 = load i32* %i, align 4 ; <i32> [#uses=1]
+ %tmp24 = load i32, i32* %i, align 4 ; <i32> [#uses=1]
%tmp25 = icmp sle i32 %tmp24, 2 ; <i1> [#uses=1]
%tmp2526 = zext i1 %tmp25 to i8 ; <i8> [#uses=1]
%toBool = icmp ne i8 %tmp2526, 0 ; <i1> [#uses=1]
@@ -71,29 +71,29 @@ bb27: ; preds = %bb23
br label %bb46
bb28: ; preds = %bb46
- %tmp29 = load i32* %i, align 4 ; <i32> [#uses=1]
+ %tmp29 = load i32, i32* %i, align 4 ; <i32> [#uses=1]
%tmp30 = getelementptr [3 x i64], [3 x i64]* @dnan, i32 0, i32 %tmp29 ; <i64*> [#uses=1]
- %tmp31 = load i64* %tmp30, align 8 ; <i64> [#uses=1]
+ %tmp31 = load i64, i64* %tmp30, align 8 ; <i64> [#uses=1]
%tmp32 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp3233 = bitcast double* %tmp32 to i64* ; <i64*> [#uses=1]
store i64 %tmp31, i64* %tmp3233, align 8
%tmp35 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp36 = load double* %tmp35, align 8 ; <double> [#uses=1]
+ %tmp36 = load double, double* %tmp35, align 8 ; <double> [#uses=1]
%tmp3637 = fptrunc double %tmp36 to float ; <float> [#uses=1]
%tmp38 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
store float %tmp3637, float* %tmp38, align 4
%tmp39 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
%tmp3940 = bitcast float* %tmp39 to i32* ; <i32*> [#uses=1]
- %tmp41 = load i32* %tmp3940, align 4 ; <i32> [#uses=1]
+ %tmp41 = load i32, i32* %tmp3940, align 4 ; <i32> [#uses=1]
%tmp42 = getelementptr [6 x i8], [6 x i8]* @.str1, i32 0, i32 0 ; <i8*> [#uses=1]
store volatile i32 %tmp41, i32* @var
- %tmp44 = load i32* %i, align 4 ; <i32> [#uses=1]
+ %tmp44 = load i32, i32* %i, align 4 ; <i32> [#uses=1]
%tmp45 = add i32 %tmp44, 1 ; <i32> [#uses=1]
store i32 %tmp45, i32* %i, align 4
br label %bb46
bb46: ; preds = %bb28, %bb27
- %tmp47 = load i32* %i, align 4 ; <i32> [#uses=1]
+ %tmp47 = load i32, i32* %i, align 4 ; <i32> [#uses=1]
%tmp48 = icmp sle i32 %tmp47, 2 ; <i1> [#uses=1]
%tmp4849 = zext i1 %tmp48 to i8 ; <i8> [#uses=1]
%toBool50 = icmp ne i8 %tmp4849, 0 ; <i1> [#uses=1]
@@ -104,38 +104,38 @@ bb51: ; preds = %bb46
br label %bb78
bb52: ; preds = %bb78
- %tmp53 = load i32* %i, align 4 ; <i32> [#uses=1]
+ %tmp53 = load i32, i32* %i, align 4 ; <i32> [#uses=1]
%tmp54 = getelementptr [3 x i32], [3 x i32]* @fsnan, i32 0, i32 %tmp53 ; <i32*> [#uses=1]
- %tmp55 = load i32* %tmp54, align 4 ; <i32> [#uses=1]
+ %tmp55 = load i32, i32* %tmp54, align 4 ; <i32> [#uses=1]
%tmp56 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
%tmp5657 = bitcast float* %tmp56 to i32* ; <i32*> [#uses=1]
store i32 %tmp55, i32* %tmp5657, align 4
%tmp58 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
- %tmp59 = load float* %tmp58, align 4 ; <float> [#uses=1]
+ %tmp59 = load float, float* %tmp58, align 4 ; <float> [#uses=1]
%tmp5960 = fpext float %tmp59 to double ; <double> [#uses=1]
%tmp61 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
store double %tmp5960, double* %tmp61, align 8
%tmp62 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp6263 = bitcast double* %tmp62 to i64* ; <i64*> [#uses=1]
- %tmp64 = load i64* %tmp6263, align 8 ; <i64> [#uses=1]
+ %tmp64 = load i64, i64* %tmp6263, align 8 ; <i64> [#uses=1]
%tmp6465 = trunc i64 %tmp64 to i32 ; <i32> [#uses=1]
%tmp66 = and i32 %tmp6465, -1 ; <i32> [#uses=1]
%tmp68 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp6869 = bitcast double* %tmp68 to i64* ; <i64*> [#uses=1]
- %tmp70 = load i64* %tmp6869, align 8 ; <i64> [#uses=1]
+ %tmp70 = load i64, i64* %tmp6869, align 8 ; <i64> [#uses=1]
%.cast71 = zext i32 32 to i64 ; <i64> [#uses=1]
%tmp72 = ashr i64 %tmp70, %.cast71 ; <i64> [#uses=1]
%tmp7273 = trunc i64 %tmp72 to i32 ; <i32> [#uses=1]
%tmp74 = getelementptr [10 x i8], [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
store volatile i32 %tmp7273, i32* @var
store volatile i32 %tmp66, i32* @var
- %tmp76 = load i32* %i, align 4 ; <i32> [#uses=1]
+ %tmp76 = load i32, i32* %i, align 4 ; <i32> [#uses=1]
%tmp77 = add i32 %tmp76, 1 ; <i32> [#uses=1]
store i32 %tmp77, i32* %i, align 4
br label %bb78
bb78: ; preds = %bb52, %bb51
- %tmp79 = load i32* %i, align 4 ; <i32> [#uses=1]
+ %tmp79 = load i32, i32* %i, align 4 ; <i32> [#uses=1]
%tmp80 = icmp sle i32 %tmp79, 2 ; <i1> [#uses=1]
%tmp8081 = zext i1 %tmp80 to i8 ; <i8> [#uses=1]
%toBool82 = icmp ne i8 %tmp8081, 0 ; <i1> [#uses=1]
@@ -146,29 +146,29 @@ bb83: ; preds = %bb78
br label %bb101
bb84: ; preds = %bb101
- %tmp85 = load i32* %i, align 4 ; <i32> [#uses=1]
+ %tmp85 = load i32, i32* %i, align 4 ; <i32> [#uses=1]
%tmp86 = getelementptr [3 x i64], [3 x i64]* @dsnan, i32 0, i32 %tmp85 ; <i64*> [#uses=1]
- %tmp87 = load i64* %tmp86, align 8 ; <i64> [#uses=1]
+ %tmp87 = load i64, i64* %tmp86, align 8 ; <i64> [#uses=1]
%tmp88 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp8889 = bitcast double* %tmp88 to i64* ; <i64*> [#uses=1]
store i64 %tmp87, i64* %tmp8889, align 8
%tmp90 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp91 = load double* %tmp90, align 8 ; <double> [#uses=1]
+ %tmp91 = load double, double* %tmp90, align 8 ; <double> [#uses=1]
%tmp9192 = fptrunc double %tmp91 to float ; <float> [#uses=1]
%tmp93 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
store float %tmp9192, float* %tmp93, align 4
%tmp94 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
%tmp9495 = bitcast float* %tmp94 to i32* ; <i32*> [#uses=1]
- %tmp96 = load i32* %tmp9495, align 4 ; <i32> [#uses=1]
+ %tmp96 = load i32, i32* %tmp9495, align 4 ; <i32> [#uses=1]
%tmp97 = getelementptr [6 x i8], [6 x i8]* @.str1, i32 0, i32 0 ; <i8*> [#uses=1]
store volatile i32 %tmp96, i32* @var
- %tmp99 = load i32* %i, align 4 ; <i32> [#uses=1]
+ %tmp99 = load i32, i32* %i, align 4 ; <i32> [#uses=1]
%tmp100 = add i32 %tmp99, 1 ; <i32> [#uses=1]
store i32 %tmp100, i32* %i, align 4
br label %bb101
bb101: ; preds = %bb84, %bb83
- %tmp102 = load i32* %i, align 4 ; <i32> [#uses=1]
+ %tmp102 = load i32, i32* %i, align 4 ; <i32> [#uses=1]
%tmp103 = icmp sle i32 %tmp102, 2 ; <i1> [#uses=1]
%tmp103104 = zext i1 %tmp103 to i8 ; <i8> [#uses=1]
%toBool105 = icmp ne i8 %tmp103104, 0 ; <i1> [#uses=1]
@@ -178,6 +178,6 @@ bb106: ; preds = %bb101
br label %return
return: ; preds = %bb106
- %retval107 = load i32* %retval ; <i32> [#uses=1]
+ %retval107 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval107
}
diff --git a/llvm/test/CodeGen/X86/narrow-shl-load.ll b/llvm/test/CodeGen/X86/narrow-shl-load.ll
index 5175bfc2bcb..9dc0d749cb2 100644
--- a/llvm/test/CodeGen/X86/narrow-shl-load.ll
+++ b/llvm/test/CodeGen/X86/narrow-shl-load.ll
@@ -11,7 +11,7 @@ bb.nph:
br label %while.cond
while.cond: ; preds = %while.cond, %bb.nph
- %tmp6 = load i32* undef, align 4
+ %tmp6 = load i32, i32* undef, align 4
%and = or i64 undef, undef
%conv11 = zext i32 undef to i64
%conv14 = zext i32 %tmp6 to i64
@@ -20,7 +20,7 @@ while.cond: ; preds = %while.cond, %bb.nph
%and17 = or i64 %shl15.masked, %conv11
%add = add i64 %and17, 1
%xor = xor i64 %add, %and
- %tmp20 = load i64* undef, align 8
+ %tmp20 = load i64, i64* undef, align 8
%add21 = add i64 %xor, %tmp20
%conv22 = trunc i64 %add21 to i32
store i32 %conv22, i32* undef, align 4
@@ -34,7 +34,7 @@ while.end: ; preds = %while.cond
; PR8757
define i32 @test3(i32 *%P) nounwind ssp {
store volatile i32 128, i32* %P
- %tmp4.pre = load i32* %P
+ %tmp4.pre = load i32, i32* %P
%phitmp = trunc i32 %tmp4.pre to i16
%phitmp13 = shl i16 %phitmp, 8
%phitmp14 = ashr i16 %phitmp13, 8
diff --git a/llvm/test/CodeGen/X86/narrow_op-1.ll b/llvm/test/CodeGen/X86/narrow_op-1.ll
index 89ae3f1a335..836a23d131e 100644
--- a/llvm/test/CodeGen/X86/narrow_op-1.ll
+++ b/llvm/test/CodeGen/X86/narrow_op-1.ll
@@ -5,7 +5,7 @@
define void @t1() nounwind optsize ssp {
entry:
- %0 = load i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
+ %0 = load i32, i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
%1 = or i32 %0, 65536
store i32 %1, i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
ret void
@@ -17,7 +17,7 @@ entry:
define void @t2() nounwind optsize ssp {
entry:
- %0 = load i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
+ %0 = load i32, i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
%1 = or i32 %0, 16842752
store i32 %1, i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
ret void
diff --git a/llvm/test/CodeGen/X86/negate-add-zero.ll b/llvm/test/CodeGen/X86/negate-add-zero.ll
index 923c74b80c4..629395e126b 100644
--- a/llvm/test/CodeGen/X86/negate-add-zero.ll
+++ b/llvm/test/CodeGen/X86/negate-add-zero.ll
@@ -828,22 +828,22 @@ declare void @_ZN21HNodeTranslateRotate311toCartesianEv(%struct.HNodeTranslateRo
define linkonce void @_ZN21HNodeTranslateRotate36setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTranslateRotate3* %this, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"* %velv) {
%1 = getelementptr double, double* null, i32 -1 ; <double*> [#uses=1]
- %2 = load double* %1, align 8 ; <double> [#uses=1]
- %3 = load double* null, align 8 ; <double> [#uses=2]
- %4 = load double* null, align 8 ; <double> [#uses=2]
- %5 = load double* null, align 8 ; <double> [#uses=3]
+ %2 = load double, double* %1, align 8 ; <double> [#uses=1]
+ %3 = load double, double* null, align 8 ; <double> [#uses=2]
+ %4 = load double, double* null, align 8 ; <double> [#uses=2]
+ %5 = load double, double* null, align 8 ; <double> [#uses=3]
%6 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 0 ; <double*> [#uses=0]
%7 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 1 ; <double*> [#uses=0]
%8 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=0]
%9 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=0]
- %10 = load double* null, align 8 ; <double> [#uses=2]
+ %10 = load double, double* null, align 8 ; <double> [#uses=2]
%11 = fsub double -0.000000e+00, %10 ; <double> [#uses=1]
- %12 = load double* null, align 8 ; <double> [#uses=2]
+ %12 = load double, double* null, align 8 ; <double> [#uses=2]
%13 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 1, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=1]
- %14 = load double* %13, align 8 ; <double> [#uses=2]
+ %14 = load double, double* %13, align 8 ; <double> [#uses=2]
%15 = fsub double -0.000000e+00, %14 ; <double> [#uses=1]
%16 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 1, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
- %17 = load double* %16, align 8 ; <double> [#uses=2]
+ %17 = load double, double* %16, align 8 ; <double> [#uses=2]
%18 = fsub double -0.000000e+00, %17 ; <double> [#uses=1]
%19 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 0 ; <double*> [#uses=0]
%20 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=0]
diff --git a/llvm/test/CodeGen/X86/no-cmov.ll b/llvm/test/CodeGen/X86/no-cmov.ll
index e13edf26cad..8fc0f7075c0 100644
--- a/llvm/test/CodeGen/X86/no-cmov.ll
+++ b/llvm/test/CodeGen/X86/no-cmov.ll
@@ -2,7 +2,7 @@
define i32 @test1(i32 %g, i32* %j) {
%tobool = icmp eq i32 %g, 0
- %cmp = load i32* %j, align 4
+ %cmp = load i32, i32* %j, align 4
%retval.0 = select i1 %tobool, i32 1, i32 %cmp
ret i32 %retval.0
diff --git a/llvm/test/CodeGen/X86/norex-subreg.ll b/llvm/test/CodeGen/X86/norex-subreg.ll
index fb41dede287..dd47af9ae9a 100644
--- a/llvm/test/CodeGen/X86/norex-subreg.ll
+++ b/llvm/test/CodeGen/X86/norex-subreg.ll
@@ -15,17 +15,17 @@ target triple = "x86_64-apple-macosx10.7"
define void @f() nounwind uwtable ssp {
entry:
- %0 = load i32* undef, align 4
+ %0 = load i32, i32* undef, align 4
%add = add i32 0, %0
%conv1 = trunc i32 %add to i16
%bf.value = and i16 %conv1, 255
%1 = and i16 %bf.value, 255
%2 = shl i16 %1, 8
- %3 = load i16* undef, align 1
+ %3 = load i16, i16* undef, align 1
%4 = and i16 %3, 255
%5 = or i16 %4, %2
store i16 %5, i16* undef, align 1
- %6 = load i16* undef, align 1
+ %6 = load i16, i16* undef, align 1
%7 = lshr i16 %6, 8
%bf.clear2 = and i16 %7, 255
%conv3 = zext i16 %bf.clear2 to i32
diff --git a/llvm/test/CodeGen/X86/nosse-error1.ll b/llvm/test/CodeGen/X86/nosse-error1.ll
index 291379eeaec..7617d59f4a0 100644
--- a/llvm/test/CodeGen/X86/nosse-error1.ll
+++ b/llvm/test/CodeGen/X86/nosse-error1.ll
@@ -12,16 +12,16 @@ target triple = "x86_64-unknown-linux-gnu"
define void @test() nounwind {
entry:
- %0 = load float* @f, align 4 ; <float> [#uses=1]
+ %0 = load float, float* @f, align 4 ; <float> [#uses=1]
%1 = tail call float @foo1(float %0) nounwind ; <float> [#uses=1]
store float %1, float* @f, align 4
- %2 = load double* @d, align 8 ; <double> [#uses=1]
+ %2 = load double, double* @d, align 8 ; <double> [#uses=1]
%3 = tail call double @foo2(double %2) nounwind ; <double> [#uses=1]
store double %3, double* @d, align 8
- %4 = load float* @f, align 4 ; <float> [#uses=1]
+ %4 = load float, float* @f, align 4 ; <float> [#uses=1]
%5 = tail call float @foo3(float %4) nounwind ; <float> [#uses=1]
store float %5, float* @f, align 4
- %6 = load double* @d, align 8 ; <double> [#uses=1]
+ %6 = load double, double* @d, align 8 ; <double> [#uses=1]
%7 = tail call double @foo4(double %6) nounwind ; <double> [#uses=1]
store double %7, double* @d, align 8
ret void
diff --git a/llvm/test/CodeGen/X86/nosse-error2.ll b/llvm/test/CodeGen/X86/nosse-error2.ll
index a7cee2dd821..3da80aae686 100644
--- a/llvm/test/CodeGen/X86/nosse-error2.ll
+++ b/llvm/test/CodeGen/X86/nosse-error2.ll
@@ -12,16 +12,16 @@ target triple = "i386-unknown-linux-gnu"
define void @test() nounwind {
entry:
- %0 = load float* @f, align 4 ; <float> [#uses=1]
+ %0 = load float, float* @f, align 4 ; <float> [#uses=1]
%1 = tail call inreg float @foo1(float inreg %0) nounwind ; <float> [#uses=1]
store float %1, float* @f, align 4
- %2 = load double* @d, align 8 ; <double> [#uses=1]
+ %2 = load double, double* @d, align 8 ; <double> [#uses=1]
%3 = tail call inreg double @foo2(double inreg %2) nounwind ; <double> [#uses=1]
store double %3, double* @d, align 8
- %4 = load float* @f, align 4 ; <float> [#uses=1]
+ %4 = load float, float* @f, align 4 ; <float> [#uses=1]
%5 = tail call inreg float @foo3(float inreg %4) nounwind ; <float> [#uses=1]
store float %5, float* @f, align 4
- %6 = load double* @d, align 8 ; <double> [#uses=1]
+ %6 = load double, double* @d, align 8 ; <double> [#uses=1]
%7 = tail call inreg double @foo4(double inreg %6) nounwind ; <double> [#uses=1]
store double %7, double* @d, align 8
ret void
diff --git a/llvm/test/CodeGen/X86/nosse-varargs.ll b/llvm/test/CodeGen/X86/nosse-varargs.ll
index c3ea1c34b25..8a81d0e7195 100644
--- a/llvm/test/CodeGen/X86/nosse-varargs.ll
+++ b/llvm/test/CodeGen/X86/nosse-varargs.ll
@@ -13,13 +13,13 @@ entry:
%ap12 = bitcast [1 x %struct.__va_list_tag]* %ap to i8* ; <i8*> [#uses=2]
call void @llvm.va_start(i8* %ap12)
%0 = getelementptr [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 0 ; <i32*> [#uses=2]
- %1 = load i32* %0, align 8 ; <i32> [#uses=3]
+ %1 = load i32, i32* %0, align 8 ; <i32> [#uses=3]
%2 = icmp ult i32 %1, 48 ; <i1> [#uses=1]
br i1 %2, label %bb, label %bb3
bb: ; preds = %entry
%3 = getelementptr [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 3 ; <i8**> [#uses=1]
- %4 = load i8** %3, align 8 ; <i8*> [#uses=1]
+ %4 = load i8*, i8** %3, align 8 ; <i8*> [#uses=1]
%5 = inttoptr i32 %1 to i8* ; <i8*> [#uses=1]
%6 = ptrtoint i8* %5 to i64 ; <i64> [#uses=1]
%ctg2 = getelementptr i8, i8* %4, i64 %6 ; <i8*> [#uses=1]
@@ -29,7 +29,7 @@ bb: ; preds = %entry
bb3: ; preds = %entry
%8 = getelementptr [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 2 ; <i8**> [#uses=2]
- %9 = load i8** %8, align 8 ; <i8*> [#uses=2]
+ %9 = load i8*, i8** %8, align 8 ; <i8*> [#uses=2]
%10 = getelementptr i8, i8* %9, i64 8 ; <i8*> [#uses=1]
store i8* %10, i8** %8, align 8
br label %bb4
@@ -37,7 +37,7 @@ bb3: ; preds = %entry
bb4: ; preds = %bb3, %bb
%addr.0.0 = phi i8* [ %ctg2, %bb ], [ %9, %bb3 ] ; <i8*> [#uses=1]
%11 = bitcast i8* %addr.0.0 to i32* ; <i32*> [#uses=1]
- %12 = load i32* %11, align 4 ; <i32> [#uses=1]
+ %12 = load i32, i32* %11, align 4 ; <i32> [#uses=1]
call void @llvm.va_end(i8* %ap12)
ret i32 %12
}
diff --git a/llvm/test/CodeGen/X86/object-size.ll b/llvm/test/CodeGen/X86/object-size.ll
index 0610f0b6de2..12f99db1cfc 100644
--- a/llvm/test/CodeGen/X86/object-size.ll
+++ b/llvm/test/CodeGen/X86/object-size.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-apple-darwin10.0"
define void @bar() nounwind ssp {
entry:
- %tmp = load i8** @p ; <i8*> [#uses=1]
+ %tmp = load i8*, i8** @p ; <i8*> [#uses=1]
%0 = call i64 @llvm.objectsize.i64.p0i8(i8* %tmp, i1 0) ; <i64> [#uses=1]
%cmp = icmp ne i64 %0, -1 ; <i1> [#uses=1]
; CHECK: movq $-1, [[RAX:%r..]]
@@ -17,14 +17,14 @@ entry:
br i1 %cmp, label %cond.true, label %cond.false
cond.true: ; preds = %entry
- %tmp1 = load i8** @p ; <i8*> [#uses=1]
- %tmp2 = load i8** @p ; <i8*> [#uses=1]
+ %tmp1 = load i8*, i8** @p ; <i8*> [#uses=1]
+ %tmp2 = load i8*, i8** @p ; <i8*> [#uses=1]
%1 = call i64 @llvm.objectsize.i64.p0i8(i8* %tmp2, i1 1) ; <i64> [#uses=1]
%call = call i8* @__strcpy_chk(i8* %tmp1, i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 %1) ssp ; <i8*> [#uses=1]
br label %cond.end
cond.false: ; preds = %entry
- %tmp3 = load i8** @p ; <i8*> [#uses=1]
+ %tmp3 = load i8*, i8** @p ; <i8*> [#uses=1]
%call4 = call i8* @__inline_strcpy_chk(i8* %tmp3, i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0)) ssp ; <i8*> [#uses=1]
br label %cond.end
@@ -44,12 +44,12 @@ entry:
%__src.addr = alloca i8* ; <i8**> [#uses=2]
store i8* %__dest, i8** %__dest.addr
store i8* %__src, i8** %__src.addr
- %tmp = load i8** %__dest.addr ; <i8*> [#uses=1]
- %tmp1 = load i8** %__src.addr ; <i8*> [#uses=1]
- %tmp2 = load i8** %__dest.addr ; <i8*> [#uses=1]
+ %tmp = load i8*, i8** %__dest.addr ; <i8*> [#uses=1]
+ %tmp1 = load i8*, i8** %__src.addr ; <i8*> [#uses=1]
+ %tmp2 = load i8*, i8** %__dest.addr ; <i8*> [#uses=1]
%0 = call i64 @llvm.objectsize.i64.p0i8(i8* %tmp2, i1 1) ; <i64> [#uses=1]
%call = call i8* @__strcpy_chk(i8* %tmp, i8* %tmp1, i64 %0) ssp ; <i8*> [#uses=1]
store i8* %call, i8** %retval
- %1 = load i8** %retval ; <i8*> [#uses=1]
+ %1 = load i8*, i8** %retval ; <i8*> [#uses=1]
ret i8* %1
}
diff --git a/llvm/test/CodeGen/X86/opt-ext-uses.ll b/llvm/test/CodeGen/X86/opt-ext-uses.ll
index 72fb38b27df..5d05ad9c454 100644
--- a/llvm/test/CodeGen/X86/opt-ext-uses.ll
+++ b/llvm/test/CodeGen/X86/opt-ext-uses.ll
@@ -2,7 +2,7 @@
define signext i16 @t() {
entry:
- %tmp180 = load i16* null, align 2 ; <i16> [#uses=3]
+ %tmp180 = load i16, i16* null, align 2 ; <i16> [#uses=3]
%tmp180181 = sext i16 %tmp180 to i32 ; <i32> [#uses=1]
%tmp182 = add i16 %tmp180, 10
%tmp185 = icmp slt i16 %tmp182, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/optimize-max-0.ll b/llvm/test/CodeGen/X86/optimize-max-0.ll
index effd53a8c7f..006592aaade 100644
--- a/llvm/test/CodeGen/X86/optimize-max-0.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-0.ll
@@ -33,7 +33,7 @@ bb6: ; preds = %bb7, %bb.nph7
%8 = shl i32 %x.06, 1
%9 = add i32 %6, %8
%10 = getelementptr i8, i8* %r, i32 %9
- %11 = load i8* %10, align 1
+ %11 = load i8, i8* %10, align 1
%12 = getelementptr i8, i8* %j, i32 %7
store i8 %11, i8* %12, align 1
br label %bb7
@@ -104,7 +104,7 @@ bb14: ; preds = %bb15, %bb.nph3
%25 = shl i32 %x.12, 2
%26 = add i32 %25, %21
%27 = getelementptr i8, i8* %r, i32 %26
- %28 = load i8* %27, align 1
+ %28 = load i8, i8* %27, align 1
%.sum = add i32 %22, %x.12
%29 = getelementptr i8, i8* %j, i32 %.sum
store i8 %28, i8* %29, align 1
@@ -112,7 +112,7 @@ bb14: ; preds = %bb15, %bb.nph3
%31 = or i32 %30, 2
%32 = add i32 %31, %21
%33 = getelementptr i8, i8* %r, i32 %32
- %34 = load i8* %33, align 1
+ %34 = load i8, i8* %33, align 1
%.sum6 = add i32 %23, %x.12
%35 = getelementptr i8, i8* %j, i32 %.sum6
store i8 %34, i8* %35, align 1
@@ -258,7 +258,7 @@ bb6: ; preds = %bb7, %bb.nph7
%8 = shl i32 %x.06, 1
%9 = add i32 %6, %8
%10 = getelementptr i8, i8* %r, i32 %9
- %11 = load i8* %10, align 1
+ %11 = load i8, i8* %10, align 1
%12 = getelementptr i8, i8* %j, i32 %7
store i8 %11, i8* %12, align 1
br label %bb7
@@ -329,7 +329,7 @@ bb14: ; preds = %bb15, %bb.nph3
%25 = shl i32 %x.12, 2
%26 = add i32 %25, %21
%27 = getelementptr i8, i8* %r, i32 %26
- %28 = load i8* %27, align 1
+ %28 = load i8, i8* %27, align 1
%.sum = add i32 %22, %x.12
%29 = getelementptr i8, i8* %j, i32 %.sum
store i8 %28, i8* %29, align 1
@@ -337,7 +337,7 @@ bb14: ; preds = %bb15, %bb.nph3
%31 = or i32 %30, 2
%32 = add i32 %31, %21
%33 = getelementptr i8, i8* %r, i32 %32
- %34 = load i8* %33, align 1
+ %34 = load i8, i8* %33, align 1
%.sum6 = add i32 %23, %x.12
%35 = getelementptr i8, i8* %j, i32 %.sum6
store i8 %34, i8* %35, align 1
diff --git a/llvm/test/CodeGen/X86/optimize-max-2.ll b/llvm/test/CodeGen/X86/optimize-max-2.ll
index 5e6c8ee91aa..45b542e2267 100644
--- a/llvm/test/CodeGen/X86/optimize-max-2.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-2.ll
@@ -20,7 +20,7 @@ entry:
bb4: ; preds = %bb4, %entry
%i.07 = phi i64 [ 0, %entry ], [ %2, %bb4 ] ; <i64> [#uses=2]
%scevgep = getelementptr double, double* %p, i64 %i.07 ; <double*> [#uses=2]
- %0 = load double* %scevgep, align 8 ; <double> [#uses=1]
+ %0 = load double, double* %scevgep, align 8 ; <double> [#uses=1]
%1 = fmul double %0, 2.000000e+00 ; <double> [#uses=1]
store double %1, double* %scevgep, align 8
%2 = add i64 %i.07, 1 ; <i64> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/optimize-max-3.ll b/llvm/test/CodeGen/X86/optimize-max-3.ll
index 7ffca0a2eaf..71885efbd31 100644
--- a/llvm/test/CodeGen/X86/optimize-max-3.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-3.ll
@@ -21,7 +21,7 @@ for.body.preheader: ; preds = %entry
for.body: ; preds = %for.body.preheader, %for.body
%i = phi i64 [ %i.next, %for.body ], [ 0, %for.body.preheader ] ; <i64> [#uses=2]
%arrayidx = getelementptr double, double* %p, i64 %i ; <double*> [#uses=2]
- %t4 = load double* %arrayidx ; <double> [#uses=1]
+ %t4 = load double, double* %arrayidx ; <double> [#uses=1]
%mul = fmul double %t4, 2.200000e+00 ; <double> [#uses=1]
store double %mul, double* %arrayidx
%i.next = add nsw i64 %i, 1 ; <i64> [#uses=2]
diff --git a/llvm/test/CodeGen/X86/packed_struct.ll b/llvm/test/CodeGen/X86/packed_struct.ll
index da6e8f8745f..3670fc68bfd 100644
--- a/llvm/test/CodeGen/X86/packed_struct.ll
+++ b/llvm/test/CodeGen/X86/packed_struct.ll
@@ -17,9 +17,9 @@ target triple = "i686-pc-linux-gnu"
define i32 @foo() nounwind {
entry:
- %tmp = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 1) ; <i32> [#uses=1]
- %tmp3 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 2) ; <i32> [#uses=1]
- %tmp6 = load i32* getelementptr (%struct.anon* @foos, i32 0, i32 3) ; <i32> [#uses=1]
+ %tmp = load i32, i32* getelementptr (%struct.anon* @foos, i32 0, i32 1) ; <i32> [#uses=1]
+ %tmp3 = load i32, i32* getelementptr (%struct.anon* @foos, i32 0, i32 2) ; <i32> [#uses=1]
+ %tmp6 = load i32, i32* getelementptr (%struct.anon* @foos, i32 0, i32 3) ; <i32> [#uses=1]
%tmp4 = add i32 %tmp3, %tmp ; <i32> [#uses=1]
%tmp7 = add i32 %tmp4, %tmp6 ; <i32> [#uses=1]
ret i32 %tmp7
@@ -27,8 +27,8 @@ entry:
define i8 @bar() nounwind {
entry:
- %tmp = load i8* getelementptr ([4 x <{ i32, i8 }>]* @bara, i32 0, i32 0, i32 1) ; <i8> [#uses=1]
- %tmp4 = load i8* getelementptr ([4 x <{ i32, i8 }>]* @bara, i32 0, i32 3, i32 1) ; <i8> [#uses=1]
+ %tmp = load i8, i8* getelementptr ([4 x <{ i32, i8 }>]* @bara, i32 0, i32 0, i32 1) ; <i8> [#uses=1]
+ %tmp4 = load i8, i8* getelementptr ([4 x <{ i32, i8 }>]* @bara, i32 0, i32 3, i32 1) ; <i8> [#uses=1]
%tmp5 = add i8 %tmp4, %tmp ; <i8> [#uses=1]
ret i8 %tmp5
}
diff --git a/llvm/test/CodeGen/X86/palignr-2.ll b/llvm/test/CodeGen/X86/palignr-2.ll
index 4df9a2284cb..8b32387e700 100644
--- a/llvm/test/CodeGen/X86/palignr-2.ll
+++ b/llvm/test/CodeGen/X86/palignr-2.ll
@@ -20,8 +20,8 @@ define void @t2() nounwind ssp {
entry:
; CHECK-LABEL: t2:
; palignr $4, _b, %xmm0
- %0 = load <2 x i64>* bitcast ([4 x i32]* @b to <2 x i64>*), align 16 ; <<2 x i64>> [#uses=1]
- %1 = load <2 x i64>* bitcast ([4 x i32]* @a to <2 x i64>*), align 16 ; <<2 x i64>> [#uses=1]
+ %0 = load <2 x i64>, <2 x i64>* bitcast ([4 x i32]* @b to <2 x i64>*), align 16 ; <<2 x i64>> [#uses=1]
+ %1 = load <2 x i64>, <2 x i64>* bitcast ([4 x i32]* @a to <2 x i64>*), align 16 ; <<2 x i64>> [#uses=1]
%2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 32) nounwind readnone
store <2 x i64> %2, <2 x i64>* bitcast ([4 x i32]* @c to <2 x i64>*), align 16
ret void
diff --git a/llvm/test/CodeGen/X86/patchpoint.ll b/llvm/test/CodeGen/X86/patchpoint.ll
index 05f0bab1ff6..24e324fda47 100644
--- a/llvm/test/CodeGen/X86/patchpoint.ll
+++ b/llvm/test/CodeGen/X86/patchpoint.ll
@@ -47,13 +47,13 @@ define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, i64* %tmp33, i64
entry:
%tmp80 = add i64 %tmp79, -16
%tmp81 = inttoptr i64 %tmp80 to i64*
- %tmp82 = load i64* %tmp81, align 8
+ %tmp82 = load i64, i64* %tmp81, align 8
tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 5, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 15, i32 30, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
- %tmp83 = load i64* %tmp33, align 8
+ %tmp83 = load i64, i64* %tmp33, align 8
%tmp84 = add i64 %tmp83, -24
%tmp85 = inttoptr i64 %tmp84 to i64*
- %tmp86 = load i64* %tmp85, align 8
+ %tmp86 = load i64, i64* %tmp85, align 8
tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 17, i32 5, i64 %arg, i64 %tmp10, i64 %tmp86)
tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 18, i32 30, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
ret i64 10
diff --git a/llvm/test/CodeGen/X86/peep-test-0.ll b/llvm/test/CodeGen/X86/peep-test-0.ll
index 7f06ed8ed8c..1772f008b94 100644
--- a/llvm/test/CodeGen/X86/peep-test-0.ll
+++ b/llvm/test/CodeGen/X86/peep-test-0.ll
@@ -10,7 +10,7 @@ bb:
%indvar = phi i64 [ %n, %entry ], [ %indvar.next, %bb ]
%i.03 = add i64 %indvar, %n
%0 = getelementptr double, double* %d, i64 %i.03
- %1 = load double* %0, align 8
+ %1 = load double, double* %0, align 8
%2 = fmul double %1, 3.000000e+00
store double %2, double* %0, align 8
%indvar.next = add i64 %indvar, 1
diff --git a/llvm/test/CodeGen/X86/peep-test-1.ll b/llvm/test/CodeGen/X86/peep-test-1.ll
index 80f3a877807..7448da3894d 100644
--- a/llvm/test/CodeGen/X86/peep-test-1.ll
+++ b/llvm/test/CodeGen/X86/peep-test-1.ll
@@ -10,7 +10,7 @@ bb:
%indvar = phi i32 [ 0, %0 ], [ %indvar.next, %bb ]
%i.03 = sub i32 %n, %indvar
%1 = getelementptr double, double* %p, i32 %i.03
- %2 = load double* %1, align 4
+ %2 = load double, double* %1, align 4
%3 = fmul double %2, 2.930000e+00
store double %3, double* %1, align 4
%4 = add i32 %i.03, -1
diff --git a/llvm/test/CodeGen/X86/peephole-fold-movsd.ll b/llvm/test/CodeGen/X86/peephole-fold-movsd.ll
index f174e4b07cd..818040a6f02 100644
--- a/llvm/test/CodeGen/X86/peephole-fold-movsd.ll
+++ b/llvm/test/CodeGen/X86/peephole-fold-movsd.ll
@@ -18,9 +18,9 @@ define void @foo1(double %a.coerce0, double %a.coerce1, double %b.coerce0, doubl
%tmpcast = bitcast <2 x double>* %1 to %struct.S1*
call void @foo3(%struct.S1* %tmpcast) #2
%p2 = getelementptr inbounds %struct.S1, %struct.S1* %tmpcast, i64 0, i32 0
- %2 = load double* %p2, align 16
+ %2 = load double, double* %p2, align 16
%p3 = getelementptr inbounds %struct.S1, %struct.S1* %tmpcast, i64 0, i32 1
- %3 = load double* %p3, align 8
+ %3 = load double, double* %p3, align 8
%4 = insertelement <2 x double> undef, double %2, i32 0
%5 = insertelement <2 x double> %4, double 0.000000e+00, i32 1
%6 = insertelement <2 x double> undef, double %3, i32 1
diff --git a/llvm/test/CodeGen/X86/peephole-multiple-folds.ll b/llvm/test/CodeGen/X86/peephole-multiple-folds.ll
index a6cec66c73c..9fcc1a20798 100644
--- a/llvm/test/CodeGen/X86/peephole-multiple-folds.ll
+++ b/llvm/test/CodeGen/X86/peephole-multiple-folds.ll
@@ -13,8 +13,8 @@ loopbody:
; CHECK: vfmadd231ps ({{%rsi|%rdx}}),
%vsum1 = phi <8 x float> [ %vsum1.next, %loopbody ], [ zeroinitializer, %entry ]
%vsum2 = phi <8 x float> [ %vsum2.next, %loopbody ], [ zeroinitializer, %entry ]
- %m1 = load <8 x float>* %p1, align 1
- %m2 = load <8 x float>* %p2, align 1
+ %m1 = load <8 x float>, <8 x float>* %p1, align 1
+ %m2 = load <8 x float>, <8 x float>* %p2, align 1
%vsum1.next = tail call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %m1, <8 x float> zeroinitializer, <8 x float> %vsum1)
%vsum2.next = tail call <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float> %m2, <8 x float> zeroinitializer, <8 x float> %vsum2)
%vsum1.next.1 = extractelement <8 x float> %vsum1.next, i32 0
diff --git a/llvm/test/CodeGen/X86/phi-bit-propagation.ll b/llvm/test/CodeGen/X86/phi-bit-propagation.ll
index bcffe3cfec0..37f3f096556 100644
--- a/llvm/test/CodeGen/X86/phi-bit-propagation.ll
+++ b/llvm/test/CodeGen/X86/phi-bit-propagation.ll
@@ -15,10 +15,10 @@ for.cond: ; preds = %for.inc, %entry
for.body: ; preds = %for.cond
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %conv
- %tmp5 = load i32* %arrayidx, align 4
+ %tmp5 = load i32, i32* %arrayidx, align 4
%conv6 = zext i32 %tmp5 to i64
%rem.i.i.i.i = and i64 %conv6, 63
- %tmp3.i = load i64* %tmp.i.i.i.i, align 8
+ %tmp3.i = load i64, i64* %tmp.i.i.i.i, align 8
%shl.i.i = shl i64 1, %rem.i.i.i.i
%and.i = and i64 %shl.i.i, %tmp3.i
%cmp.i = icmp eq i64 %and.i, 0
diff --git a/llvm/test/CodeGen/X86/phielim-split.ll b/llvm/test/CodeGen/X86/phielim-split.ll
index bcf61c20df7..148b8033161 100644
--- a/llvm/test/CodeGen/X86/phielim-split.ll
+++ b/llvm/test/CodeGen/X86/phielim-split.ll
@@ -19,7 +19,7 @@ entry:
for.cond: ; preds = %entry, %for.cond
%p.addr.0 = phi i8* [ %incdec.ptr, %for.cond ], [ %p, %entry ]
%incdec.ptr = getelementptr inbounds i8, i8* %p.addr.0, i64 1
- %0 = load i8* %p.addr.0, align 1
+ %0 = load i8, i8* %p.addr.0, align 1
%tobool = icmp eq i8 %0, 0
br i1 %tobool, label %for.cond, label %if.end2
diff --git a/llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll b/llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll
index 37eca1ce0a7..e72483e35cb 100644
--- a/llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll
+++ b/llvm/test/CodeGen/X86/phys-reg-local-regalloc.ll
@@ -50,15 +50,15 @@ entry:
store i32 %asmtmp2, i32* %"%eax"
%3 = call i32 asm "", "={ax}"() nounwind ; <i32> [#uses=1]
call void asm sideeffect alignstack "movl $0, $1", "{eax},*m,~{dirflag},~{fpsr},~{flags},~{memory}"(i32 %3, i32* %result) nounwind
- %4 = load i32* %result, align 4 ; <i32> [#uses=1]
+ %4 = load i32, i32* %result, align 4 ; <i32> [#uses=1]
%5 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 %4) nounwind ; <i32> [#uses=0]
store i32 0, i32* %0, align 4
- %6 = load i32* %0, align 4 ; <i32> [#uses=1]
+ %6 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
store i32 %6, i32* %retval, align 4
br label %return
return: ; preds = %entry
- %retval3 = load i32* %retval ; <i32> [#uses=1]
+ %retval3 = load i32, i32* %retval ; <i32> [#uses=1]
ret i32 %retval3
}
diff --git a/llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll b/llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll
index c6af23d149f..74e3d1291c0 100644
--- a/llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll
+++ b/llvm/test/CodeGen/X86/phys_subreg_coalesce-3.ll
@@ -26,7 +26,7 @@ bb: ; preds = %bb, %bb.nph
%j.06 = sub i32 %j.03, %indvar ; <i32> [#uses=1]
%tmp11 = sub i32 %tmp10, %indvar ; <i32> [#uses=1]
%scevgep = getelementptr i32, i32* %ptr, i32 %tmp11 ; <i32*> [#uses=1]
- %1 = load i32* %scevgep, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* %scevgep, align 4 ; <i32> [#uses=1]
%2 = ashr i32 %j.06, %shifts ; <i32> [#uses=1]
%3 = and i32 %2, 65535 ; <i32> [#uses=1]
%4 = getelementptr inbounds i32, i32* %quadrant, i32 %1 ; <i32*> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/pic.ll b/llvm/test/CodeGen/X86/pic.ll
index da1e2248065..faaf73b18d9 100644
--- a/llvm/test/CodeGen/X86/pic.ll
+++ b/llvm/test/CodeGen/X86/pic.ll
@@ -7,7 +7,7 @@
define void @test0() nounwind {
entry:
store i32* @dst, i32** @ptr
- %tmp.s = load i32* @src
+ %tmp.s = load i32, i32* @src
store i32 %tmp.s, i32* @dst
ret void
@@ -29,7 +29,7 @@ entry:
define void @test1() nounwind {
entry:
store i32* @dst2, i32** @ptr2
- %tmp.s = load i32* @src2
+ %tmp.s = load i32, i32* @src2
store i32 %tmp.s, i32* @dst2
ret void
@@ -71,7 +71,7 @@ define void @test3() nounwind {
entry:
%tmp = call void(...)*(...)* @afoo()
store void(...)* %tmp, void(...)** @pfoo
- %tmp1 = load void(...)** @pfoo
+ %tmp1 = load void(...)*, void(...)** @pfoo
call void(...)* %tmp1()
ret void
; LINUX-LABEL: test3:
@@ -107,7 +107,7 @@ declare void @foo(...)
define void @test5() nounwind {
entry:
store i32* @dst6, i32** @ptr6
- %tmp.s = load i32* @src6
+ %tmp.s = load i32, i32* @src6
store i32 %tmp.s, i32* @dst6
ret void
diff --git a/llvm/test/CodeGen/X86/pic_jumptable.ll b/llvm/test/CodeGen/X86/pic_jumptable.ll
index d66ff0c59db..dd0a8ac0431 100644
--- a/llvm/test/CodeGen/X86/pic_jumptable.ll
+++ b/llvm/test/CodeGen/X86/pic_jumptable.ll
@@ -31,7 +31,7 @@ entry:
%Y_addr = alloca i32 ; <i32*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store i32 %Y, i32* %Y_addr
- %tmp = load i32* %Y_addr ; <i32> [#uses=1]
+ %tmp = load i32, i32* %Y_addr ; <i32> [#uses=1]
switch i32 %tmp, label %bb10 [
i32 0, label %bb3
i32 1, label %bb
diff --git a/llvm/test/CodeGen/X86/pmovext.ll b/llvm/test/CodeGen/X86/pmovext.ll
index f0e468f53cb..6c76949fb78 100644
--- a/llvm/test/CodeGen/X86/pmovext.ll
+++ b/llvm/test/CodeGen/X86/pmovext.ll
@@ -8,7 +8,7 @@
;CHECK-NEXT: ret
define void @intrin_pmov(i16* noalias %dest, i8* noalias %src) nounwind uwtable ssp {
%1 = bitcast i8* %src to <2 x i64>*
- %2 = load <2 x i64>* %1, align 16
+ %2 = load <2 x i64>, <2 x i64>* %1, align 16
%3 = bitcast <2 x i64> %2 to <16 x i8>
%4 = tail call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %3) nounwind
%5 = bitcast i16* %dest to i8*
diff --git a/llvm/test/CodeGen/X86/pmovsx-inreg.ll b/llvm/test/CodeGen/X86/pmovsx-inreg.ll
index 07979f61ddd..2897d6bd814 100644
--- a/llvm/test/CodeGen/X86/pmovsx-inreg.ll
+++ b/llvm/test/CodeGen/X86/pmovsx-inreg.ll
@@ -6,7 +6,7 @@
; These tests inject a store into the chain to test the inreg versions of pmovsx
define void @test1(<2 x i8>* %in, <2 x i64>* %out) nounwind {
- %wide.load35 = load <2 x i8>* %in, align 1
+ %wide.load35 = load <2 x i8>, <2 x i8>* %in, align 1
%sext = sext <2 x i8> %wide.load35 to <2 x i64>
store <2 x i64> zeroinitializer, <2 x i64>* undef, align 8
store <2 x i64> %sext, <2 x i64>* %out, align 8
@@ -23,7 +23,7 @@ define void @test1(<2 x i8>* %in, <2 x i64>* %out) nounwind {
}
define void @test2(<4 x i8>* %in, <4 x i64>* %out) nounwind {
- %wide.load35 = load <4 x i8>* %in, align 1
+ %wide.load35 = load <4 x i8>, <4 x i8>* %in, align 1
%sext = sext <4 x i8> %wide.load35 to <4 x i64>
store <4 x i64> zeroinitializer, <4 x i64>* undef, align 8
store <4 x i64> %sext, <4 x i64>* %out, align 8
@@ -34,7 +34,7 @@ define void @test2(<4 x i8>* %in, <4 x i64>* %out) nounwind {
}
define void @test3(<4 x i8>* %in, <4 x i32>* %out) nounwind {
- %wide.load35 = load <4 x i8>* %in, align 1
+ %wide.load35 = load <4 x i8>, <4 x i8>* %in, align 1
%sext = sext <4 x i8> %wide.load35 to <4 x i32>
store <4 x i32> zeroinitializer, <4 x i32>* undef, align 8
store <4 x i32> %sext, <4 x i32>* %out, align 8
@@ -51,7 +51,7 @@ define void @test3(<4 x i8>* %in, <4 x i32>* %out) nounwind {
}
define void @test4(<8 x i8>* %in, <8 x i32>* %out) nounwind {
- %wide.load35 = load <8 x i8>* %in, align 1
+ %wide.load35 = load <8 x i8>, <8 x i8>* %in, align 1
%sext = sext <8 x i8> %wide.load35 to <8 x i32>
store <8 x i32> zeroinitializer, <8 x i32>* undef, align 8
store <8 x i32> %sext, <8 x i32>* %out, align 8
@@ -62,7 +62,7 @@ define void @test4(<8 x i8>* %in, <8 x i32>* %out) nounwind {
}
define void @test5(<8 x i8>* %in, <8 x i16>* %out) nounwind {
- %wide.load35 = load <8 x i8>* %in, align 1
+ %wide.load35 = load <8 x i8>, <8 x i8>* %in, align 1
%sext = sext <8 x i8> %wide.load35 to <8 x i16>
store <8 x i16> zeroinitializer, <8 x i16>* undef, align 8
store <8 x i16> %sext, <8 x i16>* %out, align 8
@@ -79,7 +79,7 @@ define void @test5(<8 x i8>* %in, <8 x i16>* %out) nounwind {
}
define void @test6(<16 x i8>* %in, <16 x i16>* %out) nounwind {
- %wide.load35 = load <16 x i8>* %in, align 1
+ %wide.load35 = load <16 x i8>, <16 x i8>* %in, align 1
%sext = sext <16 x i8> %wide.load35 to <16 x i16>
store <16 x i16> zeroinitializer, <16 x i16>* undef, align 8
store <16 x i16> %sext, <16 x i16>* %out, align 8
@@ -90,7 +90,7 @@ define void @test6(<16 x i8>* %in, <16 x i16>* %out) nounwind {
}
define void @test7(<2 x i16>* %in, <2 x i64>* %out) nounwind {
- %wide.load35 = load <2 x i16>* %in, align 1
+ %wide.load35 = load <2 x i16>, <2 x i16>* %in, align 1
%sext = sext <2 x i16> %wide.load35 to <2 x i64>
store <2 x i64> zeroinitializer, <2 x i64>* undef, align 8
store <2 x i64> %sext, <2 x i64>* %out, align 8
@@ -108,7 +108,7 @@ define void @test7(<2 x i16>* %in, <2 x i64>* %out) nounwind {
}
define void @test8(<4 x i16>* %in, <4 x i64>* %out) nounwind {
- %wide.load35 = load <4 x i16>* %in, align 1
+ %wide.load35 = load <4 x i16>, <4 x i16>* %in, align 1
%sext = sext <4 x i16> %wide.load35 to <4 x i64>
store <4 x i64> zeroinitializer, <4 x i64>* undef, align 8
store <4 x i64> %sext, <4 x i64>* %out, align 8
@@ -119,7 +119,7 @@ define void @test8(<4 x i16>* %in, <4 x i64>* %out) nounwind {
}
define void @test9(<4 x i16>* %in, <4 x i32>* %out) nounwind {
- %wide.load35 = load <4 x i16>* %in, align 1
+ %wide.load35 = load <4 x i16>, <4 x i16>* %in, align 1
%sext = sext <4 x i16> %wide.load35 to <4 x i32>
store <4 x i32> zeroinitializer, <4 x i32>* undef, align 8
store <4 x i32> %sext, <4 x i32>* %out, align 8
@@ -136,7 +136,7 @@ define void @test9(<4 x i16>* %in, <4 x i32>* %out) nounwind {
}
define void @test10(<8 x i16>* %in, <8 x i32>* %out) nounwind {
- %wide.load35 = load <8 x i16>* %in, align 1
+ %wide.load35 = load <8 x i16>, <8 x i16>* %in, align 1
%sext = sext <8 x i16> %wide.load35 to <8 x i32>
store <8 x i32> zeroinitializer, <8 x i32>* undef, align 8
store <8 x i32> %sext, <8 x i32>* %out, align 8
@@ -147,7 +147,7 @@ define void @test10(<8 x i16>* %in, <8 x i32>* %out) nounwind {
}
define void @test11(<2 x i32>* %in, <2 x i64>* %out) nounwind {
- %wide.load35 = load <2 x i32>* %in, align 1
+ %wide.load35 = load <2 x i32>, <2 x i32>* %in, align 1
%sext = sext <2 x i32> %wide.load35 to <2 x i64>
store <2 x i64> zeroinitializer, <2 x i64>* undef, align 8
store <2 x i64> %sext, <2 x i64>* %out, align 8
@@ -164,7 +164,7 @@ define void @test11(<2 x i32>* %in, <2 x i64>* %out) nounwind {
}
define void @test12(<4 x i32>* %in, <4 x i64>* %out) nounwind {
- %wide.load35 = load <4 x i32>* %in, align 1
+ %wide.load35 = load <4 x i32>, <4 x i32>* %in, align 1
%sext = sext <4 x i32> %wide.load35 to <4 x i64>
store <4 x i64> zeroinitializer, <4 x i64>* undef, align 8
store <4 x i64> %sext, <4 x i64>* %out, align 8
diff --git a/llvm/test/CodeGen/X86/pmulld.ll b/llvm/test/CodeGen/X86/pmulld.ll
index 3db0f73954d..3fe3ebc9763 100644
--- a/llvm/test/CodeGen/X86/pmulld.ll
+++ b/llvm/test/CodeGen/X86/pmulld.ll
@@ -20,7 +20,7 @@ define <4 x i32> @test1a(<4 x i32> %A, <4 x i32> *%Bp) nounwind {
; WIN64-NEXT: movdqa (%rcx), %xmm0
; WIN64-NEXT: pmulld (%rdx), %xmm0
- %B = load <4 x i32>* %Bp
+ %B = load <4 x i32>, <4 x i32>* %Bp
%C = mul <4 x i32> %A, %B
ret <4 x i32> %C
}
diff --git a/llvm/test/CodeGen/X86/pointer-vector.ll b/llvm/test/CodeGen/X86/pointer-vector.ll
index 5e0c2dae083..48c8b2376bd 100644
--- a/llvm/test/CodeGen/X86/pointer-vector.ll
+++ b/llvm/test/CodeGen/X86/pointer-vector.ll
@@ -31,7 +31,7 @@ entry:
;CHECK: LOAD0
define <4 x i8*> @LOAD0(<4 x i8*>* %p) nounwind {
entry:
- %G = load <4 x i8*>* %p
+ %G = load <4 x i8*>, <4 x i8*>* %p
;CHECK: movaps
ret <4 x i8*> %G
;CHECK: ret
@@ -40,7 +40,7 @@ entry:
;CHECK: LOAD1
define <4 x i8*> @LOAD1(<4 x i8*>* %p) nounwind {
entry:
- %G = load <4 x i8*>* %p
+ %G = load <4 x i8*>, <4 x i8*>* %p
;CHECK: movdqa
;CHECK: pshufd
;CHECK: movdqa
@@ -55,11 +55,11 @@ define <4 x i8*> @LOAD2(<4 x i8*>* %p) nounwind {
entry:
%I = alloca <4 x i8*>
;CHECK: sub
- %G = load <4 x i8*>* %p
+ %G = load <4 x i8*>, <4 x i8*>* %p
;CHECK: movaps
store <4 x i8*> %G, <4 x i8*>* %I
;CHECK: movaps
- %Z = load <4 x i8*>* %I
+ %Z = load <4 x i8*>, <4 x i8*>* %I
ret <4 x i8*> %Z
;CHECK: add
;CHECK: ret
@@ -68,7 +68,7 @@ entry:
;CHECK: INT2PTR0
define <4 x i32> @INT2PTR0(<4 x i8*>* %p) nounwind {
entry:
- %G = load <4 x i8*>* %p
+ %G = load <4 x i8*>, <4 x i8*>* %p
;CHECK: movl
;CHECK: movaps
%K = ptrtoint <4 x i8*> %G to <4 x i32>
@@ -79,7 +79,7 @@ entry:
;CHECK: INT2PTR1
define <4 x i32*> @INT2PTR1(<4 x i8>* %p) nounwind {
entry:
- %G = load <4 x i8>* %p
+ %G = load <4 x i8>, <4 x i8>* %p
;CHECK: movl
;CHECK: pmovzxbd (%
%K = inttoptr <4 x i8> %G to <4 x i32*>
@@ -90,7 +90,7 @@ entry:
;CHECK: BITCAST0
define <4 x i32*> @BITCAST0(<4 x i8*>* %p) nounwind {
entry:
- %G = load <4 x i8*>* %p
+ %G = load <4 x i8*>, <4 x i8*>* %p
;CHECK: movl
%T = bitcast <4 x i8*> %G to <4 x i32*>
;CHECK: movaps
@@ -101,7 +101,7 @@ entry:
;CHECK: BITCAST1
define <2 x i32*> @BITCAST1(<2 x i8*>* %p) nounwind {
entry:
- %G = load <2 x i8*>* %p
+ %G = load <2 x i8*>, <2 x i8*>* %p
;CHECK: movl
;CHECK: pmovzxdq
%T = bitcast <2 x i8*> %G to <2 x i32*>
@@ -112,8 +112,8 @@ entry:
;CHECK: ICMP0
define <4 x i32> @ICMP0(<4 x i8*>* %p0, <4 x i8*>* %p1) nounwind {
entry:
- %g0 = load <4 x i8*>* %p0
- %g1 = load <4 x i8*>* %p1
+ %g0 = load <4 x i8*>, <4 x i8*>* %p0
+ %g1 = load <4 x i8*>, <4 x i8*>* %p1
%k = icmp sgt <4 x i8*> %g0, %g1
;CHECK: pcmpgtd
%j = select <4 x i1> %k, <4 x i32> <i32 0, i32 1, i32 2, i32 4>, <4 x i32> <i32 9, i32 8, i32 7, i32 6>
@@ -124,8 +124,8 @@ entry:
;CHECK: ICMP1
define <4 x i32> @ICMP1(<4 x i8*>* %p0, <4 x i8*>* %p1) nounwind {
entry:
- %g0 = load <4 x i8*>* %p0
- %g1 = load <4 x i8*>* %p1
+ %g0 = load <4 x i8*>, <4 x i8*>* %p0
+ %g1 = load <4 x i8*>, <4 x i8*>* %p1
%k = icmp eq <4 x i8*> %g0, %g1
;CHECK: pcmpeqd
%j = select <4 x i1> %k, <4 x i32> <i32 0, i32 1, i32 2, i32 4>, <4 x i32> <i32 9, i32 8, i32 7, i32 6>
diff --git a/llvm/test/CodeGen/X86/postra-licm.ll b/llvm/test/CodeGen/X86/postra-licm.ll
index 00d04daa51c..c5978dec3f7 100644
--- a/llvm/test/CodeGen/X86/postra-licm.ll
+++ b/llvm/test/CodeGen/X86/postra-licm.ll
@@ -113,7 +113,7 @@ bb45: ; preds = %bb39.preheader
br i1 false, label %bb47, label %bb32.loopexit
bb47: ; preds = %bb45
- %10 = load i32* %7, align 4 ; <i32> [#uses=0]
+ %10 = load i32, i32* %7, align 4 ; <i32> [#uses=0]
unreachable
bb70: ; preds = %bb32.loopexit, %bb30
@@ -165,16 +165,16 @@ bb: ; preds = %bb, %bb.nph
%scevgep13 = getelementptr i8, i8* %bufp, i64 %tmp12 ; <i8*> [#uses=1]
%tmp15 = add i64 %tmp14, %tmp9 ; <i64> [#uses=1]
%scevgep16 = getelementptr i8, i8* %bufp, i64 %tmp15 ; <i8*> [#uses=1]
- %0 = load i8* undef, align 1 ; <i8> [#uses=1]
+ %0 = load i8, i8* undef, align 1 ; <i8> [#uses=1]
%1 = zext i8 %0 to i32 ; <i32> [#uses=1]
%2 = getelementptr inbounds [16 x i16], [16 x i16]* @map_4_to_16, i64 0, i64 0 ; <i16*> [#uses=1]
- %3 = load i16* %2, align 2 ; <i16> [#uses=1]
+ %3 = load i16, i16* %2, align 2 ; <i16> [#uses=1]
%4 = trunc i16 %3 to i8 ; <i8> [#uses=1]
store i8 %4, i8* undef, align 1
%5 = and i32 %1, 15 ; <i32> [#uses=1]
%6 = zext i32 %5 to i64 ; <i64> [#uses=1]
%7 = getelementptr inbounds [16 x i16], [16 x i16]* @map_4_to_16, i64 0, i64 %6 ; <i16*> [#uses=1]
- %8 = load i16* %7, align 2 ; <i16> [#uses=2]
+ %8 = load i16, i16* %7, align 2 ; <i16> [#uses=2]
%9 = lshr i16 %8, 8 ; <i16> [#uses=1]
%10 = trunc i16 %9 to i8 ; <i8> [#uses=1]
store i8 %10, i8* %scevgep13, align 1
diff --git a/llvm/test/CodeGen/X86/pr10475.ll b/llvm/test/CodeGen/X86/pr10475.ll
index 3efc39ee9f1..d81fce8bc4f 100644
--- a/llvm/test/CodeGen/X86/pr10475.ll
+++ b/llvm/test/CodeGen/X86/pr10475.ll
@@ -10,7 +10,7 @@ CF79: ; preds = %CF79, %BB
br i1 undef, label %CF79, label %CF84.critedge.critedge
CF84.critedge.critedge: ; preds = %CF79
- %L35 = load <8 x i32>* undef
+ %L35 = load <8 x i32>, <8 x i32>* undef
br label %CF85
CF85: ; preds = %CF85, %CF84.critedge.critedge
diff --git a/llvm/test/CodeGen/X86/pr10525.ll b/llvm/test/CodeGen/X86/pr10525.ll
index 30ce2979e8e..436d89caabe 100644
--- a/llvm/test/CodeGen/X86/pr10525.ll
+++ b/llvm/test/CodeGen/X86/pr10525.ll
@@ -4,7 +4,7 @@
define void @autogen_163411_5000() {
BB:
- %L = load <2 x i64>* undef
+ %L = load <2 x i64>, <2 x i64>* undef
%Shuff11 = shufflevector <2 x i64> %L, <2 x i64> %L, <2 x i32> <i32 2, i32 0>
%I51 = insertelement <2 x i64> undef, i64 undef, i32 0
%Shuff152 = shufflevector <2 x i64> %I51, <2 x i64> %Shuff11, <2 x i32> <i32 1, i32 3>
diff --git a/llvm/test/CodeGen/X86/pr11334.ll b/llvm/test/CodeGen/X86/pr11334.ll
index 0bdb0ec7cf4..6da4697c3b2 100644
--- a/llvm/test/CodeGen/X86/pr11334.ll
+++ b/llvm/test/CodeGen/X86/pr11334.ll
@@ -57,7 +57,7 @@ entry:
define void @test_vector_creation() nounwind {
%1 = insertelement <4 x double> undef, double 0.000000e+00, i32 2
- %2 = load double addrspace(1)* null
+ %2 = load double, double addrspace(1)* null
%3 = insertelement <4 x double> %1, double %2, i32 3
store <4 x double> %3, <4 x double>* undef
ret void
diff --git a/llvm/test/CodeGen/X86/pr12360.ll b/llvm/test/CodeGen/X86/pr12360.ll
index 67340362458..3e762da545d 100644
--- a/llvm/test/CodeGen/X86/pr12360.ll
+++ b/llvm/test/CodeGen/X86/pr12360.ll
@@ -6,7 +6,7 @@ define zeroext i1 @f1(i8* %x) {
; CHECK-NEXT: ret
entry:
- %0 = load i8* %x, align 1, !range !0
+ %0 = load i8, i8* %x, align 1, !range !0
%tobool = trunc i8 %0 to i1
ret i1 %tobool
}
@@ -17,7 +17,7 @@ define zeroext i1 @f2(i8* %x) {
; CHECK-NEXT: ret
entry:
- %0 = load i8* %x, align 1, !range !0
+ %0 = load i8, i8* %x, align 1, !range !0
%tobool = icmp ne i8 %0, 0
ret i1 %tobool
}
diff --git a/llvm/test/CodeGen/X86/pr12889.ll b/llvm/test/CodeGen/X86/pr12889.ll
index 428e9b760b7..8234fcc67e0 100644
--- a/llvm/test/CodeGen/X86/pr12889.ll
+++ b/llvm/test/CodeGen/X86/pr12889.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-unknown-linux-gnu"
define void @func() nounwind uwtable {
entry:
- %0 = load i8* @c0, align 1
+ %0 = load i8, i8* @c0, align 1
%tobool = icmp ne i8 %0, 0
%conv = zext i1 %tobool to i8
%storemerge = shl nuw nsw i8 %conv, %conv
diff --git a/llvm/test/CodeGen/X86/pr13209.ll b/llvm/test/CodeGen/X86/pr13209.ll
index 8e5eca2b2c2..0d5196fc7c8 100644
--- a/llvm/test/CodeGen/X86/pr13209.ll
+++ b/llvm/test/CodeGen/X86/pr13209.ll
@@ -11,37 +11,37 @@ indirectgoto.preheader:
%frombool.i5915.ph = phi i8 [ undef, %if.end51 ], [ %frombool.i5917, %jit_return ]
br label %indirectgoto
do.end165:
- %tmp92 = load i8** %x, align 8
+ %tmp92 = load i8*, i8** %x, align 8
br label %indirectgoto
do.end209:
- %tmp104 = load i8** %x, align 8
+ %tmp104 = load i8*, i8** %x, align 8
br label %indirectgoto
do.end220:
- %tmp107 = load i8** %x, align 8
+ %tmp107 = load i8*, i8** %x, align 8
br label %indirectgoto
do.end231:
- %tmp110 = load i8** %x, align 8
+ %tmp110 = load i8*, i8** %x, align 8
br label %indirectgoto
do.end242:
- %tmp113 = load i8** %x, align 8
+ %tmp113 = load i8*, i8** %x, align 8
br label %indirectgoto
do.end253:
- %tmp116 = load i8** %x, align 8
+ %tmp116 = load i8*, i8** %x, align 8
br label %indirectgoto
do.end286:
- %tmp125 = load i8** %x, align 8
+ %tmp125 = load i8*, i8** %x, align 8
br label %indirectgoto
do.end297:
- %tmp128 = load i8** %x, align 8
+ %tmp128 = load i8*, i8** %x, align 8
br label %indirectgoto
do.end308:
- %tmp131 = load i8** %x, align 8
+ %tmp131 = load i8*, i8** %x, align 8
br label %indirectgoto
do.end429:
- %tmp164 = load i8** %x, align 8
+ %tmp164 = load i8*, i8** %x, align 8
br label %indirectgoto
do.end440:
- %tmp167 = load i8** %x, align 8
+ %tmp167 = load i8*, i8** %x, align 8
br label %indirectgoto
do.body482:
br i1 false, label %indirectgoto, label %do.body495
@@ -55,16 +55,16 @@ inline_return:
jit_return:
br label %indirectgoto.preheader
L_JSOP_UINT24:
- %tmp864 = load i8** %x, align 8
+ %tmp864 = load i8*, i8** %x, align 8
br label %indirectgoto
L_JSOP_THROWING:
- %tmp1201 = load i8** %x, align 8
+ %tmp1201 = load i8*, i8** %x, align 8
br label %indirectgoto
do.body4936:
- %tmp1240 = load i8** %x, align 8
+ %tmp1240 = load i8*, i8** %x, align 8
br label %indirectgoto
do.body5184:
- %tmp1340 = load i8** %x, align 8
+ %tmp1340 = load i8*, i8** %x, align 8
br label %indirectgoto
if.end5571:
br label %inline_return
diff --git a/llvm/test/CodeGen/X86/pr13859.ll b/llvm/test/CodeGen/X86/pr13859.ll
index 719721dfd87..1ebc79647ee 100644
--- a/llvm/test/CodeGen/X86/pr13859.ll
+++ b/llvm/test/CodeGen/X86/pr13859.ll
@@ -7,7 +7,7 @@ entry:
%aMyAlloca = alloca i32, align 32
%dest = alloca <1 x i64>, align 32
- %a32 = load i32* %aMyAlloca, align 4
+ %a32 = load i32, i32* %aMyAlloca, align 4
%aconv = trunc i32 %a32 to i16
%a36 = insertelement <4 x i16> undef, i16 %aconv, i32 0
%a37 = insertelement <4 x i16> %a36, i16 %aconv, i32 1
diff --git a/llvm/test/CodeGen/X86/pr13899.ll b/llvm/test/CodeGen/X86/pr13899.ll
index 978d813aa4b..abfb918ff0f 100644
--- a/llvm/test/CodeGen/X86/pr13899.ll
+++ b/llvm/test/CodeGen/X86/pr13899.ll
@@ -26,25 +26,25 @@ declare <8 x float> @bar64(<8 x float> %i0, <8 x float> %i1,
<8 x float> %i8, <8 x float> %i9)
define <8 x float> @foo64(<8 x float>* %p) {
- %1 = load <8 x float>* %p
+ %1 = load <8 x float>, <8 x float>* %p
%idx1 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 1
- %2 = load <8 x float>* %idx1
+ %2 = load <8 x float>, <8 x float>* %idx1
%idx2 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 2
- %3 = load <8 x float>* %idx2
+ %3 = load <8 x float>, <8 x float>* %idx2
%idx3 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 3
- %4 = load <8 x float>* %idx3
+ %4 = load <8 x float>, <8 x float>* %idx3
%idx4 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 4
- %5 = load <8 x float>* %idx4
+ %5 = load <8 x float>, <8 x float>* %idx4
%idx5 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 5
- %6 = load <8 x float>* %idx5
+ %6 = load <8 x float>, <8 x float>* %idx5
%idx6 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 6
- %7 = load <8 x float>* %idx6
+ %7 = load <8 x float>, <8 x float>* %idx6
%idx7 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 7
- %8 = load <8 x float>* %idx7
+ %8 = load <8 x float>, <8 x float>* %idx7
%idx8 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 8
- %9 = load <8 x float>* %idx8
+ %9 = load <8 x float>, <8 x float>* %idx8
%idx9 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 9
- %10 = load <8 x float>* %idx9
+ %10 = load <8 x float>, <8 x float>* %idx9
%r = tail call <8 x float> @bar64(<8 x float> %1, <8 x float> %2,
<8 x float> %3, <8 x float> %4,
<8 x float> %5, <8 x float> %6,
diff --git a/llvm/test/CodeGen/X86/pr14161.ll b/llvm/test/CodeGen/X86/pr14161.ll
index c2bb8d3df8f..b7084c02274 100644
--- a/llvm/test/CodeGen/X86/pr14161.ll
+++ b/llvm/test/CodeGen/X86/pr14161.ll
@@ -10,7 +10,7 @@ define <2 x i16> @good(<4 x i32>*, <4 x i8>*) {
; CHECK-NEXT: pmovzxwq %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
- %2 = load <4 x i32>* %0, align 16
+ %2 = load <4 x i32>, <4 x i32>* %0, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %2, <4 x i32> <i32 127, i32 127, i32 127, i32 127>)
%4 = extractelement <4 x i32> %3, i32 0
%5 = extractelement <4 x i32> %3, i32 1
@@ -31,7 +31,7 @@ define <2 x i16> @bad(<4 x i32>*, <4 x i8>*) {
; CHECK-NEXT: pmovzxwq %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
- %2 = load <4 x i32>* %0, align 16
+ %2 = load <4 x i32>, <4 x i32>* %0, align 16
%3 = call <4 x i32> @llvm.x86.sse41.pminud(<4 x i32> %2, <4 x i32> <i32 127, i32 127, i32 127, i32 127>)
%4 = extractelement <4 x i32> %3, i32 0
%5 = extractelement <4 x i32> %3, i32 1
diff --git a/llvm/test/CodeGen/X86/pr14562.ll b/llvm/test/CodeGen/X86/pr14562.ll
index e66f1752a30..31674546423 100644
--- a/llvm/test/CodeGen/X86/pr14562.ll
+++ b/llvm/test/CodeGen/X86/pr14562.ll
@@ -3,7 +3,7 @@
@temp1 = global i64 -77129852189294865, align 8
define void @foo() nounwind {
- %x = load i64* @temp1, align 8
+ %x = load i64, i64* @temp1, align 8
%s = shl i64 %x, 32
%t = trunc i64 %s to i32
%z = zext i32 %t to i64
diff --git a/llvm/test/CodeGen/X86/pr1505b.ll b/llvm/test/CodeGen/X86/pr1505b.ll
index c348fec5467..95804978ffe 100644
--- a/llvm/test/CodeGen/X86/pr1505b.ll
+++ b/llvm/test/CodeGen/X86/pr1505b.ll
@@ -33,7 +33,7 @@ declare i32 @__cxa_atexit(void (i8*)*, i8*, i8*)
define i32 @main() {
entry:
; CHECK: flds
- %tmp6 = load volatile float* @a ; <float> [#uses=1]
+ %tmp6 = load volatile float, float* @a ; <float> [#uses=1]
; CHECK: fstps (%esp)
; CHECK: tanf
%tmp9 = tail call float @tanf( float %tmp6 ) ; <float> [#uses=1]
@@ -41,7 +41,7 @@ entry:
; CHECK: fstp
; CHECK: fldl
- %tmp12 = load volatile double* @b ; <double> [#uses=1]
+ %tmp12 = load volatile double, double* @b ; <double> [#uses=1]
; CHECK: fstpl (%esp)
; CHECK: tan
%tmp13 = tail call double @tan( double %tmp12 ) ; <double> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/pr15267.ll b/llvm/test/CodeGen/X86/pr15267.ll
index 90df9905fe1..e9f41d9ded4 100644
--- a/llvm/test/CodeGen/X86/pr15267.ll
+++ b/llvm/test/CodeGen/X86/pr15267.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=corei7-avx | FileCheck %s
define <4 x i3> @test1(<4 x i3>* %in) nounwind {
- %ret = load <4 x i3>* %in, align 1
+ %ret = load <4 x i3>, <4 x i3>* %in, align 1
ret <4 x i3> %ret
}
; CHECK-LABEL: test1
@@ -20,7 +20,7 @@ define <4 x i3> @test1(<4 x i3>* %in) nounwind {
; CHECK: ret
define <4 x i1> @test2(<4 x i1>* %in) nounwind {
- %ret = load <4 x i1>* %in, align 1
+ %ret = load <4 x i1>, <4 x i1>* %in, align 1
ret <4 x i1> %ret
}
@@ -40,7 +40,7 @@ define <4 x i1> @test2(<4 x i1>* %in) nounwind {
; CHECK: ret
define <4 x i64> @test3(<4 x i1>* %in) nounwind {
- %wide.load35 = load <4 x i1>* %in, align 1
+ %wide.load35 = load <4 x i1>, <4 x i1>* %in, align 1
%sext = sext <4 x i1> %wide.load35 to <4 x i64>
ret <4 x i64> %sext
}
@@ -68,7 +68,7 @@ define <4 x i64> @test3(<4 x i1>* %in) nounwind {
; CHECK: ret
define <16 x i4> @test4(<16 x i4>* %in) nounwind {
- %ret = load <16 x i4>* %in, align 1
+ %ret = load <16 x i4>, <16 x i4>* %in, align 1
ret <16 x i4> %ret
}
diff --git a/llvm/test/CodeGen/X86/pr15309.ll b/llvm/test/CodeGen/X86/pr15309.ll
index fdda4718c5b..e9d9b9e54c1 100644
--- a/llvm/test/CodeGen/X86/pr15309.ll
+++ b/llvm/test/CodeGen/X86/pr15309.ll
@@ -3,7 +3,7 @@
define void @test_convert_float2_ulong2(<2 x i64>* nocapture %src, <2 x float>* nocapture %dest) noinline {
L.entry:
%0 = getelementptr <2 x i64>, <2 x i64>* %src, i32 10
- %1 = load <2 x i64>* %0, align 16
+ %1 = load <2 x i64>, <2 x i64>* %0, align 16
%2 = uitofp <2 x i64> %1 to <2 x float>
%3 = getelementptr <2 x float>, <2 x float>* %dest, i32 10
store <2 x float> %2, <2 x float>* %3, align 8
diff --git a/llvm/test/CodeGen/X86/pr18023.ll b/llvm/test/CodeGen/X86/pr18023.ll
index 4c6f8cfce73..c376fe2a8d0 100644
--- a/llvm/test/CodeGen/X86/pr18023.ll
+++ b/llvm/test/CodeGen/X86/pr18023.ll
@@ -15,15 +15,15 @@
define void @func() {
store i32 1, i32* getelementptr inbounds ([3 x i32]* @a, i64 0, i64 1), align 4
store i32 0, i32* getelementptr inbounds ([3 x i32]* @a, i64 0, i64 0), align 4
- %1 = load volatile i32* @b, align 4
+ %1 = load volatile i32, i32* @b, align 4
store i32 1, i32* getelementptr inbounds ([3 x i32]* @a, i64 0, i64 1), align 4
store i32 0, i32* getelementptr inbounds ([3 x i32]* @a, i64 0, i64 1), align 4
- %2 = load volatile i32* @b, align 4
+ %2 = load volatile i32, i32* @b, align 4
store i32 1, i32* getelementptr inbounds ([3 x i32]* @a, i64 0, i64 1), align 4
store i32 0, i32* getelementptr inbounds ([3 x i32]* @a, i64 0, i64 2), align 4
- %3 = load volatile i32* @b, align 4
+ %3 = load volatile i32, i32* @b, align 4
store i32 3, i32* @c, align 4
- %4 = load i32* getelementptr inbounds ([3 x i32]* @a, i64 0, i64 1), align 4
+ %4 = load i32, i32* getelementptr inbounds ([3 x i32]* @a, i64 0, i64 1), align 4
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %4)
ret void
}
diff --git a/llvm/test/CodeGen/X86/pr18162.ll b/llvm/test/CodeGen/X86/pr18162.ll
index 3afa3ba5aab..7912db863ed 100644
--- a/llvm/test/CodeGen/X86/pr18162.ll
+++ b/llvm/test/CodeGen/X86/pr18162.ll
@@ -13,15 +13,15 @@ define { i64, <2 x float> } @Foo(%"Iterator"* %this) {
entry:
%retval = alloca i32
%this.addr = alloca %"Iterator"*
- %this1 = load %"Iterator"** %this.addr
+ %this1 = load %"Iterator"*, %"Iterator"** %this.addr
%bundle_ = getelementptr inbounds %"Iterator", %"Iterator"* %this1, i32 0, i32 0
- %0 = load i32** %bundle_
+ %0 = load i32*, i32** %bundle_
%1 = call { i64, <2 x float> } @Call()
%2 = call { i64, <2 x float> }* @CallPtr()
%3 = getelementptr { i64, <2 x float> }, { i64, <2 x float> }* %2, i32 0, i32 1
%4 = extractvalue { i64, <2 x float> } %1, 1
store <2 x float> %4, <2 x float>* %3
- %5 = load { i64, <2 x float> }* %2
+ %5 = load { i64, <2 x float> }, { i64, <2 x float> }* %2
ret { i64, <2 x float> } %5
}
diff --git a/llvm/test/CodeGen/X86/pr18846.ll b/llvm/test/CodeGen/X86/pr18846.ll
index eb6addb66af..02c33fed8d6 100644
--- a/llvm/test/CodeGen/X86/pr18846.ll
+++ b/llvm/test/CodeGen/X86/pr18846.ll
@@ -21,31 +21,31 @@ for.body29: ; preds = %for.body29, %entry
br i1 undef, label %for.body29, label %for.body65
for.body65: ; preds = %for.body29
- %0 = load float* undef, align 4, !tbaa !1
+ %0 = load float, float* undef, align 4, !tbaa !1
%vecinit7.i4448 = insertelement <8 x float> undef, float %0, i32 7
- %1 = load float* null, align 4, !tbaa !1
+ %1 = load float, float* null, align 4, !tbaa !1
%vecinit7.i4304 = insertelement <8 x float> undef, float %1, i32 7
- %2 = load float* undef, align 4, !tbaa !1
+ %2 = load float, float* undef, align 4, !tbaa !1
%vecinit7.i4196 = insertelement <8 x float> undef, float %2, i32 7
%3 = or i64 0, 16
%add.ptr111.sum4096 = add i64 %3, 0
- %4 = load <8 x float>* null, align 16, !tbaa !5
+ %4 = load <8 x float>, <8 x float>* null, align 16, !tbaa !5
%add.ptr162 = getelementptr inbounds [65536 x float], [65536 x float]* null, i64 0, i64 %add.ptr111.sum4096
%__v.i4158 = bitcast float* %add.ptr162 to <8 x float>*
- %5 = load <8 x float>* %__v.i4158, align 16, !tbaa !5
+ %5 = load <8 x float>, <8 x float>* %__v.i4158, align 16, !tbaa !5
%add.ptr158.sum40975066 = or i64 %add.ptr111.sum4096, 8
%add.ptr183 = getelementptr inbounds [65536 x float], [65536 x float]* null, i64 0, i64 %add.ptr158.sum40975066
%__v.i4162 = bitcast float* %add.ptr183 to <8 x float>*
- %6 = load <8 x float>* %__v.i4162, align 16, !tbaa !5
+ %6 = load <8 x float>, <8 x float>* %__v.i4162, align 16, !tbaa !5
%add.ptr200.sum40995067 = or i64 undef, 8
%add.ptr225 = getelementptr inbounds [65536 x float], [65536 x float]* null, i64 0, i64 %add.ptr200.sum40995067
%__v.i4167 = bitcast float* %add.ptr225 to <8 x float>*
- %7 = load <8 x float>* %__v.i4167, align 4, !tbaa !5
- %8 = load <8 x float>* undef, align 16, !tbaa !5
+ %7 = load <8 x float>, <8 x float>* %__v.i4167, align 4, !tbaa !5
+ %8 = load <8 x float>, <8 x float>* undef, align 16, !tbaa !5
%add.ptr242.sum41015068 = or i64 0, 8
%add.ptr267 = getelementptr inbounds [65536 x float], [65536 x float]* null, i64 0, i64 %add.ptr242.sum41015068
%__v.i4171 = bitcast float* %add.ptr267 to <8 x float>*
- %9 = load <8 x float>* %__v.i4171, align 4, !tbaa !5
+ %9 = load <8 x float>, <8 x float>* %__v.i4171, align 4, !tbaa !5
%mul.i4690 = fmul <8 x float> %7, undef
%add.i4665 = fadd <8 x float> undef, undef
%mul.i4616 = fmul <8 x float> %8, undef
@@ -56,8 +56,8 @@ for.body65: ; preds = %for.body29
%mul.i4578 = fmul <8 x float> %9, undef
%add.i4577 = fadd <8 x float> %add.i4593, %mul.i4578
call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4577) #1
- %10 = load <8 x float>* null, align 16, !tbaa !5
- %11 = load <8 x float>* undef, align 16, !tbaa !5
+ %10 = load <8 x float>, <8 x float>* null, align 16, !tbaa !5
+ %11 = load <8 x float>, <8 x float>* undef, align 16, !tbaa !5
%mul.i4564 = fmul <8 x float> %4, undef
%add.i4563 = fadd <8 x float> %10, %mul.i4564
%mul.i4560 = fmul <8 x float> %5, undef
@@ -107,7 +107,7 @@ for.body65: ; preds = %for.body29
call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4293) #1
call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4291) #1
call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4289) #1
- %12 = load <8 x float>* undef, align 16, !tbaa !5
+ %12 = load <8 x float>, <8 x float>* undef, align 16, !tbaa !5
%mul.i4274 = fmul <8 x float> undef, undef
%add.i4273 = fadd <8 x float> %12, %mul.i4274
%mul.i4258 = fmul <8 x float> %7, undef
diff --git a/llvm/test/CodeGen/X86/pr20020.ll b/llvm/test/CodeGen/X86/pr20020.ll
index 5bc87d1df02..56c12faaf70 100644
--- a/llvm/test/CodeGen/X86/pr20020.ll
+++ b/llvm/test/CodeGen/X86/pr20020.ll
@@ -41,12 +41,12 @@ for.body3: ; preds = %for.body3, %for.bod
%iv20 = phi i32 [ %iv.next21, %for.body3 ], [ %iv19, %for.body3.lr.ph ]
%iv15 = phi %struct.planet* [ %gep16, %for.body3 ], [ %iv, %for.body3.lr.ph ]
%iv1517 = bitcast %struct.planet* %iv15 to double*
- %2 = load double* %x, align 8
+ %2 = load double, double* %x, align 8
%gep18 = getelementptr double, double* %iv1517, i64 -1
- %3 = load double* %gep18, align 8
+ %3 = load double, double* %gep18, align 8
%sub = fsub double %2, %3
- %4 = load double* %y, align 8
- %5 = load double* %iv1517, align 8
+ %4 = load double, double* %y, align 8
+ %5 = load double, double* %iv1517, align 8
%sub8 = fsub double %4, %5
%add10 = fadd double %sub, %sub8
%call = tail call double @sqrt(double %sub8) #2
diff --git a/llvm/test/CodeGen/X86/pr2177.ll b/llvm/test/CodeGen/X86/pr2177.ll
index 01e632b01cd..8260a7f0614 100644
--- a/llvm/test/CodeGen/X86/pr2177.ll
+++ b/llvm/test/CodeGen/X86/pr2177.ll
@@ -22,10 +22,10 @@ bb10: ; preds = %bb5
bb54: ; preds = %bb5
ret void
bb118: ; preds = %bb5, %bb5, %bb5, %bb5
- %tmp125 = load i8** null, align 8 ; <i8*> [#uses=1]
+ %tmp125 = load i8*, i8** null, align 8 ; <i8*> [#uses=1]
%tmp125126 = bitcast i8* %tmp125 to %struct.S2259* ; <%struct.S2259*> [#uses=1]
%tmp128 = getelementptr %struct.S2259, %struct.S2259* %tmp125126, i32 0, i32 0 ; <<4 x i16>*> [#uses=1]
- %tmp129 = load <4 x i16>* %tmp128, align 8 ; <<4 x i16>> [#uses=1]
+ %tmp129 = load <4 x i16>, <4 x i16>* %tmp128, align 8 ; <<4 x i16>> [#uses=1]
store <4 x i16> %tmp129, <4 x i16>* null, align 8
ret void
bb155: ; preds = %bb5
diff --git a/llvm/test/CodeGen/X86/pr2182.ll b/llvm/test/CodeGen/X86/pr2182.ll
index 94429b265d9..0cf3acf23b3 100644
--- a/llvm/test/CodeGen/X86/pr2182.ll
+++ b/llvm/test/CodeGen/X86/pr2182.ll
@@ -15,16 +15,16 @@ define void @loop_2() nounwind {
; CHECK-NEXT: addl $3, (%{{.*}})
; CHECK-NEXT: ret
- %tmp = load volatile i32* @x, align 4 ; <i32> [#uses=1]
+ %tmp = load volatile i32, i32* @x, align 4 ; <i32> [#uses=1]
%tmp1 = add i32 %tmp, 3 ; <i32> [#uses=1]
store volatile i32 %tmp1, i32* @x, align 4
- %tmp.1 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
+ %tmp.1 = load volatile i32, i32* @x, align 4 ; <i32> [#uses=1]
%tmp1.1 = add i32 %tmp.1, 3 ; <i32> [#uses=1]
store volatile i32 %tmp1.1, i32* @x, align 4
- %tmp.2 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
+ %tmp.2 = load volatile i32, i32* @x, align 4 ; <i32> [#uses=1]
%tmp1.2 = add i32 %tmp.2, 3 ; <i32> [#uses=1]
store volatile i32 %tmp1.2, i32* @x, align 4
- %tmp.3 = load volatile i32* @x, align 4 ; <i32> [#uses=1]
+ %tmp.3 = load volatile i32, i32* @x, align 4 ; <i32> [#uses=1]
%tmp1.3 = add i32 %tmp.3, 3 ; <i32> [#uses=1]
store volatile i32 %tmp1.3, i32* @x, align 4
ret void
diff --git a/llvm/test/CodeGen/X86/pr2326.ll b/llvm/test/CodeGen/X86/pr2326.ll
index f82dcb5d678..9cf83bb30d1 100644
--- a/llvm/test/CodeGen/X86/pr2326.ll
+++ b/llvm/test/CodeGen/X86/pr2326.ll
@@ -4,12 +4,12 @@
define i32 @func_59(i32 %p_60) nounwind {
entry:
%l_108 = alloca i32 ; <i32*> [#uses=2]
- %tmp15 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp16 = load i32* %l_108, align 4 ; <i32> [#uses=1]
+ %tmp15 = load i32, i32* null, align 4 ; <i32> [#uses=1]
+ %tmp16 = load i32, i32* %l_108, align 4 ; <i32> [#uses=1]
%tmp17 = icmp eq i32 %tmp15, %tmp16 ; <i1> [#uses=1]
%tmp1718 = zext i1 %tmp17 to i8 ; <i8> [#uses=1]
- %tmp19 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp20 = load i32* %l_108, align 4 ; <i32> [#uses=1]
+ %tmp19 = load i32, i32* null, align 4 ; <i32> [#uses=1]
+ %tmp20 = load i32, i32* %l_108, align 4 ; <i32> [#uses=1]
%tmp21 = icmp ule i32 %tmp19, %tmp20 ; <i1> [#uses=1]
%tmp2122 = zext i1 %tmp21 to i8 ; <i8> [#uses=1]
%toBool23 = icmp ne i8 %tmp1718, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/pr2656.ll b/llvm/test/CodeGen/X86/pr2656.ll
index 6d9c27eccf4..94b7ba17f87 100644
--- a/llvm/test/CodeGen/X86/pr2656.ll
+++ b/llvm/test/CodeGen/X86/pr2656.ll
@@ -12,9 +12,9 @@ target triple = "i686-apple-darwin9.4.0"
define void @foo(%struct.anon* byval %p) nounwind {
entry:
%tmp = getelementptr %struct.anon, %struct.anon* %p, i32 0, i32 0 ; <float*> [#uses=1]
- %tmp1 = load float* %tmp ; <float> [#uses=1]
+ %tmp1 = load float, float* %tmp ; <float> [#uses=1]
%tmp2 = getelementptr %struct.anon, %struct.anon* %p, i32 0, i32 1 ; <float*> [#uses=1]
- %tmp3 = load float* %tmp2 ; <float> [#uses=1]
+ %tmp3 = load float, float* %tmp2 ; <float> [#uses=1]
%neg = fsub float -0.000000e+00, %tmp1 ; <float> [#uses=1]
%conv = fpext float %neg to double ; <double> [#uses=1]
%neg4 = fsub float -0.000000e+00, %tmp3 ; <float> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/pr2849.ll b/llvm/test/CodeGen/X86/pr2849.ll
index c46f660515d..c3fd101ab6d 100644
--- a/llvm/test/CodeGen/X86/pr2849.ll
+++ b/llvm/test/CodeGen/X86/pr2849.ll
@@ -13,20 +13,20 @@ target triple = "x86_64-unknown-linux-gnu"
define void @obshow() {
entry:
- %tmp = load %struct.HashEntry** @hash_table_begin, align 8
+ %tmp = load %struct.HashEntry*, %struct.HashEntry** @hash_table_begin, align 8
br i1 false, label %xlygetvalue.exit, label %xlygetvalue.exit
xlygetvalue.exit:
%storemerge.in.i = phi %struct.NODE** [ null, %entry ], [ null, %entry ]
- %storemerge.i = load %struct.NODE** %storemerge.in.i
+ %storemerge.i = load %struct.NODE*, %struct.NODE** %storemerge.in.i
%tmp1 = ptrtoint %struct.NODE** %storemerge.in.i to i64
%tmp2 = lshr i64 %tmp1, 3
%tmp3 = and i64 %tmp2, 2147483647
%tmp4 = getelementptr %struct.HashEntry, %struct.HashEntry* %tmp, i64 %tmp3, i32 0, i32 1
- %tmp7 = load i8** %tmp4, align 8
+ %tmp7 = load i8*, i8** %tmp4, align 8
%tmp8 = getelementptr %struct.NODE, %struct.NODE* %storemerge.i, i64 0, i32 2
%tmp9 = bitcast %struct.anon* %tmp8 to %struct.NODE***
- %tmp11 = load %struct.NODE*** %tmp9, align 8
+ %tmp11 = load %struct.NODE**, %struct.NODE*** %tmp9, align 8
%tmp12 = ptrtoint %struct.NODE** %tmp11 to i64
%tmp13 = lshr i64 %tmp12, 3
%tmp14 = and i64 %tmp13, 2147483647
diff --git a/llvm/test/CodeGen/X86/pr2924.ll b/llvm/test/CodeGen/X86/pr2924.ll
index b9e8dc1740d..14e9fc49a2a 100644
--- a/llvm/test/CodeGen/X86/pr2924.ll
+++ b/llvm/test/CodeGen/X86/pr2924.ll
@@ -7,18 +7,18 @@ target triple = "i686-pc-linux-gnu"
define x86_stdcallcc { i32, i8* } @_D3std6string7toupperFAaZAa({ i32, i8* } %s) {
entry_std.string.toupper:
- %tmp58 = load i32* null
+ %tmp58 = load i32, i32* null
%tmp59 = icmp eq i32 %tmp58, 0
- %r.val = load { i32, i8* }* null, align 8
+ %r.val = load { i32, i8* }, { i32, i8* }* null, align 8
%condtmp.0 = select i1 %tmp59, { i32, i8* } undef, { i32, i8* } %r.val
ret { i32, i8* } %condtmp.0
}
define { } @empty({ } %s) {
entry_std.string.toupper:
- %tmp58 = load i32* null
+ %tmp58 = load i32, i32* null
%tmp59 = icmp eq i32 %tmp58, 0
- %r.val = load { }* null, align 8
+ %r.val = load { }, { }* null, align 8
%condtmp.0 = select i1 %tmp59, { } undef, { } %r.val
ret { } %condtmp.0
}
diff --git a/llvm/test/CodeGen/X86/pr2982.ll b/llvm/test/CodeGen/X86/pr2982.ll
index 3f9a5953153..ab46005ca26 100644
--- a/llvm/test/CodeGen/X86/pr2982.ll
+++ b/llvm/test/CodeGen/X86/pr2982.ll
@@ -12,11 +12,11 @@ declare i32 @rshift_u_u(...)
define void @bar() nounwind {
entry:
- %0 = load i32* @g_279, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* @g_279, align 4 ; <i32> [#uses=1]
%1 = shl i32 %0, 1 ; <i32> [#uses=1]
%2 = and i32 %1, 2 ; <i32> [#uses=1]
- %3 = load i32* @g_265, align 4 ; <i32> [#uses=1]
- %4 = load i8* @g_3, align 1 ; <i8> [#uses=1]
+ %3 = load i32, i32* @g_265, align 4 ; <i32> [#uses=1]
+ %4 = load i8, i8* @g_3, align 1 ; <i8> [#uses=1]
%5 = sext i8 %4 to i32 ; <i32> [#uses=1]
%6 = add i32 %2, %3 ; <i32> [#uses=1]
%7 = add i32 %6, %5 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/pr3216.ll b/llvm/test/CodeGen/X86/pr3216.ll
index a4a48210d35..23dcf5693cd 100644
--- a/llvm/test/CodeGen/X86/pr3216.ll
+++ b/llvm/test/CodeGen/X86/pr3216.ll
@@ -8,7 +8,7 @@ define i32 @main() nounwind {
; CHECK: sar{{.}} $5
; CHECK: ret
- %tmp = load i8* @foo
+ %tmp = load i8, i8* @foo
%bf.lo = lshr i8 %tmp, 5
%bf.lo.cleared = and i8 %bf.lo, 7
%1 = shl i8 %bf.lo.cleared, 5
diff --git a/llvm/test/CodeGen/X86/pr3241.ll b/llvm/test/CodeGen/X86/pr3241.ll
index 2f7917b77c3..f89634d5b82 100644
--- a/llvm/test/CodeGen/X86/pr3241.ll
+++ b/llvm/test/CodeGen/X86/pr3241.ll
@@ -9,7 +9,7 @@ entry:
%t1 = call i32 @safe_add_macro_uint32_t_u_u() nounwind
%t2 = icmp sgt i32 %t1, 0
%t3 = zext i1 %t2 to i32
- %t4 = load i32* @g_620, align 4
+ %t4 = load i32, i32* @g_620, align 4
%t5 = icmp eq i32 %t3, %t4
%t6 = xor i32 %p_21, 1
%t7 = call i32 @func_55(i32 %t6) nounwind
diff --git a/llvm/test/CodeGen/X86/pr3244.ll b/llvm/test/CodeGen/X86/pr3244.ll
index 2598c2f976b..b08a22365d7 100644
--- a/llvm/test/CodeGen/X86/pr3244.ll
+++ b/llvm/test/CodeGen/X86/pr3244.ll
@@ -6,8 +6,8 @@
define i32 @func_42(i32 %p_43, i32 %p_44, i32 %p_45, i32 %p_46) nounwind {
entry:
- %0 = load i16* @g_62, align 2 ; <i16> [#uses=1]
- %1 = load i32* @g_487, align 4 ; <i32> [#uses=1]
+ %0 = load i16, i16* @g_62, align 2 ; <i16> [#uses=1]
+ %1 = load i32, i32* @g_487, align 4 ; <i32> [#uses=1]
%2 = trunc i16 %0 to i8 ; <i8> [#uses=1]
%3 = trunc i32 %1 to i8 ; <i8> [#uses=1]
%4 = tail call i32 (...)* @func_7(i64 -4455561449541442965, i32 1)
diff --git a/llvm/test/CodeGen/X86/pr3317.ll b/llvm/test/CodeGen/X86/pr3317.ll
index 3854deac3f2..cab8ae6b73f 100644
--- a/llvm/test/CodeGen/X86/pr3317.ll
+++ b/llvm/test/CodeGen/X86/pr3317.ll
@@ -21,7 +21,7 @@ declare void @jnjvmNullPointerException()
define i32 @JnJVM_java_rmi_activation_ActivationGroupID_hashCode__(%JavaObject* nocapture) nounwind {
start:
%1 = getelementptr %JavaObject, %JavaObject* %0, i64 1, i32 1 ; <%JavaCommonClass**> [#uses=1]
- %2 = load %JavaCommonClass** %1 ; <%JavaCommonClass*> [#uses=4]
+ %2 = load %JavaCommonClass*, %JavaCommonClass** %1 ; <%JavaCommonClass*> [#uses=4]
%3 = icmp eq %JavaCommonClass* %2, null ; <i1> [#uses=1]
br i1 %3, label %verifyNullExit1, label %verifyNullCont2
@@ -32,13 +32,13 @@ verifyNullExit1: ; preds = %start
verifyNullCont2: ; preds = %start
%4 = bitcast %JavaCommonClass* %2 to { %JavaObject, i16, i32, i64 }* ; <{ %JavaObject, i16, i32, i64 }*> [#uses=1]
%5 = getelementptr { %JavaObject, i16, i32, i64 }, { %JavaObject, i16, i32, i64 }* %4, i64 0, i32 2 ; <i32*> [#uses=1]
- %6 = load i32* %5 ; <i32> [#uses=1]
+ %6 = load i32, i32* %5 ; <i32> [#uses=1]
%7 = getelementptr %JavaCommonClass, %JavaCommonClass* %2, i64 0, i32 4 ; <%JavaClass***> [#uses=1]
%8 = bitcast %JavaClass*** %7 to i64* ; <i64*> [#uses=1]
- %9 = load i64* %8 ; <i64> [#uses=1]
+ %9 = load i64, i64* %8 ; <i64> [#uses=1]
%10 = trunc i64 %9 to i32 ; <i32> [#uses=1]
%11 = getelementptr %JavaCommonClass, %JavaCommonClass* %2, i64 0, i32 3 ; <i16*> [#uses=1]
- %12 = load i16* %11 ; <i16> [#uses=1]
+ %12 = load i16, i16* %11 ; <i16> [#uses=1]
%13 = sext i16 %12 to i32 ; <i32> [#uses=1]
%14 = xor i32 %10, %6 ; <i32> [#uses=1]
%15 = xor i32 %14, %13 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/pr3366.ll b/llvm/test/CodeGen/X86/pr3366.ll
index 1127b609321..b89a69ab7d4 100644
--- a/llvm/test/CodeGen/X86/pr3366.ll
+++ b/llvm/test/CodeGen/X86/pr3366.ll
@@ -3,7 +3,7 @@
define void @_ada_c34002a() nounwind {
entry:
- %0 = load i8* null, align 1
+ %0 = load i8, i8* null, align 1
%1 = sdiv i8 90, %0
%2 = icmp ne i8 %1, 3
%3 = zext i1 %2 to i8
diff --git a/llvm/test/CodeGen/X86/pr9127.ll b/llvm/test/CodeGen/X86/pr9127.ll
index ba92c77e22b..33f9ace33ff 100644
--- a/llvm/test/CodeGen/X86/pr9127.ll
+++ b/llvm/test/CodeGen/X86/pr9127.ll
@@ -3,7 +3,7 @@
define i8 @foobar(double %d, double* %x) {
entry:
- %tmp2 = load double* %x, align 8
+ %tmp2 = load double, double* %x, align 8
%cmp = fcmp oeq double %tmp2, %d
%conv3 = zext i1 %cmp to i8
ret i8 %conv3
diff --git a/llvm/test/CodeGen/X86/pre-ra-sched.ll b/llvm/test/CodeGen/X86/pre-ra-sched.ll
index e8ddce6d098..f8e196ba580 100644
--- a/llvm/test/CodeGen/X86/pre-ra-sched.ll
+++ b/llvm/test/CodeGen/X86/pre-ra-sched.ll
@@ -15,32 +15,32 @@
; CHECK: *** Final schedule
define i32 @test(i8* %pin) #0 {
%g0 = getelementptr inbounds i8, i8* %pin, i64 0
- %l0 = load i8* %g0, align 1
+ %l0 = load i8, i8* %g0, align 1
%g1a = getelementptr inbounds i8, i8* %pin, i64 1
- %l1a = load i8* %g1a, align 1
+ %l1a = load i8, i8* %g1a, align 1
%z1a = zext i8 %l1a to i32
%g1b = getelementptr inbounds i8, i8* %pin, i64 2
- %l1b = load i8* %g1b, align 1
+ %l1b = load i8, i8* %g1b, align 1
%z1b = zext i8 %l1b to i32
%c1 = icmp ne i8 %l0, 0
%x1 = xor i32 %z1a, %z1b
%s1 = select i1 %c1, i32 %z1a, i32 %x1
%g2a = getelementptr inbounds i8, i8* %pin, i64 3
- %l2a = load i8* %g2a, align 1
+ %l2a = load i8, i8* %g2a, align 1
%z2a = zext i8 %l2a to i32
%g2b = getelementptr inbounds i8, i8* %pin, i64 4
- %l2b = load i8* %g2b, align 1
+ %l2b = load i8, i8* %g2b, align 1
%z2b = zext i8 %l2b to i32
%x2 = xor i32 %z2a, %z2b
%s2 = select i1 %c1, i32 %z2a, i32 %x2
%g3a = getelementptr inbounds i8, i8* %pin, i64 5
- %l3a = load i8* %g3a, align 1
+ %l3a = load i8, i8* %g3a, align 1
%z3a = zext i8 %l3a to i32
%g3b = getelementptr inbounds i8, i8* %pin, i64 6
- %l3b = load i8* %g3b, align 1
+ %l3b = load i8, i8* %g3b, align 1
%z3b = zext i8 %l3b to i32
%x3 = xor i32 %z3a, %z3b
%s3 = select i1 %c1, i32 %z3a, i32 %x3
diff --git a/llvm/test/CodeGen/X86/private-2.ll b/llvm/test/CodeGen/X86/private-2.ll
index c9d6793f443..21b6b3aff2d 100644
--- a/llvm/test/CodeGen/X86/private-2.ll
+++ b/llvm/test/CodeGen/X86/private-2.ll
@@ -10,6 +10,6 @@
define internal i32* @"\01-[Example1 whatever]"() nounwind optsize ssp {
entry:
%0 = getelementptr %struct.A, %struct.A* @"_ZZ20-[Example1 whatever]E4C.91", i64 0, i32 0 ; <i32**> [#uses=1]
- %1 = load i32** %0, align 8 ; <i32*> [#uses=1]
+ %1 = load i32*, i32** %0, align 8 ; <i32*> [#uses=1]
ret i32* %1
}
diff --git a/llvm/test/CodeGen/X86/private.ll b/llvm/test/CodeGen/X86/private.ll
index c02d19319a4..4b936d2323e 100644
--- a/llvm/test/CodeGen/X86/private.ll
+++ b/llvm/test/CodeGen/X86/private.ll
@@ -10,7 +10,7 @@ define private void @foo() {
define i32 @bar() {
call void @foo()
- %1 = load i32* @baz, align 4
+ %1 = load i32, i32* @baz, align 4
ret i32 %1
; CHECK-LABEL: bar:
diff --git a/llvm/test/CodeGen/X86/promote-assert-zext.ll b/llvm/test/CodeGen/X86/promote-assert-zext.ll
index b582806c96a..50674831205 100644
--- a/llvm/test/CodeGen/X86/promote-assert-zext.ll
+++ b/llvm/test/CodeGen/X86/promote-assert-zext.ll
@@ -11,7 +11,7 @@ target triple = "x86_64-apple-darwin11"
define i64 @_ZL5matchPKtPKhiR9MatchData(i8* %tmp13) nounwind {
entry:
- %tmp14 = load i8* %tmp13, align 1
+ %tmp14 = load i8, i8* %tmp13, align 1
%tmp17 = zext i8 %tmp14 to i16
br label %bb341
diff --git a/llvm/test/CodeGen/X86/promote-trunc.ll b/llvm/test/CodeGen/X86/promote-trunc.ll
index 40a58b07392..a20557a1fef 100644
--- a/llvm/test/CodeGen/X86/promote-trunc.ll
+++ b/llvm/test/CodeGen/X86/promote-trunc.ll
@@ -1,9 +1,9 @@
; RUN: llc < %s -march=x86-64
define<4 x i8> @func_8_64() {
- %F = load <4 x i64>* undef
+ %F = load <4 x i64>, <4 x i64>* undef
%G = trunc <4 x i64> %F to <4 x i8>
- %H = load <4 x i64>* undef
+ %H = load <4 x i64>, <4 x i64>* undef
%Y = trunc <4 x i64> %H to <4 x i8>
%T = add <4 x i8> %Y, %G
ret <4 x i8> %T
diff --git a/llvm/test/CodeGen/X86/promote.ll b/llvm/test/CodeGen/X86/promote.ll
index 283f48cd37b..38cdc14b380 100644
--- a/llvm/test/CodeGen/X86/promote.ll
+++ b/llvm/test/CodeGen/X86/promote.ll
@@ -9,7 +9,7 @@ define i32 @mul_f(<4 x i8>* %A) {
entry:
; CHECK: pmul
; CHECK-NOT: mulb
- %0 = load <4 x i8>* %A, align 8
+ %0 = load <4 x i8>, <4 x i8>* %A, align 8
%mul = mul <4 x i8> %0, %0
store <4 x i8> %mul, <4 x i8>* undef
ret i32 0
@@ -23,7 +23,7 @@ entry:
; CHECK: pmovzxbd
; CHECK: paddd
; CHECK: pshufb
- %0 = load <4 x i8>* %A, align 8
+ %0 = load <4 x i8>, <4 x i8>* %A, align 8
%add = add <4 x i8> %0, %0
store <4 x i8> %add, <4 x i8>* undef
ret i32 0
diff --git a/llvm/test/CodeGen/X86/pshufb-mask-comments.ll b/llvm/test/CodeGen/X86/pshufb-mask-comments.ll
index ca5a02ce8d3..105a035be59 100644
--- a/llvm/test/CodeGen/X86/pshufb-mask-comments.ll
+++ b/llvm/test/CodeGen/X86/pshufb-mask-comments.ll
@@ -41,10 +41,10 @@ define <16 x i8> @test5() {
; CHECK-LABEL: test5
; CHECK: pshufb {{.*}}
store <2 x i64> <i64 1, i64 0>, <2 x i64>* undef, align 16
- %l = load <2 x i64>* undef, align 16
+ %l = load <2 x i64>, <2 x i64>* undef, align 16
%shuffle = shufflevector <2 x i64> %l, <2 x i64> undef, <2 x i32> zeroinitializer
store <2 x i64> %shuffle, <2 x i64>* undef, align 16
- %1 = load <16 x i8>* undef, align 16
+ %1 = load <16 x i8>, <16 x i8>* undef, align 16
%2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> undef, <16 x i8> %1)
ret <16 x i8> %2
}
diff --git a/llvm/test/CodeGen/X86/psubus.ll b/llvm/test/CodeGen/X86/psubus.ll
index bda164bfc82..4b83b55997e 100644
--- a/llvm/test/CodeGen/X86/psubus.ll
+++ b/llvm/test/CodeGen/X86/psubus.ll
@@ -9,7 +9,7 @@ define void @test1(i16* nocapture %head) nounwind {
vector.ph:
%0 = getelementptr inbounds i16, i16* %head, i64 0
%1 = bitcast i16* %0 to <8 x i16>*
- %2 = load <8 x i16>* %1, align 2
+ %2 = load <8 x i16>, <8 x i16>* %1, align 2
%3 = icmp slt <8 x i16> %2, zeroinitializer
%4 = xor <8 x i16> %2, <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
%5 = select <8 x i1> %3, <8 x i16> %4, <8 x i16> zeroinitializer
@@ -42,7 +42,7 @@ define void @test2(i16* nocapture %head) nounwind {
vector.ph:
%0 = getelementptr inbounds i16, i16* %head, i64 0
%1 = bitcast i16* %0 to <8 x i16>*
- %2 = load <8 x i16>* %1, align 2
+ %2 = load <8 x i16>, <8 x i16>* %1, align 2
%3 = icmp ugt <8 x i16> %2, <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>
%4 = add <8 x i16> %2, <i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767>
%5 = select <8 x i1> %3, <8 x i16> %4, <8 x i16> zeroinitializer
@@ -77,7 +77,7 @@ vector.ph:
%broadcast15 = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> zeroinitializer
%1 = getelementptr inbounds i16, i16* %head, i64 0
%2 = bitcast i16* %1 to <8 x i16>*
- %3 = load <8 x i16>* %2, align 2
+ %3 = load <8 x i16>, <8 x i16>* %2, align 2
%4 = icmp ult <8 x i16> %3, %broadcast15
%5 = sub <8 x i16> %3, %broadcast15
%6 = select <8 x i1> %4, <8 x i16> zeroinitializer, <8 x i16> %5
@@ -116,7 +116,7 @@ define void @test4(i8* nocapture %head) nounwind {
vector.ph:
%0 = getelementptr inbounds i8, i8* %head, i64 0
%1 = bitcast i8* %0 to <16 x i8>*
- %2 = load <16 x i8>* %1, align 1
+ %2 = load <16 x i8>, <16 x i8>* %1, align 1
%3 = icmp slt <16 x i8> %2, zeroinitializer
%4 = xor <16 x i8> %2, <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>
%5 = select <16 x i1> %3, <16 x i8> %4, <16 x i8> zeroinitializer
@@ -149,7 +149,7 @@ define void @test5(i8* nocapture %head) nounwind {
vector.ph:
%0 = getelementptr inbounds i8, i8* %head, i64 0
%1 = bitcast i8* %0 to <16 x i8>*
- %2 = load <16 x i8>* %1, align 1
+ %2 = load <16 x i8>, <16 x i8>* %1, align 1
%3 = icmp ugt <16 x i8> %2, <i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126>
%4 = add <16 x i8> %2, <i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127>
%5 = select <16 x i1> %3, <16 x i8> %4, <16 x i8> zeroinitializer
@@ -184,7 +184,7 @@ vector.ph:
%broadcast15 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
%1 = getelementptr inbounds i8, i8* %head, i64 0
%2 = bitcast i8* %1 to <16 x i8>*
- %3 = load <16 x i8>* %2, align 1
+ %3 = load <16 x i8>, <16 x i8>* %2, align 1
%4 = icmp ult <16 x i8> %3, %broadcast15
%5 = sub <16 x i8> %3, %broadcast15
%6 = select <16 x i1> %4, <16 x i8> zeroinitializer, <16 x i8> %5
@@ -225,7 +225,7 @@ define void @test7(i16* nocapture %head) nounwind {
vector.ph:
%0 = getelementptr inbounds i16, i16* %head, i64 0
%1 = bitcast i16* %0 to <16 x i16>*
- %2 = load <16 x i16>* %1, align 2
+ %2 = load <16 x i16>, <16 x i16>* %1, align 2
%3 = icmp slt <16 x i16> %2, zeroinitializer
%4 = xor <16 x i16> %2, <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768>
%5 = select <16 x i1> %3, <16 x i16> %4, <16 x i16> zeroinitializer
@@ -245,7 +245,7 @@ define void @test8(i16* nocapture %head) nounwind {
vector.ph:
%0 = getelementptr inbounds i16, i16* %head, i64 0
%1 = bitcast i16* %0 to <16 x i16>*
- %2 = load <16 x i16>* %1, align 2
+ %2 = load <16 x i16>, <16 x i16>* %1, align 2
%3 = icmp ugt <16 x i16> %2, <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>
%4 = add <16 x i16> %2, <i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767, i16 -32767>
%5 = select <16 x i1> %3, <16 x i16> %4, <16 x i16> zeroinitializer
@@ -267,7 +267,7 @@ vector.ph:
%broadcast15 = shufflevector <16 x i16> %0, <16 x i16> undef, <16 x i32> zeroinitializer
%1 = getelementptr inbounds i16, i16* %head, i64 0
%2 = bitcast i16* %1 to <16 x i16>*
- %3 = load <16 x i16>* %2, align 2
+ %3 = load <16 x i16>, <16 x i16>* %2, align 2
%4 = icmp ult <16 x i16> %3, %broadcast15
%5 = sub <16 x i16> %3, %broadcast15
%6 = select <16 x i1> %4, <16 x i16> zeroinitializer, <16 x i16> %5
@@ -289,7 +289,7 @@ define void @test10(i8* nocapture %head) nounwind {
vector.ph:
%0 = getelementptr inbounds i8, i8* %head, i64 0
%1 = bitcast i8* %0 to <32 x i8>*
- %2 = load <32 x i8>* %1, align 1
+ %2 = load <32 x i8>, <32 x i8>* %1, align 1
%3 = icmp slt <32 x i8> %2, zeroinitializer
%4 = xor <32 x i8> %2, <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>
%5 = select <32 x i1> %3, <32 x i8> %4, <32 x i8> zeroinitializer
@@ -309,7 +309,7 @@ define void @test11(i8* nocapture %head) nounwind {
vector.ph:
%0 = getelementptr inbounds i8, i8* %head, i64 0
%1 = bitcast i8* %0 to <32 x i8>*
- %2 = load <32 x i8>* %1, align 1
+ %2 = load <32 x i8>, <32 x i8>* %1, align 1
%3 = icmp ugt <32 x i8> %2, <i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126>
%4 = add <32 x i8> %2, <i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127, i8 -127>
%5 = select <32 x i1> %3, <32 x i8> %4, <32 x i8> zeroinitializer
@@ -331,7 +331,7 @@ vector.ph:
%broadcast15 = shufflevector <32 x i8> %0, <32 x i8> undef, <32 x i32> zeroinitializer
%1 = getelementptr inbounds i8, i8* %head, i64 0
%2 = bitcast i8* %1 to <32 x i8>*
- %3 = load <32 x i8>* %2, align 1
+ %3 = load <32 x i8>, <32 x i8>* %2, align 1
%4 = icmp ult <32 x i8> %3, %broadcast15
%5 = sub <32 x i8> %3, %broadcast15
%6 = select <32 x i1> %4, <32 x i8> zeroinitializer, <32 x i8> %5
diff --git a/llvm/test/CodeGen/X86/ragreedy-bug.ll b/llvm/test/CodeGen/X86/ragreedy-bug.ll
index 938babfd7e5..e8426317f13 100644
--- a/llvm/test/CodeGen/X86/ragreedy-bug.ll
+++ b/llvm/test/CodeGen/X86/ragreedy-bug.ll
@@ -30,31 +30,31 @@ declare i32 @__maskrune(i32, i64) #7
define fastcc i32 @prune_match(%struct.Connector_struct* nocapture readonly %a, %struct.Connector_struct* nocapture readonly %b) #9 {
entry:
%label56 = bitcast %struct.Connector_struct* %a to i16*
- %0 = load i16* %label56, align 2
+ %0 = load i16, i16* %label56, align 2
%label157 = bitcast %struct.Connector_struct* %b to i16*
- %1 = load i16* %label157, align 2
+ %1 = load i16, i16* %label157, align 2
%cmp = icmp eq i16 %0, %1
br i1 %cmp, label %if.end, label %return, !prof !988
if.end:
%priority = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %a, i64 0, i32 2
- %2 = load i8* %priority, align 1
+ %2 = load i8, i8* %priority, align 1
%priority5 = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %b, i64 0, i32 2
- %3 = load i8* %priority5, align 1
+ %3 = load i8, i8* %priority5, align 1
%string = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %a, i64 0, i32 5
- %4 = load i8** %string, align 8
+ %4 = load i8*, i8** %string, align 8
%string7 = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %b, i64 0, i32 5
- %5 = load i8** %string7, align 8
+ %5 = load i8*, i8** %string7, align 8
br label %while.cond
while.cond:
%lsr.iv27 = phi i64 [ %lsr.iv.next28, %if.end17 ], [ 0, %if.end ]
%scevgep55 = getelementptr i8, i8* %4, i64 %lsr.iv27
- %6 = load i8* %scevgep55, align 1
+ %6 = load i8, i8* %scevgep55, align 1
%idxprom.i.i = sext i8 %6 to i64
%isascii.i.i224 = icmp sgt i8 %6, -1
br i1 %isascii.i.i224, label %cond.true.i.i, label %cond.false.i.i, !prof !181
cond.true.i.i:
%arrayidx.i.i = getelementptr inbounds %struct._RuneLocale, %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i
- %7 = load i32* %arrayidx.i.i, align 4
+ %7 = load i32, i32* %arrayidx.i.i, align 4
%and.i.i = and i32 %7, 32768
br label %isupper.exit
cond.false.i.i:
@@ -70,13 +70,13 @@ lor.rhs:
%sunkaddr = ptrtoint i8* %5 to i64
%sunkaddr58 = add i64 %sunkaddr, %lsr.iv27
%sunkaddr59 = inttoptr i64 %sunkaddr58 to i8*
- %9 = load i8* %sunkaddr59, align 1
+ %9 = load i8, i8* %sunkaddr59, align 1
%idxprom.i.i214 = sext i8 %9 to i64
%isascii.i.i213225 = icmp sgt i8 %9, -1
br i1 %isascii.i.i213225, label %cond.true.i.i217, label %cond.false.i.i219, !prof !181
cond.true.i.i217:
%arrayidx.i.i215 = getelementptr inbounds %struct._RuneLocale, %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i214
- %10 = load i32* %arrayidx.i.i215, align 4
+ %10 = load i32, i32* %arrayidx.i.i215, align 4
%and.i.i216 = and i32 %10, 32768
br label %isupper.exit223
cond.false.i.i219:
@@ -92,11 +92,11 @@ while.body:
%sunkaddr60 = ptrtoint i8* %4 to i64
%sunkaddr61 = add i64 %sunkaddr60, %lsr.iv27
%sunkaddr62 = inttoptr i64 %sunkaddr61 to i8*
- %12 = load i8* %sunkaddr62, align 1
+ %12 = load i8, i8* %sunkaddr62, align 1
%sunkaddr63 = ptrtoint i8* %5 to i64
%sunkaddr64 = add i64 %sunkaddr63, %lsr.iv27
%sunkaddr65 = inttoptr i64 %sunkaddr64 to i8*
- %13 = load i8* %sunkaddr65, align 1
+ %13 = load i8, i8* %sunkaddr65, align 1
%cmp14 = icmp eq i8 %12, %13
br i1 %cmp14, label %if.end17, label %return, !prof !991
if.end17:
@@ -110,13 +110,13 @@ if.then23:
%sunkaddr66 = ptrtoint %struct.Connector_struct* %a to i64
%sunkaddr67 = add i64 %sunkaddr66, 16
%sunkaddr68 = inttoptr i64 %sunkaddr67 to i8**
- %16 = load i8** %sunkaddr68, align 8
- %17 = load i8* %16, align 1
+ %16 = load i8*, i8** %sunkaddr68, align 8
+ %17 = load i8, i8* %16, align 1
%cmp26 = icmp eq i8 %17, 83
%sunkaddr69 = ptrtoint i8* %4 to i64
%sunkaddr70 = add i64 %sunkaddr69, %lsr.iv27
%sunkaddr71 = inttoptr i64 %sunkaddr70 to i8*
- %18 = load i8* %sunkaddr71, align 1
+ %18 = load i8, i8* %sunkaddr71, align 1
br i1 %cmp26, label %land.lhs.true28, label %while.cond59.preheader, !prof !993
land.lhs.true28:
switch i8 %18, label %land.rhs.preheader [
@@ -127,7 +127,7 @@ land.lhs.true35:
%sunkaddr72 = ptrtoint i8* %5 to i64
%sunkaddr73 = add i64 %sunkaddr72, %lsr.iv27
%sunkaddr74 = inttoptr i64 %sunkaddr73 to i8*
- %19 = load i8* %sunkaddr74, align 1
+ %19 = load i8, i8* %sunkaddr74, align 1
switch i8 %19, label %land.rhs.preheader [
i8 112, label %land.lhs.true43
], !prof !995
@@ -152,7 +152,7 @@ land.lhs.true52:
%sunkaddr76 = add i64 %sunkaddr75, %lsr.iv27
%sunkaddr77 = add i64 %sunkaddr76, -1
%sunkaddr78 = inttoptr i64 %sunkaddr77 to i8*
- %24 = load i8* %sunkaddr78, align 1
+ %24 = load i8, i8* %sunkaddr78, align 1
%cmp55 = icmp eq i8 %24, 73
%cmp61233 = icmp eq i8 %18, 0
%or.cond265 = or i1 %cmp55, %cmp61233
@@ -168,7 +168,7 @@ land.rhs:
%lsr.iv = phi i64 [ 0, %land.rhs.preheader ], [ %lsr.iv.next, %if.then83 ]
%25 = phi i8 [ %27, %if.then83 ], [ %18, %land.rhs.preheader ]
%scevgep34 = getelementptr i8, i8* %scevgep33, i64 %lsr.iv
- %26 = load i8* %scevgep34, align 1
+ %26 = load i8, i8* %scevgep34, align 1
%cmp64 = icmp eq i8 %26, 0
br i1 %cmp64, label %return, label %while.body66, !prof !1000
while.body66:
@@ -184,7 +184,7 @@ lor.lhs.false74:
if.then83:
%scevgep44 = getelementptr i8, i8* %scevgep43, i64 %lsr.iv
%scevgep45 = getelementptr i8, i8* %scevgep44, i64 1
- %27 = load i8* %scevgep45, align 1
+ %27 = load i8, i8* %scevgep45, align 1
%cmp61 = icmp eq i8 %27, 0
%lsr.iv.next = add i64 %lsr.iv, 1
br i1 %cmp61, label %return, label %land.rhs, !prof !999
@@ -197,7 +197,7 @@ while.cond95.preheader:
%sunkaddr79 = ptrtoint i8* %4 to i64
%sunkaddr80 = add i64 %sunkaddr79, %lsr.iv27
%sunkaddr81 = inttoptr i64 %sunkaddr80 to i8*
- %28 = load i8* %sunkaddr81, align 1
+ %28 = load i8, i8* %sunkaddr81, align 1
%cmp97238 = icmp eq i8 %28, 0
br i1 %cmp97238, label %return, label %land.rhs99.preheader, !prof !1004
land.rhs99.preheader:
@@ -208,7 +208,7 @@ land.rhs99:
%lsr.iv17 = phi i64 [ 0, %land.rhs99.preheader ], [ %lsr.iv.next18, %if.then117 ]
%29 = phi i8 [ %31, %if.then117 ], [ %28, %land.rhs99.preheader ]
%scevgep32 = getelementptr i8, i8* %scevgep31, i64 %lsr.iv17
- %30 = load i8* %scevgep32, align 1
+ %30 = load i8, i8* %scevgep32, align 1
%cmp101 = icmp eq i8 %30, 0
br i1 %cmp101, label %return, label %while.body104, !prof !1005
while.body104:
@@ -221,7 +221,7 @@ while.body104:
if.then117:
%scevgep41 = getelementptr i8, i8* %scevgep40, i64 %lsr.iv17
%scevgep42 = getelementptr i8, i8* %scevgep41, i64 1
- %31 = load i8* %scevgep42, align 1
+ %31 = load i8, i8* %scevgep42, align 1
%cmp97 = icmp eq i8 %31, 0
%lsr.iv.next18 = add i64 %lsr.iv17, 1
br i1 %cmp97, label %return, label %land.rhs99, !prof !1004
@@ -234,7 +234,7 @@ while.cond130.preheader:
%sunkaddr82 = ptrtoint i8* %4 to i64
%sunkaddr83 = add i64 %sunkaddr82, %lsr.iv27
%sunkaddr84 = inttoptr i64 %sunkaddr83 to i8*
- %32 = load i8* %sunkaddr84, align 1
+ %32 = load i8, i8* %sunkaddr84, align 1
%cmp132244 = icmp eq i8 %32, 0
br i1 %cmp132244, label %return, label %land.rhs134.preheader, !prof !1008
land.rhs134.preheader:
@@ -245,7 +245,7 @@ land.rhs134:
%lsr.iv22 = phi i64 [ 0, %land.rhs134.preheader ], [ %lsr.iv.next23, %if.then152 ]
%33 = phi i8 [ %35, %if.then152 ], [ %32, %land.rhs134.preheader ]
%scevgep30 = getelementptr i8, i8* %scevgep29, i64 %lsr.iv22
- %34 = load i8* %scevgep30, align 1
+ %34 = load i8, i8* %scevgep30, align 1
%cmp136 = icmp eq i8 %34, 0
br i1 %cmp136, label %return, label %while.body139, !prof !1009
while.body139:
@@ -258,7 +258,7 @@ while.body139:
if.then152:
%scevgep38 = getelementptr i8, i8* %scevgep37, i64 %lsr.iv22
%scevgep39 = getelementptr i8, i8* %scevgep38, i64 1
- %35 = load i8* %scevgep39, align 1
+ %35 = load i8, i8* %scevgep39, align 1
%cmp132 = icmp eq i8 %35, 0
%lsr.iv.next23 = add i64 %lsr.iv22, 1
br i1 %cmp132, label %return, label %land.rhs134, !prof !1008
diff --git a/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll b/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
index 454680f134b..68ce452f520 100644
--- a/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
+++ b/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
@@ -340,7 +340,7 @@ while.cond1683.preheader:
while.body1679:
%oldc.43406 = phi i32 [ %inc, %syEchoch.exit3070 ], [ %oldc.1.lcssa, %for.body1664.lr.ph ]
- %4 = load %struct.TMP.2** %echo.i3101, align 8, !tbaa !6
+ %4 = load %struct.TMP.2*, %struct.TMP.2** %echo.i3101, align 8, !tbaa !6
%call.i3062 = call i32 @fileno(%struct.TMP.2* %4)
br i1 undef, label %if.then.i3069, label %syEchoch.exit3070
@@ -362,7 +362,7 @@ while.end1693:
for.body1723:
%q.303203 = phi i8* [ getelementptr inbounds ([8192 x i8]* @syHistory, i64 0, i64 8189), %if.then1477 ], [ %incdec.ptr1730, %for.body1723 ]
%add.ptr1728 = getelementptr i8, i8* %q.303203, i64 %idx.neg1727
- %5 = load i8* %add.ptr1728, align 1, !tbaa !5
+ %5 = load i8, i8* %add.ptr1728, align 1, !tbaa !5
%incdec.ptr1730 = getelementptr i8, i8* %q.303203, i64 -1
br label %for.body1723
diff --git a/llvm/test/CodeGen/X86/ragreedy-last-chance-recoloring.ll b/llvm/test/CodeGen/X86/ragreedy-last-chance-recoloring.ll
index 430dc984089..d8085b3e905 100644
--- a/llvm/test/CodeGen/X86/ragreedy-last-chance-recoloring.ll
+++ b/llvm/test/CodeGen/X86/ragreedy-last-chance-recoloring.ll
@@ -37,26 +37,26 @@ bb85: ; preds = %bb222, %bb85, %bb
bb206: ; preds = %bb
%tmp = getelementptr [499 x i32], [499 x i32]* @fp_dh_36985b17790d59a27994eaab5dcb00ee, i32 0, i32 undef
- %tmp207 = load i32* %tmp
+ %tmp207 = load i32, i32* %tmp
%tmp208 = add i32 %tmp207, 1
%tmp209 = inttoptr i32 %tmp208 to i8*
indirectbr i8* %tmp209, [label %bb213]
bb213: ; preds = %bb206
- %tmp214 = load i32* @fp_dh_18716afa4a5354de0a302c8edb3b0ee1, align 4
- %tmp215 = load i8** @fp_dh_20a33cdeefab8f4c8887e82766cb9dcb, align 4
+ %tmp214 = load i32, i32* @fp_dh_18716afa4a5354de0a302c8edb3b0ee1, align 4
+ %tmp215 = load i8*, i8** @fp_dh_20a33cdeefab8f4c8887e82766cb9dcb, align 4
%tmp216 = urem i32 -717428541, %tmp214
%tmp217 = getelementptr i8, i8* %tmp215, i32 %tmp216
%tmp218 = bitcast i8* %tmp217 to i32*
- %tmp219 = load i32* %tmp218, align 4
+ %tmp219 = load i32, i32* %tmp218, align 4
store i32 %tmp219, i32* undef, align 4
%tmp220 = select i1 false, i32 359373646, i32 1677237955
%tmp221 = add i32 %tmp220, 0
indirectbr i8* undef, [label %bb432, label %bb222]
bb222: ; preds = %bb213
- %tmp224 = load i32* undef, align 4
- %tmp225 = load i32* undef, align 4
+ %tmp224 = load i32, i32* undef, align 4
+ %tmp225 = load i32, i32* undef, align 4
%tmp226 = xor i32 %tmp225, %tmp224
%tmp227 = shl i32 %tmp226, 1
%tmp228 = and i32 %tmp227, -2048880334
@@ -66,12 +66,12 @@ bb222: ; preds = %bb213
%tmp232 = mul i32 %tmp231, 1603744721
%tmp233 = urem i32 %tmp232, 259
%tmp234 = getelementptr [259 x i8], [259 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 2039075) to [259 x i8]*), i32 0, i32 %tmp233
- %tmp235 = load i8* %tmp234, align 1
+ %tmp235 = load i8, i8* %tmp234, align 1
%tmp236 = add i32 %tmp233, 2
%tmp237 = getelementptr [264 x i8], [264 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 3388166) to [264 x i8]*), i32 0, i32 %tmp236
- %tmp238 = load i8* %tmp237, align 1
+ %tmp238 = load i8, i8* %tmp237, align 1
%tmp239 = getelementptr [265 x i8], [265 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 1325165) to [265 x i8]*), i32 0, i32 0
- %tmp240 = load i8* %tmp239, align 1
+ %tmp240 = load i8, i8* %tmp239, align 1
%tmp241 = add i32 %tmp233, 6
%tmp242 = trunc i32 %tmp241 to i8
%tmp243 = mul i8 %tmp242, -3
@@ -80,7 +80,7 @@ bb222: ; preds = %bb213
%tmp246 = and i8 %tmp245, 6
%tmp247 = sub i8 0, %tmp246
%tmp248 = add i8 %tmp244, %tmp247
- %tmp249 = load i8* undef, align 1
+ %tmp249 = load i8, i8* undef, align 1
%tmp250 = xor i8 %tmp235, 17
%tmp251 = xor i8 %tmp250, %tmp238
%tmp252 = xor i8 %tmp251, %tmp240
@@ -88,13 +88,13 @@ bb222: ; preds = %bb213
%tmp254 = xor i8 %tmp253, %tmp248
%tmp255 = zext i8 %tmp254 to i16
%tmp256 = shl nuw i16 %tmp255, 8
- %tmp257 = load i8* null, align 1
- %tmp258 = load i32* @fp_dh_18716afa4a5354de0a302c8edb3b0ee1, align 4
- %tmp259 = load i8** @fp_dh_20a33cdeefab8f4c8887e82766cb9dcb, align 4
+ %tmp257 = load i8, i8* null, align 1
+ %tmp258 = load i32, i32* @fp_dh_18716afa4a5354de0a302c8edb3b0ee1, align 4
+ %tmp259 = load i8*, i8** @fp_dh_20a33cdeefab8f4c8887e82766cb9dcb, align 4
%tmp260 = urem i32 -717428541, %tmp258
%tmp261 = getelementptr i8, i8* %tmp259, i32 %tmp260
%tmp262 = bitcast i8* %tmp261 to i32*
- %tmp263 = load i32* %tmp262, align 4
+ %tmp263 = load i32, i32* %tmp262, align 4
%tmp264 = xor i32 %tmp263, 0
%tmp265 = shl i32 %tmp264, 1
%tmp266 = and i32 %tmp265, -1312119832
@@ -105,7 +105,7 @@ bb222: ; preds = %bb213
%tmp271 = urem i32 %tmp270, 259
%tmp274 = add i32 %tmp271, 3
%tmp275 = getelementptr [265 x i8], [265 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 1325165) to [265 x i8]*), i32 0, i32 %tmp274
- %tmp276 = load i8* %tmp275, align 1
+ %tmp276 = load i8, i8* %tmp275, align 1
%tmp277 = add i32 %tmp271, 6
%tmp278 = trunc i32 %tmp277 to i8
%tmp279 = mul i8 %tmp278, -3
@@ -162,7 +162,7 @@ bb222: ; preds = %bb213
%tmp335 = zext i1 %tmp333 to i32
%tmp336 = add i32 %tmp334, %tmp335
%tmp337 = getelementptr [499 x i32], [499 x i32]* @fp_dh_36985b17790d59a27994eaab5dcb00ee, i32 0, i32 %tmp336
- %tmp338 = load i32* %tmp337
+ %tmp338 = load i32, i32* %tmp337
%tmp339 = add i32 %tmp338, 1
%tmp340 = inttoptr i32 %tmp339 to i8*
indirectbr i8* %tmp340, [label %bb85, label %bb439]
@@ -171,7 +171,7 @@ bb432: ; preds = %bb432, %bb213
%tmp433 = phi i32 [ %tmp221, %bb213 ], [ %tmp433, %bb432 ]
%tmp434 = add i32 %tmp433, 1022523279
%tmp435 = getelementptr [499 x i32], [499 x i32]* @fp_dh_36985b17790d59a27994eaab5dcb00ee, i32 0, i32 %tmp434
- %tmp436 = load i32* %tmp435
+ %tmp436 = load i32, i32* %tmp435
%tmp437 = add i32 %tmp436, 1
%tmp438 = inttoptr i32 %tmp437 to i8*
indirectbr i8* %tmp438, [label %bb432]
diff --git a/llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll b/llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll
index d2e68882b8c..2a81e8246c4 100644
--- a/llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll
+++ b/llvm/test/CodeGen/X86/rd-mod-wr-eflags.ll
@@ -8,7 +8,7 @@ entry:
; CHECK: decq (%{{rdi|rcx}})
; CHECK-NEXT: je
%refcnt = getelementptr inbounds %struct.obj, %struct.obj* %o, i64 0, i32 0
- %0 = load i64* %refcnt, align 8
+ %0 = load i64, i64* %refcnt, align 8
%dec = add i64 %0, -1
store i64 %dec, i64* %refcnt, align 8
%tobool = icmp eq i64 %dec, 0
@@ -33,7 +33,7 @@ define i32 @test() nounwind uwtable ssp {
entry:
; CHECK: decq
; CHECK-NOT: decq
-%0 = load i64* @c, align 8
+%0 = load i64, i64* @c, align 8
%dec.i = add nsw i64 %0, -1
store i64 %dec.i, i64* @c, align 8
%tobool.i = icmp ne i64 %dec.i, 0
@@ -47,7 +47,7 @@ ret i32 0
define i32 @test2() nounwind uwtable ssp {
entry:
; CHECK-NOT: decq ({{.*}})
-%0 = load i64* @c, align 8
+%0 = load i64, i64* @c, align 8
%dec.i = add nsw i64 %0, -1
store i64 %dec.i, i64* @c, align 8
%tobool.i = icmp ne i64 %0, 0
@@ -71,7 +71,7 @@ define void @example_dec(%struct.obj2* %o) nounwind uwtable ssp {
entry:
%s64 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 0
; CHECK-NOT: load
- %0 = load i64* %s64, align 8
+ %0 = load i64, i64* %s64, align 8
; CHECK: decq ({{.*}})
%dec = add i64 %0, -1
store i64 %dec, i64* %s64, align 8
@@ -82,7 +82,7 @@ entry:
if.end:
%s32 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 1
; CHECK-NOT: load
- %1 = load i32* %s32, align 4
+ %1 = load i32, i32* %s32, align 4
; CHECK: decl {{[0-9][0-9]*}}({{.*}})
%dec1 = add i32 %1, -1
store i32 %dec1, i32* %s32, align 4
@@ -93,7 +93,7 @@ if.end:
if.end1:
%s16 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 2
; CHECK-NOT: load
- %2 = load i16* %s16, align 2
+ %2 = load i16, i16* %s16, align 2
; CHECK: decw {{[0-9][0-9]*}}({{.*}})
%dec2 = add i16 %2, -1
store i16 %dec2, i16* %s16, align 2
@@ -104,7 +104,7 @@ if.end1:
if.end2:
%s8 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 3
; CHECK-NOT: load
- %3 = load i8* %s8
+ %3 = load i8, i8* %s8
; CHECK: decb {{[0-9][0-9]*}}({{.*}})
%dec3 = add i8 %3, -1
store i8 %dec3, i8* %s8
@@ -125,7 +125,7 @@ define void @example_inc(%struct.obj2* %o) nounwind uwtable ssp {
entry:
%s64 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 0
; CHECK-NOT: load
- %0 = load i64* %s64, align 8
+ %0 = load i64, i64* %s64, align 8
; CHECK: incq ({{.*}})
%inc = add i64 %0, 1
store i64 %inc, i64* %s64, align 8
@@ -136,7 +136,7 @@ entry:
if.end:
%s32 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 1
; CHECK-NOT: load
- %1 = load i32* %s32, align 4
+ %1 = load i32, i32* %s32, align 4
; CHECK: incl {{[0-9][0-9]*}}({{.*}})
%inc1 = add i32 %1, 1
store i32 %inc1, i32* %s32, align 4
@@ -147,7 +147,7 @@ if.end:
if.end1:
%s16 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 2
; CHECK-NOT: load
- %2 = load i16* %s16, align 2
+ %2 = load i16, i16* %s16, align 2
; CHECK: incw {{[0-9][0-9]*}}({{.*}})
%inc2 = add i16 %2, 1
store i16 %inc2, i16* %s16, align 2
@@ -158,7 +158,7 @@ if.end1:
if.end2:
%s8 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 3
; CHECK-NOT: load
- %3 = load i8* %s8
+ %3 = load i8, i8* %s8
; CHECK: incb {{[0-9][0-9]*}}({{.*}})
%inc3 = add i8 %3, 1
store i8 %inc3, i8* %s8
@@ -181,9 +181,9 @@ define void @test3() nounwind ssp {
entry:
; CHECK-LABEL: test3:
; CHECK: decq 16(%rax)
- %0 = load i64** @foo, align 8
+ %0 = load i64*, i64** @foo, align 8
%arrayidx = getelementptr inbounds i64, i64* %0, i64 2
- %1 = load i64* %arrayidx, align 8
+ %1 = load i64, i64* %arrayidx, align 8
%dec = add i64 %1, -1
store i64 %dec, i64* %arrayidx, align 8
%cmp = icmp eq i64 %dec, 0
@@ -209,8 +209,8 @@ declare void @baz()
define void @test4() nounwind uwtable ssp {
entry:
- %0 = load i32* @x, align 4
- %1 = load i32* @y, align 4
+ %0 = load i32, i32* @x, align 4
+ %1 = load i32, i32* @y, align 4
%dec = add nsw i32 %1, -1
store i32 %dec, i32* @y, align 4
%tobool.i = icmp ne i32 %dec, 0
diff --git a/llvm/test/CodeGen/X86/regalloc-reconcile-broken-hints.ll b/llvm/test/CodeGen/X86/regalloc-reconcile-broken-hints.ll
index 3d4f4fa0e83..016b0d13fc4 100644
--- a/llvm/test/CodeGen/X86/regalloc-reconcile-broken-hints.ll
+++ b/llvm/test/CodeGen/X86/regalloc-reconcile-broken-hints.ll
@@ -46,7 +46,7 @@ entry:
%next = getelementptr inbounds i8, i8* %call, i64 8
%tmp = bitcast i8* %next to %struct._list**
%tmp2 = bitcast i8* %call to %struct._list*
- %.pre78 = load i32* @ncol, align 4
+ %.pre78 = load i32, i32* @ncol, align 4
br label %for.cond1.preheader
for.cond1.preheader: ; preds = %for.inc32, %entry
@@ -60,7 +60,7 @@ for.body3: ; preds = %if.end31, %for.cond
%row.172 = phi i32 [ %row.3, %if.end31 ], [ 0, %for.cond1.preheader ]
%col.071 = phi i32 [ %inc, %if.end31 ], [ 0, %for.cond1.preheader ]
%call4 = tail call i32* @make_data()
- %tmp5 = load i32* @ncol, align 4
+ %tmp5 = load i32, i32* @ncol, align 4
%tobool14.i = icmp eq i32 %tmp5, 0
br i1 %tobool14.i, label %while.cond.i, label %while.body.lr.ph.i
@@ -84,9 +84,9 @@ while.cond.i: ; preds = %land.rhs.i, %while.
land.rhs.i: ; preds = %while.cond.i
%arrayidx.i67 = getelementptr inbounds i32, i32* %call4, i64 %indvars.iv.next.i65
- %tmp11 = load i32* %arrayidx.i67, align 4
+ %tmp11 = load i32, i32* %arrayidx.i67, align 4
%arrayidx2.i68 = getelementptr inbounds i32, i32* %data, i64 %indvars.iv.next.i65
- %tmp12 = load i32* %arrayidx2.i68, align 4
+ %tmp12 = load i32, i32* %arrayidx2.i68, align 4
%cmp.i69 = icmp eq i32 %tmp11, %tmp12
br i1 %cmp.i69, label %while.cond.i, label %equal_data.exit
@@ -96,14 +96,14 @@ equal_data.exit: ; preds = %land.rhs.i
if.then: ; preds = %equal_data.exit
%next7 = getelementptr inbounds %struct._list, %struct._list* %current.173, i64 0, i32 1
- %tmp14 = load %struct._list** %next7, align 8
+ %tmp14 = load %struct._list*, %struct._list** %next7, align 8
%next12 = getelementptr inbounds %struct._list, %struct._list* %tmp14, i64 0, i32 1
store %struct._list* null, %struct._list** %next12, align 8
- %tmp15 = load %struct._list** %next7, align 8
- %tmp16 = load i32* %value, align 4
+ %tmp15 = load %struct._list*, %struct._list** %next7, align 8
+ %tmp16 = load i32, i32* %value, align 4
%cmp14 = icmp eq i32 %tmp16, 1
%.tmp16 = select i1 %cmp14, i32 0, i32 %tmp16
- %tmp18 = load i32* %all, align 4
+ %tmp18 = load i32, i32* %all, align 4
%tmp19 = or i32 %tmp18, %.tmp16
%tmp20 = icmp eq i32 %tmp19, 0
br i1 %tmp20, label %if.then19, label %if.end31
@@ -123,12 +123,12 @@ if.end31: ; preds = %if.else, %if.then19
%row.3 = phi i32 [ %.row.172, %if.else ], [ %row.172, %if.then ], [ 0, %if.then19 ]
%current.2 = phi %struct._list* [ %current.173, %if.else ], [ %tmp15, %if.then ], [ %tmp15, %if.then19 ]
%inc = add nsw i32 %col.1, 1
- %tmp25 = load i32* @ncol, align 4
+ %tmp25 = load i32, i32* @ncol, align 4
%cmp2 = icmp eq i32 %inc, %tmp25
br i1 %cmp2, label %for.cond1.for.inc32_crit_edge, label %for.body3
for.cond1.for.inc32_crit_edge: ; preds = %if.end31
- %.pre79 = load i32* @nrow, align 4
+ %.pre79 = load i32, i32* @nrow, align 4
br label %for.inc32
for.inc32: ; preds = %for.cond1.for.inc32_crit_edge, %for.cond1.preheader
@@ -140,6 +140,6 @@ for.inc32: ; preds = %for.cond1.for.inc32
br i1 %cmp, label %for.end34, label %for.cond1.preheader
for.end34: ; preds = %for.inc32
- %.pre = load %struct._list** %tmp, align 8
+ %.pre = load %struct._list*, %struct._list** %tmp, align 8
ret %struct._list* %.pre
}
diff --git a/llvm/test/CodeGen/X86/regpressure.ll b/llvm/test/CodeGen/X86/regpressure.ll
index 19f5ef838ce..8f352b8fbb5 100644
--- a/llvm/test/CodeGen/X86/regpressure.ll
+++ b/llvm/test/CodeGen/X86/regpressure.ll
@@ -9,57 +9,57 @@
;; folded into the multiplies, 2 registers otherwise.
define i32 @regpressure1(i32* %P) {
- %A = load i32* %P ; <i32> [#uses=1]
+ %A = load i32, i32* %P ; <i32> [#uses=1]
%Bp = getelementptr i32, i32* %P, i32 1 ; <i32*> [#uses=1]
- %B = load i32* %Bp ; <i32> [#uses=1]
+ %B = load i32, i32* %Bp ; <i32> [#uses=1]
%s1 = mul i32 %A, %B ; <i32> [#uses=1]
%Cp = getelementptr i32, i32* %P, i32 2 ; <i32*> [#uses=1]
- %C = load i32* %Cp ; <i32> [#uses=1]
+ %C = load i32, i32* %Cp ; <i32> [#uses=1]
%s2 = mul i32 %s1, %C ; <i32> [#uses=1]
%Dp = getelementptr i32, i32* %P, i32 3 ; <i32*> [#uses=1]
- %D = load i32* %Dp ; <i32> [#uses=1]
+ %D = load i32, i32* %Dp ; <i32> [#uses=1]
%s3 = mul i32 %s2, %D ; <i32> [#uses=1]
%Ep = getelementptr i32, i32* %P, i32 4 ; <i32*> [#uses=1]
- %E = load i32* %Ep ; <i32> [#uses=1]
+ %E = load i32, i32* %Ep ; <i32> [#uses=1]
%s4 = mul i32 %s3, %E ; <i32> [#uses=1]
%Fp = getelementptr i32, i32* %P, i32 5 ; <i32*> [#uses=1]
- %F = load i32* %Fp ; <i32> [#uses=1]
+ %F = load i32, i32* %Fp ; <i32> [#uses=1]
%s5 = mul i32 %s4, %F ; <i32> [#uses=1]
%Gp = getelementptr i32, i32* %P, i32 6 ; <i32*> [#uses=1]
- %G = load i32* %Gp ; <i32> [#uses=1]
+ %G = load i32, i32* %Gp ; <i32> [#uses=1]
%s6 = mul i32 %s5, %G ; <i32> [#uses=1]
%Hp = getelementptr i32, i32* %P, i32 7 ; <i32*> [#uses=1]
- %H = load i32* %Hp ; <i32> [#uses=1]
+ %H = load i32, i32* %Hp ; <i32> [#uses=1]
%s7 = mul i32 %s6, %H ; <i32> [#uses=1]
%Ip = getelementptr i32, i32* %P, i32 8 ; <i32*> [#uses=1]
- %I = load i32* %Ip ; <i32> [#uses=1]
+ %I = load i32, i32* %Ip ; <i32> [#uses=1]
%s8 = mul i32 %s7, %I ; <i32> [#uses=1]
%Jp = getelementptr i32, i32* %P, i32 9 ; <i32*> [#uses=1]
- %J = load i32* %Jp ; <i32> [#uses=1]
+ %J = load i32, i32* %Jp ; <i32> [#uses=1]
%s9 = mul i32 %s8, %J ; <i32> [#uses=1]
ret i32 %s9
}
define i32 @regpressure2(i32* %P) {
- %A = load i32* %P ; <i32> [#uses=1]
+ %A = load i32, i32* %P ; <i32> [#uses=1]
%Bp = getelementptr i32, i32* %P, i32 1 ; <i32*> [#uses=1]
- %B = load i32* %Bp ; <i32> [#uses=1]
+ %B = load i32, i32* %Bp ; <i32> [#uses=1]
%Cp = getelementptr i32, i32* %P, i32 2 ; <i32*> [#uses=1]
- %C = load i32* %Cp ; <i32> [#uses=1]
+ %C = load i32, i32* %Cp ; <i32> [#uses=1]
%Dp = getelementptr i32, i32* %P, i32 3 ; <i32*> [#uses=1]
- %D = load i32* %Dp ; <i32> [#uses=1]
+ %D = load i32, i32* %Dp ; <i32> [#uses=1]
%Ep = getelementptr i32, i32* %P, i32 4 ; <i32*> [#uses=1]
- %E = load i32* %Ep ; <i32> [#uses=1]
+ %E = load i32, i32* %Ep ; <i32> [#uses=1]
%Fp = getelementptr i32, i32* %P, i32 5 ; <i32*> [#uses=1]
- %F = load i32* %Fp ; <i32> [#uses=1]
+ %F = load i32, i32* %Fp ; <i32> [#uses=1]
%Gp = getelementptr i32, i32* %P, i32 6 ; <i32*> [#uses=1]
- %G = load i32* %Gp ; <i32> [#uses=1]
+ %G = load i32, i32* %Gp ; <i32> [#uses=1]
%Hp = getelementptr i32, i32* %P, i32 7 ; <i32*> [#uses=1]
- %H = load i32* %Hp ; <i32> [#uses=1]
+ %H = load i32, i32* %Hp ; <i32> [#uses=1]
%Ip = getelementptr i32, i32* %P, i32 8 ; <i32*> [#uses=1]
- %I = load i32* %Ip ; <i32> [#uses=1]
+ %I = load i32, i32* %Ip ; <i32> [#uses=1]
%Jp = getelementptr i32, i32* %P, i32 9 ; <i32*> [#uses=1]
- %J = load i32* %Jp ; <i32> [#uses=1]
+ %J = load i32, i32* %Jp ; <i32> [#uses=1]
%s1 = mul i32 %A, %B ; <i32> [#uses=1]
%s2 = mul i32 %s1, %C ; <i32> [#uses=1]
%s3 = mul i32 %s2, %D ; <i32> [#uses=1]
@@ -73,25 +73,25 @@ define i32 @regpressure2(i32* %P) {
}
define i32 @regpressure3(i16* %P, i1 %Cond, i32* %Other) {
- %A = load i16* %P ; <i16> [#uses=1]
+ %A = load i16, i16* %P ; <i16> [#uses=1]
%Bp = getelementptr i16, i16* %P, i32 1 ; <i16*> [#uses=1]
- %B = load i16* %Bp ; <i16> [#uses=1]
+ %B = load i16, i16* %Bp ; <i16> [#uses=1]
%Cp = getelementptr i16, i16* %P, i32 2 ; <i16*> [#uses=1]
- %C = load i16* %Cp ; <i16> [#uses=1]
+ %C = load i16, i16* %Cp ; <i16> [#uses=1]
%Dp = getelementptr i16, i16* %P, i32 3 ; <i16*> [#uses=1]
- %D = load i16* %Dp ; <i16> [#uses=1]
+ %D = load i16, i16* %Dp ; <i16> [#uses=1]
%Ep = getelementptr i16, i16* %P, i32 4 ; <i16*> [#uses=1]
- %E = load i16* %Ep ; <i16> [#uses=1]
+ %E = load i16, i16* %Ep ; <i16> [#uses=1]
%Fp = getelementptr i16, i16* %P, i32 5 ; <i16*> [#uses=1]
- %F = load i16* %Fp ; <i16> [#uses=1]
+ %F = load i16, i16* %Fp ; <i16> [#uses=1]
%Gp = getelementptr i16, i16* %P, i32 6 ; <i16*> [#uses=1]
- %G = load i16* %Gp ; <i16> [#uses=1]
+ %G = load i16, i16* %Gp ; <i16> [#uses=1]
%Hp = getelementptr i16, i16* %P, i32 7 ; <i16*> [#uses=1]
- %H = load i16* %Hp ; <i16> [#uses=1]
+ %H = load i16, i16* %Hp ; <i16> [#uses=1]
%Ip = getelementptr i16, i16* %P, i32 8 ; <i16*> [#uses=1]
- %I = load i16* %Ip ; <i16> [#uses=1]
+ %I = load i16, i16* %Ip ; <i16> [#uses=1]
%Jp = getelementptr i16, i16* %P, i32 9 ; <i16*> [#uses=1]
- %J = load i16* %Jp ; <i16> [#uses=1]
+ %J = load i16, i16* %Jp ; <i16> [#uses=1]
%A.upgrd.1 = sext i16 %A to i32 ; <i32> [#uses=1]
%B.upgrd.2 = sext i16 %B to i32 ; <i32> [#uses=1]
%D.upgrd.3 = sext i16 %D to i32 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/remat-constant.ll b/llvm/test/CodeGen/X86/remat-constant.ll
index 3e813209d41..5a6826f3809 100644
--- a/llvm/test/CodeGen/X86/remat-constant.ll
+++ b/llvm/test/CodeGen/X86/remat-constant.ll
@@ -7,7 +7,7 @@ declare void @bar() nounwind
declare void @qux(float %f) nounwind
define void @foo() nounwind {
- %f = load float* @a
+ %f = load float, float* @a
call void @bar()
call void @qux(float %f)
call void @qux(float %f)
diff --git a/llvm/test/CodeGen/X86/remat-fold-load.ll b/llvm/test/CodeGen/X86/remat-fold-load.ll
index 185bda19ce0..3478033bfbf 100644
--- a/llvm/test/CodeGen/X86/remat-fold-load.ll
+++ b/llvm/test/CodeGen/X86/remat-fold-load.ll
@@ -40,24 +40,24 @@ if.then.i.i.i.i71: ; preds = %while.body12
%call4.i.i.i.i68 = call noalias i8* @malloc(i32 undef) nounwind
%tmp1 = getelementptr inbounds %type_a, %type_a* %tmp, i32 0, i32 1, i32 0, i32 1
%buf_6.i.i.i.i70 = bitcast %type_d* %tmp1 to i8**
- %tmp2 = load i8** %buf_6.i.i.i.i70, align 4
+ %tmp2 = load i8*, i8** %buf_6.i.i.i.i70, align 4
call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* %tmp2, i32 undef, i32 1, i1 false) nounwind
unreachable
if.else.i.i.i.i74: ; preds = %while.body12
%i_.i.i.i.i72 = getelementptr inbounds %type_a, %type_a* %tmp, i32 0, i32 1, i32 0, i32 1, i32 0
- %tmp3 = load i64* %i_.i.i.i.i72, align 4
+ %tmp3 = load i64, i64* %i_.i.i.i.i72, align 4
%tmp4 = zext i64 %tmp3 to i128
%tmp5 = shl nuw nsw i128 %tmp4, 32
%ins148 = or i128 %tmp5, %ins151
%second3.i.i76 = getelementptr inbounds %type_a, %type_a* %tmp, i32 0, i32 1, i32 1
- %tmp6 = load i32* %second3.i.i76, align 4
+ %tmp6 = load i32, i32* %second3.i.i76, align 4
%tmp7 = zext i32 %tmp6 to i128
%tmp8 = shl nuw i128 %tmp7, 96
%mask144 = and i128 %ins148, 79228162495817593519834398720
- %tmp9 = load %type_e** undef, align 4
+ %tmp9 = load %type_e*, %type_e** undef, align 4
%len_.i.i.i.i86 = getelementptr inbounds %type_e, %type_e* %tmp9, i32 0, i32 0, i32 0
- %tmp10 = load i32* %len_.i.i.i.i86, align 4
+ %tmp10 = load i32, i32* %len_.i.i.i.i86, align 4
%tmp11 = zext i32 %tmp10 to i128
%ins135 = or i128 %tmp11, %ins135156160
%cmp.i.i.i.i.i88 = icmp sgt i32 %tmp10, 8
@@ -68,7 +68,7 @@ if.then.i.i.i.i92: ; preds = %if.else.i.i.i.i74
%ins126 = or i128 0, %ins135
%tmp12 = getelementptr inbounds %type_e, %type_e* %tmp9, i32 0, i32 0, i32 1
%buf_6.i.i.i.i91 = bitcast %type_d* %tmp12 to i8**
- %tmp13 = load i8** %buf_6.i.i.i.i91, align 4
+ %tmp13 = load i8*, i8** %buf_6.i.i.i.i91, align 4
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %call4.i.i.i.i89, i8* %tmp13, i32 %tmp10, i32 1, i1 false) nounwind
br label %A
@@ -79,7 +79,7 @@ if.else.i.i.i.i95: ; preds = %if.else.i.i.i.i74
A: ; preds = %if.else.i.i.i.i95, %if.then.i.i.i.i92
%ins135157 = phi i128 [ %ins126, %if.then.i.i.i.i92 ], [ undef, %if.else.i.i.i.i95 ]
%second3.i.i97 = getelementptr inbounds %type_e, %type_e* %tmp9, i32 0, i32 1
- %tmp14 = load i64* %second3.i.i97, align 4
+ %tmp14 = load i64, i64* %second3.i.i97, align 4
%tmp15 = trunc i64 %tmp14 to i32
%cmp.i99 = icmp sgt i32 %tmp6, %tmp15
%tmp16 = trunc i128 %ins135157 to i32
@@ -118,13 +118,13 @@ E: ; preds = %D
if.end24: ; preds = %E, %C
%phitmp = or i128 %tmp8, %mask144
%phitmp158 = or i128 undef, undef
- %tmp18 = load %type_a** undef, align 4
- %tmp19 = load %type_a** undef, align 4
+ %tmp18 = load %type_a*, %type_a** undef, align 4
+ %tmp19 = load %type_a*, %type_a** undef, align 4
%cmp.i49 = icmp eq %type_a* %tmp18, %tmp19
br i1 %cmp.i49, label %while.cond10.while.end26_crit_edge, label %while.body12
while.cond10.while.end26_crit_edge: ; preds = %if.end24
- %.pre = load %type_e** undef, align 4
+ %.pre = load %type_e*, %type_e** undef, align 4
br label %while.end26
while.end26: ; preds = %while.cond10.while.end26_crit_edge, %while.end.while.end26_crit_edge
diff --git a/llvm/test/CodeGen/X86/remat-invalid-liveness.ll b/llvm/test/CodeGen/X86/remat-invalid-liveness.ll
index 92f75e16dc4..c6b43b0dd3e 100644
--- a/llvm/test/CodeGen/X86/remat-invalid-liveness.ll
+++ b/llvm/test/CodeGen/X86/remat-invalid-liveness.ll
@@ -40,7 +40,7 @@ entry:
sw.bb.i: ; preds = %entry
%call.i.i.i = tail call i32 undef(%struct.A* %ht, i8 zeroext 22, i32 undef, i32 0, %struct.D* undef)
- %bf.load.i.i = load i128* undef, align 4
+ %bf.load.i.i = load i128, i128* undef, align 4
%bf.lshr.i.i = lshr i128 %bf.load.i.i, %const72
%shl1.i.i = shl nuw nsw i128 %bf.lshr.i.i, 8
%shl.i.i = trunc i128 %shl1.i.i to i32
@@ -59,13 +59,13 @@ cond.false10.i.i: ; preds = %sw.bb.i
cond.end12.i.i: ; preds = %cond.false10.i.i, %__XXX2.exit.i.i
%.sink.in.i.i = phi i8** [ %arrayidx.i.i.i, %__XXX2.exit.i.i ], [ %arrayidx.i6.i.i, %cond.false10.i.i ]
- %.sink.i.i = load i8** %.sink.in.i.i, align 4
+ %.sink.i.i = load i8*, i8** %.sink.in.i.i, align 4
%tmp = bitcast i8* %.sink.i.i to %union.E*
br i1 undef, label %for.body.i.i, label %if.end196
for.body.i.i: ; preds = %for.body.i.i, %cond.end12.i.i
%weak.i.i = getelementptr inbounds %union.E, %union.E* %tmp, i32 undef, i32 0
- %tmp1 = load i32* %weak.i.i, align 4
+ %tmp1 = load i32, i32* %weak.i.i, align 4
%cmp36.i.i = icmp ne i32 %tmp1, %shl.i.i
%or.cond = and i1 %cmp36.i.i, false
br i1 %or.cond, label %for.body.i.i, label %if.end196
diff --git a/llvm/test/CodeGen/X86/remat-scalar-zero.ll b/llvm/test/CodeGen/X86/remat-scalar-zero.ll
index d577d6dd189..0f081937b24 100644
--- a/llvm/test/CodeGen/X86/remat-scalar-zero.ll
+++ b/llvm/test/CodeGen/X86/remat-scalar-zero.ll
@@ -10,39 +10,39 @@
define void @foo(double* nocapture %x, double* nocapture %y) nounwind {
entry:
- %tmp1 = load double* %x ; <double> [#uses=1]
+ %tmp1 = load double, double* %x ; <double> [#uses=1]
%arrayidx4 = getelementptr inbounds double, double* %x, i64 1 ; <double*> [#uses=1]
- %tmp5 = load double* %arrayidx4 ; <double> [#uses=1]
+ %tmp5 = load double, double* %arrayidx4 ; <double> [#uses=1]
%arrayidx8 = getelementptr inbounds double, double* %x, i64 2 ; <double*> [#uses=1]
- %tmp9 = load double* %arrayidx8 ; <double> [#uses=1]
+ %tmp9 = load double, double* %arrayidx8 ; <double> [#uses=1]
%arrayidx12 = getelementptr inbounds double, double* %x, i64 3 ; <double*> [#uses=1]
- %tmp13 = load double* %arrayidx12 ; <double> [#uses=1]
+ %tmp13 = load double, double* %arrayidx12 ; <double> [#uses=1]
%arrayidx16 = getelementptr inbounds double, double* %x, i64 4 ; <double*> [#uses=1]
- %tmp17 = load double* %arrayidx16 ; <double> [#uses=1]
+ %tmp17 = load double, double* %arrayidx16 ; <double> [#uses=1]
%arrayidx20 = getelementptr inbounds double, double* %x, i64 5 ; <double*> [#uses=1]
- %tmp21 = load double* %arrayidx20 ; <double> [#uses=1]
+ %tmp21 = load double, double* %arrayidx20 ; <double> [#uses=1]
%arrayidx24 = getelementptr inbounds double, double* %x, i64 6 ; <double*> [#uses=1]
- %tmp25 = load double* %arrayidx24 ; <double> [#uses=1]
+ %tmp25 = load double, double* %arrayidx24 ; <double> [#uses=1]
%arrayidx28 = getelementptr inbounds double, double* %x, i64 7 ; <double*> [#uses=1]
- %tmp29 = load double* %arrayidx28 ; <double> [#uses=1]
+ %tmp29 = load double, double* %arrayidx28 ; <double> [#uses=1]
%arrayidx32 = getelementptr inbounds double, double* %x, i64 8 ; <double*> [#uses=1]
- %tmp33 = load double* %arrayidx32 ; <double> [#uses=1]
+ %tmp33 = load double, double* %arrayidx32 ; <double> [#uses=1]
%arrayidx36 = getelementptr inbounds double, double* %x, i64 9 ; <double*> [#uses=1]
- %tmp37 = load double* %arrayidx36 ; <double> [#uses=1]
+ %tmp37 = load double, double* %arrayidx36 ; <double> [#uses=1]
%arrayidx40 = getelementptr inbounds double, double* %x, i64 10 ; <double*> [#uses=1]
- %tmp41 = load double* %arrayidx40 ; <double> [#uses=1]
+ %tmp41 = load double, double* %arrayidx40 ; <double> [#uses=1]
%arrayidx44 = getelementptr inbounds double, double* %x, i64 11 ; <double*> [#uses=1]
- %tmp45 = load double* %arrayidx44 ; <double> [#uses=1]
+ %tmp45 = load double, double* %arrayidx44 ; <double> [#uses=1]
%arrayidx48 = getelementptr inbounds double, double* %x, i64 12 ; <double*> [#uses=1]
- %tmp49 = load double* %arrayidx48 ; <double> [#uses=1]
+ %tmp49 = load double, double* %arrayidx48 ; <double> [#uses=1]
%arrayidx52 = getelementptr inbounds double, double* %x, i64 13 ; <double*> [#uses=1]
- %tmp53 = load double* %arrayidx52 ; <double> [#uses=1]
+ %tmp53 = load double, double* %arrayidx52 ; <double> [#uses=1]
%arrayidx56 = getelementptr inbounds double, double* %x, i64 14 ; <double*> [#uses=1]
- %tmp57 = load double* %arrayidx56 ; <double> [#uses=1]
+ %tmp57 = load double, double* %arrayidx56 ; <double> [#uses=1]
%arrayidx60 = getelementptr inbounds double, double* %x, i64 15 ; <double*> [#uses=1]
- %tmp61 = load double* %arrayidx60 ; <double> [#uses=1]
+ %tmp61 = load double, double* %arrayidx60 ; <double> [#uses=1]
%arrayidx64 = getelementptr inbounds double, double* %x, i64 16 ; <double*> [#uses=1]
- %tmp65 = load double* %arrayidx64 ; <double> [#uses=1]
+ %tmp65 = load double, double* %arrayidx64 ; <double> [#uses=1]
%div = fdiv double %tmp1, 0.000000e+00 ; <double> [#uses=1]
store double %div, double* %y
%div70 = fdiv double %tmp5, 2.000000e-01 ; <double> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/reverse_branches.ll b/llvm/test/CodeGen/X86/reverse_branches.ll
index 24b2da946c3..779e1cd1b5a 100644
--- a/llvm/test/CodeGen/X86/reverse_branches.ll
+++ b/llvm/test/CodeGen/X86/reverse_branches.ll
@@ -68,7 +68,7 @@ for.body20:
do.body.i:
%n.addr.0.i = phi i64 [ %dec.i, %do.cond.i ], [ 1000, %for.body20 ]
%p.0.i = phi i8* [ %incdec.ptr.i, %do.cond.i ], [ %arraydecay24, %for.body20 ]
- %2 = load i8* %p.0.i, align 1
+ %2 = load i8, i8* %p.0.i, align 1
%cmp3.i = icmp eq i8 %2, 120
br i1 %cmp3.i, label %exit, label %do.cond.i
diff --git a/llvm/test/CodeGen/X86/rip-rel-address.ll b/llvm/test/CodeGen/X86/rip-rel-address.ll
index 24ff07b4b21..b49d597d9f0 100644
--- a/llvm/test/CodeGen/X86/rip-rel-address.ll
+++ b/llvm/test/CodeGen/X86/rip-rel-address.ll
@@ -6,7 +6,7 @@
@a = internal global double 3.4
define double @foo() nounwind {
- %a = load double* @a
+ %a = load double, double* @a
ret double %a
; PIC64: movsd _a(%rip), %xmm0
diff --git a/llvm/test/CodeGen/X86/rot32.ll b/llvm/test/CodeGen/X86/rot32.ll
index 7bdd606e9cb..5738f70fa47 100644
--- a/llvm/test/CodeGen/X86/rot32.ll
+++ b/llvm/test/CodeGen/X86/rot32.ll
@@ -61,7 +61,7 @@ define i32 @xfoop(i32* %p) nounwind readnone {
entry:
; BMI2-LABEL: xfoop:
; BMI2: rorxl $25, ({{.+}}), %{{.+}}
- %x = load i32* %p
+ %x = load i32, i32* %p
%a = lshr i32 %x, 25
%b = shl i32 %x, 7
%c = or i32 %a, %b
@@ -94,7 +94,7 @@ define i32 @xunp(i32* %p) nounwind readnone {
entry:
; BMI2-LABEL: xunp:
; BMI2: rorxl $7, ({{.+}}), %{{.+}}
- %x = load i32* %p
+ %x = load i32, i32* %p
%a = lshr i32 %x, 7
%b = shl i32 %x, 25
%c = or i32 %a, %b
diff --git a/llvm/test/CodeGen/X86/rot64.ll b/llvm/test/CodeGen/X86/rot64.ll
index e19a35da1cd..f77bde050c7 100644
--- a/llvm/test/CodeGen/X86/rot64.ll
+++ b/llvm/test/CodeGen/X86/rot64.ll
@@ -55,7 +55,7 @@ define i64 @xfoop(i64* %p) nounwind readnone {
entry:
; BMI2-LABEL: xfoop:
; BMI2: rorxq $57, ({{.+}}), %{{.+}}
- %x = load i64* %p
+ %x = load i64, i64* %p
%a = lshr i64 %x, 57
%b = shl i64 %x, 7
%c = or i64 %a, %b
@@ -84,7 +84,7 @@ define i64 @xunp(i64* %p) nounwind readnone {
entry:
; BMI2-LABEL: xunp:
; BMI2: rorxq $7, ({{.+}}), %{{.+}}
- %x = load i64* %p
+ %x = load i64, i64* %p
%a = lshr i64 %x, 7
%b = shl i64 %x, 57
%c = or i64 %a, %b
diff --git a/llvm/test/CodeGen/X86/rotate4.ll b/llvm/test/CodeGen/X86/rotate4.ll
index 5372612aeab..56a7d328505 100644
--- a/llvm/test/CodeGen/X86/rotate4.ll
+++ b/llvm/test/CodeGen/X86/rotate4.ll
@@ -68,7 +68,7 @@ define void @rotate_left_m32(i32 *%pa, i32 %b) {
; no store:
; CHECK-NOT: mov
entry:
- %a = load i32* %pa, align 16
+ %a = load i32, i32* %pa, align 16
%and = and i32 %b, 31
%shl = shl i32 %a, %and
%0 = sub i32 0, %b
@@ -86,7 +86,7 @@ define void @rotate_right_m32(i32 *%pa, i32 %b) {
; no store:
; CHECK-NOT: mov
entry:
- %a = load i32* %pa, align 16
+ %a = load i32, i32* %pa, align 16
%and = and i32 %b, 31
%shl = lshr i32 %a, %and
%0 = sub i32 0, %b
@@ -104,7 +104,7 @@ define void @rotate_left_m64(i64 *%pa, i64 %b) {
; no store:
; CHECK-NOT: mov
entry:
- %a = load i64* %pa, align 16
+ %a = load i64, i64* %pa, align 16
%and = and i64 %b, 63
%shl = shl i64 %a, %and
%0 = sub i64 0, %b
@@ -122,7 +122,7 @@ define void @rotate_right_m64(i64 *%pa, i64 %b) {
; no store:
; CHECK-NOT: mov
entry:
- %a = load i64* %pa, align 16
+ %a = load i64, i64* %pa, align 16
%and = and i64 %b, 63
%shl = lshr i64 %a, %and
%0 = sub i64 0, %b
diff --git a/llvm/test/CodeGen/X86/sandybridge-loads.ll b/llvm/test/CodeGen/X86/sandybridge-loads.ll
index b8c364e2961..2e31154068f 100644
--- a/llvm/test/CodeGen/X86/sandybridge-loads.ll
+++ b/llvm/test/CodeGen/X86/sandybridge-loads.ll
@@ -8,10 +8,10 @@
;CHECK: ret
define void @wideloads(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
- %v0 = load <8 x float>* %a, align 16 ; <---- unaligned!
- %v1 = load <8 x float>* %b, align 32 ; <---- aligned!
+ %v0 = load <8 x float>, <8 x float>* %a, align 16 ; <---- unaligned!
+ %v1 = load <8 x float>, <8 x float>* %b, align 32 ; <---- aligned!
%m0 = fcmp olt <8 x float> %v1, %v0
- %v2 = load <8 x float>* %c, align 32 ; <---- aligned!
+ %v2 = load <8 x float>, <8 x float>* %c, align 32 ; <---- aligned!
%m1 = fcmp olt <8 x float> %v2, %v0
%mand = and <8 x i1> %m1, %m0
%r = zext <8 x i1> %mand to <8 x i32>
@@ -30,8 +30,8 @@ define void @wideloads(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
;CHECK: ret
define void @widestores(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
- %v0 = load <8 x float>* %a, align 32
- %v1 = load <8 x float>* %b, align 32
+ %v0 = load <8 x float>, <8 x float>* %a, align 32
+ %v1 = load <8 x float>, <8 x float>* %b, align 32
store <8 x float> %v0, <8 x float>* %b, align 32 ; <--- aligned
store <8 x float> %v1, <8 x float>* %a, align 16 ; <--- unaligned
ret void
diff --git a/llvm/test/CodeGen/X86/scalar-extract.ll b/llvm/test/CodeGen/X86/scalar-extract.ll
index 28458384093..b8ef5e74c43 100644
--- a/llvm/test/CodeGen/X86/scalar-extract.ll
+++ b/llvm/test/CodeGen/X86/scalar-extract.ll
@@ -6,7 +6,7 @@
define void @foo(<2 x i16>* %A, <2 x i16>* %B) {
entry:
- %tmp1 = load <2 x i16>* %A ; <<2 x i16>> [#uses=1]
+ %tmp1 = load <2 x i16>, <2 x i16>* %A ; <<2 x i16>> [#uses=1]
store <2 x i16> %tmp1, <2 x i16>* %B
ret void
}
diff --git a/llvm/test/CodeGen/X86/scalar_widen_div.ll b/llvm/test/CodeGen/X86/scalar_widen_div.ll
index 2d23fc47b42..1671f8f8910 100644
--- a/llvm/test/CodeGen/X86/scalar_widen_div.ll
+++ b/llvm/test/CodeGen/X86/scalar_widen_div.ll
@@ -17,17 +17,17 @@ entry:
store <2 x i32> addrspace(1)* %nsource, <2 x i32> addrspace(1)** %nsource.addr
store <2 x i32> addrspace(1)* %dsource, <2 x i32> addrspace(1)** %dsource.addr
store <2 x i32> addrspace(1)* %qdest, <2 x i32> addrspace(1)** %qdest.addr
- %tmp = load <2 x i32> addrspace(1)** %qdest.addr
- %tmp1 = load i32* %index
+ %tmp = load <2 x i32> addrspace(1)*, <2 x i32> addrspace(1)** %qdest.addr
+ %tmp1 = load i32, i32* %index
%arrayidx = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %tmp, i32 %tmp1
- %tmp2 = load <2 x i32> addrspace(1)** %nsource.addr
- %tmp3 = load i32* %index
+ %tmp2 = load <2 x i32> addrspace(1)*, <2 x i32> addrspace(1)** %nsource.addr
+ %tmp3 = load i32, i32* %index
%arrayidx4 = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %tmp2, i32 %tmp3
- %tmp5 = load <2 x i32> addrspace(1)* %arrayidx4
- %tmp6 = load <2 x i32> addrspace(1)** %dsource.addr
- %tmp7 = load i32* %index
+ %tmp5 = load <2 x i32>, <2 x i32> addrspace(1)* %arrayidx4
+ %tmp6 = load <2 x i32> addrspace(1)*, <2 x i32> addrspace(1)** %dsource.addr
+ %tmp7 = load i32, i32* %index
%arrayidx8 = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %tmp6, i32 %tmp7
- %tmp9 = load <2 x i32> addrspace(1)* %arrayidx8
+ %tmp9 = load <2 x i32>, <2 x i32> addrspace(1)* %arrayidx8
%tmp10 = sdiv <2 x i32> %tmp5, %tmp9
store <2 x i32> %tmp10, <2 x i32> addrspace(1)* %arrayidx
ret void
@@ -181,9 +181,9 @@ bb.nph:
for.body:
%i.014 = phi i32 [ 0, %bb.nph ], [ %inc, %for.body ]
%arrayidx11 = getelementptr <3 x i32>, <3 x i32>* %dest, i32 %i.014
- %tmp4 = load <3 x i32>* %arrayidx11 ; <<3 x i32>> [#uses=1]
+ %tmp4 = load <3 x i32>, <3 x i32>* %arrayidx11 ; <<3 x i32>> [#uses=1]
%arrayidx7 = getelementptr inbounds <3 x i32>, <3 x i32>* %old, i32 %i.014
- %tmp8 = load <3 x i32>* %arrayidx7 ; <<3 x i32>> [#uses=1]
+ %tmp8 = load <3 x i32>, <3 x i32>* %arrayidx7 ; <<3 x i32>> [#uses=1]
%div = sdiv <3 x i32> %tmp4, %tmp8
store <3 x i32> %div, <3 x i32>* %arrayidx11
%inc = add nsw i32 %i.014, 1
diff --git a/llvm/test/CodeGen/X86/scalarize-bitcast.ll b/llvm/test/CodeGen/X86/scalarize-bitcast.ll
index f6b29ecfbb6..6de511f1c5a 100644
--- a/llvm/test/CodeGen/X86/scalarize-bitcast.ll
+++ b/llvm/test/CodeGen/X86/scalarize-bitcast.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-pc-linux-gnu"
define void @mmxCombineMaskU(i32* nocapture %src, i32* nocapture %mask) nounwind {
entry:
- %tmp1 = load i32* %src ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %src ; <i32> [#uses=1]
%0 = insertelement <2 x i32> undef, i32 %tmp1, i32 0 ; <<2 x i32>> [#uses=1]
%1 = insertelement <2 x i32> %0, i32 0, i32 1 ; <<2 x i32>> [#uses=1]
%conv.i.i = bitcast <2 x i32> %1 to <1 x i64> ; <<1 x i64>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/scev-interchange.ll b/llvm/test/CodeGen/X86/scev-interchange.ll
index 0e7047b4845..e224c0858af 100644
--- a/llvm/test/CodeGen/X86/scev-interchange.ll
+++ b/llvm/test/CodeGen/X86/scev-interchange.ll
@@ -296,7 +296,7 @@ bb9.i216.i: ; preds = %bb29.loopexit.i.i, %bb8.i.i
bb15.i.i: ; preds = %bb16.preheader.i.i, %bb15.i.i
%j1.0212.i.i = phi i32 [ %1, %bb15.i.i ], [ 0, %bb16.preheader.i.i ] ; <i32> [#uses=2]
- %tmp6.i.i195.i.i = load i32* undef, align 4 ; <i32> [#uses=1]
+ %tmp6.i.i195.i.i = load i32, i32* undef, align 4 ; <i32> [#uses=1]
%tmp231.i.i = mul i32 %0, %tmp6.i.i195.i.i ; <i32> [#uses=1]
%tmp13.i197.i.i = add i32 %j1.0212.i.i, %tmp231.i.i ; <i32> [#uses=0]
%1 = add i32 %j1.0212.i.i, 1 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/segmented-stacks.ll b/llvm/test/CodeGen/X86/segmented-stacks.ll
index 3e47121a380..412728882b1 100644
--- a/llvm/test/CodeGen/X86/segmented-stacks.ll
+++ b/llvm/test/CodeGen/X86/segmented-stacks.ll
@@ -152,7 +152,7 @@ define void @test_basic() #0 {
}
define i32 @test_nested(i32 * nest %closure, i32 %other) #0 {
- %addend = load i32 * %closure
+ %addend = load i32 , i32 * %closure
%result = add i32 %other, %addend
%mem = alloca i32, i32 10
call void @dummy_use (i32* %mem, i32 10)
diff --git a/llvm/test/CodeGen/X86/seh-safe-div.ll b/llvm/test/CodeGen/X86/seh-safe-div.ll
index e294f24f28b..94d250a1f3a 100644
--- a/llvm/test/CodeGen/X86/seh-safe-div.ll
+++ b/llvm/test/CodeGen/X86/seh-safe-div.ll
@@ -58,7 +58,7 @@ eh.resume:
resume { i8*, i32 } %vals
__try.cont:
- %safe_ret = load i32* %r, align 4
+ %safe_ret = load i32, i32* %r, align 4
ret i32 %safe_ret
}
@@ -117,8 +117,8 @@ __try.cont:
define void @try_body(i32* %r, i32* %n, i32* %d) {
entry:
- %0 = load i32* %n, align 4
- %1 = load i32* %d, align 4
+ %0 = load i32, i32* %n, align 4
+ %1 = load i32, i32* %d, align 4
%div = sdiv i32 %0, %1
store i32 %div, i32* %r, align 4
ret void
@@ -146,8 +146,8 @@ entry:
define i32 @safe_div_filt0(i8* %eh_ptrs, i8* %rbp) {
%eh_ptrs_c = bitcast i8* %eh_ptrs to i32**
- %eh_rec = load i32** %eh_ptrs_c
- %eh_code = load i32* %eh_rec
+ %eh_rec = load i32*, i32** %eh_ptrs_c
+ %eh_code = load i32, i32* %eh_rec
; EXCEPTION_ACCESS_VIOLATION = 0xC0000005
%cmp = icmp eq i32 %eh_code, 3221225477
%filt.res = zext i1 %cmp to i32
@@ -156,8 +156,8 @@ define i32 @safe_div_filt0(i8* %eh_ptrs, i8* %rbp) {
define i32 @safe_div_filt1(i8* %eh_ptrs, i8* %rbp) {
%eh_ptrs_c = bitcast i8* %eh_ptrs to i32**
- %eh_rec = load i32** %eh_ptrs_c
- %eh_code = load i32* %eh_rec
+ %eh_rec = load i32*, i32** %eh_ptrs_c
+ %eh_code = load i32, i32* %eh_rec
; EXCEPTION_INT_DIVIDE_BY_ZERO = 0xC0000094
%cmp = icmp eq i32 %eh_code, 3221225620
%filt.res = zext i1 %cmp to i32
diff --git a/llvm/test/CodeGen/X86/select-with-and-or.ll b/llvm/test/CodeGen/X86/select-with-and-or.ll
index 1ccf30bf208..40af46bc0ff 100644
--- a/llvm/test/CodeGen/X86/select-with-and-or.ll
+++ b/llvm/test/CodeGen/X86/select-with-and-or.ll
@@ -62,7 +62,7 @@ define <4 x i32> @test6(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
define <4 x i32> @test7(<4 x float> %a, <4 x float> %b, <4 x i32>* %p) {
%f = fcmp ult <4 x float> %a, %b
%s = sext <4 x i1> %f to <4 x i32>
- %l = load <4 x i32>* %p
+ %l = load <4 x i32>, <4 x i32>* %p
%r = and <4 x i32> %l, %s
ret <4 x i32> %r
; CHECK: test7
diff --git a/llvm/test/CodeGen/X86/select.ll b/llvm/test/CodeGen/X86/select.ll
index e27204554fb..a4e06b39844 100644
--- a/llvm/test/CodeGen/X86/select.ll
+++ b/llvm/test/CodeGen/X86/select.ll
@@ -5,8 +5,8 @@
%0 = type { i64, i32 }
define i32 @test1(%0* %p, %0* %q, i1 %r) nounwind {
- %t0 = load %0* %p
- %t1 = load %0* %q
+ %t0 = load %0, %0* %p
+ %t1 = load %0, %0* %q
%t4 = select i1 %r, %0 %t0, %0 %t1
%t5 = extractvalue %0 %t4, 1
ret i32 %t5
@@ -63,7 +63,7 @@ entry:
%0 = fcmp olt double %F, 4.200000e+01 ; <i1> [#uses=1]
%iftmp.0.0 = select i1 %0, i32 4, i32 0 ; <i32> [#uses=1]
%1 = getelementptr i8, i8* %P, i32 %iftmp.0.0 ; <i8*> [#uses=1]
- %2 = load i8* %1, align 1 ; <i8> [#uses=1]
+ %2 = load i8, i8* %1, align 1 ; <i8> [#uses=1]
ret i8 %2
; CHECK-LABEL: test4:
; CHECK: movsbl ({{.*}},4), %eax
@@ -82,8 +82,8 @@ define void @test5(i1 %c, <2 x i16> %a, <2 x i16> %b, <2 x i16>* %p) nounwind {
}
define void @test6(i32 %C, <4 x float>* %A, <4 x float>* %B) nounwind {
- %tmp = load <4 x float>* %A ; <<4 x float>> [#uses=1]
- %tmp3 = load <4 x float>* %B ; <<4 x float>> [#uses=2]
+ %tmp = load <4 x float>, <4 x float>* %A ; <<4 x float>> [#uses=1]
+ %tmp3 = load <4 x float>, <4 x float>* %B ; <<4 x float>> [#uses=2]
%tmp9 = fmul <4 x float> %tmp3, %tmp3 ; <<4 x float>> [#uses=1]
%tmp.upgrd.1 = icmp eq i32 %C, 0 ; <i1> [#uses=1]
%iftmp.38.0 = select i1 %tmp.upgrd.1, <4 x float> %tmp9, <4 x float> %tmp ; <<4 x float>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/setcc-narrowing.ll b/llvm/test/CodeGen/X86/setcc-narrowing.ll
index 25cb2c822c5..bf5b45031a2 100644
--- a/llvm/test/CodeGen/X86/setcc-narrowing.ll
+++ b/llvm/test/CodeGen/X86/setcc-narrowing.ll
@@ -10,7 +10,7 @@ entry:
; CHECK-NEXT: setne %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: ret
- %0 = load i64* @t1.global, align 8
+ %0 = load i64, i64* @t1.global, align 8
%and = and i64 4294967295, %0
%cmp = icmp sgt i64 %and, 0
%conv = zext i1 %cmp to i32
diff --git a/llvm/test/CodeGen/X86/sext-load.ll b/llvm/test/CodeGen/X86/sext-load.ll
index e5eef33120b..2ea6e012192 100644
--- a/llvm/test/CodeGen/X86/sext-load.ll
+++ b/llvm/test/CodeGen/X86/sext-load.ll
@@ -23,7 +23,7 @@ define i32 @test2({i16, [6 x i8]}* %this) {
entry:
%b48 = getelementptr inbounds { i16, [6 x i8] }, { i16, [6 x i8] }* %this, i32 0, i32 1
%cast = bitcast [6 x i8]* %b48 to i48*
- %bf.load = load i48* %cast, align 2
+ %bf.load = load i48, i48* %cast, align 2
%bf.ashr = ashr i48 %bf.load, 32
%bf.cast = trunc i48 %bf.ashr to i32
ret i32 %bf.cast
diff --git a/llvm/test/CodeGen/X86/sha.ll b/llvm/test/CodeGen/X86/sha.ll
index bf81e9938ec..fe42637bc53 100644
--- a/llvm/test/CodeGen/X86/sha.ll
+++ b/llvm/test/CodeGen/X86/sha.ll
@@ -13,7 +13,7 @@ entry:
define <4 x i32> @test_sha1rnds4rm(<4 x i32> %a, <4 x i32>* %b) nounwind uwtable {
entry:
- %0 = load <4 x i32>* %b
+ %0 = load <4 x i32>, <4 x i32>* %b
%1 = tail call <4 x i32> @llvm.x86.sha1rnds4(<4 x i32> %a, <4 x i32> %0, i8 3)
ret <4 x i32> %1
; CHECK: test_sha1rnds4rm
@@ -32,7 +32,7 @@ entry:
define <4 x i32> @test_sha1nexterm(<4 x i32> %a, <4 x i32>* %b) nounwind uwtable {
entry:
- %0 = load <4 x i32>* %b
+ %0 = load <4 x i32>, <4 x i32>* %b
%1 = tail call <4 x i32> @llvm.x86.sha1nexte(<4 x i32> %a, <4 x i32> %0)
ret <4 x i32> %1
; CHECK: test_sha1nexterm
@@ -51,7 +51,7 @@ entry:
define <4 x i32> @test_sha1msg1rm(<4 x i32> %a, <4 x i32>* %b) nounwind uwtable {
entry:
- %0 = load <4 x i32>* %b
+ %0 = load <4 x i32>, <4 x i32>* %b
%1 = tail call <4 x i32> @llvm.x86.sha1msg1(<4 x i32> %a, <4 x i32> %0)
ret <4 x i32> %1
; CHECK: test_sha1msg1rm
@@ -70,7 +70,7 @@ entry:
define <4 x i32> @test_sha1msg2rm(<4 x i32> %a, <4 x i32>* %b) nounwind uwtable {
entry:
- %0 = load <4 x i32>* %b
+ %0 = load <4 x i32>, <4 x i32>* %b
%1 = tail call <4 x i32> @llvm.x86.sha1msg2(<4 x i32> %a, <4 x i32> %0)
ret <4 x i32> %1
; CHECK: test_sha1msg2rm
@@ -91,7 +91,7 @@ entry:
define <4 x i32> @test_sha256rnds2rm(<4 x i32> %a, <4 x i32>* %b, <4 x i32> %c) nounwind uwtable {
entry:
- %0 = load <4 x i32>* %b
+ %0 = load <4 x i32>, <4 x i32>* %b
%1 = tail call <4 x i32> @llvm.x86.sha256rnds2(<4 x i32> %a, <4 x i32> %0, <4 x i32> %c)
ret <4 x i32> %1
; CHECK: test_sha256rnds2rm
@@ -112,7 +112,7 @@ entry:
define <4 x i32> @test_sha256msg1rm(<4 x i32> %a, <4 x i32>* %b) nounwind uwtable {
entry:
- %0 = load <4 x i32>* %b
+ %0 = load <4 x i32>, <4 x i32>* %b
%1 = tail call <4 x i32> @llvm.x86.sha256msg1(<4 x i32> %a, <4 x i32> %0)
ret <4 x i32> %1
; CHECK: test_sha256msg1rm
@@ -131,7 +131,7 @@ entry:
define <4 x i32> @test_sha256msg2rm(<4 x i32> %a, <4 x i32>* %b) nounwind uwtable {
entry:
- %0 = load <4 x i32>* %b
+ %0 = load <4 x i32>, <4 x i32>* %b
%1 = tail call <4 x i32> @llvm.x86.sha256msg2(<4 x i32> %a, <4 x i32> %0)
ret <4 x i32> %1
; CHECK: test_sha256msg2rm
diff --git a/llvm/test/CodeGen/X86/shift-and.ll b/llvm/test/CodeGen/X86/shift-and.ll
index d487368431b..edd43a35ce5 100644
--- a/llvm/test/CodeGen/X86/shift-and.ll
+++ b/llvm/test/CodeGen/X86/shift-and.ll
@@ -38,7 +38,7 @@ define void @t3(i16 %t) nounwind {
; X64-NOT: andl
; X64: sarw
%shamt = and i16 %t, 31
- %tmp = load i16* @X
+ %tmp = load i16, i16* @X
%tmp1 = ashr i16 %tmp, %shamt
store i16 %tmp1, i16* @X
ret void
@@ -71,7 +71,7 @@ entry:
; X64: decq
; X64: andq
%shr = lshr i64 %key, 3
- %0 = load i64* %val, align 8
+ %0 = load i64, i64* %val, align 8
%sub = add i64 %0, 2305843009213693951
%and = and i64 %sub, %shr
ret i64 %and
diff --git a/llvm/test/CodeGen/X86/shift-bmi2.ll b/llvm/test/CodeGen/X86/shift-bmi2.ll
index 7615754a042..63b6ec55fac 100644
--- a/llvm/test/CodeGen/X86/shift-bmi2.ll
+++ b/llvm/test/CodeGen/X86/shift-bmi2.ll
@@ -27,7 +27,7 @@ entry:
define i32 @shl32p(i32* %p, i32 %shamt) nounwind uwtable readnone {
entry:
- %x = load i32* %p
+ %x = load i32, i32* %p
%shl = shl i32 %x, %shamt
; BMI2: shl32p
; Source order scheduling prevents folding, rdar:14208996.
@@ -41,7 +41,7 @@ entry:
define i32 @shl32pi(i32* %p) nounwind uwtable readnone {
entry:
- %x = load i32* %p
+ %x = load i32, i32* %p
%shl = shl i32 %x, 5
; BMI2: shl32pi
; BMI2-NOT: shlxl
@@ -72,7 +72,7 @@ entry:
define i64 @shl64p(i64* %p, i64 %shamt) nounwind uwtable readnone {
entry:
- %x = load i64* %p
+ %x = load i64, i64* %p
%shl = shl i64 %x, %shamt
; BMI264: shl64p
; BMI264: shlxq %{{.+}}, %{{.+}}, %{{.+}}
@@ -82,7 +82,7 @@ entry:
define i64 @shl64pi(i64* %p) nounwind uwtable readnone {
entry:
- %x = load i64* %p
+ %x = load i64, i64* %p
%shl = shl i64 %x, 7
; BMI264: shl64pi
; BMI264-NOT: shlxq
@@ -104,7 +104,7 @@ entry:
define i32 @lshr32p(i32* %p, i32 %shamt) nounwind uwtable readnone {
entry:
- %x = load i32* %p
+ %x = load i32, i32* %p
%shl = lshr i32 %x, %shamt
; BMI2: lshr32p
; Source order scheduling prevents folding, rdar:14208996.
@@ -127,7 +127,7 @@ entry:
define i64 @lshr64p(i64* %p, i64 %shamt) nounwind uwtable readnone {
entry:
- %x = load i64* %p
+ %x = load i64, i64* %p
%shl = lshr i64 %x, %shamt
; BMI264: lshr64p
; BMI264: shrxq %{{.+}}, %{{.+}}, %{{.+}}
@@ -149,7 +149,7 @@ entry:
define i32 @ashr32p(i32* %p, i32 %shamt) nounwind uwtable readnone {
entry:
- %x = load i32* %p
+ %x = load i32, i32* %p
%shl = ashr i32 %x, %shamt
; BMI2: ashr32p
; Source order scheduling prevents folding, rdar:14208996.
@@ -172,7 +172,7 @@ entry:
define i64 @ashr64p(i64* %p, i64 %shamt) nounwind uwtable readnone {
entry:
- %x = load i64* %p
+ %x = load i64, i64* %p
%shl = ashr i64 %x, %shamt
; BMI264: ashr64p
; BMI264: sarxq %{{.+}}, %{{.+}}, %{{.+}}
diff --git a/llvm/test/CodeGen/X86/shift-coalesce.ll b/llvm/test/CodeGen/X86/shift-coalesce.ll
index 5241042d0c5..dee7d373dce 100644
--- a/llvm/test/CodeGen/X86/shift-coalesce.ll
+++ b/llvm/test/CodeGen/X86/shift-coalesce.ll
@@ -6,7 +6,7 @@
; PR687
define i64 @foo(i64 %x, i64* %X) {
- %tmp.1 = load i64* %X ; <i64> [#uses=1]
+ %tmp.1 = load i64, i64* %X ; <i64> [#uses=1]
%tmp.3 = trunc i64 %tmp.1 to i8 ; <i8> [#uses=1]
%shift.upgrd.1 = zext i8 %tmp.3 to i64 ; <i64> [#uses=1]
%tmp.4 = shl i64 %x, %shift.upgrd.1 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/shift-codegen.ll b/llvm/test/CodeGen/X86/shift-codegen.ll
index 88b86100794..7d52bdeb9e3 100644
--- a/llvm/test/CodeGen/X86/shift-codegen.ll
+++ b/llvm/test/CodeGen/X86/shift-codegen.ll
@@ -15,9 +15,9 @@ define void @fn1() {
; CHECK-NOT: lea
; CHECK: ret
- %tmp = load i32* @Y ; <i32> [#uses=1]
+ %tmp = load i32, i32* @Y ; <i32> [#uses=1]
%tmp1 = shl i32 %tmp, 3 ; <i32> [#uses=1]
- %tmp2 = load i32* @X ; <i32> [#uses=1]
+ %tmp2 = load i32, i32* @X ; <i32> [#uses=1]
%tmp3 = or i32 %tmp1, %tmp2 ; <i32> [#uses=1]
store i32 %tmp3, i32* @X
ret void
diff --git a/llvm/test/CodeGen/X86/shift-combine.ll b/llvm/test/CodeGen/X86/shift-combine.ll
index 754464c6794..ec62bcdcdba 100644
--- a/llvm/test/CodeGen/X86/shift-combine.ll
+++ b/llvm/test/CodeGen/X86/shift-combine.ll
@@ -13,7 +13,7 @@ entry:
%tmp2 = lshr i32 %x, 2
%tmp3 = and i32 %tmp2, 3
%tmp4 = getelementptr [4 x i32], [4 x i32]* @array, i32 0, i32 %tmp3
- %tmp5 = load i32* %tmp4, align 4
+ %tmp5 = load i32, i32* %tmp4, align 4
ret i32 %tmp5
}
diff --git a/llvm/test/CodeGen/X86/shift-folding.ll b/llvm/test/CodeGen/X86/shift-folding.ll
index eca7643e91f..69887870897 100644
--- a/llvm/test/CodeGen/X86/shift-folding.ll
+++ b/llvm/test/CodeGen/X86/shift-folding.ll
@@ -44,7 +44,7 @@ define fastcc i32 @test4(i32* %d) {
; CHECK: ret
entry:
- %tmp4 = load i32* %d
+ %tmp4 = load i32, i32* %d
%tmp512 = lshr i32 %tmp4, 24
ret i32 %tmp512
}
@@ -63,7 +63,7 @@ entry:
%index = lshr i32 %i.zext, 11
%index.zext = zext i32 %index to i64
%val.ptr = getelementptr inbounds i32, i32* %arr, i64 %index.zext
- %val = load i32* %val.ptr
+ %val = load i32, i32* %val.ptr
%val.zext = zext i32 %val to i64
%sum = add i64 %val.zext, %index.zext
ret i64 %sum
diff --git a/llvm/test/CodeGen/X86/shift-one.ll b/llvm/test/CodeGen/X86/shift-one.ll
index 0f80f90c773..1ff02eb53e9 100644
--- a/llvm/test/CodeGen/X86/shift-one.ll
+++ b/llvm/test/CodeGen/X86/shift-one.ll
@@ -3,7 +3,7 @@
@x = external global i32 ; <i32*> [#uses=1]
define i32 @test() {
- %tmp.0 = load i32* @x ; <i32> [#uses=1]
+ %tmp.0 = load i32, i32* @x ; <i32> [#uses=1]
%tmp.1 = shl i32 %tmp.0, 1 ; <i32> [#uses=1]
ret i32 %tmp.1
}
diff --git a/llvm/test/CodeGen/X86/shift-parts.ll b/llvm/test/CodeGen/X86/shift-parts.ll
index 763da639710..0b25a7595f2 100644
--- a/llvm/test/CodeGen/X86/shift-parts.ll
+++ b/llvm/test/CodeGen/X86/shift-parts.ll
@@ -9,7 +9,7 @@
define i32 @int87(i32 %uint64p_8, i1 %cond) nounwind {
entry:
- %srcval4 = load i320* bitcast (%0* @g_144 to i320*), align 8 ; <i320> [#uses=1]
+ %srcval4 = load i320, i320* bitcast (%0* @g_144 to i320*), align 8 ; <i320> [#uses=1]
br label %for.cond
for.cond: ; preds = %for.cond, %entry
diff --git a/llvm/test/CodeGen/X86/shl-i64.ll b/llvm/test/CodeGen/X86/shl-i64.ll
index 073b35bf7c1..849912cc12e 100644
--- a/llvm/test/CodeGen/X86/shl-i64.ll
+++ b/llvm/test/CodeGen/X86/shl-i64.ll
@@ -7,9 +7,9 @@
define void @test_cl(<4 x i64>* %dst, <4 x i64>* %src, i32 %idx) {
entry:
%arrayidx = getelementptr inbounds <4 x i64>, <4 x i64> * %src, i32 %idx
- %0 = load <4 x i64> * %arrayidx, align 32
+ %0 = load <4 x i64> , <4 x i64> * %arrayidx, align 32
%arrayidx1 = getelementptr inbounds <4 x i64>, <4 x i64> * %dst, i32 %idx
- %1 = load <4 x i64> * %arrayidx1, align 32
+ %1 = load <4 x i64> , <4 x i64> * %arrayidx1, align 32
%2 = extractelement <4 x i64> %1, i32 0
%and = and i64 %2, 63
%3 = insertelement <4 x i64> undef, i64 %and, i32 0
diff --git a/llvm/test/CodeGen/X86/shl_undef.ll b/llvm/test/CodeGen/X86/shl_undef.ll
index 705af5b4e33..f59d014a2d8 100644
--- a/llvm/test/CodeGen/X86/shl_undef.ll
+++ b/llvm/test/CodeGen/X86/shl_undef.ll
@@ -18,7 +18,7 @@ entry:
%tmp0 = alloca i8
%tmp1 = alloca i32
store i8 1, i8* %tmp0
- %tmp921.i7845 = load i8* %a0, align 1
+ %tmp921.i7845 = load i8, i8* %a0, align 1
%tmp309 = xor i8 %tmp921.i7845, 104
%tmp592 = zext i8 %tmp309 to i32
%tmp862 = xor i32 1293461297, %tmp592
@@ -49,7 +49,7 @@ entry:
; shl undef, x -> 0
define i32 @foo1_undef(i32* %a0) nounwind {
entry:
- %tmp1 = load i32* %a0, align 1
+ %tmp1 = load i32, i32* %a0, align 1
%tmp2 = shl i32 undef, %tmp1;
ret i32 %tmp2
}
diff --git a/llvm/test/CodeGen/X86/shrink-compare.ll b/llvm/test/CodeGen/X86/shrink-compare.ll
index 4ddef4ca535..0efa073cb19 100644
--- a/llvm/test/CodeGen/X86/shrink-compare.ll
+++ b/llvm/test/CodeGen/X86/shrink-compare.ll
@@ -4,7 +4,7 @@ declare void @bar()
define void @test1(i32* nocapture %X) nounwind minsize {
entry:
- %tmp1 = load i32* %X, align 4
+ %tmp1 = load i32, i32* %X, align 4
%and = and i32 %tmp1, 255
%cmp = icmp eq i32 %and, 47
br i1 %cmp, label %if.then, label %if.end
@@ -72,7 +72,7 @@ lor.end: ; preds = %lor.rhs, %entry
; PR16551
define void @test5(i32 %X) nounwind minsize {
entry:
- %bf.load = load i56* bitcast ({ i8, i8, i8, i8, i8, i8, i8, i8 }* @x to i56*), align 4
+ %bf.load = load i56, i56* bitcast ({ i8, i8, i8, i8, i8, i8, i8, i8 }* @x to i56*), align 4
%bf.lshr = lshr i56 %bf.load, 32
%bf.cast = trunc i56 %bf.lshr to i32
%cmp = icmp ne i32 %bf.cast, 1
diff --git a/llvm/test/CodeGen/X86/shuffle-combine-crash.ll b/llvm/test/CodeGen/X86/shuffle-combine-crash.ll
index 6ab7b97e6a7..06fcaa97389 100644
--- a/llvm/test/CodeGen/X86/shuffle-combine-crash.ll
+++ b/llvm/test/CodeGen/X86/shuffle-combine-crash.ll
@@ -18,7 +18,7 @@ define void @sample_test() {
br i1 undef, label %5, label %1
; <label>:1 ; preds = %0
- %2 = load <4 x i8>* undef
+ %2 = load <4 x i8>, <4 x i8>* undef
%3 = shufflevector <4 x i8> %2, <4 x i8> undef, <4 x i32> <i32 2, i32 2, i32 0, i32 0>
%4 = shufflevector <4 x i8> %3, <4 x i8> undef, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
store <4 x i8> %4, <4 x i8>* undef
diff --git a/llvm/test/CodeGen/X86/sibcall-4.ll b/llvm/test/CodeGen/X86/sibcall-4.ll
index a1a9feb613c..23b73c031c0 100644
--- a/llvm/test/CodeGen/X86/sibcall-4.ll
+++ b/llvm/test/CodeGen/X86/sibcall-4.ll
@@ -6,7 +6,7 @@ cm1:
; CHECK-LABEL: t:
; CHECK: jmpl *%eax
%nm3 = getelementptr i32, i32* %Sp_Arg, i32 1
- %nm9 = load i32* %Sp_Arg
+ %nm9 = load i32, i32* %Sp_Arg
%nma = inttoptr i32 %nm9 to void (i32*, i32*, i32*, i32)*
tail call ghccc void %nma(i32* %Base_Arg, i32* %nm3, i32* %Hp_Arg, i32 %R1_Arg) nounwind
ret void
diff --git a/llvm/test/CodeGen/X86/sibcall-5.ll b/llvm/test/CodeGen/X86/sibcall-5.ll
index b065cce17b2..aab028bd17c 100644
--- a/llvm/test/CodeGen/X86/sibcall-5.ll
+++ b/llvm/test/CodeGen/X86/sibcall-5.ll
@@ -46,7 +46,7 @@ define hidden { double, double } @foo2(%0* %self, i8* nocapture %_cmd) uwtable o
; X64_BAD: call
; X64_BAD: call
; X64_BAD: call
- %1 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_2", align 8, !invariant.load !0
+ %1 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_2", align 8, !invariant.load !0
%2 = bitcast %0* %self to i8*
%3 = tail call { double, double } bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to { double, double } (i8*, i8*)*)(i8* %2, i8* %1) optsize
%4 = extractvalue { double, double } %3, 0
diff --git a/llvm/test/CodeGen/X86/sibcall.ll b/llvm/test/CodeGen/X86/sibcall.ll
index 2a70d017abe..d32e5676551 100644
--- a/llvm/test/CodeGen/X86/sibcall.ll
+++ b/llvm/test/CodeGen/X86/sibcall.ll
@@ -288,10 +288,10 @@ entry:
; X32ABI-NEXT: movl 12(%edi), %eax
; X32ABI-NEXT: jmpq *%rax
%0 = getelementptr inbounds %struct.__block_literal_2, %struct.__block_literal_2* %.block_descriptor, i64 0, i32 5 ; <void ()**> [#uses=1]
- %1 = load void ()** %0, align 8 ; <void ()*> [#uses=2]
+ %1 = load void ()*, void ()** %0, align 8 ; <void ()*> [#uses=2]
%2 = bitcast void ()* %1 to %struct.__block_literal_1* ; <%struct.__block_literal_1*> [#uses=1]
%3 = getelementptr inbounds %struct.__block_literal_1, %struct.__block_literal_1* %2, i64 0, i32 3 ; <i8**> [#uses=1]
- %4 = load i8** %3, align 8 ; <i8*> [#uses=1]
+ %4 = load i8*, i8** %3, align 8 ; <i8*> [#uses=1]
%5 = bitcast i8* %4 to void (i8*)* ; <void (i8*)*> [#uses=1]
%6 = bitcast void ()* %1 to i8* ; <i8*> [#uses=1]
tail call void %5(i8* %6) nounwind
diff --git a/llvm/test/CodeGen/X86/simple-zext.ll b/llvm/test/CodeGen/X86/simple-zext.ll
index ccd8292bcdb..b80c0bc6b10 100644
--- a/llvm/test/CodeGen/X86/simple-zext.ll
+++ b/llvm/test/CodeGen/X86/simple-zext.ll
@@ -6,7 +6,7 @@
define void @load_zext(i32* nocapture %p){
entry:
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%and = and i32 %0, 255
tail call void @use(i32 %and)
ret void
diff --git a/llvm/test/CodeGen/X86/sink-hoist.ll b/llvm/test/CodeGen/X86/sink-hoist.ll
index 5e5bac5e36a..2f70a83f528 100644
--- a/llvm/test/CodeGen/X86/sink-hoist.ll
+++ b/llvm/test/CodeGen/X86/sink-hoist.ll
@@ -50,7 +50,7 @@ entry:
bb:
%i.03 = phi i64 [ 0, %entry ], [ %3, %bb ]
%scevgep = getelementptr double, double* %p, i64 %i.03
- %1 = load double* %scevgep, align 8
+ %1 = load double, double* %scevgep, align 8
%2 = fdiv double 3.200000e+00, %1
store double %2, double* %scevgep, align 8
%3 = add nsw i64 %i.03, 1
@@ -104,7 +104,7 @@ entry:
bb: ; preds = %bb60
%i.0 = phi i32 [ 0, %bb60 ] ; <i32> [#uses=2]
%0 = bitcast float* %x_addr.0 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %1 = load <4 x float>* %0, align 16 ; <<4 x float>> [#uses=4]
+ %1 = load <4 x float>, <4 x float>* %0, align 16 ; <<4 x float>> [#uses=4]
%tmp20 = bitcast <4 x float> %1 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp22 = and <4 x i32> %tmp20, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647> ; <<4 x i32>> [#uses=1]
%tmp23 = bitcast <4 x i32> %tmp22 to <4 x float> ; <<4 x float>> [#uses=1]
@@ -130,7 +130,7 @@ bb: ; preds = %bb60
%5 = getelementptr float, float* %x_addr.0, i64 4 ; <float*> [#uses=1]
%6 = getelementptr float, float* %y_addr.0, i64 4 ; <float*> [#uses=1]
%7 = add i32 %i.0, 4 ; <i32> [#uses=1]
- %8 = load i32* %n, align 4 ; <i32> [#uses=1]
+ %8 = load i32, i32* %n, align 4 ; <i32> [#uses=1]
%9 = icmp sgt i32 %8, %7 ; <i1> [#uses=1]
br i1 %9, label %bb60, label %return
@@ -157,14 +157,14 @@ declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone
define void @default_get_pch_validity() nounwind {
entry:
- %tmp4 = load i32* @cl_options_count, align 4 ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* @cl_options_count, align 4 ; <i32> [#uses=1]
%tmp5 = icmp eq i32 %tmp4, 0 ; <i1> [#uses=1]
br i1 %tmp5, label %bb6, label %bb2
bb2: ; preds = %bb2, %entry
%i.019 = phi i64 [ 0, %entry ], [ %tmp25, %bb2 ] ; <i64> [#uses=1]
%tmp25 = add i64 %i.019, 1 ; <i64> [#uses=2]
- %tmp11 = load i32* @cl_options_count, align 4 ; <i32> [#uses=1]
+ %tmp11 = load i32, i32* @cl_options_count, align 4 ; <i32> [#uses=1]
%tmp12 = zext i32 %tmp11 to i64 ; <i64> [#uses=1]
%tmp13 = icmp ugt i64 %tmp12, %tmp25 ; <i1> [#uses=1]
br i1 %tmp13, label %bb2, label %bb6
diff --git a/llvm/test/CodeGen/X86/slow-incdec.ll b/llvm/test/CodeGen/X86/slow-incdec.ll
index 965dd8b03eb..1857f61e6c2 100644
--- a/llvm/test/CodeGen/X86/slow-incdec.ll
+++ b/llvm/test/CodeGen/X86/slow-incdec.ll
@@ -29,7 +29,7 @@ for.cond: ; preds = %for.body
for.body: ; preds = %for.body.preheader, %for.cond
%i.06 = phi i32 [ %dec, %for.cond ], [ %s, %for.body.preheader ]
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.06
- %0 = load i32* %arrayidx, align 4, !tbaa !1
+ %0 = load i32, i32* %arrayidx, align 4, !tbaa !1
%cmp1 = icmp eq i32 %0, 0
;
%dec = add nsw i32 %i.06, -1
@@ -60,7 +60,7 @@ for.cond: ; preds = %for.body
for.body: ; preds = %for.body.preheader, %for.cond
%i.06 = phi i32 [ %inc, %for.cond ], [ %s, %for.body.preheader ]
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.06
- %0 = load i32* %arrayidx, align 4, !tbaa !1
+ %0 = load i32, i32* %arrayidx, align 4, !tbaa !1
%cmp1 = icmp eq i32 %0, 0
%inc = add nsw i32 %i.06, 1
br i1 %cmp1, label %for.end.loopexit, label %for.cond
diff --git a/llvm/test/CodeGen/X86/split-vector-bitcast.ll b/llvm/test/CodeGen/X86/split-vector-bitcast.ll
index fae15cfaf26..8d80754b9a3 100644
--- a/llvm/test/CodeGen/X86/split-vector-bitcast.ll
+++ b/llvm/test/CodeGen/X86/split-vector-bitcast.ll
@@ -3,7 +3,7 @@
; PR10497 + another isel issue with sse2 disabled
; (This is primarily checking that this construct doesn't crash.)
define void @a(<2 x float>* %a, <2 x i32>* %b) {
- %cc = load <2 x float>* %a
+ %cc = load <2 x float>, <2 x float>* %a
%c = fadd <2 x float> %cc, %cc
%dd = bitcast <2 x float> %c to <2 x i32>
%d = add <2 x i32> %dd, %dd
diff --git a/llvm/test/CodeGen/X86/sse-align-0.ll b/llvm/test/CodeGen/X86/sse-align-0.ll
index 8ffd3124770..54c89ea411a 100644
--- a/llvm/test/CodeGen/X86/sse-align-0.ll
+++ b/llvm/test/CodeGen/X86/sse-align-0.ll
@@ -2,12 +2,12 @@
; CHECK-NOT: mov
define <4 x float> @foo(<4 x float>* %p, <4 x float> %x) nounwind {
- %t = load <4 x float>* %p
+ %t = load <4 x float>, <4 x float>* %p
%z = fmul <4 x float> %t, %x
ret <4 x float> %z
}
define <2 x double> @bar(<2 x double>* %p, <2 x double> %x) nounwind {
- %t = load <2 x double>* %p
+ %t = load <2 x double>, <2 x double>* %p
%z = fmul <2 x double> %t, %x
ret <2 x double> %z
}
diff --git a/llvm/test/CodeGen/X86/sse-align-1.ll b/llvm/test/CodeGen/X86/sse-align-1.ll
index c7a5cd55912..1a6058c6114 100644
--- a/llvm/test/CodeGen/X86/sse-align-1.ll
+++ b/llvm/test/CodeGen/X86/sse-align-1.ll
@@ -1,10 +1,10 @@
; RUN: llc < %s -march=x86-64 | grep movap | count 2
define <4 x float> @foo(<4 x float>* %p) nounwind {
- %t = load <4 x float>* %p
+ %t = load <4 x float>, <4 x float>* %p
ret <4 x float> %t
}
define <2 x double> @bar(<2 x double>* %p) nounwind {
- %t = load <2 x double>* %p
+ %t = load <2 x double>, <2 x double>* %p
ret <2 x double> %t
}
diff --git a/llvm/test/CodeGen/X86/sse-align-10.ll b/llvm/test/CodeGen/X86/sse-align-10.ll
index 0f916971255..81bf55354cd 100644
--- a/llvm/test/CodeGen/X86/sse-align-10.ll
+++ b/llvm/test/CodeGen/X86/sse-align-10.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=x86-64 | grep movups | count 1
define <2 x i64> @bar(<2 x i64>* %p) nounwind {
- %t = load <2 x i64>* %p, align 8
+ %t = load <2 x i64>, <2 x i64>* %p, align 8
ret <2 x i64> %t
}
diff --git a/llvm/test/CodeGen/X86/sse-align-12.ll b/llvm/test/CodeGen/X86/sse-align-12.ll
index 396da0f4895..9441cc0002f 100644
--- a/llvm/test/CodeGen/X86/sse-align-12.ll
+++ b/llvm/test/CodeGen/X86/sse-align-12.ll
@@ -6,7 +6,7 @@ define <4 x float> @a(<4 x float>* %y) nounwind {
; CHECK-NEXT: movups (%rdi), %xmm0
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; CHECK-NEXT: retq
- %x = load <4 x float>* %y, align 4
+ %x = load <4 x float>, <4 x float>* %y, align 4
%a = extractelement <4 x float> %x, i32 0
%b = extractelement <4 x float> %x, i32 1
%c = extractelement <4 x float> %x, i32 2
@@ -24,7 +24,7 @@ define <4 x float> @b(<4 x float>* %y, <4 x float> %z) nounwind {
; CHECK-NEXT: movups (%rdi), %xmm1
; CHECK-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; CHECK-NEXT: retq
- %x = load <4 x float>* %y, align 4
+ %x = load <4 x float>, <4 x float>* %y, align 4
%a = extractelement <4 x float> %x, i32 2
%b = extractelement <4 x float> %x, i32 3
%c = extractelement <4 x float> %z, i32 2
@@ -42,7 +42,7 @@ define <2 x double> @c(<2 x double>* %y) nounwind {
; CHECK-NEXT: movupd (%rdi), %xmm0
; CHECK-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: retq
- %x = load <2 x double>* %y, align 8
+ %x = load <2 x double>, <2 x double>* %y, align 8
%a = extractelement <2 x double> %x, i32 0
%c = extractelement <2 x double> %x, i32 1
%p = insertelement <2 x double> undef, double %c, i32 0
@@ -56,7 +56,7 @@ define <2 x double> @d(<2 x double>* %y, <2 x double> %z) nounwind {
; CHECK-NEXT: movupd (%rdi), %xmm1
; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; CHECK-NEXT: retq
- %x = load <2 x double>* %y, align 8
+ %x = load <2 x double>, <2 x double>* %y, align 8
%a = extractelement <2 x double> %x, i32 1
%c = extractelement <2 x double> %z, i32 1
%p = insertelement <2 x double> undef, double %c, i32 0
diff --git a/llvm/test/CodeGen/X86/sse-align-2.ll b/llvm/test/CodeGen/X86/sse-align-2.ll
index 98e75b56e89..063cc9d2f56 100644
--- a/llvm/test/CodeGen/X86/sse-align-2.ll
+++ b/llvm/test/CodeGen/X86/sse-align-2.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=x86-64 -mcpu=penryn | FileCheck %s
define <4 x float> @foo(<4 x float>* %p, <4 x float> %x) nounwind {
- %t = load <4 x float>* %p, align 4
+ %t = load <4 x float>, <4 x float>* %p, align 4
%z = fmul <4 x float> %t, %x
ret <4 x float> %z
}
@@ -11,7 +11,7 @@ define <4 x float> @foo(<4 x float>* %p, <4 x float> %x) nounwind {
; CHECK: ret
define <2 x double> @bar(<2 x double>* %p, <2 x double> %x) nounwind {
- %t = load <2 x double>* %p, align 8
+ %t = load <2 x double>, <2 x double>* %p, align 8
%z = fmul <2 x double> %t, %x
ret <2 x double> %z
}
diff --git a/llvm/test/CodeGen/X86/sse-align-5.ll b/llvm/test/CodeGen/X86/sse-align-5.ll
index 21cd2311b91..a64b953220d 100644
--- a/llvm/test/CodeGen/X86/sse-align-5.ll
+++ b/llvm/test/CodeGen/X86/sse-align-5.ll
@@ -1,6 +1,6 @@
; RUN: llc < %s -march=x86-64 | grep movaps | count 1
define <2 x i64> @bar(<2 x i64>* %p) nounwind {
- %t = load <2 x i64>* %p
+ %t = load <2 x i64>, <2 x i64>* %p
ret <2 x i64> %t
}
diff --git a/llvm/test/CodeGen/X86/sse-align-6.ll b/llvm/test/CodeGen/X86/sse-align-6.ll
index fcea1b102a2..01f225101b9 100644
--- a/llvm/test/CodeGen/X86/sse-align-6.ll
+++ b/llvm/test/CodeGen/X86/sse-align-6.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=x86-64 | grep movdqu | count 1
define <2 x i64> @bar(<2 x i64>* %p, <2 x i64> %x) nounwind {
- %t = load <2 x i64>* %p, align 8
+ %t = load <2 x i64>, <2 x i64>* %p, align 8
%z = mul <2 x i64> %t, %x
ret <2 x i64> %z
}
diff --git a/llvm/test/CodeGen/X86/sse-align-9.ll b/llvm/test/CodeGen/X86/sse-align-9.ll
index cb26b9535a8..182c91c69d9 100644
--- a/llvm/test/CodeGen/X86/sse-align-9.ll
+++ b/llvm/test/CodeGen/X86/sse-align-9.ll
@@ -1,10 +1,10 @@
; RUN: llc < %s -march=x86-64 | grep movup | count 2
define <4 x float> @foo(<4 x float>* %p) nounwind {
- %t = load <4 x float>* %p, align 4
+ %t = load <4 x float>, <4 x float>* %p, align 4
ret <4 x float> %t
}
define <2 x double> @bar(<2 x double>* %p) nounwind {
- %t = load <2 x double>* %p, align 8
+ %t = load <2 x double>, <2 x double>* %p, align 8
ret <2 x double> %t
}
diff --git a/llvm/test/CodeGen/X86/sse-domains.ll b/llvm/test/CodeGen/X86/sse-domains.ll
index 5b5d0f9b4a1..8016a246fa6 100644
--- a/llvm/test/CodeGen/X86/sse-domains.ll
+++ b/llvm/test/CodeGen/X86/sse-domains.ll
@@ -35,7 +35,7 @@ while.body:
%and = and <4 x i32> %x.02, <i32 127, i32 127, i32 127, i32 127>
%incdec.ptr = getelementptr inbounds <4 x i32>, <4 x i32>* %p.addr.04, i64 1
store <4 x i32> %and, <4 x i32>* %p.addr.04, align 16
- %0 = load <4 x i32>* %incdec.ptr, align 16
+ %0 = load <4 x i32>, <4 x i32>* %incdec.ptr, align 16
%add = shl <4 x i32> %0, <i32 1, i32 1, i32 1, i32 1>
%tobool = icmp eq i32 %dec, 0
br i1 %tobool, label %while.end, label %while.body
diff --git a/llvm/test/CodeGen/X86/sse-intel-ocl.ll b/llvm/test/CodeGen/X86/sse-intel-ocl.ll
index 188505072f0..b96ecc57502 100644
--- a/llvm/test/CodeGen/X86/sse-intel-ocl.ll
+++ b/llvm/test/CodeGen/X86/sse-intel-ocl.ll
@@ -36,7 +36,7 @@ define <16 x float> @testf16_inp(<16 x float> %a, <16 x float> %b) nounwind {
%y = alloca <16 x float>, align 16
%x = fadd <16 x float> %a, %b
%1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
- %2 = load <16 x float>* %y, align 16
+ %2 = load <16 x float>, <16 x float>* %y, align 16
%3 = fadd <16 x float> %2, %1
ret <16 x float> %3
}
@@ -63,7 +63,7 @@ define <16 x float> @testf16_regs(<16 x float> %a, <16 x float> %b) nounwind {
%y = alloca <16 x float>, align 16
%x = fadd <16 x float> %a, %b
%1 = call intel_ocl_bicc <16 x float> @func_float16_ptr(<16 x float> %x, <16 x float>* %y)
- %2 = load <16 x float>* %y, align 16
+ %2 = load <16 x float>, <16 x float>* %y, align 16
%3 = fadd <16 x float> %1, %b
%4 = fadd <16 x float> %2, %3
ret <16 x float> %4
diff --git a/llvm/test/CodeGen/X86/sse-load-ret.ll b/llvm/test/CodeGen/X86/sse-load-ret.ll
index 1ebcb1a6fa6..8da45a786e7 100644
--- a/llvm/test/CodeGen/X86/sse-load-ret.ll
+++ b/llvm/test/CodeGen/X86/sse-load-ret.ll
@@ -2,7 +2,7 @@
; RUN: llc < %s -march=x86 -mcpu=yonah | not grep xmm
define double @test1(double* %P) {
- %X = load double* %P ; <double> [#uses=1]
+ %X = load double, double* %P ; <double> [#uses=1]
ret double %X
}
diff --git a/llvm/test/CodeGen/X86/sse-unaligned-mem-feature.ll b/llvm/test/CodeGen/X86/sse-unaligned-mem-feature.ll
index bb558291d19..1c61a515f38 100644
--- a/llvm/test/CodeGen/X86/sse-unaligned-mem-feature.ll
+++ b/llvm/test/CodeGen/X86/sse-unaligned-mem-feature.ll
@@ -4,7 +4,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "x86_64-unknown-linux-gnu"
define <4 x float> @foo(<4 x float>* %P, <4 x float> %In) nounwind {
- %A = load <4 x float>* %P, align 4
+ %A = load <4 x float>, <4 x float>* %P, align 4
%B = fadd <4 x float> %A, %In
ret <4 x float> %B
diff --git a/llvm/test/CodeGen/X86/sse2.ll b/llvm/test/CodeGen/X86/sse2.ll
index 37efa518cef..ce98606143f 100644
--- a/llvm/test/CodeGen/X86/sse2.ll
+++ b/llvm/test/CodeGen/X86/sse2.ll
@@ -10,7 +10,7 @@ define void @test1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
; CHECK-NEXT: movlpd {{[0-9]+}}(%esp), %xmm0
; CHECK-NEXT: movapd %xmm0, (%eax)
; CHECK-NEXT: retl
- %tmp3 = load <2 x double>* %A, align 16
+ %tmp3 = load <2 x double>, <2 x double>* %A, align 16
%tmp7 = insertelement <2 x double> undef, double %B, i32 0
%tmp9 = shufflevector <2 x double> %tmp3, <2 x double> %tmp7, <2 x i32> < i32 2, i32 1 >
store <2 x double> %tmp9, <2 x double>* %r, align 16
@@ -26,7 +26,7 @@ define void @test2(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
; CHECK-NEXT: movhpd {{[0-9]+}}(%esp), %xmm0
; CHECK-NEXT: movapd %xmm0, (%eax)
; CHECK-NEXT: retl
- %tmp3 = load <2 x double>* %A, align 16
+ %tmp3 = load <2 x double>, <2 x double>* %A, align 16
%tmp7 = insertelement <2 x double> undef, double %B, i32 0
%tmp9 = shufflevector <2 x double> %tmp3, <2 x double> %tmp7, <2 x i32> < i32 0, i32 2 >
store <2 x double> %tmp9, <2 x double>* %r, align 16
@@ -44,8 +44,8 @@ define void @test3(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B) nounwind
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT: movaps %xmm0, (%eax)
; CHECK-NEXT: retl
- %tmp = load <4 x float>* %B ; <<4 x float>> [#uses=2]
- %tmp3 = load <4 x float>* %A ; <<4 x float>> [#uses=2]
+ %tmp = load <4 x float>, <4 x float>* %B ; <<4 x float>> [#uses=2]
+ %tmp3 = load <4 x float>, <4 x float>* %A ; <<4 x float>> [#uses=2]
%tmp.upgrd.1 = extractelement <4 x float> %tmp3, i32 0 ; <float> [#uses=1]
%tmp7 = extractelement <4 x float> %tmp, i32 0 ; <float> [#uses=1]
%tmp8 = extractelement <4 x float> %tmp3, i32 1 ; <float> [#uses=1]
@@ -80,9 +80,9 @@ define <4 x i32> @test5(i8** %ptr) nounwind {
; CHECK-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; CHECK-NEXT: retl
- %tmp = load i8** %ptr ; <i8*> [#uses=1]
+ %tmp = load i8*, i8** %ptr ; <i8*> [#uses=1]
%tmp.upgrd.1 = bitcast i8* %tmp to float* ; <float*> [#uses=1]
- %tmp.upgrd.2 = load float* %tmp.upgrd.1 ; <float> [#uses=1]
+ %tmp.upgrd.2 = load float, float* %tmp.upgrd.1 ; <float> [#uses=1]
%tmp.upgrd.3 = insertelement <4 x float> undef, float %tmp.upgrd.2, i32 0 ; <<4 x float>> [#uses=1]
%tmp9 = insertelement <4 x float> %tmp.upgrd.3, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
%tmp10 = insertelement <4 x float> %tmp9, float 0.000000e+00, i32 2 ; <<4 x float>> [#uses=1]
@@ -103,7 +103,7 @@ define void @test6(<4 x float>* %res, <4 x float>* %A) nounwind {
; CHECK-NEXT: movaps (%ecx), %xmm0
; CHECK-NEXT: movaps %xmm0, (%eax)
; CHECK-NEXT: retl
- %tmp1 = load <4 x float>* %A ; <<4 x float>> [#uses=1]
+ %tmp1 = load <4 x float>, <4 x float>* %A ; <<4 x float>> [#uses=1]
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
store <4 x float> %tmp2, <4 x float>* %res
ret void
@@ -129,10 +129,10 @@ define <2 x i64> @test8() nounwind {
; CHECK-NEXT: movl L_x$non_lazy_ptr, %eax
; CHECK-NEXT: movups (%eax), %xmm0
; CHECK-NEXT: retl
- %tmp = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 0) ; <i32> [#uses=1]
- %tmp3 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 1) ; <i32> [#uses=1]
- %tmp5 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 2) ; <i32> [#uses=1]
- %tmp7 = load i32* getelementptr ([4 x i32]* @x, i32 0, i32 3) ; <i32> [#uses=1]
+ %tmp = load i32, i32* getelementptr ([4 x i32]* @x, i32 0, i32 0) ; <i32> [#uses=1]
+ %tmp3 = load i32, i32* getelementptr ([4 x i32]* @x, i32 0, i32 1) ; <i32> [#uses=1]
+ %tmp5 = load i32, i32* getelementptr ([4 x i32]* @x, i32 0, i32 2) ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* getelementptr ([4 x i32]* @x, i32 0, i32 3) ; <i32> [#uses=1]
%tmp.upgrd.1 = insertelement <4 x i32> undef, i32 %tmp, i32 0 ; <<4 x i32>> [#uses=1]
%tmp13 = insertelement <4 x i32> %tmp.upgrd.1, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
%tmp14 = insertelement <4 x i32> %tmp13, i32 %tmp5, i32 2 ; <<4 x i32>> [#uses=1]
@@ -186,7 +186,7 @@ define void @test12() nounwind {
; CHECK-NEXT: addps %xmm1, %xmm0
; CHECK-NEXT: movaps %xmm0, 0
; CHECK-NEXT: retl
- %tmp1 = load <4 x float>* null ; <<4 x float>> [#uses=2]
+ %tmp1 = load <4 x float>, <4 x float>* null ; <<4 x float>> [#uses=2]
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> < float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00 >, <4 x i32> < i32 0, i32 1, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 2, i32 3, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
%tmp4 = fadd <4 x float> %tmp2, %tmp3 ; <<4 x float>> [#uses=1]
@@ -205,8 +205,8 @@ define void @test13(<4 x float>* %res, <4 x float>* %A, <4 x float>* %B, <4 x fl
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; CHECK-NEXT: movaps %xmm0, (%eax)
; CHECK-NEXT: retl
- %tmp3 = load <4 x float>* %B ; <<4 x float>> [#uses=1]
- %tmp5 = load <4 x float>* %C ; <<4 x float>> [#uses=1]
+ %tmp3 = load <4 x float>, <4 x float>* %B ; <<4 x float>> [#uses=1]
+ %tmp5 = load <4 x float>, <4 x float>* %C ; <<4 x float>> [#uses=1]
%tmp11 = shufflevector <4 x float> %tmp3, <4 x float> %tmp5, <4 x i32> < i32 1, i32 4, i32 1, i32 5 > ; <<4 x float>> [#uses=1]
store <4 x float> %tmp11, <4 x float>* %res
ret void
@@ -224,8 +224,8 @@ define <4 x float> @test14(<4 x float>* %x, <4 x float>* %y) nounwind {
; CHECK-NEXT: subps %xmm1, %xmm2
; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT: retl
- %tmp = load <4 x float>* %y ; <<4 x float>> [#uses=2]
- %tmp5 = load <4 x float>* %x ; <<4 x float>> [#uses=2]
+ %tmp = load <4 x float>, <4 x float>* %y ; <<4 x float>> [#uses=2]
+ %tmp5 = load <4 x float>, <4 x float>* %x ; <<4 x float>> [#uses=2]
%tmp9 = fadd <4 x float> %tmp5, %tmp ; <<4 x float>> [#uses=1]
%tmp21 = fsub <4 x float> %tmp5, %tmp ; <<4 x float>> [#uses=1]
%tmp27 = shufflevector <4 x float> %tmp9, <4 x float> %tmp21, <4 x i32> < i32 0, i32 1, i32 4, i32 5 > ; <<4 x float>> [#uses=1]
@@ -241,8 +241,8 @@ define <4 x float> @test15(<4 x float>* %x, <4 x float>* %y) nounwind {
; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
; CHECK-NEXT: retl
entry:
- %tmp = load <4 x float>* %y ; <<4 x float>> [#uses=1]
- %tmp3 = load <4 x float>* %x ; <<4 x float>> [#uses=1]
+ %tmp = load <4 x float>, <4 x float>* %y ; <<4 x float>> [#uses=1]
+ %tmp3 = load <4 x float>, <4 x float>* %x ; <<4 x float>> [#uses=1]
%tmp4 = shufflevector <4 x float> %tmp3, <4 x float> %tmp, <4 x i32> < i32 2, i32 3, i32 6, i32 7 > ; <<4 x float>> [#uses=1]
ret <4 x float> %tmp4
}
@@ -257,7 +257,7 @@ define <2 x double> @test16(<4 x double> * nocapture %srcA, <2 x double>* nocap
; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: retl
%i5 = getelementptr inbounds <4 x double>, <4 x double>* %srcA, i32 3
- %i6 = load <4 x double>* %i5, align 32
+ %i6 = load <4 x double>, <4 x double>* %i5, align 32
%i7 = shufflevector <4 x double> %i6, <4 x double> undef, <2 x i32> <i32 0, i32 2>
ret <2 x double> %i7
}
diff --git a/llvm/test/CodeGen/X86/sse3-avx-addsub.ll b/llvm/test/CodeGen/X86/sse3-avx-addsub.ll
index 431588f90ab..76141fc876a 100644
--- a/llvm/test/CodeGen/X86/sse3-avx-addsub.ll
+++ b/llvm/test/CodeGen/X86/sse3-avx-addsub.ll
@@ -87,7 +87,7 @@ define <2 x double> @test4(<2 x double> %A, <2 x double> %B) #0 {
define <4 x float> @test1b(<4 x float> %A, <4 x float>* %B) {
- %1 = load <4 x float>* %B
+ %1 = load <4 x float>, <4 x float>* %B
%add = fadd <4 x float> %A, %1
%sub = fsub <4 x float> %A, %1
%vecinit6 = shufflevector <4 x float> %sub, <4 x float> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
@@ -100,7 +100,7 @@ define <4 x float> @test1b(<4 x float> %A, <4 x float>* %B) {
define <8 x float> @test2b(<8 x float> %A, <8 x float>* %B) {
- %1 = load <8 x float>* %B
+ %1 = load <8 x float>, <8 x float>* %B
%add = fadd <8 x float> %A, %1
%sub = fsub <8 x float> %A, %1
%vecinit14 = shufflevector <8 x float> %sub, <8 x float> %add, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
@@ -115,7 +115,7 @@ define <8 x float> @test2b(<8 x float> %A, <8 x float>* %B) {
define <4 x double> @test3b(<4 x double> %A, <4 x double>* %B) {
- %1 = load <4 x double>* %B
+ %1 = load <4 x double>, <4 x double>* %B
%add = fadd <4 x double> %A, %1
%sub = fsub <4 x double> %A, %1
%vecinit6 = shufflevector <4 x double> %sub, <4 x double> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
@@ -130,7 +130,7 @@ define <4 x double> @test3b(<4 x double> %A, <4 x double>* %B) {
define <2 x double> @test4b(<2 x double> %A, <2 x double>* %B) {
- %1 = load <2 x double>* %B
+ %1 = load <2 x double>, <2 x double>* %B
%sub = fsub <2 x double> %A, %1
%add = fadd <2 x double> %A, %1
%vecinit2 = shufflevector <2 x double> %sub, <2 x double> %add, <2 x i32> <i32 0, i32 3>
diff --git a/llvm/test/CodeGen/X86/sse3.ll b/llvm/test/CodeGen/X86/sse3.ll
index 6c0b7013656..c1cd91beaf5 100644
--- a/llvm/test/CodeGen/X86/sse3.ll
+++ b/llvm/test/CodeGen/X86/sse3.ll
@@ -14,7 +14,7 @@ define void @t0(<8 x i16>* %dest, <8 x i16>* %old) nounwind {
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
entry:
- %tmp3 = load <8 x i16>* %old
+ %tmp3 = load <8 x i16>, <8 x i16>* %old
%tmp6 = shufflevector <8 x i16> %tmp3,
<8 x i16> < i16 1, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef >,
<8 x i32> < i32 8, i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef >
@@ -31,8 +31,8 @@ define <8 x i16> @t1(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; X64-NEXT: andps (%rdi), %xmm0
; X64-NEXT: orps %xmm1, %xmm0
; X64-NEXT: retq
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> < i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 >
ret <8 x i16> %tmp3
@@ -112,7 +112,7 @@ define void @t8(<2 x i64>* %res, <2 x i64>* %A) nounwind {
; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,4,7]
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
- %tmp = load <2 x i64>* %A
+ %tmp = load <2 x i64>, <2 x i64>* %A
%tmp.upgrd.1 = bitcast <2 x i64> %tmp to <8 x i16>
%tmp0 = extractelement <8 x i16> %tmp.upgrd.1, i32 0
%tmp1 = extractelement <8 x i16> %tmp.upgrd.1, i32 1
@@ -142,9 +142,9 @@ define void @t9(<4 x float>* %r, <2 x i32>* %A) nounwind {
; X64-NEXT: movhpd (%rsi), %xmm0
; X64-NEXT: movapd %xmm0, (%rdi)
; X64-NEXT: retq
- %tmp = load <4 x float>* %r
+ %tmp = load <4 x float>, <4 x float>* %r
%tmp.upgrd.3 = bitcast <2 x i32>* %A to double*
- %tmp.upgrd.4 = load double* %tmp.upgrd.3
+ %tmp.upgrd.4 = load double, double* %tmp.upgrd.3
%tmp.upgrd.5 = insertelement <2 x double> undef, double %tmp.upgrd.4, i32 0
%tmp5 = insertelement <2 x double> %tmp.upgrd.5, double undef, i32 1
%tmp6 = bitcast <2 x double> %tmp5 to <4 x float>
@@ -178,7 +178,7 @@ define void @t10() nounwind {
; X64-NEXT: movq _g2@{{.*}}(%rip), %rax
; X64-NEXT: movq %xmm0, (%rax)
; X64-NEXT: retq
- load <4 x i32>* @g1, align 16
+ load <4 x i32>, <4 x i32>* @g1, align 16
bitcast <4 x i32> %1 to <8 x i16>
shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> < i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef >
bitcast <8 x i16> %3 to <2 x i64>
@@ -273,9 +273,9 @@ define <4 x i32> @t17() nounwind {
; X64-NEXT: andpd {{.*}}(%rip), %xmm0
; X64-NEXT: retq
entry:
- %tmp1 = load <4 x float>* undef, align 16
+ %tmp1 = load <4 x float>, <4 x float>* undef, align 16
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
- %tmp3 = load <4 x float>* undef, align 16
+ %tmp3 = load <4 x float>, <4 x float>* undef, align 16
%tmp4 = shufflevector <4 x float> %tmp2, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
%tmp5 = bitcast <4 x float> %tmp3 to <4 x i32>
%tmp6 = shufflevector <4 x i32> %tmp5, <4 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
diff --git a/llvm/test/CodeGen/X86/sse41-pmovxrm-intrinsics.ll b/llvm/test/CodeGen/X86/sse41-pmovxrm-intrinsics.ll
index 55faf4d32b3..a16e7927714 100644
--- a/llvm/test/CodeGen/X86/sse41-pmovxrm-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/sse41-pmovxrm-intrinsics.ll
@@ -5,7 +5,7 @@ define <8 x i16> @test_llvm_x86_sse41_pmovsxbw(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovsxbw
; SSE41: pmovsxbw (%rdi), %xmm0
; AVX: vpmovsxbw (%rdi), %xmm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <8 x i16> @llvm.x86.sse41.pmovsxbw(<16 x i8> %1)
ret <8 x i16> %2
}
@@ -14,7 +14,7 @@ define <4 x i32> @test_llvm_x86_sse41_pmovsxbd(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovsxbd
; SSE41: pmovsxbd (%rdi), %xmm0
; AVX: vpmovsxbd (%rdi), %xmm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %1)
ret <4 x i32> %2
}
@@ -23,7 +23,7 @@ define <2 x i64> @test_llvm_x86_sse41_pmovsxbq(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovsxbq
; SSE41: pmovsxbq (%rdi), %xmm0
; AVX: vpmovsxbq (%rdi), %xmm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <2 x i64> @llvm.x86.sse41.pmovsxbq(<16 x i8> %1)
ret <2 x i64> %2
}
@@ -32,7 +32,7 @@ define <4 x i32> @test_llvm_x86_sse41_pmovsxwd(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovsxwd
; SSE41: pmovsxwd (%rdi), %xmm0
; AVX: vpmovsxwd (%rdi), %xmm0
- %1 = load <8 x i16>* %a, align 1
+ %1 = load <8 x i16>, <8 x i16>* %a, align 1
%2 = call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1)
ret <4 x i32> %2
}
@@ -41,7 +41,7 @@ define <2 x i64> @test_llvm_x86_sse41_pmovsxwq(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovsxwq
; SSE41: pmovsxwq (%rdi), %xmm0
; AVX: vpmovsxwq (%rdi), %xmm0
- %1 = load <8 x i16>* %a, align 1
+ %1 = load <8 x i16>, <8 x i16>* %a, align 1
%2 = call <2 x i64> @llvm.x86.sse41.pmovsxwq(<8 x i16> %1)
ret <2 x i64> %2
}
@@ -50,7 +50,7 @@ define <2 x i64> @test_llvm_x86_sse41_pmovsxdq(<4 x i32>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovsxdq
; SSE41: pmovsxdq (%rdi), %xmm0
; AVX: vpmovsxdq (%rdi), %xmm0
- %1 = load <4 x i32>* %a, align 1
+ %1 = load <4 x i32>, <4 x i32>* %a, align 1
%2 = call <2 x i64> @llvm.x86.sse41.pmovsxdq(<4 x i32> %1)
ret <2 x i64> %2
}
@@ -59,7 +59,7 @@ define <8 x i16> @test_llvm_x86_sse41_pmovzxbw(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovzxbw
; SSE41: pmovzxbw (%rdi), %xmm0
; AVX: vpmovzxbw (%rdi), %xmm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <8 x i16> @llvm.x86.sse41.pmovzxbw(<16 x i8> %1)
ret <8 x i16> %2
}
@@ -68,7 +68,7 @@ define <4 x i32> @test_llvm_x86_sse41_pmovzxbd(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovzxbd
; SSE41: pmovzxbd (%rdi), %xmm0
; AVX: vpmovzxbd (%rdi), %xmm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <4 x i32> @llvm.x86.sse41.pmovzxbd(<16 x i8> %1)
ret <4 x i32> %2
}
@@ -77,7 +77,7 @@ define <2 x i64> @test_llvm_x86_sse41_pmovzxbq(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovzxbq
; SSE41: pmovzxbq (%rdi), %xmm0
; AVX: vpmovzxbq (%rdi), %xmm0
- %1 = load <16 x i8>* %a, align 1
+ %1 = load <16 x i8>, <16 x i8>* %a, align 1
%2 = call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %1)
ret <2 x i64> %2
}
@@ -86,7 +86,7 @@ define <4 x i32> @test_llvm_x86_sse41_pmovzxwd(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovzxwd
; SSE41: pmovzxwd (%rdi), %xmm0
; AVX: vpmovzxwd (%rdi), %xmm0
- %1 = load <8 x i16>* %a, align 1
+ %1 = load <8 x i16>, <8 x i16>* %a, align 1
%2 = call <4 x i32> @llvm.x86.sse41.pmovzxwd(<8 x i16> %1)
ret <4 x i32> %2
}
@@ -95,7 +95,7 @@ define <2 x i64> @test_llvm_x86_sse41_pmovzxwq(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovzxwq
; SSE41: pmovzxwq (%rdi), %xmm0
; AVX: vpmovzxwq (%rdi), %xmm0
- %1 = load <8 x i16>* %a, align 1
+ %1 = load <8 x i16>, <8 x i16>* %a, align 1
%2 = call <2 x i64> @llvm.x86.sse41.pmovzxwq(<8 x i16> %1)
ret <2 x i64> %2
}
@@ -104,7 +104,7 @@ define <2 x i64> @test_llvm_x86_sse41_pmovzxdq(<4 x i32>* %a) {
; CHECK-LABEL: test_llvm_x86_sse41_pmovzxdq
; SSE41: pmovzxdq (%rdi), %xmm0
; AVX: vpmovzxdq (%rdi), %xmm0
- %1 = load <4 x i32>* %a, align 1
+ %1 = load <4 x i32>, <4 x i32>* %a, align 1
%2 = call <2 x i64> @llvm.x86.sse41.pmovzxdq(<4 x i32> %1)
ret <2 x i64> %2
}
diff --git a/llvm/test/CodeGen/X86/sse41.ll b/llvm/test/CodeGen/X86/sse41.ll
index 1a765395a05..f13cd0f5be9 100644
--- a/llvm/test/CodeGen/X86/sse41.ll
+++ b/llvm/test/CodeGen/X86/sse41.ll
@@ -43,7 +43,7 @@ define <2 x i64> @pmovsxbd_1(i32* %p) nounwind {
; X64-NEXT: pmovsxbd (%rdi), %xmm0
; X64-NEXT: retq
entry:
- %0 = load i32* %p, align 4
+ %0 = load i32, i32* %p, align 4
%1 = insertelement <4 x i32> undef, i32 %0, i32 0
%2 = insertelement <4 x i32> %1, i32 0, i32 1
%3 = insertelement <4 x i32> %2, i32 0, i32 2
@@ -66,7 +66,7 @@ define <2 x i64> @pmovsxwd_1(i64* %p) nounwind readonly {
; X64-NEXT: pmovsxwd (%rdi), %xmm0
; X64-NEXT: retq
entry:
- %0 = load i64* %p ; <i64> [#uses=1]
+ %0 = load i64, i64* %p ; <i64> [#uses=1]
%tmp2 = insertelement <2 x i64> zeroinitializer, i64 %0, i32 0 ; <<2 x i64>> [#uses=1]
%1 = bitcast <2 x i64> %tmp2 to <8 x i16> ; <<8 x i16>> [#uses=1]
%2 = tail call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1) nounwind readnone ; <<4 x i32>> [#uses=1]
@@ -87,7 +87,7 @@ define <2 x i64> @pmovzxbq_1() nounwind {
; X64-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; X64-NEXT: retq
entry:
- %0 = load i16* @g16, align 2 ; <i16> [#uses=1]
+ %0 = load i16, i16* @g16, align 2 ; <i16> [#uses=1]
%1 = insertelement <8 x i16> undef, i16 %0, i32 0 ; <<8 x i16>> [#uses=1]
%2 = bitcast <8 x i16> %1 to <16 x i8> ; <<16 x i8>> [#uses=1]
%3 = tail call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %2) nounwind readnone ; <<2 x i64>> [#uses=1]
@@ -330,7 +330,7 @@ define <4 x float> @insertps_from_shufflevector_1(<4 x float> %a, <4 x float>* n
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT: retq
entry:
- %0 = load <4 x float>* %pb, align 16
+ %0 = load <4 x float>, <4 x float>* %pb, align 16
%vecinit6 = shufflevector <4 x float> %a, <4 x float> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
ret <4 x float> %vecinit6
}
@@ -366,7 +366,7 @@ define <4 x i32> @pinsrd_from_shufflevector_i32(<4 x i32> %a, <4 x i32>* nocaptu
; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; X64-NEXT: retq
entry:
- %0 = load <4 x i32>* %pb, align 16
+ %0 = load <4 x i32>, <4 x i32>* %pb, align 16
%vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
ret <4 x i32> %vecinit6
}
@@ -399,7 +399,7 @@ define <4 x float> @insertps_from_load_ins_elt_undef(<4 x float> %a, float* %b)
; X64: ## BB#0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; X64-NEXT: retq
- %1 = load float* %b, align 4
+ %1 = load float, float* %b, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
%result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
ret <4 x float> %result
@@ -421,7 +421,7 @@ define <4 x i32> @insertps_from_load_ins_elt_undef_i32(<4 x i32> %a, i32* %b) {
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
; X64-NEXT: retq
- %1 = load i32* %b, align 4
+ %1 = load i32, i32* %b, align 4
%2 = insertelement <4 x i32> undef, i32 %1, i32 0
%result = shufflevector <4 x i32> %a, <4 x i32> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
ret <4 x i32> %result
@@ -823,7 +823,7 @@ define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocap
; X64: ## BB#0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; X64-NEXT: retq
- %1 = load <4 x float>* %pb, align 16
+ %1 = load <4 x float>, <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
ret <4 x float> %2
}
@@ -841,7 +841,7 @@ define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>
; X64: ## BB#0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[1],xmm0[3]
; X64-NEXT: retq
- %1 = load <4 x float>* %pb, align 16
+ %1 = load <4 x float>, <4 x float>* %pb, align 16
%2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
ret <4 x float> %2
}
@@ -862,7 +862,7 @@ define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x floa
; X64-NEXT: insertps {{.*#+}} xmm0 = mem[3],xmm0[1,2,3]
; X64-NEXT: retq
%1 = getelementptr inbounds <4 x float>, <4 x float>* %pb, i64 %index
- %2 = load <4 x float>* %1, align 16
+ %2 = load <4 x float>, <4 x float>* %1, align 16
%3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
ret <4 x float> %3
}
@@ -884,7 +884,7 @@ define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocap
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; X64-NEXT: retq
%1 = getelementptr inbounds float, float* %fb, i64 %index
- %2 = load float* %1, align 4
+ %2 = load float, float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
%5 = insertelement <4 x float> %4, float %2, i32 2
@@ -908,7 +908,7 @@ define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, <4 x float
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; X64-NEXT: retq
- %1 = load <4 x float>* %b, align 4
+ %1 = load <4 x float>, <4 x float>* %b, align 4
%2 = extractelement <4 x float> %1, i32 0
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
@@ -948,7 +948,7 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; X64-NEXT: addps %xmm3, %xmm0
; X64-NEXT: retq
%1 = getelementptr inbounds float, float* %fb, i64 %index
- %2 = load float* %1, align 4
+ %2 = load float, float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
%5 = insertelement <4 x float> %4, float %2, i32 2
@@ -978,7 +978,7 @@ define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
; X64-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-NEXT: movapd %xmm1, %xmm0
; X64-NEXT: retq
- %1 = load float* %b, align 4
+ %1 = load float, float* %b, align 4
%2 = insertelement <4 x float> undef, float %1, i32 0
%result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 4, i32 undef, i32 0, i32 7>
ret <4 x float> %result
@@ -997,7 +997,7 @@ define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
; X64: ## BB#0:
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],mem[2]
; X64-NEXT: retq
- %load = load <4 x float> *%ptr
+ %load = load <4 x float> , <4 x float> *%ptr
%ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2>
ret <4 x float> %ret
}
diff --git a/llvm/test/CodeGen/X86/sse42-intrinsics-x86.ll b/llvm/test/CodeGen/X86/sse42-intrinsics-x86.ll
index 5ca80098282..706c86b71a4 100644
--- a/llvm/test/CodeGen/X86/sse42-intrinsics-x86.ll
+++ b/llvm/test/CodeGen/X86/sse42-intrinsics-x86.ll
@@ -16,8 +16,8 @@ define i32 @test_x86_sse42_pcmpestri128_load(<16 x i8>* %a0, <16 x i8>* %a2) {
; CHECK: movl $7
; CHECK: pcmpestri $7, (
; CHECK: movl
- %1 = load <16 x i8>* %a0
- %2 = load <16 x i8>* %a2
+ %1 = load <16 x i8>, <16 x i8>* %a0
+ %2 = load <16 x i8>, <16 x i8>* %a2
%res = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %1, i32 7, <16 x i8> %2, i32 7, i8 7) ; <i32> [#uses=1]
ret i32 %res
}
@@ -94,7 +94,7 @@ define <16 x i8> @test_x86_sse42_pcmpestrm128_load(<16 x i8> %a0, <16 x i8>* %a2
; CHECK: movl $7
; CHECK: pcmpestrm $7,
; CHECK-NOT: vmov
- %1 = load <16 x i8>* %a2
+ %1 = load <16 x i8>, <16 x i8>* %a2
%res = call <16 x i8> @llvm.x86.sse42.pcmpestrm128(<16 x i8> %a0, i32 7, <16 x i8> %1, i32 7, i8 7) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}
@@ -112,8 +112,8 @@ declare i32 @llvm.x86.sse42.pcmpistri128(<16 x i8>, <16 x i8>, i8) nounwind read
define i32 @test_x86_sse42_pcmpistri128_load(<16 x i8>* %a0, <16 x i8>* %a1) {
; CHECK: pcmpistri $7, (
; CHECK: movl
- %1 = load <16 x i8>* %a0
- %2 = load <16 x i8>* %a1
+ %1 = load <16 x i8>, <16 x i8>* %a0
+ %2 = load <16 x i8>, <16 x i8>* %a1
%res = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %1, <16 x i8> %2, i8 7) ; <i32> [#uses=1]
ret i32 %res
}
@@ -176,7 +176,7 @@ declare <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8>, <16 x i8>, i8) nounwin
define <16 x i8> @test_x86_sse42_pcmpistrm128_load(<16 x i8> %a0, <16 x i8>* %a1) {
; CHECK: pcmpistrm $7, (
; CHECK-NOT: vmov
- %1 = load <16 x i8>* %a1
+ %1 = load <16 x i8>, <16 x i8>* %a1
%res = call <16 x i8> @llvm.x86.sse42.pcmpistrm128(<16 x i8> %a0, <16 x i8> %1, i8 7) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}
diff --git a/llvm/test/CodeGen/X86/ssp-data-layout.ll b/llvm/test/CodeGen/X86/ssp-data-layout.ll
index c3f6c18f20d..4a63aceb7cc 100644
--- a/llvm/test/CodeGen/X86/ssp-data-layout.ll
+++ b/llvm/test/CodeGen/X86/ssp-data-layout.ll
@@ -153,18 +153,18 @@ entry:
%arraydecay22 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i32 0
%arraydecay23 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i32 0
%arraydecay24 = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i32 0
- %0 = load i32* %x, align 4
- %1 = load i32* %y, align 4
- %2 = load i32* %z, align 4
+ %0 = load i32, i32* %x, align 4
+ %1 = load i32, i32* %y, align 4
+ %2 = load i32, i32* %z, align 4
%coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
%3 = bitcast [8 x i8]* %coerce.dive to i64*
- %4 = load i64* %3, align 1
+ %4 = load i64, i64* %3, align 1
%coerce.dive25 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
%5 = bitcast [2 x i8]* %coerce.dive25 to i16*
- %6 = load i16* %5, align 1
+ %6 = load i16, i16* %5, align 1
%coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
- %8 = load i32* %7, align 1
+ %8 = load i32, i32* %7, align 1
call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
ret void
}
@@ -297,18 +297,18 @@ entry:
%arraydecay22 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i32 0
%arraydecay23 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i32 0
%arraydecay24 = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i32 0
- %0 = load i32* %x, align 4
- %1 = load i32* %y, align 4
- %2 = load i32* %z, align 4
+ %0 = load i32, i32* %x, align 4
+ %1 = load i32, i32* %y, align 4
+ %2 = load i32, i32* %z, align 4
%coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
%3 = bitcast [8 x i8]* %coerce.dive to i64*
- %4 = load i64* %3, align 1
+ %4 = load i64, i64* %3, align 1
%coerce.dive25 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
%5 = bitcast [2 x i8]* %coerce.dive25 to i16*
- %6 = load i16* %5, align 1
+ %6 = load i16, i16* %5, align 1
%coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
- %8 = load i32* %7, align 1
+ %8 = load i32, i32* %7, align 1
call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
ret void
}
@@ -429,18 +429,18 @@ entry:
%arraydecay22 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i32 0
%arraydecay23 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i32 0
%arraydecay24 = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i32 0
- %0 = load i32* %x, align 4
- %1 = load i32* %y, align 4
- %2 = load i32* %z, align 4
+ %0 = load i32, i32* %x, align 4
+ %1 = load i32, i32* %y, align 4
+ %2 = load i32, i32* %z, align 4
%coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
%3 = bitcast [8 x i8]* %coerce.dive to i64*
- %4 = load i64* %3, align 1
+ %4 = load i64, i64* %3, align 1
%coerce.dive25 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
%5 = bitcast [2 x i8]* %coerce.dive25 to i16*
- %6 = load i16* %5, align 1
+ %6 = load i16, i16* %5, align 1
%coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
- %8 = load i32* %7, align 1
+ %8 = load i32, i32* %7, align 1
call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
ret void
}
@@ -464,7 +464,7 @@ entry:
%arrayidx = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i64 0
store i8 %call1, i8* %arrayidx, align 1
call void @end_large_char()
- %0 = load i32* %x, align 4
+ %0 = load i32, i32* %x, align 4
%arraydecay = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i32 0
call void @takes_two(i32 %0, i8* %arraydecay)
ret void
diff --git a/llvm/test/CodeGen/X86/stack-align.ll b/llvm/test/CodeGen/X86/stack-align.ll
index 059a3566aba..0cff95f266a 100644
--- a/llvm/test/CodeGen/X86/stack-align.ll
+++ b/llvm/test/CodeGen/X86/stack-align.ll
@@ -12,11 +12,11 @@ target triple = "i686-apple-darwin8"
define void @test({ double, double }* byval %z, double* %P) nounwind {
entry:
- %tmp3 = load double* @G, align 16 ; <double> [#uses=1]
+ %tmp3 = load double, double* @G, align 16 ; <double> [#uses=1]
%tmp4 = tail call double @fabs( double %tmp3 ) readnone ; <double> [#uses=1]
store volatile double %tmp4, double* %P
%tmp = getelementptr { double, double }, { double, double }* %z, i32 0, i32 0 ; <double*> [#uses=1]
- %tmp1 = load volatile double* %tmp, align 8 ; <double> [#uses=1]
+ %tmp1 = load volatile double, double* %tmp, align 8 ; <double> [#uses=1]
%tmp2 = tail call double @fabs( double %tmp1 ) readnone ; <double> [#uses=1]
%tmp6 = fadd double %tmp4, %tmp2 ; <double> [#uses=1]
store volatile double %tmp6, double* %P, align 8
diff --git a/llvm/test/CodeGen/X86/stack-protector-dbginfo.ll b/llvm/test/CodeGen/X86/stack-protector-dbginfo.ll
index a84b77eac5f..e8ded42efa9 100644
--- a/llvm/test/CodeGen/X86/stack-protector-dbginfo.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-dbginfo.ll
@@ -11,7 +11,7 @@
define i32 @_Z18read_response_sizev() #0 {
entry:
tail call void @llvm.dbg.value(metadata !22, i64 0, metadata !23, metadata !{!"0x102"}), !dbg !39
- %0 = load i64* getelementptr inbounds ({ i64, [56 x i8] }* @a, i32 0, i32 0), align 8, !dbg !40
+ %0 = load i64, i64* getelementptr inbounds ({ i64, [56 x i8] }* @a, i32 0, i32 0), align 8, !dbg !40
tail call void @llvm.dbg.value(metadata i32 undef, i64 0, metadata !64, metadata !{!"0x102"}), !dbg !71
%1 = trunc i64 %0 to i32
ret i32 %1
diff --git a/llvm/test/CodeGen/X86/stack-protector-vreg-to-vreg-copy.ll b/llvm/test/CodeGen/X86/stack-protector-vreg-to-vreg-copy.ll
index 1db4a1d6841..f3f9eebb26c 100644
--- a/llvm/test/CodeGen/X86/stack-protector-vreg-to-vreg-copy.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-vreg-to-vreg-copy.ll
@@ -28,7 +28,7 @@ declare void @g(i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32,
define void @do_something(i32 %i) #0 {
entry:
%data = alloca [8 x i8], align 1
- %0 = load i32* @state, align 4
+ %0 = load i32, i32* @state, align 4
%cmp = icmp eq i32 %0, 0
br i1 %cmp, label %if.then, label %if.else
diff --git a/llvm/test/CodeGen/X86/stack-protector-weight.ll b/llvm/test/CodeGen/X86/stack-protector-weight.ll
index 1976bfbdec2..4220a4c46a0 100644
--- a/llvm/test/CodeGen/X86/stack-protector-weight.ll
+++ b/llvm/test/CodeGen/X86/stack-protector-weight.ll
@@ -22,7 +22,7 @@ entry:
call void @foo2(i32* %arraydecay)
%idxprom = sext i32 %n to i64
%arrayidx = getelementptr inbounds [128 x i32], [128 x i32]* %a, i64 0, i64 %idxprom
- %1 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx, align 4
call void @llvm.lifetime.end(i64 512, i8* %0)
ret i32 %1
}
diff --git a/llvm/test/CodeGen/X86/stack-protector.ll b/llvm/test/CodeGen/X86/stack-protector.ll
index 45984319e1d..eab5efef602 100644
--- a/llvm/test/CodeGen/X86/stack-protector.ll
+++ b/llvm/test/CodeGen/X86/stack-protector.ll
@@ -44,7 +44,7 @@ entry:
%buf = alloca [16 x i8], align 16
store i8* %a, i8** %a.addr, align 8
%arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%arraydecay1 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
@@ -80,7 +80,7 @@ entry:
%buf = alloca [16 x i8], align 16
store i8* %a, i8** %a.addr, align 8
%arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%arraydecay1 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
@@ -112,7 +112,7 @@ entry:
%buf = alloca [16 x i8], align 16
store i8* %a, i8** %a.addr, align 8
%arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%arraydecay1 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
@@ -144,7 +144,7 @@ entry:
%buf = alloca [16 x i8], align 16
store i8* %a, i8** %a.addr, align 8
%arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%arraydecay1 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
@@ -176,7 +176,7 @@ entry:
store i8* %a, i8** %a.addr, align 8
%buf = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
%arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%buf1 = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
%arraydecay2 = getelementptr inbounds [16 x i8], [16 x i8]* %buf1, i32 0, i32 0
@@ -210,7 +210,7 @@ entry:
store i8* %a, i8** %a.addr, align 8
%buf = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
%arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%buf1 = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
%arraydecay2 = getelementptr inbounds [16 x i8], [16 x i8]* %buf1, i32 0, i32 0
@@ -244,7 +244,7 @@ entry:
store i8* %a, i8** %a.addr, align 8
%buf = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
%arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%buf1 = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
%arraydecay2 = getelementptr inbounds [16 x i8], [16 x i8]* %buf1, i32 0, i32 0
@@ -278,7 +278,7 @@ entry:
store i8* %a, i8** %a.addr, align 8
%buf = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
%arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%buf1 = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
%arraydecay2 = getelementptr inbounds [16 x i8], [16 x i8]* %buf1, i32 0, i32 0
@@ -310,7 +310,7 @@ entry:
%buf = alloca [4 x i8], align 1
store i8* %a, i8** %a.addr, align 8
%arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%arraydecay1 = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
@@ -342,7 +342,7 @@ entry:
%buf = alloca [4 x i8], align 1
store i8* %a, i8** %a.addr, align 8
%arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%arraydecay1 = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
@@ -374,7 +374,7 @@ entry:
%buf = alloca [4 x i8], align 1
store i8* %a, i8** %a.addr, align 8
%arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%arraydecay1 = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
@@ -406,7 +406,7 @@ entry:
%buf = alloca [4 x i8], align 1
store i8* %a, i8** %a.addr, align 8
%arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%arraydecay1 = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
@@ -438,7 +438,7 @@ entry:
store i8* %a, i8** %a.addr, align 8
%buf = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
%arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%buf1 = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
%arraydecay2 = getelementptr inbounds [4 x i8], [4 x i8]* %buf1, i32 0, i32 0
@@ -472,7 +472,7 @@ entry:
store i8* %a, i8** %a.addr, align 8
%buf = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
%arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%buf1 = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
%arraydecay2 = getelementptr inbounds [4 x i8], [4 x i8]* %buf1, i32 0, i32 0
@@ -506,7 +506,7 @@ entry:
store i8* %a, i8** %a.addr, align 8
%buf = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
%arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%buf1 = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
%arraydecay2 = getelementptr inbounds [4 x i8], [4 x i8]* %buf1, i32 0, i32 0
@@ -540,7 +540,7 @@ entry:
store i8* %a, i8** %a.addr, align 8
%buf = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
%arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
%buf1 = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
%arraydecay2 = getelementptr inbounds [4 x i8], [4 x i8]* %buf1, i32 0, i32 0
@@ -570,7 +570,7 @@ entry:
; DARWIN-X64: .cfi_endproc
%a.addr = alloca i8*, align 8
store i8* %a, i8** %a.addr, align 8
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %0)
ret void
}
@@ -598,7 +598,7 @@ entry:
; DARWIN-X64: .cfi_endproc
%a.addr = alloca i8*, align 8
store i8* %a, i8** %a.addr, align 8
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %0)
ret void
}
@@ -626,7 +626,7 @@ entry:
; DARWIN-X64: .cfi_endproc
%a.addr = alloca i8*, align 8
store i8* %a, i8** %a.addr, align 8
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %0)
ret void
}
@@ -654,7 +654,7 @@ entry:
; DARWIN-X64: callq ___stack_chk_fail
%a.addr = alloca i8*, align 8
store i8* %a, i8** %a.addr, align 8
- %0 = load i8** %a.addr, align 8
+ %0 = load i8*, i8** %a.addr, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %0)
ret void
}
@@ -683,7 +683,7 @@ entry:
%a = alloca i32, align 4
%j = alloca i32*, align 8
store i32 0, i32* %retval
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 1
store i32 %add, i32* %a, align 4
store i32* %a, i32** %j, align 8
@@ -715,7 +715,7 @@ entry:
%a = alloca i32, align 4
%j = alloca i32*, align 8
store i32 0, i32* %retval
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 1
store i32 %add, i32* %a, align 4
store i32* %a, i32** %j, align 8
@@ -747,7 +747,7 @@ entry:
%a = alloca i32, align 4
%j = alloca i32*, align 8
store i32 0, i32* %retval
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 1
store i32 %add, i32* %a, align 4
store i32* %a, i32** %j, align 8
@@ -779,7 +779,7 @@ entry:
%a = alloca i32, align 4
%j = alloca i32*, align 8
store i32 0, i32* %retval
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%add = add nsw i32 %0, 1
store i32 %add, i32* %a, align 4
store i32* %a, i32** %j, align 8
@@ -1318,7 +1318,7 @@ entry:
%b = alloca i32*, align 8
%y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
store i32* %y, i32** %b, align 8
- %0 = load i32** %b, align 8
+ %0 = load i32*, i32** %b, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32* %0)
ret void
}
@@ -1348,7 +1348,7 @@ entry:
%b = alloca i32*, align 8
%y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
store i32* %y, i32** %b, align 8
- %0 = load i32** %b, align 8
+ %0 = load i32*, i32** %b, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32* %0)
ret void
}
@@ -1378,7 +1378,7 @@ entry:
%b = alloca i32*, align 8
%y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
store i32* %y, i32** %b, align 8
- %0 = load i32** %b, align 8
+ %0 = load i32*, i32** %b, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32* %0)
ret void
}
@@ -1408,7 +1408,7 @@ entry:
%b = alloca i32*, align 8
%y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
store i32* %y, i32** %b, align 8
- %0 = load i32** %b, align 8
+ %0 = load i32*, i32** %b, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32* %0)
ret void
}
@@ -1767,7 +1767,7 @@ entry:
store i32 0, i32* %a, align 4
%0 = bitcast i32* %a to float*
store float* %0, float** %b, align 8
- %1 = load float** %b, align 8
+ %1 = load float*, float** %b, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), float* %1)
ret void
}
@@ -1799,7 +1799,7 @@ entry:
store i32 0, i32* %a, align 4
%0 = bitcast i32* %a to float*
store float* %0, float** %b, align 8
- %1 = load float** %b, align 8
+ %1 = load float*, float** %b, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), float* %1)
ret void
}
@@ -1831,7 +1831,7 @@ entry:
store i32 0, i32* %a, align 4
%0 = bitcast i32* %a to float*
store float* %0, float** %b, align 8
- %1 = load float** %b, align 8
+ %1 = load float*, float** %b, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), float* %1)
ret void
}
@@ -1863,7 +1863,7 @@ entry:
store i32 0, i32* %a, align 4
%0 = bitcast i32* %a to float*
store float* %0, float** %b, align 8
- %1 = load float** %b, align 8
+ %1 = load float*, float** %b, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), float* %1)
ret void
}
@@ -2428,7 +2428,7 @@ entry:
%call = call i32* @getp()
store i32* %call, i32** %a, align 8
store i32** %a, i32*** %b, align 8
- %0 = load i32*** %b, align 8
+ %0 = load i32**, i32*** %b, align 8
call void @funcall2(i32** %0)
ret void
}
@@ -2459,7 +2459,7 @@ entry:
%call = call i32* @getp()
store i32* %call, i32** %a, align 8
store i32** %a, i32*** %b, align 8
- %0 = load i32*** %b, align 8
+ %0 = load i32**, i32*** %b, align 8
call void @funcall2(i32** %0)
ret void
}
@@ -2490,7 +2490,7 @@ entry:
%call = call i32* @getp()
store i32* %call, i32** %a, align 8
store i32** %a, i32*** %b, align 8
- %0 = load i32*** %b, align 8
+ %0 = load i32**, i32*** %b, align 8
call void @funcall2(i32** %0)
ret void
}
@@ -2521,7 +2521,7 @@ entry:
%call = call i32* @getp()
store i32* %call, i32** %a, align 8
store i32** %a, i32*** %b, align 8
- %0 = load i32*** %b, align 8
+ %0 = load i32**, i32*** %b, align 8
call void @funcall2(i32** %0)
ret void
}
@@ -2552,7 +2552,7 @@ entry:
store i32* %call, i32** %a, align 8
%0 = bitcast i32** %a to float**
store float** %0, float*** %b, align 8
- %1 = load float*** %b, align 8
+ %1 = load float**, float*** %b, align 8
call void @funfloat2(float** %1)
ret void
}
@@ -2584,7 +2584,7 @@ entry:
store i32* %call, i32** %a, align 8
%0 = bitcast i32** %a to float**
store float** %0, float*** %b, align 8
- %1 = load float*** %b, align 8
+ %1 = load float**, float*** %b, align 8
call void @funfloat2(float** %1)
ret void
}
@@ -2616,7 +2616,7 @@ entry:
store i32* %call, i32** %a, align 8
%0 = bitcast i32** %a to float**
store float** %0, float*** %b, align 8
- %1 = load float*** %b, align 8
+ %1 = load float**, float*** %b, align 8
call void @funfloat2(float** %1)
ret void
}
@@ -2648,7 +2648,7 @@ entry:
store i32* %call, i32** %a, align 8
%0 = bitcast i32** %a to float**
store float** %0, float*** %b, align 8
- %1 = load float*** %b, align 8
+ %1 = load float**, float*** %b, align 8
call void @funfloat2(float** %1)
ret void
}
@@ -2676,7 +2676,7 @@ entry:
%a = alloca %class.A, align 1
%array = getelementptr inbounds %class.A, %class.A* %a, i32 0, i32 0
%arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
ret i8 %0
}
@@ -2704,7 +2704,7 @@ entry:
%a = alloca %class.A, align 1
%array = getelementptr inbounds %class.A, %class.A* %a, i32 0, i32 0
%arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
ret i8 %0
}
@@ -2732,7 +2732,7 @@ entry:
%a = alloca %class.A, align 1
%array = getelementptr inbounds %class.A, %class.A* %a, i32 0, i32 0
%arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
ret i8 %0
}
@@ -2760,7 +2760,7 @@ entry:
%a = alloca %class.A, align 1
%array = getelementptr inbounds %class.A, %class.A* %a, i32 0, i32 0
%arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
ret i8 %0
}
@@ -2791,7 +2791,7 @@ entry:
%e = getelementptr inbounds %struct.anon.0, %struct.anon.0* %d, i32 0, i32 0
%array = bitcast %union.anon.1* %e to [2 x i8]*
%arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
ret i8 %0
}
@@ -2823,7 +2823,7 @@ entry:
%e = getelementptr inbounds %struct.anon.0, %struct.anon.0* %d, i32 0, i32 0
%array = bitcast %union.anon.1* %e to [2 x i8]*
%arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
ret i8 %0
}
@@ -2855,7 +2855,7 @@ entry:
%e = getelementptr inbounds %struct.anon.0, %struct.anon.0* %d, i32 0, i32 0
%array = bitcast %union.anon.1* %e to [2 x i8]*
%arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
ret i8 %0
}
@@ -2887,7 +2887,7 @@ entry:
%e = getelementptr inbounds %struct.anon.0, %struct.anon.0* %d, i32 0, i32 0
%array = bitcast %union.anon.1* %e to [2 x i8]*
%arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
ret i8 %0
}
@@ -2914,7 +2914,7 @@ entry:
%n.addr = alloca i32, align 4
%a = alloca i32*, align 8
store i32 %n, i32* %n.addr, align 4
- %0 = load i32* %n.addr, align 4
+ %0 = load i32, i32* %n.addr, align 4
%conv = sext i32 %0 to i64
%1 = alloca i8, i64 %conv
%2 = bitcast i8* %1 to i32*
@@ -2946,7 +2946,7 @@ entry:
%n.addr = alloca i32, align 4
%a = alloca i32*, align 8
store i32 %n, i32* %n.addr, align 4
- %0 = load i32* %n.addr, align 4
+ %0 = load i32, i32* %n.addr, align 4
%conv = sext i32 %0 to i64
%1 = alloca i8, i64 %conv
%2 = bitcast i8* %1 to i32*
@@ -2978,7 +2978,7 @@ entry:
%n.addr = alloca i32, align 4
%a = alloca i32*, align 8
store i32 %n, i32* %n.addr, align 4
- %0 = load i32* %n.addr, align 4
+ %0 = load i32, i32* %n.addr, align 4
%conv = sext i32 %0 to i64
%1 = alloca i8, i64 %conv
%2 = bitcast i8* %1 to i32*
@@ -3010,7 +3010,7 @@ entry:
%n.addr = alloca i32, align 4
%a = alloca i32*, align 8
store i32 %n, i32* %n.addr, align 4
- %0 = load i32* %n.addr, align 4
+ %0 = load i32, i32* %n.addr, align 4
%conv = sext i32 %0 to i64
%1 = alloca i8, i64 %conv
%2 = bitcast i8* %1 to i32*
@@ -3040,7 +3040,7 @@ entry:
; DARWIN-X64: .cfi_endproc
%a = alloca [4 x i32], align 16
%arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %a, i32 0, i64 0
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
ret i32 %0
}
@@ -3067,7 +3067,7 @@ entry:
; DARWIN-X64: callq ___stack_chk_fail
%a = alloca [4 x i32], align 16
%arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %a, i32 0, i64 0
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
ret i32 %0
}
@@ -3094,7 +3094,7 @@ entry:
; DARWIN-X64: callq ___stack_chk_fail
%a = alloca [4 x i32], align 16
%arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %a, i32 0, i64 0
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
ret i32 %0
}
@@ -3121,7 +3121,7 @@ entry:
; DARWIN-X64: callq ___stack_chk_fail
%a = alloca [4 x i32], align 16
%arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %a, i32 0, i64 0
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
ret i32 %0
}
@@ -3151,7 +3151,7 @@ entry:
%c = alloca %struct.nest, align 4
%b = getelementptr inbounds %struct.nest, %struct.nest* %c, i32 0, i32 1
%_a = getelementptr inbounds %struct.pair, %struct.pair* %b, i32 0, i32 0
- %0 = load i32* %_a, align 4
+ %0 = load i32, i32* %_a, align 4
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 %0)
ret void
}
@@ -3182,7 +3182,7 @@ bb:
; DARWIN-X64: callq ___stack_chk_fail
%tmp = alloca %struct.small*, align 8
%tmp1 = call i32 (...)* @dummy(%struct.small** %tmp)
- %tmp2 = load %struct.small** %tmp, align 8
+ %tmp2 = load %struct.small*, %struct.small** %tmp, align 8
%tmp3 = ptrtoint %struct.small* %tmp2 to i64
%tmp4 = trunc i64 %tmp3 to i32
%tmp5 = icmp sgt i32 %tmp4, 0
@@ -3193,7 +3193,7 @@ bb6: ; preds = %bb17, %bb
%tmp8 = phi i64 [ %tmp20, %bb17 ], [ 1, %bb ]
%tmp9 = phi i32 [ %tmp14, %bb17 ], [ %tmp1, %bb ]
%tmp10 = getelementptr inbounds %struct.small, %struct.small* %tmp7, i64 0, i32 0
- %tmp11 = load i8* %tmp10, align 1
+ %tmp11 = load i8, i8* %tmp10, align 1
%tmp12 = icmp eq i8 %tmp11, 1
%tmp13 = add nsw i32 %tmp9, 8
%tmp14 = select i1 %tmp12, i32 %tmp13, i32 %tmp9
@@ -3203,7 +3203,7 @@ bb6: ; preds = %bb17, %bb
bb17: ; preds = %bb6
%tmp18 = getelementptr inbounds %struct.small*, %struct.small** %tmp, i64 %tmp8
- %tmp19 = load %struct.small** %tmp18, align 8
+ %tmp19 = load %struct.small*, %struct.small** %tmp18, align 8
%tmp20 = add i64 %tmp8, 1
br label %bb6
@@ -3344,9 +3344,9 @@ entry:
%1 = bitcast %struct.small_char* %test to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i32 0, i1 false)
%2 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 0
- %3 = load i64* %2, align 1
+ %3 = load i64, i64* %2, align 1
%4 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 1
- %5 = load i8* %4, align 1
+ %5 = load i8, i8* %4, align 1
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i64 %3, i8 %5)
ret i32 %call
}
@@ -3378,9 +3378,9 @@ entry:
%1 = bitcast %struct.small_char* %test to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i32 0, i1 false)
%2 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 0
- %3 = load i64* %2, align 1
+ %3 = load i64, i64* %2, align 1
%4 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 1
- %5 = load i8* %4, align 1
+ %5 = load i8, i8* %4, align 1
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i64 %3, i8 %5)
ret i32 %call
}
@@ -3409,7 +3409,7 @@ entry:
%test = alloca i8*, align 8
%0 = alloca i8, i64 4
store i8* %0, i8** %test, align 8
- %1 = load i8** %test, align 8
+ %1 = load i8*, i8** %test, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %1)
ret i32 %call
}
@@ -3437,7 +3437,7 @@ entry:
%test = alloca i8*, align 8
%0 = alloca i8, i64 5
store i8* %0, i8** %test, align 8
- %1 = load i8** %test, align 8
+ %1 = load i8*, i8** %test, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %1)
ret i32 %call
}
diff --git a/llvm/test/CodeGen/X86/stackmap.ll b/llvm/test/CodeGen/X86/stackmap.ll
index 5e356f3e03d..fc958ec8adf 100644
--- a/llvm/test/CodeGen/X86/stackmap.ll
+++ b/llvm/test/CodeGen/X86/stackmap.ll
@@ -321,7 +321,7 @@ bb1:
unreachable
bb2:
- %tmp = load i64* inttoptr (i64 140685446136880 to i64*)
+ %tmp = load i64, i64* inttoptr (i64 140685446136880 to i64*)
br i1 undef, label %bb16, label %bb17
bb16:
diff --git a/llvm/test/CodeGen/X86/statepoint-forward.ll b/llvm/test/CodeGen/X86/statepoint-forward.ll
index 12a6ac2c72a..5a1b18af103 100644
--- a/llvm/test/CodeGen/X86/statepoint-forward.ll
+++ b/llvm/test/CodeGen/X86/statepoint-forward.ll
@@ -22,12 +22,12 @@ declare void @func() readonly
;; be valid, but is not currently implemented.
define i1 @test_load_forward(i32 addrspace(1)* addrspace(1)* %p) gc "statepoint-example" {
entry:
- %before = load i32 addrspace(1)* addrspace(1)* %p
+ %before = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %p
%cmp1 = call i1 @f(i32 addrspace(1)* %before)
call void @llvm.assume(i1 %cmp1)
%safepoint_token = tail call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @func, i32 0, i32 0, i32 0, i32 addrspace(1)* addrspace(1)* %p)
%pnew = call i32 addrspace(1)* addrspace(1)* @llvm.experimental.gc.relocate.p1p1i32(i32 %safepoint_token, i32 4, i32 4)
- %after = load i32 addrspace(1)* addrspace(1)* %pnew
+ %after = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %pnew
%cmp2 = call i1 @f(i32 addrspace(1)* %after)
ret i1 %cmp2
@@ -46,7 +46,7 @@ entry:
store i32 addrspace(1)* %v, i32 addrspace(1)* addrspace(1)* %p
%safepoint_token = tail call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @func, i32 0, i32 0, i32 0, i32 addrspace(1)* addrspace(1)* %p)
%pnew = call i32 addrspace(1)* addrspace(1)* @llvm.experimental.gc.relocate.p1p1i32(i32 %safepoint_token, i32 4, i32 4)
- %after = load i32 addrspace(1)* addrspace(1)* %pnew
+ %after = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %pnew
%cmp2 = call i1 @f(i32 addrspace(1)* %after)
ret i1 %cmp2
@@ -69,11 +69,11 @@ declare i1 @f(i32 addrspace(1)* %v) readnone
; statepoint does not provide the collector with this root.
define i1 @test_load_forward_nongc_heap(i32 addrspace(1)** %p) gc "statepoint-example" {
entry:
- %before = load i32 addrspace(1)** %p
+ %before = load i32 addrspace(1)*, i32 addrspace(1)** %p
%cmp1 = call i1 @f(i32 addrspace(1)* %before)
call void @llvm.assume(i1 %cmp1)
call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @func, i32 0, i32 0, i32 0)
- %after = load i32 addrspace(1)** %p
+ %after = load i32 addrspace(1)*, i32 addrspace(1)** %p
%cmp2 = call i1 @f(i32 addrspace(1)* %after)
ret i1 %cmp2
@@ -91,7 +91,7 @@ entry:
call void @llvm.assume(i1 %cmp1)
store i32 addrspace(1)* %v, i32 addrspace(1)** %p
call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @func, i32 0, i32 0, i32 0)
- %after = load i32 addrspace(1)** %p
+ %after = load i32 addrspace(1)*, i32 addrspace(1)** %p
%cmp2 = call i1 @f(i32 addrspace(1)* %after)
ret i1 %cmp2
diff --git a/llvm/test/CodeGen/X86/store-narrow.ll b/llvm/test/CodeGen/X86/store-narrow.ll
index e3cc2fa668e..6c1c56e43a4 100644
--- a/llvm/test/CodeGen/X86/store-narrow.ll
+++ b/llvm/test/CodeGen/X86/store-narrow.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-apple-darwin10.2"
define void @test1(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
- %A = load i32* %a0, align 4
+ %A = load i32, i32* %a0, align 4
%B = and i32 %A, -256 ; 0xFFFFFF00
%C = zext i8 %a1 to i32
%D = or i32 %C, %B
@@ -23,7 +23,7 @@ entry:
define void @test2(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
- %A = load i32* %a0, align 4
+ %A = load i32, i32* %a0, align 4
%B = and i32 %A, -65281 ; 0xFFFF00FF
%C = zext i8 %a1 to i32
%CS = shl i32 %C, 8
@@ -40,7 +40,7 @@ entry:
define void @test3(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
- %A = load i32* %a0, align 4
+ %A = load i32, i32* %a0, align 4
%B = and i32 %A, -65536 ; 0xFFFF0000
%C = zext i16 %a1 to i32
%D = or i32 %B, %C
@@ -56,7 +56,7 @@ entry:
define void @test4(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
- %A = load i32* %a0, align 4
+ %A = load i32, i32* %a0, align 4
%B = and i32 %A, 65535 ; 0x0000FFFF
%C = zext i16 %a1 to i32
%CS = shl i32 %C, 16
@@ -73,7 +73,7 @@ entry:
define void @test5(i64* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
- %A = load i64* %a0, align 4
+ %A = load i64, i64* %a0, align 4
%B = and i64 %A, -4294901761 ; 0xFFFFFFFF0000FFFF
%C = zext i16 %a1 to i64
%CS = shl i64 %C, 16
@@ -90,7 +90,7 @@ entry:
define void @test6(i64* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
- %A = load i64* %a0, align 4
+ %A = load i64, i64* %a0, align 4
%B = and i64 %A, -280375465082881 ; 0xFFFF00FFFFFFFFFF
%C = zext i8 %a1 to i64
%CS = shl i64 %C, 40
@@ -108,8 +108,8 @@ entry:
define i32 @test7(i64* nocapture %a0, i8 zeroext %a1, i32* %P2) nounwind {
entry:
- %OtherLoad = load i32 *%P2
- %A = load i64* %a0, align 4
+ %OtherLoad = load i32 , i32 *%P2
+ %A = load i64, i64* %a0, align 4
%B = and i64 %A, -280375465082881 ; 0xFFFF00FFFFFFFFFF
%C = zext i8 %a1 to i64
%CS = shl i64 %C, 40
@@ -136,7 +136,7 @@ entry:
; X64-NEXT: movl %eax, _g_16(%rip)
; X64-NEXT: ret
define void @test8() nounwind {
- %tmp = load i32* @g_16
+ %tmp = load i32, i32* @g_16
store i32 0, i32* @g_16
%or = or i32 %tmp, 1
store i32 %or, i32* @g_16
@@ -147,7 +147,7 @@ define void @test8() nounwind {
; X64-NEXT: orb $1, _g_16(%rip)
; X64-NEXT: ret
define void @test9() nounwind {
- %tmp = load i32* @g_16
+ %tmp = load i32, i32* @g_16
%or = or i32 %tmp, 1
store i32 %or, i32* @g_16
ret void
@@ -160,7 +160,7 @@ define void @test9() nounwind {
; X64-NEXT: ret
define i8 @test10(i8* %P) nounwind ssp {
entry:
- %tmp = load i8* %P, align 1
+ %tmp = load i8, i8* %P, align 1
%conv = sext i8 %tmp to i32
%shr3 = lshr i32 %conv, 8
%conv2 = trunc i32 %shr3 to i8
diff --git a/llvm/test/CodeGen/X86/store_op_load_fold.ll b/llvm/test/CodeGen/X86/store_op_load_fold.ll
index bbeb7443c07..49fef939601 100644
--- a/llvm/test/CodeGen/X86/store_op_load_fold.ll
+++ b/llvm/test/CodeGen/X86/store_op_load_fold.ll
@@ -9,7 +9,7 @@ define void @foo() nounwind {
; CHECK-NOT: mov
; CHECK: add
; CHECK-NEXT: ret
- %tmp.0 = load i16* @X ; <i16> [#uses=1]
+ %tmp.0 = load i16, i16* @X ; <i16> [#uses=1]
%tmp.3 = add i16 %tmp.0, 329 ; <i16> [#uses=1]
store i16 %tmp.3, i16* @X
ret void
@@ -23,7 +23,7 @@ define void @test2() nounwind uwtable ssp {
; CHECK: mov
; CHECK-NEXT: and
; CHECK-NEXT: ret
- %bf.load35 = load i56* bitcast ([7 x i8]* getelementptr inbounds (%struct.S2* @s2, i32 0, i32 5) to i56*), align 16
+ %bf.load35 = load i56, i56* bitcast ([7 x i8]* getelementptr inbounds (%struct.S2* @s2, i32 0, i32 5) to i56*), align 16
%bf.clear36 = and i56 %bf.load35, -1125895611875329
store i56 %bf.clear36, i56* bitcast ([7 x i8]* getelementptr inbounds (%struct.S2* @s2, i32 0, i32 5) to i56*), align 16
ret void
diff --git a/llvm/test/CodeGen/X86/store_op_load_fold2.ll b/llvm/test/CodeGen/X86/store_op_load_fold2.ll
index 9be94f999ba..f47d87f4bb8 100644
--- a/llvm/test/CodeGen/X86/store_op_load_fold2.ll
+++ b/llvm/test/CodeGen/X86/store_op_load_fold2.ll
@@ -8,8 +8,8 @@ define internal fastcc i32 @dct_chroma(i32 %uv, i32 %cr_cbp) nounwind {
cond_true2732.preheader: ; preds = %entry
%tmp2666 = getelementptr %struct.Macroblock, %struct.Macroblock* null, i32 0, i32 13 ; <i64*> [#uses=2]
%tmp2674 = trunc i32 0 to i8 ; <i8> [#uses=1]
- %tmp2667.us.us = load i64* %tmp2666 ; <i64> [#uses=1]
- %tmp2670.us.us = load i64* null ; <i64> [#uses=1]
+ %tmp2667.us.us = load i64, i64* %tmp2666 ; <i64> [#uses=1]
+ %tmp2670.us.us = load i64, i64* null ; <i64> [#uses=1]
%shift.upgrd.1 = zext i8 %tmp2674 to i64 ; <i64> [#uses=1]
%tmp2675.us.us = shl i64 %tmp2670.us.us, %shift.upgrd.1 ; <i64> [#uses=1]
%tmp2675not.us.us = xor i64 %tmp2675.us.us, -1 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll b/llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll
index 0190f1dc98e..551bd7c2541 100644
--- a/llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll
+++ b/llvm/test/CodeGen/X86/stride-nine-with-base-reg.ll
@@ -18,7 +18,7 @@ entry:
bb:
%i.019.0 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ]
%tmp2 = getelementptr [1000 x i8], [1000 x i8]* @B, i32 0, i32 %i.019.0
- %tmp3 = load i8* %tmp2, align 4
+ %tmp3 = load i8, i8* %tmp2, align 4
%tmp4 = mul i8 %tmp3, 2
%tmp5 = getelementptr [1000 x i8], [1000 x i8]* @A, i32 0, i32 %i.019.0
store i8 %tmp4, i8* %tmp5, align 4
diff --git a/llvm/test/CodeGen/X86/stride-reuse.ll b/llvm/test/CodeGen/X86/stride-reuse.ll
index c4409e0c91f..af036f3a8f1 100644
--- a/llvm/test/CodeGen/X86/stride-reuse.ll
+++ b/llvm/test/CodeGen/X86/stride-reuse.ll
@@ -14,7 +14,7 @@ entry:
bb:
%i.019.0 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ]
%tmp2 = getelementptr [1000 x float], [1000 x float]* @B, i32 0, i32 %i.019.0
- %tmp3 = load float* %tmp2, align 4
+ %tmp3 = load float, float* %tmp2, align 4
%tmp4 = fmul float %tmp3, 2.000000e+00
%tmp5 = getelementptr [1000 x float], [1000 x float]* @A, i32 0, i32 %i.019.0
store float %tmp4, float* %tmp5, align 4
diff --git a/llvm/test/CodeGen/X86/subreg-to-reg-0.ll b/llvm/test/CodeGen/X86/subreg-to-reg-0.ll
index d718c85a1d1..251a754f438 100644
--- a/llvm/test/CodeGen/X86/subreg-to-reg-0.ll
+++ b/llvm/test/CodeGen/X86/subreg-to-reg-0.ll
@@ -4,7 +4,7 @@
; x86-64's implicit zero-extension!
define i64 @foo(i32* %p) nounwind {
- %t = load i32* %p
+ %t = load i32, i32* %p
%n = add i32 %t, 1
%z = zext i32 %n to i64
ret i64 %z
diff --git a/llvm/test/CodeGen/X86/subreg-to-reg-2.ll b/llvm/test/CodeGen/X86/subreg-to-reg-2.ll
index 5b71c5322dc..6766b013a36 100644
--- a/llvm/test/CodeGen/X86/subreg-to-reg-2.ll
+++ b/llvm/test/CodeGen/X86/subreg-to-reg-2.ll
@@ -10,8 +10,8 @@
define internal fastcc %XXValue* @t(i64* %out, %"struct.XXC::ArrayStorage"* %tmp9) nounwind {
prologue:
- %array = load %XXValue** inttoptr (i64 11111111 to %XXValue**) ; <%XXValue*> [#uses=0]
- %index = load %XXValue** inttoptr (i64 22222222 to %XXValue**) ; <%XXValue*> [#uses=1]
+ %array = load %XXValue*, %XXValue** inttoptr (i64 11111111 to %XXValue**) ; <%XXValue*> [#uses=0]
+ %index = load %XXValue*, %XXValue** inttoptr (i64 22222222 to %XXValue**) ; <%XXValue*> [#uses=1]
%tmp = ptrtoint %XXValue* %index to i64 ; <i64> [#uses=2]
store i64 %tmp, i64* %out
%tmp6 = trunc i64 %tmp to i32 ; <i32> [#uses=1]
@@ -20,6 +20,6 @@ prologue:
bb5: ; preds = %prologue
%tmp10 = zext i32 %tmp6 to i64 ; <i64> [#uses=1]
%tmp11 = getelementptr %"struct.XXC::ArrayStorage", %"struct.XXC::ArrayStorage"* %tmp9, i64 0, i32 5, i64 %tmp10 ; <%XXValue**> [#uses=1]
- %tmp12 = load %XXValue** %tmp11, align 8 ; <%XXValue*> [#uses=1]
+ %tmp12 = load %XXValue*, %XXValue** %tmp11, align 8 ; <%XXValue*> [#uses=1]
ret %XXValue* %tmp12
}
diff --git a/llvm/test/CodeGen/X86/subreg-to-reg-4.ll b/llvm/test/CodeGen/X86/subreg-to-reg-4.ll
index 0693789fe5d..8340fc53614 100644
--- a/llvm/test/CodeGen/X86/subreg-to-reg-4.ll
+++ b/llvm/test/CodeGen/X86/subreg-to-reg-4.ll
@@ -28,7 +28,7 @@ entry:
}
define void @cola(i64 *%x, i64 %y, i64* %z, i64 %u) nounwind readnone {
entry:
- %p = load i64* %x
+ %p = load i64, i64* %x
%t0 = add i64 %p, %y
%t1 = and i64 %t0, 4294967295
%t2 = xor i64 %t1, %u
@@ -37,7 +37,7 @@ entry:
}
define void @yaks(i64 *%x, i64 %y, i64* %z, i64 %u) nounwind readnone {
entry:
- %p = load i64* %x
+ %p = load i64, i64* %x
%t0 = add i64 %p, %y
%t1 = xor i64 %t0, %u
%t2 = and i64 %t1, 4294967295
@@ -46,8 +46,8 @@ entry:
}
define void @foo(i64 *%x, i64 *%y, i64* %z) nounwind readnone {
entry:
- %a = load i64* %x
- %b = load i64* %y
+ %a = load i64, i64* %x
+ %b = load i64, i64* %y
%t0 = add i64 %a, %b
%t1 = and i64 %t0, 4294967295
store i64 %t1, i64* %z
@@ -94,7 +94,7 @@ entry:
}
define void @scola(i64 *%x, i64 %y, i64* %z, i64 %u) nounwind readnone {
entry:
- %p = load i64* %x
+ %p = load i64, i64* %x
%t0 = sub i64 %p, %y
%t1 = and i64 %t0, 4294967295
%t2 = xor i64 %t1, %u
@@ -103,7 +103,7 @@ entry:
}
define void @syaks(i64 *%x, i64 %y, i64* %z, i64 %u) nounwind readnone {
entry:
- %p = load i64* %x
+ %p = load i64, i64* %x
%t0 = sub i64 %p, %y
%t1 = xor i64 %t0, %u
%t2 = and i64 %t1, 4294967295
@@ -112,8 +112,8 @@ entry:
}
define void @sfoo(i64 *%x, i64 *%y, i64* %z) nounwind readnone {
entry:
- %a = load i64* %x
- %b = load i64* %y
+ %a = load i64, i64* %x
+ %b = load i64, i64* %y
%t0 = sub i64 %a, %b
%t1 = and i64 %t0, 4294967295
store i64 %t1, i64* %z
diff --git a/llvm/test/CodeGen/X86/subreg-to-reg-6.ll b/llvm/test/CodeGen/X86/subreg-to-reg-6.ll
index 76430cd783e..bef09fa944e 100644
--- a/llvm/test/CodeGen/X86/subreg-to-reg-6.ll
+++ b/llvm/test/CodeGen/X86/subreg-to-reg-6.ll
@@ -2,7 +2,7 @@
define i64 @foo() nounwind {
entry:
- %t0 = load i32* null, align 8
+ %t0 = load i32, i32* null, align 8
switch i32 %t0, label %bb65 [
i32 16, label %bb
i32 12, label %bb56
diff --git a/llvm/test/CodeGen/X86/switch-bt.ll b/llvm/test/CodeGen/X86/switch-bt.ll
index 065d8cd669b..113782169b0 100644
--- a/llvm/test/CodeGen/X86/switch-bt.ll
+++ b/llvm/test/CodeGen/X86/switch-bt.ll
@@ -16,8 +16,8 @@ define void @test(i8* %l) nounwind {
entry:
%l.addr = alloca i8*, align 8 ; <i8**> [#uses=2]
store i8* %l, i8** %l.addr
- %tmp = load i8** %l.addr ; <i8*> [#uses=1]
- %tmp1 = load i8* %tmp ; <i8> [#uses=1]
+ %tmp = load i8*, i8** %l.addr ; <i8*> [#uses=1]
+ %tmp1 = load i8, i8* %tmp ; <i8> [#uses=1]
%conv = sext i8 %tmp1 to i32 ; <i32> [#uses=1]
switch i32 %conv, label %sw.default [
i32 62, label %sw.bb
diff --git a/llvm/test/CodeGen/X86/switch-zextload.ll b/llvm/test/CodeGen/X86/switch-zextload.ll
index 55425bc7da5..2dd3f0e3ae7 100644
--- a/llvm/test/CodeGen/X86/switch-zextload.ll
+++ b/llvm/test/CodeGen/X86/switch-zextload.ll
@@ -9,7 +9,7 @@ target triple = "i386-apple-darwin9.6"
define fastcc void @set_proof_and_disproof_numbers(%struct.node_t* nocapture %node) nounwind {
entry:
- %0 = load i8* null, align 1 ; <i8> [#uses=1]
+ %0 = load i8, i8* null, align 1 ; <i8> [#uses=1]
switch i8 %0, label %return [
i8 2, label %bb31
i8 0, label %bb80
diff --git a/llvm/test/CodeGen/X86/tail-call-win64.ll b/llvm/test/CodeGen/X86/tail-call-win64.ll
index 23e9280e772..8811b75939a 100644
--- a/llvm/test/CodeGen/X86/tail-call-win64.ll
+++ b/llvm/test/CodeGen/X86/tail-call-win64.ll
@@ -27,7 +27,7 @@ define void @tail_jmp_imm() {
@g_fptr = global void ()* @tail_tgt
define void @tail_jmp_mem() {
- %fptr = load void ()** @g_fptr
+ %fptr = load void ()*, void ()** @g_fptr
tail call void ()* %fptr()
ret void
}
diff --git a/llvm/test/CodeGen/X86/tail-dup-addr.ll b/llvm/test/CodeGen/X86/tail-dup-addr.ll
index c68a8c6bf84..3e5c8c8dbc3 100644
--- a/llvm/test/CodeGen/X86/tail-dup-addr.ll
+++ b/llvm/test/CodeGen/X86/tail-dup-addr.ll
@@ -10,7 +10,7 @@
define void @foo() noreturn nounwind uwtable ssp {
entry:
- %tmp = load i32* @a, align 4
+ %tmp = load i32, i32* @a, align 4
%foo = icmp eq i32 0, %tmp
br i1 %foo, label %sw.bb, label %sw.default
diff --git a/llvm/test/CodeGen/X86/tail-opts.ll b/llvm/test/CodeGen/X86/tail-opts.ll
index 4a7f5fe02e8..f590176d981 100644
--- a/llvm/test/CodeGen/X86/tail-opts.ll
+++ b/llvm/test/CodeGen/X86/tail-opts.ll
@@ -127,11 +127,11 @@ altret:
define i1 @dont_merge_oddly(float* %result) nounwind {
entry:
%tmp4 = getelementptr float, float* %result, i32 2
- %tmp5 = load float* %tmp4, align 4
+ %tmp5 = load float, float* %tmp4, align 4
%tmp7 = getelementptr float, float* %result, i32 4
- %tmp8 = load float* %tmp7, align 4
+ %tmp8 = load float, float* %tmp7, align 4
%tmp10 = getelementptr float, float* %result, i32 6
- %tmp11 = load float* %tmp10, align 4
+ %tmp11 = load float, float* %tmp10, align 4
%tmp12 = fcmp olt float %tmp8, %tmp11
br i1 %tmp12, label %bb, label %bb21
@@ -179,7 +179,7 @@ bb30:
define fastcc void @c_expand_expr_stmt(%union.tree_node* %expr) nounwind {
entry:
- %tmp4 = load i8* null, align 8 ; <i8> [#uses=3]
+ %tmp4 = load i8, i8* null, align 8 ; <i8> [#uses=3]
switch i8 %tmp4, label %bb3 [
i8 18, label %bb
]
@@ -199,9 +199,9 @@ bb2.i: ; preds = %bb
br label %bb3
lvalue_p.exit: ; preds = %bb.i
- %tmp21 = load %union.tree_node** null, align 8 ; <%union.tree_node*> [#uses=3]
+ %tmp21 = load %union.tree_node*, %union.tree_node** null, align 8 ; <%union.tree_node*> [#uses=3]
%tmp22 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp21, i64 0, i32 0, i32 0, i64 0 ; <i8*> [#uses=1]
- %tmp23 = load i8* %tmp22, align 8 ; <i8> [#uses=1]
+ %tmp23 = load i8, i8* %tmp22, align 8 ; <i8> [#uses=1]
%tmp24 = zext i8 %tmp23 to i32 ; <i32> [#uses=1]
switch i32 %tmp24, label %lvalue_p.exit4 [
i32 0, label %bb2.i3
@@ -211,9 +211,9 @@ lvalue_p.exit: ; preds = %bb.i
bb.i1: ; preds = %lvalue_p.exit
%tmp25 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp21, i64 0, i32 0, i32 2 ; <i32*> [#uses=1]
%tmp26 = bitcast i32* %tmp25 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
- %tmp27 = load %union.tree_node** %tmp26, align 8 ; <%union.tree_node*> [#uses=2]
+ %tmp27 = load %union.tree_node*, %union.tree_node** %tmp26, align 8 ; <%union.tree_node*> [#uses=2]
%tmp28 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp27, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
- %tmp29 = load i8* %tmp28, align 8 ; <i8> [#uses=1]
+ %tmp29 = load i8, i8* %tmp28, align 8 ; <i8> [#uses=1]
%tmp30 = zext i8 %tmp29 to i32 ; <i32> [#uses=1]
switch i32 %tmp30, label %lvalue_p.exit4 [
i32 0, label %bb2.i.i2
@@ -228,9 +228,9 @@ bb.i.i: ; preds = %bb.i1
bb2.i.i2: ; preds = %bb.i1
%tmp35 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp27, i64 0, i32 0, i32 0, i64 8 ; <i8*> [#uses=1]
%tmp36 = bitcast i8* %tmp35 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
- %tmp37 = load %union.tree_node** %tmp36, align 8 ; <%union.tree_node*> [#uses=1]
+ %tmp37 = load %union.tree_node*, %union.tree_node** %tmp36, align 8 ; <%union.tree_node*> [#uses=1]
%tmp38 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp37, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
- %tmp39 = load i8* %tmp38, align 8 ; <i8> [#uses=1]
+ %tmp39 = load i8, i8* %tmp38, align 8 ; <i8> [#uses=1]
switch i8 %tmp39, label %bb2 [
i8 16, label %lvalue_p.exit4
i8 23, label %lvalue_p.exit4
@@ -239,9 +239,9 @@ bb2.i.i2: ; preds = %bb.i1
bb2.i3: ; preds = %lvalue_p.exit
%tmp40 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp21, i64 0, i32 0, i32 0, i64 8 ; <i8*> [#uses=1]
%tmp41 = bitcast i8* %tmp40 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
- %tmp42 = load %union.tree_node** %tmp41, align 8 ; <%union.tree_node*> [#uses=1]
+ %tmp42 = load %union.tree_node*, %union.tree_node** %tmp41, align 8 ; <%union.tree_node*> [#uses=1]
%tmp43 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp42, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
- %tmp44 = load i8* %tmp43, align 8 ; <i8> [#uses=1]
+ %tmp44 = load i8, i8* %tmp43, align 8 ; <i8> [#uses=1]
switch i8 %tmp44, label %bb2 [
i8 16, label %lvalue_p.exit4
i8 23, label %lvalue_p.exit4
diff --git a/llvm/test/CodeGen/X86/tailcall-64.ll b/llvm/test/CodeGen/X86/tailcall-64.ll
index 962854a3bc4..f4d51c2eea7 100644
--- a/llvm/test/CodeGen/X86/tailcall-64.ll
+++ b/llvm/test/CodeGen/X86/tailcall-64.ll
@@ -188,7 +188,7 @@ define { i64, i64 } @crash(i8* %this) {
define void @fold_indexed_load(i8* %mbstr, i64 %idxprom) nounwind uwtable ssp {
entry:
%dsplen = getelementptr inbounds [0 x %struct.funcs], [0 x %struct.funcs]* @func_table, i64 0, i64 %idxprom, i32 2
- %x1 = load i32 (i8*)** %dsplen, align 8
+ %x1 = load i32 (i8*)*, i32 (i8*)** %dsplen, align 8
%call = tail call i32 %x1(i8* %mbstr) nounwind
ret void
}
@@ -214,7 +214,7 @@ define i32 @rdar12282281(i32 %n) nounwind uwtable ssp {
entry:
%idxprom = sext i32 %n to i64
%arrayidx = getelementptr inbounds [0 x i32 (i8*, ...)*], [0 x i32 (i8*, ...)*]* @funcs, i64 0, i64 %idxprom
- %0 = load i32 (i8*, ...)** %arrayidx, align 8
+ %0 = load i32 (i8*, ...)*, i32 (i8*, ...)** %arrayidx, align 8
%call = tail call i32 (i8*, ...)* %0(i8* null, i32 0, i32 0, i32 0, i32 0, i32 0) nounwind
ret i32 %call
}
diff --git a/llvm/test/CodeGen/X86/tailcall-returndup-void.ll b/llvm/test/CodeGen/X86/tailcall-returndup-void.ll
index 6be0e6c09e7..d9406eccc10 100644
--- a/llvm/test/CodeGen/X86/tailcall-returndup-void.ll
+++ b/llvm/test/CodeGen/X86/tailcall-returndup-void.ll
@@ -16,7 +16,7 @@ n26p: ; preds = %c263
br i1 icmp ne (i64 and (i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 7), i64 0), label %c1ZP.i, label %n1ZQ.i
n1ZQ.i: ; preds = %n26p
- %ln1ZT.i = load i64* getelementptr inbounds ([0 x i64]* @sES_closure, i64 0, i64 0), align 8
+ %ln1ZT.i = load i64, i64* getelementptr inbounds ([0 x i64]* @sES_closure, i64 0, i64 0), align 8
%ln1ZU.i = inttoptr i64 %ln1ZT.i to void (i64*, i64*, i64*, i64, i64, i64)*
tail call ghccc void %ln1ZU.i(i64* %Base_Arg, i64* %Sp_Arg, i64* %Hp_Arg, i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 %R3_Arg) nounwind
br label %rBL_info.exit
@@ -30,7 +30,7 @@ rBL_info.exit: ; preds = %c1ZP.i, %n1ZQ.i
c26a: ; preds = %c263
%ln27h = getelementptr inbounds i64, i64* %Base_Arg, i64 -2
- %ln27j = load i64* %ln27h, align 8
+ %ln27j = load i64, i64* %ln27h, align 8
%ln27k = inttoptr i64 %ln27j to void (i64*, i64*, i64*, i64, i64, i64)*
tail call ghccc void %ln27k(i64* %Base_Arg, i64* %Sp_Arg, i64* %Hp_Arg, i64 %R1_Arg, i64 %R2_Arg, i64 %R3_Arg) nounwind
ret void
diff --git a/llvm/test/CodeGen/X86/tailcall-ri64.ll b/llvm/test/CodeGen/X86/tailcall-ri64.ll
index 4cdf2d89767..443d48868e5 100644
--- a/llvm/test/CodeGen/X86/tailcall-ri64.ll
+++ b/llvm/test/CodeGen/X86/tailcall-ri64.ll
@@ -16,9 +16,9 @@ define %vt* @_ZN4llvm9UnsetInit20convertInitializerToEPNS_5RecTyE(%class*
%this, %vt* %Ty) align 2 {
entry:
%0 = bitcast %vt* %Ty to %vt* (%vt*, %class*)***
- %vtable = load %vt* (%vt*, %class*)*** %0, align 8
+ %vtable = load %vt* (%vt*, %class*)**, %vt* (%vt*, %class*)*** %0, align 8
%vfn = getelementptr inbounds %vt* (%vt*, %class*)*, %vt* (%vt*, %class*)** %vtable, i64 4
- %1 = load %vt* (%vt*, %class*)** %vfn, align 8
+ %1 = load %vt* (%vt*, %class*)*, %vt* (%vt*, %class*)** %vfn, align 8
%call = tail call %vt* %1(%vt* %Ty, %class* %this)
ret %vt* %call
}
diff --git a/llvm/test/CodeGen/X86/tailcallbyval.ll b/llvm/test/CodeGen/X86/tailcallbyval.ll
index d3a740cee52..8a0113a645a 100644
--- a/llvm/test/CodeGen/X86/tailcallbyval.ll
+++ b/llvm/test/CodeGen/X86/tailcallbyval.ll
@@ -6,7 +6,7 @@
define fastcc i32 @tailcallee(%struct.s* byval %a) nounwind {
entry:
%tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 0
- %tmp3 = load i32* %tmp2
+ %tmp3 = load i32, i32* %tmp2
ret i32 %tmp3
; CHECK: tailcallee
; CHECK: movl 4(%esp), %eax
diff --git a/llvm/test/CodeGen/X86/tailcallbyval64.ll b/llvm/test/CodeGen/X86/tailcallbyval64.ll
index db912ca5429..9df1470c67f 100644
--- a/llvm/test/CodeGen/X86/tailcallbyval64.ll
+++ b/llvm/test/CodeGen/X86/tailcallbyval64.ll
@@ -36,7 +36,7 @@ declare fastcc i64 @tailcallee(%struct.s* byval %a, i64 %val, i64 %val2, i64 %v
define fastcc i64 @tailcaller(i64 %b, %struct.s* byval %a) {
entry:
%tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 1
- %tmp3 = load i64* %tmp2, align 8
+ %tmp3 = load i64, i64* %tmp2, align 8
%tmp4 = tail call fastcc i64 @tailcallee(%struct.s* byval %a , i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
ret i64 %tmp4
}
diff --git a/llvm/test/CodeGen/X86/tbm-intrinsics-x86_64.ll b/llvm/test/CodeGen/X86/tbm-intrinsics-x86_64.ll
index 1beee72dfd0..12218cc8ec4 100644
--- a/llvm/test/CodeGen/X86/tbm-intrinsics-x86_64.ll
+++ b/llvm/test/CodeGen/X86/tbm-intrinsics-x86_64.ll
@@ -16,7 +16,7 @@ entry:
; CHECK-LABEL: test_x86_tbm_bextri_u32_m:
; CHECK-NOT: mov
; CHECK: bextr $
- %tmp1 = load i32* %a, align 4
+ %tmp1 = load i32, i32* %a, align 4
%0 = tail call i32 @llvm.x86.tbm.bextri.u32(i32 %tmp1, i32 2814)
ret i32 %0
}
@@ -37,7 +37,7 @@ entry:
; CHECK-LABEL: test_x86_tbm_bextri_u64_m:
; CHECK-NOT: mov
; CHECK: bextr $
- %tmp1 = load i64* %a, align 8
+ %tmp1 = load i64, i64* %a, align 8
%0 = tail call i64 @llvm.x86.tbm.bextri.u64(i64 %tmp1, i64 2814)
ret i64 %0
}
diff --git a/llvm/test/CodeGen/X86/tbm_patterns.ll b/llvm/test/CodeGen/X86/tbm_patterns.ll
index 79eea10af3a..80d36d5af4d 100644
--- a/llvm/test/CodeGen/X86/tbm_patterns.ll
+++ b/llvm/test/CodeGen/X86/tbm_patterns.ll
@@ -15,7 +15,7 @@ entry:
; CHECK-LABEL: test_x86_tbm_bextri_u32_m:
; CHECK-NOT: mov
; CHECK: bextr $
- %0 = load i32* %a
+ %0 = load i32, i32* %a
%1 = lshr i32 %0, 4
%2 = and i32 %1, 4095
ret i32 %2
@@ -36,7 +36,7 @@ entry:
; CHECK-LABEL: test_x86_tbm_bextri_u64_m:
; CHECK-NOT: mov
; CHECK: bextr $
- %0 = load i64* %a
+ %0 = load i64, i64* %a
%1 = lshr i64 %0, 4
%2 = and i64 %1, 4095
ret i64 %2
diff --git a/llvm/test/CodeGen/X86/test-shrink-bug.ll b/llvm/test/CodeGen/X86/test-shrink-bug.ll
index 64631ea5fc9..1bb1e638483 100644
--- a/llvm/test/CodeGen/X86/test-shrink-bug.ll
+++ b/llvm/test/CodeGen/X86/test-shrink-bug.ll
@@ -14,7 +14,7 @@ declare i32 @func_16(i8 signext %p_19, i32 %p_20) nounwind
define i32 @func_35(i64 %p_38) nounwind ssp {
entry:
- %tmp = load i8* @g_14 ; <i8> [#uses=2]
+ %tmp = load i8, i8* @g_14 ; <i8> [#uses=2]
%conv = zext i8 %tmp to i32 ; <i32> [#uses=1]
%cmp = icmp sle i32 1, %conv ; <i1> [#uses=1]
%conv2 = zext i1 %cmp to i32 ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/testl-commute.ll b/llvm/test/CodeGen/X86/testl-commute.ll
index bf6debf1754..a9a9e581d99 100644
--- a/llvm/test/CodeGen/X86/testl-commute.ll
+++ b/llvm/test/CodeGen/X86/testl-commute.ll
@@ -13,8 +13,8 @@ define i32 @test(i32* %P, i32* %G) nounwind {
; CHECK: ret
entry:
- %0 = load i32* %P, align 4 ; <i32> [#uses=3]
- %1 = load i32* %G, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %P, align 4 ; <i32> [#uses=3]
+ %1 = load i32, i32* %G, align 4 ; <i32> [#uses=1]
%2 = and i32 %1, %0 ; <i32> [#uses=1]
%3 = icmp eq i32 %2, 0 ; <i1> [#uses=1]
br i1 %3, label %bb1, label %bb
@@ -34,8 +34,8 @@ define i32 @test2(i32* %P, i32* %G) nounwind {
; CHECK: ret
entry:
- %0 = load i32* %P, align 4 ; <i32> [#uses=3]
- %1 = load i32* %G, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %P, align 4 ; <i32> [#uses=3]
+ %1 = load i32, i32* %G, align 4 ; <i32> [#uses=1]
%2 = and i32 %0, %1 ; <i32> [#uses=1]
%3 = icmp eq i32 %2, 0 ; <i1> [#uses=1]
br i1 %3, label %bb1, label %bb
@@ -55,8 +55,8 @@ define i32 @test3(i32* %P, i32* %G) nounwind {
; CHECK: ret
entry:
- %0 = load i32* %P, align 4 ; <i32> [#uses=3]
- %1 = load i32* %G, align 4 ; <i32> [#uses=1]
+ %0 = load i32, i32* %P, align 4 ; <i32> [#uses=3]
+ %1 = load i32, i32* %G, align 4 ; <i32> [#uses=1]
%2 = and i32 %0, %1 ; <i32> [#uses=1]
%3 = icmp eq i32 %2, 0 ; <i1> [#uses=1]
br i1 %3, label %bb1, label %bb
diff --git a/llvm/test/CodeGen/X86/tls-addr-non-leaf-function.ll b/llvm/test/CodeGen/X86/tls-addr-non-leaf-function.ll
index ec47232059f..b9cab65465b 100644
--- a/llvm/test/CodeGen/X86/tls-addr-non-leaf-function.ll
+++ b/llvm/test/CodeGen/X86/tls-addr-non-leaf-function.ll
@@ -32,6 +32,6 @@ target triple = "x86_64-unknown-linux-gnu"
@x = thread_local global i32 0
define i32 @foo() "no-frame-pointer-elim-non-leaf" {
- %a = load i32* @x, align 4
+ %a = load i32, i32* @x, align 4
ret i32 %a
}
diff --git a/llvm/test/CodeGen/X86/tls-local-dynamic.ll b/llvm/test/CodeGen/X86/tls-local-dynamic.ll
index 4841e52c5b0..1f1b41a8a6d 100644
--- a/llvm/test/CodeGen/X86/tls-local-dynamic.ll
+++ b/llvm/test/CodeGen/X86/tls-local-dynamic.ll
@@ -32,7 +32,7 @@ entry:
if.else:
- %0 = load i32* @x, align 4
+ %0 = load i32, i32* @x, align 4
%cmp1 = icmp eq i32 %i, 2
br i1 %cmp1, label %if.then2, label %return
; Now we call __tls_get_addr.
@@ -43,7 +43,7 @@ if.else:
if.then2:
- %1 = load i32* @y, align 4
+ %1 = load i32, i32* @y, align 4
%add = add nsw i32 %1, %0
br label %return
; This accesses TLS, but is dominated by the previous block,
diff --git a/llvm/test/CodeGen/X86/tls-pic.ll b/llvm/test/CodeGen/X86/tls-pic.ll
index 0c79da6667a..805bc25c17b 100644
--- a/llvm/test/CodeGen/X86/tls-pic.ll
+++ b/llvm/test/CodeGen/X86/tls-pic.ll
@@ -7,7 +7,7 @@
define i32 @f1() {
entry:
- %tmp1 = load i32* @i
+ %tmp1 = load i32, i32* @i
ret i32 %tmp1
}
@@ -39,7 +39,7 @@ entry:
define i32 @f3() {
entry:
- %tmp1 = load i32* @i ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* @i ; <i32> [#uses=1]
ret i32 %tmp1
}
@@ -68,8 +68,8 @@ entry:
define i32 @f5() nounwind {
entry:
- %0 = load i32* @j, align 4
- %1 = load i32* @k, align 4
+ %0 = load i32, i32* @j, align 4
+ %1 = load i32, i32* @k, align 4
%add = add nsw i32 %0, %1
ret i32 %add
}
diff --git a/llvm/test/CodeGen/X86/tls-pie.ll b/llvm/test/CodeGen/X86/tls-pie.ll
index d1e09c2442f..10fe1e94bbd 100644
--- a/llvm/test/CodeGen/X86/tls-pie.ll
+++ b/llvm/test/CodeGen/X86/tls-pie.ll
@@ -15,7 +15,7 @@ define i32 @f1() {
; X64-NEXT: ret
entry:
- %tmp1 = load i32* @i
+ %tmp1 = load i32, i32* @i
ret i32 %tmp1
}
@@ -49,7 +49,7 @@ define i32 @f3() {
; X64-NEXT: ret
entry:
- %tmp1 = load i32* @i2
+ %tmp1 = load i32, i32* @i2
ret i32 %tmp1
}
diff --git a/llvm/test/CodeGen/X86/tls.ll b/llvm/test/CodeGen/X86/tls.ll
index 75e7fc4f6bb..93ea8333d5c 100644
--- a/llvm/test/CodeGen/X86/tls.ll
+++ b/llvm/test/CodeGen/X86/tls.ll
@@ -40,7 +40,7 @@ define i32 @f1() {
; MINGW32-NEXT: retl
entry:
- %tmp1 = load i32* @i1
+ %tmp1 = load i32, i32* @i1
ret i32 %tmp1
}
@@ -105,7 +105,7 @@ define i32 @f3() nounwind {
; MINGW32-NEXT: retl
entry:
- %tmp1 = load i32* @i2
+ %tmp1 = load i32, i32* @i2
ret i32 %tmp1
}
@@ -168,7 +168,7 @@ define i32 @f5() nounwind {
; MINGW32-NEXT: retl
entry:
- %tmp1 = load i32* @i3
+ %tmp1 = load i32, i32* @i3
ret i32 %tmp1
}
@@ -219,7 +219,7 @@ define i32 @f7() {
; MINGW32-NEXT: retl
entry:
- %tmp1 = load i32* @i4
+ %tmp1 = load i32, i32* @i4
ret i32 %tmp1
}
@@ -258,7 +258,7 @@ define i32 @f9() {
; MINGW32-NEXT: retl
entry:
- %tmp1 = load i32* @i5
+ %tmp1 = load i32, i32* @i5
ret i32 %tmp1
}
@@ -309,7 +309,7 @@ define i16 @f11() {
; MINGW32: retl
entry:
- %tmp1 = load i16* @s1
+ %tmp1 = load i16, i16* @s1
ret i16 %tmp1
}
@@ -341,7 +341,7 @@ define i32 @f12() {
entry:
- %tmp1 = load i16* @s1
+ %tmp1 = load i16, i16* @s1
%tmp2 = sext i16 %tmp1 to i32
ret i32 %tmp2
}
@@ -373,7 +373,7 @@ define i8 @f13() {
; MINGW32-NEXT: retl
entry:
- %tmp1 = load i8* @b1
+ %tmp1 = load i8, i8* @b1
ret i8 %tmp1
}
@@ -404,7 +404,7 @@ define i32 @f14() {
; MINGW32-NEXT: retl
entry:
- %tmp1 = load i8* @b1
+ %tmp1 = load i8, i8* @b1
%tmp2 = sext i8 %tmp1 to i32
ret i32 %tmp2
}
diff --git a/llvm/test/CodeGen/X86/tlv-1.ll b/llvm/test/CodeGen/X86/tlv-1.ll
index 66e2f819ee2..f06810cf7cd 100644
--- a/llvm/test/CodeGen/X86/tlv-1.ll
+++ b/llvm/test/CodeGen/X86/tlv-1.ll
@@ -25,8 +25,8 @@ entry:
; CHECK: movq _b@TLVP(%rip),
; CHECK: callq *
; CHECK: subl (%rax), [[REGISTER]]
- %0 = load i32* @a, align 4
- %1 = load i32* @b, align 4
+ %0 = load i32, i32* @a, align 4
+ %1 = load i32, i32* @b, align 4
%sub = sub nsw i32 %0, %1
ret i32 %sub
}
diff --git a/llvm/test/CodeGen/X86/trunc-ext-ld-st.ll b/llvm/test/CodeGen/X86/trunc-ext-ld-st.ll
index 8de6297906c..65f42bb7140 100644
--- a/llvm/test/CodeGen/X86/trunc-ext-ld-st.ll
+++ b/llvm/test/CodeGen/X86/trunc-ext-ld-st.ll
@@ -10,7 +10,7 @@
;CHECK: ret
define void @load_2_i8(<2 x i8>* %A) {
- %T = load <2 x i8>* %A
+ %T = load <2 x i8>, <2 x i8>* %A
%G = add <2 x i8> %T, <i8 9, i8 7>
store <2 x i8> %G, <2 x i8>* %A
ret void
@@ -24,7 +24,7 @@ define void @load_2_i8(<2 x i8>* %A) {
;CHECK: movd
;CHECK: ret
define void @load_2_i16(<2 x i16>* %A) {
- %T = load <2 x i16>* %A
+ %T = load <2 x i16>, <2 x i16>* %A
%G = add <2 x i16> %T, <i16 9, i16 7>
store <2 x i16> %G, <2 x i16>* %A
ret void
@@ -36,7 +36,7 @@ define void @load_2_i16(<2 x i16>* %A) {
;CHECK: pshufd
;CHECK: ret
define void @load_2_i32(<2 x i32>* %A) {
- %T = load <2 x i32>* %A
+ %T = load <2 x i32>, <2 x i32>* %A
%G = add <2 x i32> %T, <i32 9, i32 7>
store <2 x i32> %G, <2 x i32>* %A
ret void
@@ -48,7 +48,7 @@ define void @load_2_i32(<2 x i32>* %A) {
;CHECK: pshufb
;CHECK: ret
define void @load_4_i8(<4 x i8>* %A) {
- %T = load <4 x i8>* %A
+ %T = load <4 x i8>, <4 x i8>* %A
%G = add <4 x i8> %T, <i8 1, i8 4, i8 9, i8 7>
store <4 x i8> %G, <4 x i8>* %A
ret void
@@ -60,7 +60,7 @@ define void @load_4_i8(<4 x i8>* %A) {
;CHECK: pshufb
;CHECK: ret
define void @load_4_i16(<4 x i16>* %A) {
- %T = load <4 x i16>* %A
+ %T = load <4 x i16>, <4 x i16>* %A
%G = add <4 x i16> %T, <i16 1, i16 4, i16 9, i16 7>
store <4 x i16> %G, <4 x i16>* %A
ret void
@@ -72,7 +72,7 @@ define void @load_4_i16(<4 x i16>* %A) {
;CHECK: pshufb
;CHECK: ret
define void @load_8_i8(<8 x i8>* %A) {
- %T = load <8 x i8>* %A
+ %T = load <8 x i8>, <8 x i8>* %A
%G = add <8 x i8> %T, %T
store <8 x i8> %G, <8 x i8>* %A
ret void
diff --git a/llvm/test/CodeGen/X86/trunc-to-bool.ll b/llvm/test/CodeGen/X86/trunc-to-bool.ll
index 0ed634774ab..3dd98eea7fa 100644
--- a/llvm/test/CodeGen/X86/trunc-to-bool.ll
+++ b/llvm/test/CodeGen/X86/trunc-to-bool.ll
@@ -25,7 +25,7 @@ ret_false:
; CHECK: btl
define i32 @test3(i8* %ptr) nounwind {
- %val = load i8* %ptr
+ %val = load i8, i8* %ptr
%tmp = trunc i8 %val to i1
br i1 %tmp, label %cond_true, label %cond_false
cond_true:
diff --git a/llvm/test/CodeGen/X86/twoaddr-pass-sink.ll b/llvm/test/CodeGen/X86/twoaddr-pass-sink.ll
index 7564324b29f..9a98e4794f9 100644
--- a/llvm/test/CodeGen/X86/twoaddr-pass-sink.ll
+++ b/llvm/test/CodeGen/X86/twoaddr-pass-sink.ll
@@ -13,15 +13,15 @@ bb: ; preds = %bb, %entry
%skiplist_addr.0 = getelementptr i8, i8* %skiplist, i32 %skiplist_addr.0.rec ; <i8*> [#uses=1]
%vDct_addr.0.sum43 = or i32 %vYp_addr.0.rec, 1 ; <i32> [#uses=1]
%tmp7 = getelementptr <2 x i64>, <2 x i64>* %vDct, i32 %vDct_addr.0.sum43 ; <<2 x i64>*> [#uses=1]
- %tmp8 = load <2 x i64>* %tmp7, align 16 ; <<2 x i64>> [#uses=1]
- %tmp11 = load <2 x i64>* %vDct_addr.0, align 16 ; <<2 x i64>> [#uses=1]
+ %tmp8 = load <2 x i64>, <2 x i64>* %tmp7, align 16 ; <<2 x i64>> [#uses=1]
+ %tmp11 = load <2 x i64>, <2 x i64>* %vDct_addr.0, align 16 ; <<2 x i64>> [#uses=1]
%tmp13 = bitcast <2 x i64> %tmp8 to <8 x i16> ; <<8 x i16>> [#uses=1]
%tmp15 = bitcast <2 x i64> %tmp11 to <8 x i16> ; <<8 x i16>> [#uses=1]
%tmp16 = shufflevector <8 x i16> %tmp15, <8 x i16> %tmp13, <8 x i32> < i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11 > ; <<8 x i16>> [#uses=1]
%tmp26 = mul <8 x i16> %tmp25, %tmp16 ; <<8 x i16>> [#uses=1]
%tmp27 = bitcast <8 x i16> %tmp26 to <2 x i64> ; <<2 x i64>> [#uses=1]
store <2 x i64> %tmp27, <2 x i64>* %vYp_addr.0, align 16
- %tmp37 = load i8* %skiplist_addr.0, align 1 ; <i8> [#uses=1]
+ %tmp37 = load i8, i8* %skiplist_addr.0, align 1 ; <i8> [#uses=1]
%tmp38 = icmp eq i8 %tmp37, 0 ; <i1> [#uses=1]
%indvar.next = add i32 %skiplist_addr.0.rec, 1 ; <i32> [#uses=1]
br i1 %tmp38, label %return, label %bb
diff --git a/llvm/test/CodeGen/X86/unaligned-32-byte-memops.ll b/llvm/test/CodeGen/X86/unaligned-32-byte-memops.ll
index 4b73c8c6f4a..a44d44d1b69 100644
--- a/llvm/test/CodeGen/X86/unaligned-32-byte-memops.ll
+++ b/llvm/test/CodeGen/X86/unaligned-32-byte-memops.ll
@@ -20,7 +20,7 @@ define <8 x float> @load32bytes(<8 x float>* %Ap) {
; HASWELL: vmovups
; HASWELL: retq
- %A = load <8 x float>* %Ap, align 16
+ %A = load <8 x float>, <8 x float>* %Ap, align 16
ret <8 x float> %A
}
@@ -67,8 +67,8 @@ define <8 x float> @combine_16_byte_loads(<4 x float>* %ptr) {
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 1
%ptr2 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 2
- %v1 = load <4 x float>* %ptr1, align 1
- %v2 = load <4 x float>* %ptr2, align 1
+ %v1 = load <4 x float>, <4 x float>* %ptr1, align 1
+ %v2 = load <4 x float>, <4 x float>* %ptr2, align 1
%shuffle = shufflevector <4 x float> %v1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
%v3 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %shuffle, <4 x float> %v2, i8 1)
ret <8 x float> %v3
@@ -91,8 +91,8 @@ define <8 x float> @combine_16_byte_loads_swap(<4 x float>* %ptr) {
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 2
%ptr2 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3
- %v1 = load <4 x float>* %ptr1, align 1
- %v2 = load <4 x float>* %ptr2, align 1
+ %v1 = load <4 x float>, <4 x float>* %ptr1, align 1
+ %v2 = load <4 x float>, <4 x float>* %ptr2, align 1
%shuffle = shufflevector <4 x float> %v2, <4 x float> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2, i32 3>
%v3 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %shuffle, <4 x float> %v1, i8 0)
ret <8 x float> %v3
@@ -115,8 +115,8 @@ define <8 x float> @combine_16_byte_loads_no_intrinsic(<4 x float>* %ptr) {
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3
%ptr2 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 4
- %v1 = load <4 x float>* %ptr1, align 1
- %v2 = load <4 x float>* %ptr2, align 1
+ %v1 = load <4 x float>, <4 x float>* %ptr1, align 1
+ %v2 = load <4 x float>, <4 x float>* %ptr2, align 1
%v3 = shufflevector <4 x float> %v1, <4 x float> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x float> %v3
}
@@ -138,8 +138,8 @@ define <8 x float> @combine_16_byte_loads_no_intrinsic_swap(<4 x float>* %ptr) {
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 4
%ptr2 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 5
- %v1 = load <4 x float>* %ptr1, align 1
- %v2 = load <4 x float>* %ptr2, align 1
+ %v1 = load <4 x float>, <4 x float>* %ptr1, align 1
+ %v2 = load <4 x float>, <4 x float>* %ptr2, align 1
%v3 = shufflevector <4 x float> %v2, <4 x float> %v1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
ret <8 x float> %v3
}
@@ -170,8 +170,8 @@ define <4 x i64> @combine_16_byte_loads_i64(<2 x i64>* %ptr, <4 x i64> %x) {
%ptr1 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 5
%ptr2 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 6
- %v1 = load <2 x i64>* %ptr1, align 1
- %v2 = load <2 x i64>* %ptr2, align 1
+ %v1 = load <2 x i64>, <2 x i64>* %ptr1, align 1
+ %v2 = load <2 x i64>, <2 x i64>* %ptr2, align 1
%v3 = shufflevector <2 x i64> %v1, <2 x i64> %v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%v4 = add <4 x i64> %v3, %x
ret <4 x i64> %v4
@@ -198,8 +198,8 @@ define <8 x i32> @combine_16_byte_loads_i32(<4 x i32>* %ptr, <8 x i32> %x) {
%ptr1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 6
%ptr2 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 7
- %v1 = load <4 x i32>* %ptr1, align 1
- %v2 = load <4 x i32>* %ptr2, align 1
+ %v1 = load <4 x i32>, <4 x i32>* %ptr1, align 1
+ %v2 = load <4 x i32>, <4 x i32>* %ptr2, align 1
%v3 = shufflevector <4 x i32> %v1, <4 x i32> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%v4 = add <8 x i32> %v3, %x
ret <8 x i32> %v4
@@ -226,8 +226,8 @@ define <16 x i16> @combine_16_byte_loads_i16(<8 x i16>* %ptr, <16 x i16> %x) {
%ptr1 = getelementptr inbounds <8 x i16>, <8 x i16>* %ptr, i64 7
%ptr2 = getelementptr inbounds <8 x i16>, <8 x i16>* %ptr, i64 8
- %v1 = load <8 x i16>* %ptr1, align 1
- %v2 = load <8 x i16>* %ptr2, align 1
+ %v1 = load <8 x i16>, <8 x i16>* %ptr1, align 1
+ %v2 = load <8 x i16>, <8 x i16>* %ptr2, align 1
%v3 = shufflevector <8 x i16> %v1, <8 x i16> %v2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%v4 = add <16 x i16> %v3, %x
ret <16 x i16> %v4
@@ -254,8 +254,8 @@ define <32 x i8> @combine_16_byte_loads_i8(<16 x i8>* %ptr, <32 x i8> %x) {
%ptr1 = getelementptr inbounds <16 x i8>, <16 x i8>* %ptr, i64 8
%ptr2 = getelementptr inbounds <16 x i8>, <16 x i8>* %ptr, i64 9
- %v1 = load <16 x i8>* %ptr1, align 1
- %v2 = load <16 x i8>* %ptr2, align 1
+ %v1 = load <16 x i8>, <16 x i8>* %ptr1, align 1
+ %v2 = load <16 x i8>, <16 x i8>* %ptr2, align 1
%v3 = shufflevector <16 x i8> %v1, <16 x i8> %v2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
%v4 = add <32 x i8> %v3, %x
ret <32 x i8> %v4
@@ -279,8 +279,8 @@ define <4 x double> @combine_16_byte_loads_double(<2 x double>* %ptr, <4 x doubl
%ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 9
%ptr2 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 10
- %v1 = load <2 x double>* %ptr1, align 1
- %v2 = load <2 x double>* %ptr2, align 1
+ %v1 = load <2 x double>, <2 x double>* %ptr1, align 1
+ %v2 = load <2 x double>, <2 x double>* %ptr2, align 1
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%v4 = fadd <4 x double> %v3, %x
ret <4 x double> %v4
diff --git a/llvm/test/CodeGen/X86/unaligned-spill-folding.ll b/llvm/test/CodeGen/X86/unaligned-spill-folding.ll
index 3feeb0dc790..33e2daf9dc1 100644
--- a/llvm/test/CodeGen/X86/unaligned-spill-folding.ll
+++ b/llvm/test/CodeGen/X86/unaligned-spill-folding.ll
@@ -13,7 +13,7 @@ vector.body:
%index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
%0 = getelementptr inbounds [32 x i32], [32 x i32]* @arr, i32 0, i32 %index
%1 = bitcast i32* %0 to <4 x i32>*
- %wide.load = load <4 x i32>* %1, align 16
+ %wide.load = load <4 x i32>, <4 x i32>* %1, align 16
%2 = add nsw <4 x i32> %wide.load, <i32 10, i32 10, i32 10, i32 10>
%3 = xor <4 x i32> %2, <i32 123345, i32 123345, i32 123345, i32 123345>
%4 = add nsw <4 x i32> %3, <i32 112, i32 112, i32 112, i32 112>
diff --git a/llvm/test/CodeGen/X86/unwindraise.ll b/llvm/test/CodeGen/X86/unwindraise.ll
index b9b2626114c..fb8319b63c2 100644
--- a/llvm/test/CodeGen/X86/unwindraise.ll
+++ b/llvm/test/CodeGen/X86/unwindraise.ll
@@ -50,12 +50,12 @@ while.body: ; preds = %uw_update_context.e
]
if.end3: ; preds = %while.body
- %4 = load i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)** %personality, align 8
+ %4 = load i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)*, i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)** %personality, align 8
%tobool = icmp eq i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)* %4, null
br i1 %tobool, label %if.end13, label %if.then4
if.then4: ; preds = %if.end3
- %5 = load i64* %exception_class, align 8
+ %5 = load i64, i64* %exception_class, align 8
%call6 = call i32 %4(i32 1, i32 1, i64 %5, %struct._Unwind_Exception* %exc, %struct._Unwind_Context* %cur_context)
switch i32 %call6, label %do.end21.loopexit46 [
i32 6, label %while.end
@@ -64,7 +64,7 @@ if.then4: ; preds = %if.end3
if.end13: ; preds = %if.then4, %if.end3
call fastcc void @uw_update_context_1(%struct._Unwind_Context* %cur_context, %struct._Unwind_FrameState* %fs)
- %6 = load i64* %retaddr_column.i, align 8
+ %6 = load i64, i64* %retaddr_column.i, align 8
%conv.i = trunc i64 %6 to i32
%cmp.i.i.i = icmp slt i32 %conv.i, 18
br i1 %cmp.i.i.i, label %cond.end.i.i.i, label %cond.true.i.i.i
@@ -77,17 +77,17 @@ cond.end.i.i.i: ; preds = %if.end13
%sext.i = shl i64 %6, 32
%idxprom.i.i.i = ashr exact i64 %sext.i, 32
%arrayidx.i.i.i = getelementptr inbounds [18 x i8], [18 x i8]* @dwarf_reg_size_table, i64 0, i64 %idxprom.i.i.i
- %7 = load i8* %arrayidx.i.i.i, align 1
+ %7 = load i8, i8* %arrayidx.i.i.i, align 1
%arrayidx2.i.i.i = getelementptr inbounds %struct._Unwind_Context, %struct._Unwind_Context* %cur_context, i64 0, i32 0, i64 %idxprom.i.i.i
- %8 = load i8** %arrayidx2.i.i.i, align 8
- %9 = load i64* %flags.i.i.i.i, align 8
+ %8 = load i8*, i8** %arrayidx2.i.i.i, align 8
+ %9 = load i64, i64* %flags.i.i.i.i, align 8
%and.i.i.i.i = and i64 %9, 4611686018427387904
%tobool.i.i.i = icmp eq i64 %and.i.i.i.i, 0
br i1 %tobool.i.i.i, label %if.end.i.i.i, label %land.lhs.true.i.i.i
land.lhs.true.i.i.i: ; preds = %cond.end.i.i.i
%arrayidx4.i.i.i = getelementptr inbounds %struct._Unwind_Context, %struct._Unwind_Context* %cur_context, i64 0, i32 8, i64 %idxprom.i.i.i
- %10 = load i8* %arrayidx4.i.i.i, align 1
+ %10 = load i8, i8* %arrayidx4.i.i.i, align 1
%tobool6.i.i.i = icmp eq i8 %10, 0
br i1 %tobool6.i.i.i, label %if.end.i.i.i, label %if.then.i.i.i
@@ -101,7 +101,7 @@ if.end.i.i.i: ; preds = %land.lhs.true.i.i.i
if.then10.i.i.i: ; preds = %if.end.i.i.i
%12 = bitcast i8* %8 to i64*
- %13 = load i64* %12, align 8
+ %13 = load i64, i64* %12, align 8
br label %uw_update_context.exit
cond.true14.i.i.i: ; preds = %if.end.i.i.i
@@ -117,7 +117,7 @@ uw_update_context.exit: ; preds = %if.then10.i.i.i, %i
while.end: ; preds = %if.then4
%private_1 = getelementptr inbounds %struct._Unwind_Exception, %struct._Unwind_Exception* %exc, i64 0, i32 2
store i64 0, i64* %private_1, align 8
- %15 = load i8** %ra.i, align 8
+ %15 = load i8*, i8** %ra.i, align 8
%16 = ptrtoint i8* %15 to i64
%private_2 = getelementptr inbounds %struct._Unwind_Exception, %struct._Unwind_Exception* %exc, i64 0, i32 3
store i64 %16, i64* %private_2, align 8
@@ -130,21 +130,21 @@ while.end: ; preds = %if.then4
while.body.i: ; preds = %uw_update_context.exit44, %while.end
%call.i = call fastcc i32 @uw_frame_state_for(%struct._Unwind_Context* %cur_context, %struct._Unwind_FrameState* %fs.i)
- %18 = load i8** %ra.i, align 8
+ %18 = load i8*, i8** %ra.i, align 8
%19 = ptrtoint i8* %18 to i64
- %20 = load i64* %private_2, align 8
+ %20 = load i64, i64* %private_2, align 8
%cmp.i = icmp eq i64 %19, %20
%cmp2.i = icmp eq i32 %call.i, 0
br i1 %cmp2.i, label %if.end.i, label %do.end21
if.end.i: ; preds = %while.body.i
- %21 = load i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)** %personality.i, align 8
+ %21 = load i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)*, i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)** %personality.i, align 8
%tobool.i = icmp eq i32 (i32, i32, i64, %struct._Unwind_Exception*, %struct._Unwind_Context*)* %21, null
br i1 %tobool.i, label %if.end12.i, label %if.then3.i
if.then3.i: ; preds = %if.end.i
%or.i = select i1 %cmp.i, i32 6, i32 2
- %22 = load i64* %exception_class, align 8
+ %22 = load i64, i64* %exception_class, align 8
%call5.i = call i32 %21(i32 1, i32 %or.i, i64 %22, %struct._Unwind_Exception* %exc, %struct._Unwind_Context* %cur_context)
switch i32 %call5.i, label %do.end21 [
i32 7, label %do.body19
@@ -160,7 +160,7 @@ cond.true.i: ; preds = %if.end12.i
cond.end.i: ; preds = %if.end12.i
call fastcc void @uw_update_context_1(%struct._Unwind_Context* %cur_context, %struct._Unwind_FrameState* %fs.i)
- %23 = load i64* %retaddr_column.i22, align 8
+ %23 = load i64, i64* %retaddr_column.i22, align 8
%conv.i23 = trunc i64 %23 to i32
%cmp.i.i.i24 = icmp slt i32 %conv.i23, 18
br i1 %cmp.i.i.i24, label %cond.end.i.i.i33, label %cond.true.i.i.i25
@@ -173,17 +173,17 @@ cond.end.i.i.i33: ; preds = %cond.end.i
%sext.i26 = shl i64 %23, 32
%idxprom.i.i.i27 = ashr exact i64 %sext.i26, 32
%arrayidx.i.i.i28 = getelementptr inbounds [18 x i8], [18 x i8]* @dwarf_reg_size_table, i64 0, i64 %idxprom.i.i.i27
- %24 = load i8* %arrayidx.i.i.i28, align 1
+ %24 = load i8, i8* %arrayidx.i.i.i28, align 1
%arrayidx2.i.i.i29 = getelementptr inbounds %struct._Unwind_Context, %struct._Unwind_Context* %cur_context, i64 0, i32 0, i64 %idxprom.i.i.i27
- %25 = load i8** %arrayidx2.i.i.i29, align 8
- %26 = load i64* %flags.i.i.i.i, align 8
+ %25 = load i8*, i8** %arrayidx2.i.i.i29, align 8
+ %26 = load i64, i64* %flags.i.i.i.i, align 8
%and.i.i.i.i31 = and i64 %26, 4611686018427387904
%tobool.i.i.i32 = icmp eq i64 %and.i.i.i.i31, 0
br i1 %tobool.i.i.i32, label %if.end.i.i.i39, label %land.lhs.true.i.i.i36
land.lhs.true.i.i.i36: ; preds = %cond.end.i.i.i33
%arrayidx4.i.i.i34 = getelementptr inbounds %struct._Unwind_Context, %struct._Unwind_Context* %cur_context, i64 0, i32 8, i64 %idxprom.i.i.i27
- %27 = load i8* %arrayidx4.i.i.i34, align 1
+ %27 = load i8, i8* %arrayidx4.i.i.i34, align 1
%tobool6.i.i.i35 = icmp eq i8 %27, 0
br i1 %tobool6.i.i.i35, label %if.end.i.i.i39, label %if.then.i.i.i37
@@ -197,7 +197,7 @@ if.end.i.i.i39: ; preds = %land.lhs.true.i.i.i
if.then10.i.i.i40: ; preds = %if.end.i.i.i39
%29 = bitcast i8* %25 to i64*
- %30 = load i64* %29, align 8
+ %30 = load i64, i64* %29, align 8
br label %uw_update_context.exit44
cond.true14.i.i.i41: ; preds = %if.end.i.i.i39
@@ -213,7 +213,7 @@ uw_update_context.exit44: ; preds = %if.then10.i.i.i40,
do.body19: ; preds = %if.then3.i
call void @llvm.lifetime.end(i64 -1, i8* %17)
%call20 = call fastcc i64 @uw_install_context_1(%struct._Unwind_Context* %this_context, %struct._Unwind_Context* %cur_context)
- %32 = load i8** %ra.i, align 8
+ %32 = load i8*, i8** %ra.i, align 8
call void @llvm.eh.return.i64(i64 %call20, i8* %32)
unreachable
diff --git a/llvm/test/CodeGen/X86/use-add-flags.ll b/llvm/test/CodeGen/X86/use-add-flags.ll
index fd57f5ca8d2..da0002cc252 100644
--- a/llvm/test/CodeGen/X86/use-add-flags.ll
+++ b/llvm/test/CodeGen/X86/use-add-flags.ll
@@ -13,7 +13,7 @@
; CHECK: ret
define i32 @test1(i32* %x, i32 %y, i32 %a, i32 %b) nounwind {
- %tmp2 = load i32* %x, align 4 ; <i32> [#uses=1]
+ %tmp2 = load i32, i32* %x, align 4 ; <i32> [#uses=1]
%tmp4 = add i32 %tmp2, %y ; <i32> [#uses=1]
%tmp5 = icmp slt i32 %tmp4, 0 ; <i1> [#uses=1]
%tmp.0 = select i1 %tmp5, i32 %a, i32 %b ; <i32> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/v4i32load-crash.ll b/llvm/test/CodeGen/X86/v4i32load-crash.ll
index 3e7f9e63c9a..2025a2f872d 100644
--- a/llvm/test/CodeGen/X86/v4i32load-crash.ll
+++ b/llvm/test/CodeGen/X86/v4i32load-crash.ll
@@ -13,10 +13,10 @@
; Function Attrs: nounwind
define void @fn3(i32 %el) {
entry:
- %0 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i32 0)
- %1 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i32 1)
- %2 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i32 2)
- %3 = load i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i32 3)
+ %0 = load i32, i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i32 0)
+ %1 = load i32, i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i32 1)
+ %2 = load i32, i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i32 2)
+ %3 = load i32, i32* getelementptr inbounds ([4 x i32]* @e, i32 0, i32 3)
%4 = insertelement <4 x i32> undef, i32 %0, i32 0
%5 = insertelement <4 x i32> %4, i32 %1, i32 1
%6 = insertelement <4 x i32> %5, i32 %2, i32 2
diff --git a/llvm/test/CodeGen/X86/v8i1-masks.ll b/llvm/test/CodeGen/X86/v8i1-masks.ll
index 5da6e9636eb..21fe9632198 100644
--- a/llvm/test/CodeGen/X86/v8i1-masks.ll
+++ b/llvm/test/CodeGen/X86/v8i1-masks.ll
@@ -10,10 +10,10 @@
;CHECK: ret
define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
- %v0 = load <8 x float>* %a, align 16
- %v1 = load <8 x float>* %b, align 16
+ %v0 = load <8 x float>, <8 x float>* %a, align 16
+ %v1 = load <8 x float>, <8 x float>* %b, align 16
%m0 = fcmp olt <8 x float> %v1, %v0
- %v2 = load <8 x float>* %c, align 16
+ %v2 = load <8 x float>, <8 x float>* %c, align 16
%m1 = fcmp olt <8 x float> %v2, %v0
%mand = and <8 x i1> %m1, %m0
%r = zext <8 x i1> %mand to <8 x i32>
@@ -28,8 +28,8 @@ define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
;CHECK: vmovaps
;CHECK: ret
define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
- %v0 = load <8 x float>* %a, align 16
- %v1 = load <8 x float>* %b, align 16
+ %v0 = load <8 x float>, <8 x float>* %a, align 16
+ %v1 = load <8 x float>, <8 x float>* %b, align 16
%m0 = fcmp olt <8 x float> %v1, %v0
%mand = xor <8 x i1> %m0, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
%r = zext <8 x i1> %mand to <8 x i32>
diff --git a/llvm/test/CodeGen/X86/vaargs.ll b/llvm/test/CodeGen/X86/vaargs.ll
index 8b16b35b861..3767f41c2aa 100644
--- a/llvm/test/CodeGen/X86/vaargs.ll
+++ b/llvm/test/CodeGen/X86/vaargs.ll
@@ -30,7 +30,7 @@ define i32 @sum(i32 %count, ...) nounwind optsize ssp uwtable {
.lr.ph: ; preds = %0
%3 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 0
%4 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 2
- %.pre = load i32* %3, align 16
+ %.pre = load i32, i32* %3, align 16
br label %5
; <label>:5 ; preds = %.lr.ph, %13
@@ -45,7 +45,7 @@ define i32 @sum(i32 %count, ...) nounwind optsize ssp uwtable {
br label %13
; <label>:10 ; preds = %5
- %11 = load i8** %4, align 8
+ %11 = load i8*, i8** %4, align 8
%12 = getelementptr i8, i8* %11, i64 8
store i8* %12, i8** %4, align 8
br label %13
diff --git a/llvm/test/CodeGen/X86/vararg_tailcall.ll b/llvm/test/CodeGen/X86/vararg_tailcall.ll
index eeda5e15a16..8a90a4d3a99 100644
--- a/llvm/test/CodeGen/X86/vararg_tailcall.ll
+++ b/llvm/test/CodeGen/X86/vararg_tailcall.ll
@@ -39,7 +39,7 @@ declare void @bar2(i8*, i64) optsize noredzone
; WIN64: callq
define i8* @foo2(i8* %arg) nounwind optsize ssp noredzone {
entry:
- %tmp1 = load i8** @sel, align 8
+ %tmp1 = load i8*, i8** @sel, align 8
%call = tail call i8* (i8*, i8*, ...)* @x2(i8* %arg, i8* %tmp1) nounwind optsize noredzone
ret i8* %call
}
@@ -52,10 +52,10 @@ declare i8* @x2(i8*, i8*, ...) optsize noredzone
; WIN64: callq
define i8* @foo6(i8* %arg1, i8* %arg2) nounwind optsize ssp noredzone {
entry:
- %tmp2 = load i8** @sel3, align 8
- %tmp3 = load i8** @sel4, align 8
- %tmp4 = load i8** @sel5, align 8
- %tmp5 = load i8** @sel6, align 8
+ %tmp2 = load i8*, i8** @sel3, align 8
+ %tmp3 = load i8*, i8** @sel4, align 8
+ %tmp4 = load i8*, i8** @sel5, align 8
+ %tmp5 = load i8*, i8** @sel6, align 8
%call = tail call i8* (i8*, i8*, i8*, ...)* @x3(i8* %arg1, i8* %arg2, i8* %tmp2, i8* %tmp3, i8* %tmp4, i8* %tmp5) nounwind optsize noredzone
ret i8* %call
}
@@ -68,11 +68,11 @@ declare i8* @x3(i8*, i8*, i8*, ...) optsize noredzone
; WIN64: callq
define i8* @foo7(i8* %arg1, i8* %arg2) nounwind optsize ssp noredzone {
entry:
- %tmp2 = load i8** @sel3, align 8
- %tmp3 = load i8** @sel4, align 8
- %tmp4 = load i8** @sel5, align 8
- %tmp5 = load i8** @sel6, align 8
- %tmp6 = load i8** @sel7, align 8
+ %tmp2 = load i8*, i8** @sel3, align 8
+ %tmp3 = load i8*, i8** @sel4, align 8
+ %tmp4 = load i8*, i8** @sel5, align 8
+ %tmp5 = load i8*, i8** @sel6, align 8
+ %tmp6 = load i8*, i8** @sel7, align 8
%call = tail call i8* (i8*, i8*, i8*, i8*, i8*, i8*, i8*, ...)* @x7(i8* %arg1, i8* %arg2, i8* %tmp2, i8* %tmp3, i8* %tmp4, i8* %tmp5, i8* %tmp6) nounwind optsize noredzone
ret i8* %call
}
@@ -85,10 +85,10 @@ declare i8* @x7(i8*, i8*, i8*, i8*, i8*, i8*, i8*, ...) optsize noredzone
; WIN64: callq
define i8* @foo8(i8* %arg1, i8* %arg2) nounwind optsize ssp noredzone {
entry:
- %tmp2 = load i8** @sel3, align 8
- %tmp3 = load i8** @sel4, align 8
- %tmp4 = load i8** @sel5, align 8
- %tmp5 = load i8** @sel6, align 8
+ %tmp2 = load i8*, i8** @sel3, align 8
+ %tmp3 = load i8*, i8** @sel4, align 8
+ %tmp4 = load i8*, i8** @sel5, align 8
+ %tmp5 = load i8*, i8** @sel6, align 8
%call = tail call i8* (i8*, i8*, i8*, ...)* @x3(i8* %arg1, i8* %arg2, i8* %tmp2, i8* %tmp3, i8* %tmp4, i8* %tmp5, i32 48879, i32 48879) nounwind optsize noredzone
ret i8* %call
}
diff --git a/llvm/test/CodeGen/X86/vec-loadsingles-alignment.ll b/llvm/test/CodeGen/X86/vec-loadsingles-alignment.ll
index 6aa2adb228e..ad99c9f9226 100644
--- a/llvm/test/CodeGen/X86/vec-loadsingles-alignment.ll
+++ b/llvm/test/CodeGen/X86/vec-loadsingles-alignment.ll
@@ -10,14 +10,14 @@ define i32 @subb() nounwind ssp {
; CHECK-LABEL: subb:
; CHECK: vmovups e(%rip), %ymm
entry:
- %0 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 7), align 4
- %1 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 6), align 8
- %2 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 5), align 4
- %3 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 4), align 16
- %4 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 3), align 4
- %5 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 2), align 8
- %6 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 1), align 4
- %7 = load i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 0), align 16
+ %0 = load i32, i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 7), align 4
+ %1 = load i32, i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 6), align 8
+ %2 = load i32, i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 5), align 4
+ %3 = load i32, i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 4), align 16
+ %4 = load i32, i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 3), align 4
+ %5 = load i32, i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 2), align 8
+ %6 = load i32, i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 1), align 4
+ %7 = load i32, i32* getelementptr inbounds ([8 x i32]* @e, i64 0, i64 0), align 16
%vecinit.i = insertelement <8 x i32> undef, i32 %7, i32 0
%vecinit1.i = insertelement <8 x i32> %vecinit.i, i32 %6, i32 1
%vecinit2.i = insertelement <8 x i32> %vecinit1.i, i32 %5, i32 2
diff --git a/llvm/test/CodeGen/X86/vec-trunc-store.ll b/llvm/test/CodeGen/X86/vec-trunc-store.ll
index 4d665f1843e..d7897f8f3fa 100644
--- a/llvm/test/CodeGen/X86/vec-trunc-store.ll
+++ b/llvm/test/CodeGen/X86/vec-trunc-store.ll
@@ -1,14 +1,14 @@
; RUN: llc < %s -march=x86-64
define void @foo(<8 x i32>* %p) nounwind {
- %t = load <8 x i32>* %p
+ %t = load <8 x i32>, <8 x i32>* %p
%cti69 = trunc <8 x i32> %t to <8 x i16> ; <<8 x i16>> [#uses=1]
store <8 x i16> %cti69, <8 x i16>* undef
ret void
}
define void @bar(<4 x i32>* %p) nounwind {
- %t = load <4 x i32>* %p
+ %t = load <4 x i32>, <4 x i32>* %p
%cti44 = trunc <4 x i32> %t to <4 x i16> ; <<4 x i16>> [#uses=1]
store <4 x i16> %cti44, <4 x i16>* undef
ret void
diff --git a/llvm/test/CodeGen/X86/vec_align.ll b/llvm/test/CodeGen/X86/vec_align.ll
index af53aa3fc50..558d768aced 100644
--- a/llvm/test/CodeGen/X86/vec_align.ll
+++ b/llvm/test/CodeGen/X86/vec_align.ll
@@ -21,10 +21,10 @@ define %f4 @test2() nounwind {
%Yp = getelementptr { float,float,float,float}, { float,float,float,float}* @G, i32 0, i32 2
%Zp = getelementptr { float,float,float,float}, { float,float,float,float}* @G, i32 0, i32 3
- %W = load float* %Wp
- %X = load float* %Xp
- %Y = load float* %Yp
- %Z = load float* %Zp
+ %W = load float, float* %Wp
+ %X = load float, float* %Xp
+ %Y = load float, float* %Yp
+ %Z = load float, float* %Zp
%tmp = insertelement %f4 undef, float %W, i32 0
%tmp2 = insertelement %f4 %tmp, float %X, i32 1
diff --git a/llvm/test/CodeGen/X86/vec_anyext.ll b/llvm/test/CodeGen/X86/vec_anyext.ll
index d2a4c7f60dd..c088d7f57b1 100644
--- a/llvm/test/CodeGen/X86/vec_anyext.ll
+++ b/llvm/test/CodeGen/X86/vec_anyext.ll
@@ -2,9 +2,9 @@
; PR 9267
define<4 x i16> @func_16_32() {
- %F = load <4 x i32>* undef
+ %F = load <4 x i32>, <4 x i32>* undef
%G = trunc <4 x i32> %F to <4 x i16>
- %H = load <4 x i32>* undef
+ %H = load <4 x i32>, <4 x i32>* undef
%Y = trunc <4 x i32> %H to <4 x i16>
%T = add <4 x i16> %Y, %G
store <4 x i16>%T , <4 x i16>* undef
@@ -12,9 +12,9 @@ define<4 x i16> @func_16_32() {
}
define<4 x i16> @func_16_64() {
- %F = load <4 x i64>* undef
+ %F = load <4 x i64>, <4 x i64>* undef
%G = trunc <4 x i64> %F to <4 x i16>
- %H = load <4 x i64>* undef
+ %H = load <4 x i64>, <4 x i64>* undef
%Y = trunc <4 x i64> %H to <4 x i16>
%T = xor <4 x i16> %Y, %G
store <4 x i16>%T , <4 x i16>* undef
@@ -22,36 +22,36 @@ define<4 x i16> @func_16_64() {
}
define<4 x i32> @func_32_64() {
- %F = load <4 x i64>* undef
+ %F = load <4 x i64>, <4 x i64>* undef
%G = trunc <4 x i64> %F to <4 x i32>
- %H = load <4 x i64>* undef
+ %H = load <4 x i64>, <4 x i64>* undef
%Y = trunc <4 x i64> %H to <4 x i32>
%T = or <4 x i32> %Y, %G
ret <4 x i32> %T
}
define<4 x i8> @func_8_16() {
- %F = load <4 x i16>* undef
+ %F = load <4 x i16>, <4 x i16>* undef
%G = trunc <4 x i16> %F to <4 x i8>
- %H = load <4 x i16>* undef
+ %H = load <4 x i16>, <4 x i16>* undef
%Y = trunc <4 x i16> %H to <4 x i8>
%T = add <4 x i8> %Y, %G
ret <4 x i8> %T
}
define<4 x i8> @func_8_32() {
- %F = load <4 x i32>* undef
+ %F = load <4 x i32>, <4 x i32>* undef
%G = trunc <4 x i32> %F to <4 x i8>
- %H = load <4 x i32>* undef
+ %H = load <4 x i32>, <4 x i32>* undef
%Y = trunc <4 x i32> %H to <4 x i8>
%T = sub <4 x i8> %Y, %G
ret <4 x i8> %T
}
define<4 x i8> @func_8_64() {
- %F = load <4 x i64>* undef
+ %F = load <4 x i64>, <4 x i64>* undef
%G = trunc <4 x i64> %F to <4 x i8>
- %H = load <4 x i64>* undef
+ %H = load <4 x i64>, <4 x i64>* undef
%Y = trunc <4 x i64> %H to <4 x i8>
%T = add <4 x i8> %Y, %G
ret <4 x i8> %T
diff --git a/llvm/test/CodeGen/X86/vec_extract-mmx.ll b/llvm/test/CodeGen/X86/vec_extract-mmx.ll
index c6c93a11d6d..780066d2da1 100644
--- a/llvm/test/CodeGen/X86/vec_extract-mmx.ll
+++ b/llvm/test/CodeGen/X86/vec_extract-mmx.ll
@@ -8,7 +8,7 @@ define i32 @test0(<1 x i64>* %v4) {
; CHECK-NEXT: addl $32, %eax
; CHECK-NEXT: retq
entry:
- %v5 = load <1 x i64>* %v4, align 8
+ %v5 = load <1 x i64>, <1 x i64>* %v4, align 8
%v12 = bitcast <1 x i64> %v5 to <4 x i16>
%v13 = bitcast <4 x i16> %v12 to x86_mmx
%v14 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %v13, i8 -18)
@@ -30,7 +30,7 @@ define i32 @test1(i32* nocapture readonly %ptr) {
; CHECK-NEXT: emms
; CHECK-NEXT: retq
entry:
- %0 = load i32* %ptr, align 4
+ %0 = load i32, i32* %ptr, align 4
%1 = insertelement <2 x i32> undef, i32 %0, i32 0
%2 = insertelement <2 x i32> %1, i32 0, i32 1
%3 = bitcast <2 x i32> %2 to x86_mmx
@@ -56,7 +56,7 @@ define i32 @test2(i32* nocapture readonly %ptr) {
; CHECK-NEXT: retq
entry:
%0 = bitcast i32* %ptr to x86_mmx*
- %1 = load x86_mmx* %0, align 8
+ %1 = load x86_mmx, x86_mmx* %0, align 8
%2 = tail call x86_mmx @llvm.x86.sse.pshuf.w(x86_mmx %1, i8 -24)
%3 = bitcast x86_mmx %2 to <4 x i16>
%4 = bitcast <4 x i16> %3 to <1 x i64>
diff --git a/llvm/test/CodeGen/X86/vec_extract-sse4.ll b/llvm/test/CodeGen/X86/vec_extract-sse4.ll
index 530911add12..9f4210f7847 100644
--- a/llvm/test/CodeGen/X86/vec_extract-sse4.ll
+++ b/llvm/test/CodeGen/X86/vec_extract-sse4.ll
@@ -9,7 +9,7 @@ define void @t1(float* %R, <4 x float>* %P1) nounwind {
; CHECK-NEXT: movss %xmm0, (%eax)
; CHECK-NEXT: retl
- %X = load <4 x float>* %P1
+ %X = load <4 x float>, <4 x float>* %P1
%tmp = extractelement <4 x float> %X, i32 3
store float %tmp, float* %R
ret void
@@ -27,7 +27,7 @@ define float @t2(<4 x float>* %P1) nounwind {
; CHECK-NEXT: popl %eax
; CHECK-NEXT: retl
- %X = load <4 x float>* %P1
+ %X = load <4 x float>, <4 x float>* %P1
%tmp = extractelement <4 x float> %X, i32 2
ret float %tmp
}
@@ -41,7 +41,7 @@ define void @t3(i32* %R, <4 x i32>* %P1) nounwind {
; CHECK-NEXT: movl %ecx, (%eax)
; CHECK-NEXT: retl
- %X = load <4 x i32>* %P1
+ %X = load <4 x i32>, <4 x i32>* %P1
%tmp = extractelement <4 x i32> %X, i32 3
store i32 %tmp, i32* %R
ret void
@@ -54,7 +54,7 @@ define i32 @t4(<4 x i32>* %P1) nounwind {
; CHECK-NEXT: movl 12(%eax), %eax
; CHECK-NEXT: retl
- %X = load <4 x i32>* %P1
+ %X = load <4 x i32>, <4 x i32>* %P1
%tmp = extractelement <4 x i32> %X, i32 3
ret i32 %tmp
}
diff --git a/llvm/test/CodeGen/X86/vec_extract.ll b/llvm/test/CodeGen/X86/vec_extract.ll
index 6df7be7a087..3b478880590 100644
--- a/llvm/test/CodeGen/X86/vec_extract.ll
+++ b/llvm/test/CodeGen/X86/vec_extract.ll
@@ -12,7 +12,7 @@ define void @test1(<4 x float>* %F, float* %f) nounwind {
; CHECK-NEXT: movss %xmm0, (%eax)
; CHECK-NEXT: retl
entry:
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
+ %tmp = load <4 x float>, <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp7 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
%tmp2 = extractelement <4 x float> %tmp7, i32 0 ; <float> [#uses=1]
store float %tmp2, float* %f
@@ -32,7 +32,7 @@ define float @test2(<4 x float>* %F, float* %f) nounwind {
; CHECK-NEXT: popl %eax
; CHECK-NEXT: retl
entry:
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
+ %tmp = load <4 x float>, <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp7 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
%tmp2 = extractelement <4 x float> %tmp7, i32 2 ; <float> [#uses=1]
ret float %tmp2
@@ -47,7 +47,7 @@ define void @test3(float* %R, <4 x float>* %P1) nounwind {
; CHECK-NEXT: movss %xmm0, (%eax)
; CHECK-NEXT: retl
entry:
- %X = load <4 x float>* %P1 ; <<4 x float>> [#uses=1]
+ %X = load <4 x float>, <4 x float>* %P1 ; <<4 x float>> [#uses=1]
%tmp = extractelement <4 x float> %X, i32 3 ; <float> [#uses=1]
store float %tmp, float* %R
ret void
diff --git a/llvm/test/CodeGen/X86/vec_fpext.ll b/llvm/test/CodeGen/X86/vec_fpext.ll
index b882a5e272b..8488757c846 100644
--- a/llvm/test/CodeGen/X86/vec_fpext.ll
+++ b/llvm/test/CodeGen/X86/vec_fpext.ll
@@ -8,7 +8,7 @@ define void @fpext_frommem(<2 x float>* %in, <2 x double>* %out) {
entry:
; CHECK: cvtps2pd (%{{.+}}), %xmm{{[0-9]+}}
; AVX: vcvtps2pd (%{{.+}}), %xmm{{[0-9]+}}
- %0 = load <2 x float>* %in, align 8
+ %0 = load <2 x float>, <2 x float>* %in, align 8
%1 = fpext <2 x float> %0 to <2 x double>
store <2 x double> %1, <2 x double>* %out, align 1
ret void
@@ -21,7 +21,7 @@ entry:
; CHECK: cvtps2pd (%{{.+}}), %xmm{{[0-9]+}}
; CHECK: cvtps2pd 8(%{{.+}}), %xmm{{[0-9]+}}
; AVX: vcvtps2pd (%{{.+}}), %ymm{{[0-9]+}}
- %0 = load <4 x float>* %in
+ %0 = load <4 x float>, <4 x float>* %in
%1 = fpext <4 x float> %0 to <4 x double>
store <4 x double> %1, <4 x double>* %out, align 1
ret void
@@ -37,7 +37,7 @@ entry:
; CHECK: cvtps2pd 24(%{{.+}}), %xmm{{[0-9]+}}
; AVX: vcvtps2pd 16(%{{.+}}), %ymm{{[0-9]+}}
; AVX: vcvtps2pd (%{{.+}}), %ymm{{[0-9]+}}
- %0 = load <8 x float>* %in
+ %0 = load <8 x float>, <8 x float>* %in
%1 = fpext <8 x float> %0 to <8 x double>
store <8 x double> %1, <8 x double>* %out, align 1
ret void
diff --git a/llvm/test/CodeGen/X86/vec_i64.ll b/llvm/test/CodeGen/X86/vec_i64.ll
index 462e16e1302..48ca1ff021d 100644
--- a/llvm/test/CodeGen/X86/vec_i64.ll
+++ b/llvm/test/CodeGen/X86/vec_i64.ll
@@ -5,7 +5,7 @@
define <2 x i64> @foo1(i64* %y) nounwind {
entry:
- %tmp1 = load i64* %y, align 8 ; <i64> [#uses=1]
+ %tmp1 = load i64, i64* %y, align 8 ; <i64> [#uses=1]
%s2v = insertelement <2 x i64> undef, i64 %tmp1, i32 0
%loadl = shufflevector <2 x i64> zeroinitializer, <2 x i64> %s2v, <2 x i32> <i32 2, i32 1>
ret <2 x i64> %loadl
@@ -14,7 +14,7 @@ entry:
define <4 x float> @foo2(i64* %p) nounwind {
entry:
- %load = load i64* %p
+ %load = load i64, i64* %p
%s2v = insertelement <2 x i64> undef, i64 %load, i32 0
%loadl = shufflevector <2 x i64> zeroinitializer, <2 x i64> %s2v, <2 x i32> <i32 2, i32 1>
%0 = bitcast <2 x i64> %loadl to <4 x float>
diff --git a/llvm/test/CodeGen/X86/vec_ins_extract.ll b/llvm/test/CodeGen/X86/vec_ins_extract.ll
index c65a9487f14..e92f46dbabb 100644
--- a/llvm/test/CodeGen/X86/vec_ins_extract.ll
+++ b/llvm/test/CodeGen/X86/vec_ins_extract.ll
@@ -7,7 +7,7 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
define void @test(<4 x float>* %F, float %f) {
entry:
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
+ %tmp = load <4 x float>, <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
%tmp10 = insertelement <4 x float> %tmp3, float %f, i32 0 ; <<4 x float>> [#uses=2]
%tmp6 = fadd <4 x float> %tmp10, %tmp10 ; <<4 x float>> [#uses=1]
@@ -18,12 +18,12 @@ entry:
define void @test2(<4 x float>* %F, float %f) {
entry:
%G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=3]
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
+ %tmp = load <4 x float>, <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
%tmp.upgrd.1 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
store float %f, float* %tmp.upgrd.1
- %tmp4 = load <4 x float>* %G ; <<4 x float>> [#uses=2]
+ %tmp4 = load <4 x float>, <4 x float>* %G ; <<4 x float>> [#uses=2]
%tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
store <4 x float> %tmp6, <4 x float>* %F
ret void
@@ -32,18 +32,18 @@ entry:
define void @test3(<4 x float>* %F, float* %f) {
entry:
%G = alloca <4 x float>, align 16 ; <<4 x float>*> [#uses=2]
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
+ %tmp = load <4 x float>, <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
%tmp.upgrd.2 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
- %tmp.upgrd.3 = load float* %tmp.upgrd.2 ; <float> [#uses=1]
+ %tmp.upgrd.3 = load float, float* %tmp.upgrd.2 ; <float> [#uses=1]
store float %tmp.upgrd.3, float* %f
ret void
}
define void @test4(<4 x float>* %F, float* %f) {
entry:
- %tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
+ %tmp = load <4 x float>, <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp5.lhs = extractelement <4 x float> %tmp, i32 0 ; <float> [#uses=1]
%tmp5.rhs = extractelement <4 x float> %tmp, i32 0 ; <float> [#uses=1]
%tmp5 = fadd float %tmp5.lhs, %tmp5.rhs ; <float> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/vec_insert-5.ll b/llvm/test/CodeGen/X86/vec_insert-5.ll
index b77a1b5f493..0f8951529dd 100644
--- a/llvm/test/CodeGen/X86/vec_insert-5.ll
+++ b/llvm/test/CodeGen/X86/vec_insert-5.ll
@@ -28,7 +28,7 @@ define <4 x float> @t2(<4 x float>* %P) nounwind {
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; CHECK-NEXT: retl
- %tmp1 = load <4 x float>* %P
+ %tmp1 = load <4 x float>, <4 x float>* %P
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 4, i32 4, i32 4, i32 0 >
ret <4 x float> %tmp2
}
@@ -41,7 +41,7 @@ define <4 x float> @t3(<4 x float>* %P) nounwind {
; CHECK-NEXT: xorpd %xmm1, %xmm1
; CHECK-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; CHECK-NEXT: retl
- %tmp1 = load <4 x float>* %P
+ %tmp1 = load <4 x float>, <4 x float>* %P
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> zeroinitializer, <4 x i32> < i32 2, i32 3, i32 4, i32 4 >
ret <4 x float> %tmp2
}
@@ -55,7 +55,7 @@ define <4 x float> @t4(<4 x float>* %P) nounwind {
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,0],xmm1[1,0]
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,3]
; CHECK-NEXT: retl
- %tmp1 = load <4 x float>* %P
+ %tmp1 = load <4 x float>, <4 x float>* %P
%tmp2 = shufflevector <4 x float> zeroinitializer, <4 x float> %tmp1, <4 x i32> < i32 7, i32 0, i32 0, i32 0 >
ret <4 x float> %tmp2
}
diff --git a/llvm/test/CodeGen/X86/vec_insert-mmx.ll b/llvm/test/CodeGen/X86/vec_insert-mmx.ll
index d397d80e8be..447f97ab8e2 100644
--- a/llvm/test/CodeGen/X86/vec_insert-mmx.ll
+++ b/llvm/test/CodeGen/X86/vec_insert-mmx.ll
@@ -50,8 +50,8 @@ define void @t3() {
; X86-64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; X86-64-NEXT: movq %xmm0
; X86-64-NEXT: retq
- load i16* @g0
- load <4 x i16>* @g1
+ load i16, i16* @g0
+ load <4 x i16>, <4 x i16>* @g1
insertelement <4 x i16> %2, i16 %1, i32 0
store <4 x i16> %3, <4 x i16>* @g1
ret void
diff --git a/llvm/test/CodeGen/X86/vec_loadsingles.ll b/llvm/test/CodeGen/X86/vec_loadsingles.ll
index 312b277c4d0..ecae5d96282 100644
--- a/llvm/test/CodeGen/X86/vec_loadsingles.ll
+++ b/llvm/test/CodeGen/X86/vec_loadsingles.ll
@@ -2,10 +2,10 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+slow-unaligned-mem-32 | FileCheck %s --check-prefix=ALL --check-prefix=SLOW32
define <4 x float> @merge_2_floats(float* nocapture %p) nounwind readonly {
- %tmp1 = load float* %p
+ %tmp1 = load float, float* %p
%vecins = insertelement <4 x float> undef, float %tmp1, i32 0
%add.ptr = getelementptr float, float* %p, i32 1
- %tmp5 = load float* %add.ptr
+ %tmp5 = load float, float* %add.ptr
%vecins7 = insertelement <4 x float> %vecins, float %tmp5, i32 1
ret <4 x float> %vecins7
@@ -17,13 +17,13 @@ define <4 x float> @merge_2_floats(float* nocapture %p) nounwind readonly {
; Test-case generated due to a crash when trying to treat loading the first
; two i64s of a <4 x i64> as a load of two i32s.
define <4 x i64> @merge_2_floats_into_4() {
- %1 = load i64** undef, align 8
+ %1 = load i64*, i64** undef, align 8
%2 = getelementptr inbounds i64, i64* %1, i64 0
- %3 = load i64* %2
+ %3 = load i64, i64* %2
%4 = insertelement <4 x i64> undef, i64 %3, i32 0
- %5 = load i64** undef, align 8
+ %5 = load i64*, i64** undef, align 8
%6 = getelementptr inbounds i64, i64* %5, i64 1
- %7 = load i64* %6
+ %7 = load i64, i64* %6
%8 = insertelement <4 x i64> %4, i64 %7, i32 1
%9 = shufflevector <4 x i64> %8, <4 x i64> undef, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
ret <4 x i64> %9
@@ -34,16 +34,16 @@ define <4 x i64> @merge_2_floats_into_4() {
}
define <4 x float> @merge_4_floats(float* %ptr) {
- %a = load float* %ptr, align 8
+ %a = load float, float* %ptr, align 8
%vec = insertelement <4 x float> undef, float %a, i32 0
%idx1 = getelementptr inbounds float, float* %ptr, i64 1
- %b = load float* %idx1, align 8
+ %b = load float, float* %idx1, align 8
%vec2 = insertelement <4 x float> %vec, float %b, i32 1
%idx3 = getelementptr inbounds float, float* %ptr, i64 2
- %c = load float* %idx3, align 8
+ %c = load float, float* %idx3, align 8
%vec4 = insertelement <4 x float> %vec2, float %c, i32 2
%idx5 = getelementptr inbounds float, float* %ptr, i64 3
- %d = load float* %idx5, align 8
+ %d = load float, float* %idx5, align 8
%vec6 = insertelement <4 x float> %vec4, float %d, i32 3
ret <4 x float> %vec6
@@ -58,28 +58,28 @@ define <4 x float> @merge_4_floats(float* %ptr) {
; 16-byte loads.
define <8 x float> @merge_8_floats(float* %ptr) {
- %a = load float* %ptr, align 4
+ %a = load float, float* %ptr, align 4
%vec = insertelement <8 x float> undef, float %a, i32 0
%idx1 = getelementptr inbounds float, float* %ptr, i64 1
- %b = load float* %idx1, align 4
+ %b = load float, float* %idx1, align 4
%vec2 = insertelement <8 x float> %vec, float %b, i32 1
%idx3 = getelementptr inbounds float, float* %ptr, i64 2
- %c = load float* %idx3, align 4
+ %c = load float, float* %idx3, align 4
%vec4 = insertelement <8 x float> %vec2, float %c, i32 2
%idx5 = getelementptr inbounds float, float* %ptr, i64 3
- %d = load float* %idx5, align 4
+ %d = load float, float* %idx5, align 4
%vec6 = insertelement <8 x float> %vec4, float %d, i32 3
%idx7 = getelementptr inbounds float, float* %ptr, i64 4
- %e = load float* %idx7, align 4
+ %e = load float, float* %idx7, align 4
%vec8 = insertelement <8 x float> %vec6, float %e, i32 4
%idx9 = getelementptr inbounds float, float* %ptr, i64 5
- %f = load float* %idx9, align 4
+ %f = load float, float* %idx9, align 4
%vec10 = insertelement <8 x float> %vec8, float %f, i32 5
%idx11 = getelementptr inbounds float, float* %ptr, i64 6
- %g = load float* %idx11, align 4
+ %g = load float, float* %idx11, align 4
%vec12 = insertelement <8 x float> %vec10, float %g, i32 6
%idx13 = getelementptr inbounds float, float* %ptr, i64 7
- %h = load float* %idx13, align 4
+ %h = load float, float* %idx13, align 4
%vec14 = insertelement <8 x float> %vec12, float %h, i32 7
ret <8 x float> %vec14
@@ -94,16 +94,16 @@ define <8 x float> @merge_8_floats(float* %ptr) {
}
define <4 x double> @merge_4_doubles(double* %ptr) {
- %a = load double* %ptr, align 8
+ %a = load double, double* %ptr, align 8
%vec = insertelement <4 x double> undef, double %a, i32 0
%idx1 = getelementptr inbounds double, double* %ptr, i64 1
- %b = load double* %idx1, align 8
+ %b = load double, double* %idx1, align 8
%vec2 = insertelement <4 x double> %vec, double %b, i32 1
%idx3 = getelementptr inbounds double, double* %ptr, i64 2
- %c = load double* %idx3, align 8
+ %c = load double, double* %idx3, align 8
%vec4 = insertelement <4 x double> %vec2, double %c, i32 2
%idx5 = getelementptr inbounds double, double* %ptr, i64 3
- %d = load double* %idx5, align 8
+ %d = load double, double* %idx5, align 8
%vec6 = insertelement <4 x double> %vec4, double %d, i32 3
ret <4 x double> %vec6
@@ -124,10 +124,10 @@ define <4 x double> @merge_4_doubles_offset(double* %ptr) {
%arrayidx5 = getelementptr inbounds double, double* %ptr, i64 5
%arrayidx6 = getelementptr inbounds double, double* %ptr, i64 6
%arrayidx7 = getelementptr inbounds double, double* %ptr, i64 7
- %e = load double* %arrayidx4, align 8
- %f = load double* %arrayidx5, align 8
- %g = load double* %arrayidx6, align 8
- %h = load double* %arrayidx7, align 8
+ %e = load double, double* %arrayidx4, align 8
+ %f = load double, double* %arrayidx5, align 8
+ %g = load double, double* %arrayidx6, align 8
+ %h = load double, double* %arrayidx7, align 8
%vecinit4 = insertelement <4 x double> undef, double %e, i32 0
%vecinit5 = insertelement <4 x double> %vecinit4, double %f, i32 1
%vecinit6 = insertelement <4 x double> %vecinit5, double %g, i32 2
diff --git a/llvm/test/CodeGen/X86/vec_logical.ll b/llvm/test/CodeGen/X86/vec_logical.ll
index 1dc0b163aeb..6ab2d8963ab 100644
--- a/llvm/test/CodeGen/X86/vec_logical.ll
+++ b/llvm/test/CodeGen/X86/vec_logical.ll
@@ -29,7 +29,7 @@ entry:
define void @t3(<4 x float> %a, <4 x float> %b, <4 x float>* %c, <4 x float>* %d) {
entry:
- %tmp3 = load <4 x float>* %c ; <<4 x float>> [#uses=1]
+ %tmp3 = load <4 x float>, <4 x float>* %c ; <<4 x float>> [#uses=1]
%tmp11 = bitcast <4 x float> %a to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp12 = bitcast <4 x float> %b to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp13 = xor <4 x i32> %tmp11, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/vec_set-7.ll b/llvm/test/CodeGen/X86/vec_set-7.ll
index d993178a989..1701e491da6 100644
--- a/llvm/test/CodeGen/X86/vec_set-7.ll
+++ b/llvm/test/CodeGen/X86/vec_set-7.ll
@@ -2,7 +2,7 @@
define <2 x i64> @test(<2 x i64>* %p) nounwind {
%tmp = bitcast <2 x i64>* %p to double*
- %tmp.upgrd.1 = load double* %tmp
+ %tmp.upgrd.1 = load double, double* %tmp
%tmp.upgrd.2 = insertelement <2 x double> undef, double %tmp.upgrd.1, i32 0
%tmp5 = insertelement <2 x double> %tmp.upgrd.2, double 0.0, i32 1
%tmp.upgrd.3 = bitcast <2 x double> %tmp5 to <2 x i64>
diff --git a/llvm/test/CodeGen/X86/vec_set-F.ll b/llvm/test/CodeGen/X86/vec_set-F.ll
index 6dd3cb0abeb..aa17f9bfbf5 100644
--- a/llvm/test/CodeGen/X86/vec_set-F.ll
+++ b/llvm/test/CodeGen/X86/vec_set-F.ll
@@ -4,7 +4,7 @@
define <2 x i64> @t1(<2 x i64>* %ptr) nounwind {
%tmp45 = bitcast <2 x i64>* %ptr to <2 x i32>*
- %tmp615 = load <2 x i32>* %tmp45
+ %tmp615 = load <2 x i32>, <2 x i32>* %tmp45
%tmp7 = bitcast <2 x i32> %tmp615 to i64
%tmp8 = insertelement <2 x i64> zeroinitializer, i64 %tmp7, i32 0
ret <2 x i64> %tmp8
diff --git a/llvm/test/CodeGen/X86/vec_setcc-2.ll b/llvm/test/CodeGen/X86/vec_setcc-2.ll
index e2d02c803e1..e150882642a 100644
--- a/llvm/test/CodeGen/X86/vec_setcc-2.ll
+++ b/llvm/test/CodeGen/X86/vec_setcc-2.ll
@@ -26,7 +26,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx1 = getelementptr inbounds <2 x i64>, <2 x i64>* %in, i64 %indvars.iv
- %arrayidx1.val = load <2 x i64>* %arrayidx1, align 16
+ %arrayidx1.val = load <2 x i64>, <2 x i64>* %arrayidx1, align 16
%0 = bitcast <2 x i64> %arrayidx1.val to <8 x i16>
%cmp.i.i = icmp ult <8 x i16> %0, <i16 26, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26>
%sext.i.i = sext <8 x i1> %cmp.i.i to <8 x i16>
@@ -55,7 +55,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx1 = getelementptr inbounds <2 x i64>, <2 x i64>* %in, i64 %indvars.iv
- %arrayidx1.val = load <2 x i64>* %arrayidx1, align 16
+ %arrayidx1.val = load <2 x i64>, <2 x i64>* %arrayidx1, align 16
%0 = bitcast <2 x i64> %arrayidx1.val to <8 x i16>
%cmp.i.i = icmp ult <8 x i16> %0, <i16 0, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26>
%sext.i.i = sext <8 x i1> %cmp.i.i to <8 x i16>
diff --git a/llvm/test/CodeGen/X86/vec_ss_load_fold.ll b/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
index 80f12a2dec2..ab5031e267d 100644
--- a/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
+++ b/llvm/test/CodeGen/X86/vec_ss_load_fold.ll
@@ -51,7 +51,7 @@ declare <4 x float> @llvm.x86.sse41.round.ss(<4 x float>, <4 x float>, i32)
declare <4 x float> @f()
define <4 x float> @test3(<4 x float> %A, float *%b, i32 %C) nounwind {
- %a = load float *%b
+ %a = load float , float *%b
%B = insertelement <4 x float> undef, float %a, i32 0
%X = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %A, <4 x float> %B, i32 4)
ret <4 x float> %X
@@ -60,7 +60,7 @@ define <4 x float> @test3(<4 x float> %A, float *%b, i32 %C) nounwind {
}
define <4 x float> @test4(<4 x float> %A, float *%b, i32 %C) nounwind {
- %a = load float *%b
+ %a = load float , float *%b
%B = insertelement <4 x float> undef, float %a, i32 0
%q = call <4 x float> @f()
%X = call <4 x float> @llvm.x86.sse41.round.ss(<4 x float> %q, <4 x float> %B, i32 4)
diff --git a/llvm/test/CodeGen/X86/vec_trunc_sext.ll b/llvm/test/CodeGen/X86/vec_trunc_sext.ll
index 3c446bba4ea..dcfe423eb74 100644
--- a/llvm/test/CodeGen/X86/vec_trunc_sext.ll
+++ b/llvm/test/CodeGen/X86/vec_trunc_sext.ll
@@ -9,7 +9,7 @@
; but that is beyond our current codegen capabilities.
define <4 x i32> @trunc_sext(<4 x i16>* %in) {
- %load = load <4 x i16>* %in
+ %load = load <4 x i16>, <4 x i16>* %in
%trunc = trunc <4 x i16> %load to <4 x i8>
%sext = sext <4 x i8> %trunc to <4 x i32>
ret <4 x i32> %sext
diff --git a/llvm/test/CodeGen/X86/vec_zero.ll b/llvm/test/CodeGen/X86/vec_zero.ll
index c3ea0ad2023..1d900a0919f 100644
--- a/llvm/test/CodeGen/X86/vec_zero.ll
+++ b/llvm/test/CodeGen/X86/vec_zero.ll
@@ -3,7 +3,7 @@
; CHECK: foo
; CHECK: xorps
define void @foo(<4 x float>* %P) {
- %T = load <4 x float>* %P ; <<4 x float>> [#uses=1]
+ %T = load <4 x float>, <4 x float>* %P ; <<4 x float>> [#uses=1]
%S = fadd <4 x float> zeroinitializer, %T ; <<4 x float>> [#uses=1]
store <4 x float> %S, <4 x float>* %P
ret void
@@ -12,7 +12,7 @@ define void @foo(<4 x float>* %P) {
; CHECK: bar
; CHECK: pxor
define void @bar(<4 x i32>* %P) {
- %T = load <4 x i32>* %P ; <<4 x i32>> [#uses=1]
+ %T = load <4 x i32>, <4 x i32>* %P ; <<4 x i32>> [#uses=1]
%S = sub <4 x i32> zeroinitializer, %T ; <<4 x i32>> [#uses=1]
store <4 x i32> %S, <4 x i32>* %P
ret void
diff --git a/llvm/test/CodeGen/X86/vector-gep.ll b/llvm/test/CodeGen/X86/vector-gep.ll
index fcf985dea14..ce98e6759b6 100644
--- a/llvm/test/CodeGen/X86/vector-gep.ll
+++ b/llvm/test/CodeGen/X86/vector-gep.ll
@@ -26,7 +26,7 @@ entry:
;CHECK-NEXT: movl
%A2 = getelementptr i32, <4 x i32*> %param, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
%k = extractelement <4 x i32*> %A2, i32 3
- %v = load i32* %k
+ %v = load i32, i32* %k
ret i32 %v
;CHECK: ret
}
@@ -39,7 +39,7 @@ entry:
;CHECK-NEXT: vpadd
%A2 = getelementptr i32, <4 x i32*> %param, <4 x i32> %off
%k = extractelement <4 x i32*> %A2, i32 3
- %v = load i32* %k
+ %v = load i32, i32* %k
ret i32 %v
;CHECK: ret
}
diff --git a/llvm/test/CodeGen/X86/vector-intrinsics.ll b/llvm/test/CodeGen/X86/vector-intrinsics.ll
index cabacb572ce..c140468d300 100644
--- a/llvm/test/CodeGen/X86/vector-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-intrinsics.ll
@@ -32,20 +32,20 @@ declare <9 x double> @llvm.pow.v9f64(<9 x double> %a, <9 x double> %b)
declare <9 x double> @llvm.powi.v9f64(<9 x double> %a, i32)
define void @a(<9 x double>* %p) nounwind {
- %a = load <9 x double>* %p
+ %a = load <9 x double>, <9 x double>* %p
%r = call <9 x double> @llvm.exp.v9f64(<9 x double> %a)
store <9 x double> %r, <9 x double>* %p
ret void
}
define void @b(<9 x double>* %p, <9 x double>* %q) nounwind {
- %a = load <9 x double>* %p
- %b = load <9 x double>* %q
+ %a = load <9 x double>, <9 x double>* %p
+ %b = load <9 x double>, <9 x double>* %q
%r = call <9 x double> @llvm.pow.v9f64(<9 x double> %a, <9 x double> %b)
store <9 x double> %r, <9 x double>* %p
ret void
}
define void @c(<9 x double>* %p, i32 %n) nounwind {
- %a = load <9 x double>* %p
+ %a = load <9 x double>, <9 x double>* %p
%r = call <9 x double> @llvm.powi.v9f64(<9 x double> %a, i32 %n)
store <9 x double> %r, <9 x double>* %p
ret void
diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
index 962d0381cd2..c4273710a91 100644
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -206,7 +206,7 @@ define <4 x i32> @load_sext_test1(<4 x i16> *%ptr) {
; X32-SSE41-NEXT: pmovsxwd (%eax), %xmm0
; X32-SSE41-NEXT: retl
entry:
- %X = load <4 x i16>* %ptr
+ %X = load <4 x i16>, <4 x i16>* %ptr
%Y = sext <4 x i16> %X to <4 x i32>
ret <4 x i32>%Y
}
@@ -244,7 +244,7 @@ define <4 x i32> @load_sext_test2(<4 x i8> *%ptr) {
; X32-SSE41-NEXT: pmovsxbd (%eax), %xmm0
; X32-SSE41-NEXT: retl
entry:
- %X = load <4 x i8>* %ptr
+ %X = load <4 x i8>, <4 x i8>* %ptr
%Y = sext <4 x i8> %X to <4 x i32>
ret <4 x i32>%Y
}
@@ -284,7 +284,7 @@ define <2 x i64> @load_sext_test3(<2 x i8> *%ptr) {
; X32-SSE41-NEXT: pmovsxbq (%eax), %xmm0
; X32-SSE41-NEXT: retl
entry:
- %X = load <2 x i8>* %ptr
+ %X = load <2 x i8>, <2 x i8>* %ptr
%Y = sext <2 x i8> %X to <2 x i64>
ret <2 x i64>%Y
}
@@ -324,7 +324,7 @@ define <2 x i64> @load_sext_test4(<2 x i16> *%ptr) {
; X32-SSE41-NEXT: pmovsxwq (%eax), %xmm0
; X32-SSE41-NEXT: retl
entry:
- %X = load <2 x i16>* %ptr
+ %X = load <2 x i16>, <2 x i16>* %ptr
%Y = sext <2 x i16> %X to <2 x i64>
ret <2 x i64>%Y
}
@@ -364,7 +364,7 @@ define <2 x i64> @load_sext_test5(<2 x i32> *%ptr) {
; X32-SSE41-NEXT: pmovsxdq (%eax), %xmm0
; X32-SSE41-NEXT: retl
entry:
- %X = load <2 x i32>* %ptr
+ %X = load <2 x i32>, <2 x i32>* %ptr
%Y = sext <2 x i32> %X to <2 x i64>
ret <2 x i64>%Y
}
@@ -400,7 +400,7 @@ define <8 x i16> @load_sext_test6(<8 x i8> *%ptr) {
; X32-SSE41-NEXT: pmovsxbw (%eax), %xmm0
; X32-SSE41-NEXT: retl
entry:
- %X = load <8 x i8>* %ptr
+ %X = load <8 x i8>, <8 x i8>* %ptr
%Y = sext <8 x i8> %X to <8 x i16>
ret <8 x i16>%Y
}
@@ -566,7 +566,7 @@ define <16 x i16> @sext_16i8_to_16i16(<16 x i8> *%ptr) {
; X32-SSE41-NEXT: pmovsxbw 8(%eax), %xmm1
; X32-SSE41-NEXT: retl
entry:
- %X = load <16 x i8>* %ptr
+ %X = load <16 x i8>, <16 x i8>* %ptr
%Y = sext <16 x i8> %X to <16 x i16>
ret <16 x i16> %Y
}
@@ -742,7 +742,7 @@ define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
; X32-SSE41-NEXT: pmovsxbq 2(%eax), %xmm1
; X32-SSE41-NEXT: retl
entry:
- %X = load <4 x i8>* %ptr
+ %X = load <4 x i8>, <4 x i8>* %ptr
%Y = sext <4 x i8> %X to <4 x i64>
ret <4 x i64>%Y
}
@@ -803,7 +803,7 @@ define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
; X32-SSE41-NEXT: pmovsxwq 4(%eax), %xmm1
; X32-SSE41-NEXT: retl
entry:
- %X = load <4 x i16>* %ptr
+ %X = load <4 x i16>, <4 x i16>* %ptr
%Y = sext <4 x i16> %X to <4 x i64>
ret <4 x i64>%Y
}
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
index 72148038f50..30eceacb734 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -835,7 +835,7 @@ define <2 x i64> @insert_mem_and_zero_v2i64(i64* %ptr) {
; AVX: # BB#0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
- %a = load i64* %ptr
+ %a = load i64, i64* %ptr
%v = insertelement <2 x i64> undef, i64 %a, i32 0
%shuffle = shufflevector <2 x i64> %v, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 3>
ret <2 x i64> %shuffle
@@ -866,7 +866,7 @@ define <2 x double> @insert_mem_and_zero_v2f64(double* %ptr) {
; AVX: # BB#0:
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
- %a = load double* %ptr
+ %a = load double, double* %ptr
%v = insertelement <2 x double> undef, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> zeroinitializer, <2 x i32> <i32 0, i32 3>
ret <2 x double> %shuffle
@@ -946,7 +946,7 @@ define <2 x i64> @insert_mem_lo_v2i64(i64* %ptr, <2 x i64> %b) {
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
- %a = load i64* %ptr
+ %a = load i64, i64* %ptr
%v = insertelement <2 x i64> undef, i64 %a, i32 0
%shuffle = shufflevector <2 x i64> %v, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
ret <2 x i64> %shuffle
@@ -981,7 +981,7 @@ define <2 x i64> @insert_mem_hi_v2i64(i64* %ptr, <2 x i64> %b) {
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
- %a = load i64* %ptr
+ %a = load i64, i64* %ptr
%v = insertelement <2 x i64> undef, i64 %a, i32 0
%shuffle = shufflevector <2 x i64> %v, <2 x i64> %b, <2 x i32> <i32 2, i32 0>
ret <2 x i64> %shuffle
@@ -1013,7 +1013,7 @@ define <2 x double> @insert_mem_lo_v2f64(double* %ptr, <2 x double> %b) {
; AVX: # BB#0:
; AVX-NEXT: vmovlpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
- %a = load double* %ptr
+ %a = load double, double* %ptr
%v = insertelement <2 x double> undef, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> %b, <2 x i32> <i32 0, i32 3>
ret <2 x double> %shuffle
@@ -1045,7 +1045,7 @@ define <2 x double> @insert_mem_hi_v2f64(double* %ptr, <2 x double> %b) {
; AVX: # BB#0:
; AVX-NEXT: vmovhpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
- %a = load double* %ptr
+ %a = load double, double* %ptr
%v = insertelement <2 x double> undef, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> %b, <2 x i32> <i32 2, i32 0>
ret <2 x double> %shuffle
@@ -1108,7 +1108,7 @@ define <2 x double> @insert_dup_mem_v2f64(double* %ptr) {
; AVX: # BB#0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX-NEXT: retq
- %a = load double* %ptr
+ %a = load double, double* %ptr
%v = insertelement <2 x double> undef, double %a, i32 0
%shuffle = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 0, i32 0>
ret <2 x double> %shuffle
@@ -1125,7 +1125,7 @@ define <2 x double> @shuffle_mem_v2f64_10(<2 x double>* %ptr) {
; AVX: # BB#0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = mem[1,0]
; AVX-NEXT: retq
- %a = load <2 x double>* %ptr
+ %a = load <2 x double>, <2 x double>* %ptr
%shuffle = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 1, i32 0>
ret <2 x double> %shuffle
}
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
index a684e5ed51d..2021905a223 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -1599,7 +1599,7 @@ define <4 x i32> @insert_mem_and_zero_v4i32(i32* %ptr) {
; AVX: # BB#0:
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
- %a = load i32* %ptr
+ %a = load i32, i32* %ptr
%v = insertelement <4 x i32> undef, i32 %a, i32 0
%shuffle = shufflevector <4 x i32> %v, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
ret <4 x i32> %shuffle
@@ -1653,7 +1653,7 @@ define <4 x float> @insert_mem_and_zero_v4f32(float* %ptr) {
; AVX: # BB#0:
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
- %a = load float* %ptr
+ %a = load float, float* %ptr
%v = insertelement <4 x float> undef, float %a, i32 0
%shuffle = shufflevector <4 x float> %v, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
ret <4 x float> %shuffle
@@ -1734,7 +1734,7 @@ define <4 x i32> @insert_mem_lo_v4i32(<2 x i32>* %ptr, <4 x i32> %b) {
; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
- %a = load <2 x i32>* %ptr
+ %a = load <2 x i32>, <2 x i32>* %ptr
%v = shufflevector <2 x i32> %a, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%shuffle = shufflevector <4 x i32> %v, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
ret <4 x i32> %shuffle
@@ -1770,7 +1770,7 @@ define <4 x i32> @insert_mem_hi_v4i32(<2 x i32>* %ptr, <4 x i32> %b) {
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
- %a = load <2 x i32>* %ptr
+ %a = load <2 x i32>, <2 x i32>* %ptr
%v = shufflevector <2 x i32> %a, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%shuffle = shufflevector <4 x i32> %v, <4 x i32> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
ret <4 x i32> %shuffle
@@ -1803,7 +1803,7 @@ define <4 x float> @insert_mem_lo_v4f32(<2 x float>* %ptr, <4 x float> %b) {
; AVX: # BB#0:
; AVX-NEXT: vmovlpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
- %a = load <2 x float>* %ptr
+ %a = load <2 x float>, <2 x float>* %ptr
%v = shufflevector <2 x float> %a, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%shuffle = shufflevector <4 x float> %v, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
ret <4 x float> %shuffle
@@ -1836,7 +1836,7 @@ define <4 x float> @insert_mem_hi_v4f32(<2 x float>* %ptr, <4 x float> %b) {
; AVX: # BB#0:
; AVX-NEXT: vmovhpd (%rdi), %xmm0, %xmm0
; AVX-NEXT: retq
- %a = load <2 x float>* %ptr
+ %a = load <2 x float>, <2 x float>* %ptr
%v = shufflevector <2 x float> %a, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%shuffle = shufflevector <4 x float> %v, <4 x float> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
ret <4 x float> %shuffle
@@ -1853,7 +1853,7 @@ define <4 x float> @shuffle_mem_v4f32_3210(<4 x float>* %ptr) {
; AVX: # BB#0:
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = mem[3,2,1,0]
; AVX-NEXT: retq
- %a = load <4 x float>* %ptr
+ %a = load <4 x float>, <4 x float>* %ptr
%shuffle = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
ret <4 x float> %shuffle
}
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 3d6ada6c75e..0ac9a2b62eb 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -842,7 +842,7 @@ define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX2-NEXT: retq
- %a = load i64* %ptr
+ %a = load i64, i64* %ptr
%v = insertelement <4 x i64> undef, i64 %a, i64 0
%shuffle = shufflevector <4 x i64> %v, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
ret <4 x i64> %shuffle
@@ -864,7 +864,7 @@ define <4 x double> @insert_mem_and_zero_v4f64(double* %ptr) {
; ALL: # BB#0:
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
- %a = load double* %ptr
+ %a = load double, double* %ptr
%v = insertelement <4 x double> undef, double %a, i32 0
%shuffle = shufflevector <4 x double> %v, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
ret <4 x double> %shuffle
@@ -875,7 +875,7 @@ define <4 x double> @splat_mem_v4f64(double* %ptr) {
; ALL: # BB#0:
; ALL-NEXT: vbroadcastsd (%rdi), %ymm0
; ALL-NEXT: retq
- %a = load double* %ptr
+ %a = load double, double* %ptr
%v = insertelement <4 x double> undef, double %a, i32 0
%shuffle = shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
ret <4 x double> %shuffle
@@ -891,7 +891,7 @@ define <4 x i64> @splat_mem_v4i64(i64* %ptr) {
; AVX2: # BB#0:
; AVX2-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX2-NEXT: retq
- %a = load i64* %ptr
+ %a = load i64, i64* %ptr
%v = insertelement <4 x i64> undef, i64 %a, i64 0
%shuffle = shufflevector <4 x i64> %v, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
ret <4 x i64> %shuffle
@@ -902,7 +902,7 @@ define <4 x double> @splat_mem_v4f64_2(double* %p) {
; ALL: # BB#0:
; ALL-NEXT: vbroadcastsd (%rdi), %ymm0
; ALL-NEXT: retq
- %1 = load double* %p
+ %1 = load double, double* %p
%2 = insertelement <2 x double> undef, double %1, i32 0
%3 = shufflevector <2 x double> %2, <2 x double> undef, <4 x i32> zeroinitializer
ret <4 x double> %3
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
index f4e9a3b69b3..624b3f2f003 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -1906,7 +1906,7 @@ define <8 x float> @splat_mem_v8f32_2(float* %p) {
; ALL: # BB#0:
; ALL-NEXT: vbroadcastss (%rdi), %ymm0
; ALL-NEXT: retq
- %1 = load float* %p
+ %1 = load float, float* %p
%2 = insertelement <4 x float> undef, float %1, i32 0
%3 = shufflevector <4 x float> %2, <4 x float> undef, <8 x i32> zeroinitializer
ret <8 x float> %3
@@ -2058,8 +2058,8 @@ define <8x float> @concat_v2f32_1(<2 x float>* %tmp64, <2 x float>* %tmp65) {
; ALL-NEXT: vmovhpd (%rsi), %xmm0, %xmm0
; ALL-NEXT: retq
entry:
- %tmp74 = load <2 x float>* %tmp65, align 8
- %tmp72 = load <2 x float>* %tmp64, align 8
+ %tmp74 = load <2 x float>, <2 x float>* %tmp65, align 8
+ %tmp72 = load <2 x float>, <2 x float>* %tmp64, align 8
%tmp73 = shufflevector <2 x float> %tmp72, <2 x float> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%tmp75 = shufflevector <2 x float> %tmp74, <2 x float> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%tmp76 = shufflevector <8 x float> %tmp73, <8 x float> %tmp75, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -2073,8 +2073,8 @@ define <8x float> @concat_v2f32_2(<2 x float>* %tmp64, <2 x float>* %tmp65) {
; ALL-NEXT: vmovhpd (%rsi), %xmm0, %xmm0
; ALL-NEXT: retq
entry:
- %tmp74 = load <2 x float>* %tmp65, align 8
- %tmp72 = load <2 x float>* %tmp64, align 8
+ %tmp74 = load <2 x float>, <2 x float>* %tmp65, align 8
+ %tmp72 = load <2 x float>, <2 x float>* %tmp64, align 8
%tmp76 = shufflevector <2 x float> %tmp72, <2 x float> %tmp74, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
ret <8 x float> %tmp76
}
@@ -2086,8 +2086,8 @@ define <8x float> @concat_v2f32_3(<2 x float>* %tmp64, <2 x float>* %tmp65) {
; ALL-NEXT: vmovhpd (%rsi), %xmm0, %xmm0
; ALL-NEXT: retq
entry:
- %tmp74 = load <2 x float>* %tmp65, align 8
- %tmp72 = load <2 x float>* %tmp64, align 8
+ %tmp74 = load <2 x float>, <2 x float>* %tmp65, align 8
+ %tmp72 = load <2 x float>, <2 x float>* %tmp64, align 8
%tmp76 = shufflevector <2 x float> %tmp72, <2 x float> %tmp74, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%res = shufflevector <4 x float> %tmp76, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
ret <8 x float> %res
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index b99946f4610..92c59e2fca0 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -1791,8 +1791,8 @@ define <8 x float> @combine_test22(<2 x float>* %a, <2 x float>* %b) {
; AVX-NEXT: vmovhpd (%rsi), %xmm0, %xmm0
; AVX-NEXT: retq
; Current AVX2 lowering of this is still awful, not adding a test case.
- %1 = load <2 x float>* %a, align 8
- %2 = load <2 x float>* %b, align 8
+ %1 = load <2 x float>, <2 x float>* %a, align 8
+ %2 = load <2 x float>, <2 x float>* %b, align 8
%3 = shufflevector <2 x float> %1, <2 x float> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
ret <8 x float> %3
}
@@ -1933,8 +1933,8 @@ define <4 x i8> @combine_test1c(<4 x i8>* %a, <4 x i8>* %b) {
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX2-NEXT: retq
- %A = load <4 x i8>* %a
- %B = load <4 x i8>* %b
+ %A = load <4 x i8>, <4 x i8>* %a
+ %B = load <4 x i8>, <4 x i8>* %b
%1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
%2 = shufflevector <4 x i8> %1, <4 x i8> %B, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
ret <4 x i8> %2
@@ -1976,8 +1976,8 @@ define <4 x i8> @combine_test2c(<4 x i8>* %a, <4 x i8>* %b) {
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
- %A = load <4 x i8>* %a
- %B = load <4 x i8>* %b
+ %A = load <4 x i8>, <4 x i8>* %a
+ %B = load <4 x i8>, <4 x i8>* %b
%1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 0, i32 5, i32 1, i32 5>
%2 = shufflevector <4 x i8> %1, <4 x i8> %B, <4 x i32> <i32 0, i32 2, i32 4, i32 1>
ret <4 x i8> %2
@@ -2019,8 +2019,8 @@ define <4 x i8> @combine_test3c(<4 x i8>* %a, <4 x i8>* %b) {
; AVX-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm1[1],xmm0[1]
; AVX-NEXT: retq
- %A = load <4 x i8>* %a
- %B = load <4 x i8>* %b
+ %A = load <4 x i8>, <4 x i8>* %a
+ %B = load <4 x i8>, <4 x i8>* %b
%1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 2, i32 3, i32 5, i32 5>
%2 = shufflevector <4 x i8> %1, <4 x i8> %B, <4 x i32> <i32 6, i32 7, i32 0, i32 1>
ret <4 x i8> %2
@@ -2071,8 +2071,8 @@ define <4 x i8> @combine_test4c(<4 x i8>* %a, <4 x i8>* %b) {
; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
; AVX2-NEXT: retq
- %A = load <4 x i8>* %a
- %B = load <4 x i8>* %b
+ %A = load <4 x i8>, <4 x i8>* %a
+ %B = load <4 x i8>, <4 x i8>* %b
%1 = shufflevector <4 x i8> %A, <4 x i8> %B, <4 x i32> <i32 4, i32 1, i32 6, i32 3>
%2 = shufflevector <4 x i8> %1, <4 x i8> %B, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
ret <4 x i8> %2
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll b/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
index 19608bdf557..c585414d92d 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-mmx.ll
@@ -19,7 +19,7 @@ define void @test0(<1 x i64>* %x) {
; X64-NEXT: movq %xmm0, (%rdi)
; X64-NEXT: retq
entry:
- %tmp2 = load <1 x i64>* %x
+ %tmp2 = load <1 x i64>, <1 x i64>* %x
%tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32>
%tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >
%tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64>
@@ -97,7 +97,7 @@ define void @test2() nounwind {
; X64-NEXT: movq %xmm0, (%rax)
; X64-NEXT: retq
entry:
- %0 = load <2 x i32>* @tmp_V2i, align 8
+ %0 = load <2 x i32>, <2 x i32>* @tmp_V2i, align 8
%1 = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer
store <2 x i32> %1, <2 x i32>* @tmp_V2i, align 8
ret void
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-sse1.ll b/llvm/test/CodeGen/X86/vector-shuffle-sse1.ll
index b4cb0ec1442..66e53bbb750 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-sse1.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-sse1.ll
@@ -176,7 +176,7 @@ define <4 x float> @insert_mem_and_zero_v4f32(float* %ptr) {
; SSE1: # BB#0:
; SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE1-NEXT: retq
- %a = load float* %ptr
+ %a = load float, float* %ptr
%v = insertelement <4 x float> undef, float %a, i32 0
%shuffle = shufflevector <4 x float> %v, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
ret <4 x float> %shuffle
@@ -197,7 +197,7 @@ define <4 x float> @insert_mem_lo_v4f32(<2 x float>* %ptr, <4 x float> %b) {
; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3]
; SSE1-NEXT: movaps %xmm1, %xmm0
; SSE1-NEXT: retq
- %a = load <2 x float>* %ptr
+ %a = load <2 x float>, <2 x float>* %ptr
%v = shufflevector <2 x float> %a, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%shuffle = shufflevector <4 x float> %v, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
ret <4 x float> %shuffle
@@ -217,7 +217,7 @@ define <4 x float> @insert_mem_hi_v4f32(<2 x float>* %ptr, <4 x float> %b) {
; SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,1]
; SSE1-NEXT: retq
- %a = load <2 x float>* %ptr
+ %a = load <2 x float>, <2 x float>* %ptr
%v = shufflevector <2 x float> %a, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%shuffle = shufflevector <4 x float> %v, <4 x float> %b, <4 x i32> <i32 4, i32 5, i32 0, i32 1>
ret <4 x float> %shuffle
@@ -229,7 +229,7 @@ define <4 x float> @shuffle_mem_v4f32_3210(<4 x float>* %ptr) {
; SSE1-NEXT: movaps (%rdi), %xmm0
; SSE1-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; SSE1-NEXT: retq
- %a = load <4 x float>* %ptr
+ %a = load <4 x float>, <4 x float>* %ptr
%shuffle = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
ret <4 x float> %shuffle
}
diff --git a/llvm/test/CodeGen/X86/vector-variable-idx2.ll b/llvm/test/CodeGen/X86/vector-variable-idx2.ll
index 6e8ae2e42c9..df65257bac7 100644
--- a/llvm/test/CodeGen/X86/vector-variable-idx2.ll
+++ b/llvm/test/CodeGen/X86/vector-variable-idx2.ll
@@ -8,8 +8,8 @@ define i64 @__builtin_ia32_vec_ext_v2di(<2 x i64> %a, i32 %i) nounwind {
%2 = alloca i32, align 4
store <2 x i64> %a, <2 x i64>* %1, align 16
store i32 %i, i32* %2, align 4
- %3 = load <2 x i64>* %1, align 16
- %4 = load i32* %2, align 4
+ %3 = load <2 x i64>, <2 x i64>* %1, align 16
+ %4 = load i32, i32* %2, align 4
%5 = extractelement <2 x i64> %3, i32 %4
ret i64 %5
}
@@ -19,8 +19,8 @@ define <2 x i64> @__builtin_ia32_vec_int_v2di(<2 x i64> %a, i32 %i) nounwind {
%2 = alloca i32, align 4
store <2 x i64> %a, <2 x i64>* %1, align 16
store i32 %i, i32* %2, align 4
- %3 = load <2 x i64>* %1, align 16
- %4 = load i32* %2, align 4
+ %3 = load <2 x i64>, <2 x i64>* %1, align 16
+ %4 = load i32, i32* %2, align 4
%5 = insertelement <2 x i64> %3, i64 1, i32 %4
ret <2 x i64> %5
}
diff --git a/llvm/test/CodeGen/X86/vector-zext.ll b/llvm/test/CodeGen/X86/vector-zext.ll
index 568687dfd17..055c25679bc 100644
--- a/llvm/test/CodeGen/X86/vector-zext.ll
+++ b/llvm/test/CodeGen/X86/vector-zext.ll
@@ -236,7 +236,7 @@ define <16 x i16> @load_zext_16i8_to_16i16(<16 x i8> *%ptr) {
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX2-NEXT: retq
entry:
- %X = load <16 x i8>* %ptr
+ %X = load <16 x i8>, <16 x i8>* %ptr
%Y = zext <16 x i8> %X to <16 x i16>
ret <16 x i16> %Y
}
@@ -280,7 +280,7 @@ define <8 x i32> @load_zext_8i16_to_8i32(<8 x i16> *%ptr) {
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: retq
entry:
- %X = load <8 x i16>* %ptr
+ %X = load <8 x i16>, <8 x i16>* %ptr
%Y = zext <8 x i16> %X to <8 x i32>
ret <8 x i32>%Y
}
@@ -324,7 +324,7 @@ define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; AVX2-NEXT: retq
entry:
- %X = load <4 x i32>* %ptr
+ %X = load <4 x i32>, <4 x i32>* %ptr
%Y = zext <4 x i32> %X to <4 x i64>
ret <4 x i64>%Y
}
diff --git a/llvm/test/CodeGen/X86/vector-zmov.ll b/llvm/test/CodeGen/X86/vector-zmov.ll
index 4de2543a1d6..cf592b1e9f4 100644
--- a/llvm/test/CodeGen/X86/vector-zmov.ll
+++ b/llvm/test/CodeGen/X86/vector-zmov.ll
@@ -15,7 +15,7 @@ define <4 x i32> @load_zmov_4i32_to_0zzz(<4 x i32> *%ptr) {
; AVX-NEXT: vmovd (%rdi), %xmm0
; AVX-NEXT: retq
entry:
- %X = load <4 x i32>* %ptr
+ %X = load <4 x i32>, <4 x i32>* %ptr
%Y = shufflevector <4 x i32> %X, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 4, i32 4, i32 4>
ret <4 x i32>%Y
}
@@ -31,7 +31,7 @@ define <2 x i64> @load_zmov_2i64_to_0z(<2 x i64> *%ptr) {
; AVX-NEXT: vmovq (%rdi), %xmm0
; AVX-NEXT: retq
entry:
- %X = load <2 x i64>* %ptr
+ %X = load <2 x i64>, <2 x i64>* %ptr
%Y = shufflevector <2 x i64> %X, <2 x i64> zeroinitializer, <2 x i32> <i32 0, i32 2>
ret <2 x i64>%Y
}
diff --git a/llvm/test/CodeGen/X86/vector.ll b/llvm/test/CodeGen/X86/vector.ll
index 82d20a23f35..39e7f0e0b06 100644
--- a/llvm/test/CodeGen/X86/vector.ll
+++ b/llvm/test/CodeGen/X86/vector.ll
@@ -13,48 +13,48 @@
;;; TEST HANDLING OF VARIOUS VECTOR SIZES
define void @test_f1(%f1* %P, %f1* %Q, %f1* %S) {
- %p = load %f1* %P ; <%f1> [#uses=1]
- %q = load %f1* %Q ; <%f1> [#uses=1]
+ %p = load %f1, %f1* %P ; <%f1> [#uses=1]
+ %q = load %f1, %f1* %Q ; <%f1> [#uses=1]
%R = fadd %f1 %p, %q ; <%f1> [#uses=1]
store %f1 %R, %f1* %S
ret void
}
define void @test_f2(%f2* %P, %f2* %Q, %f2* %S) {
- %p = load %f2* %P ; <%f2> [#uses=1]
- %q = load %f2* %Q ; <%f2> [#uses=1]
+ %p = load %f2, %f2* %P ; <%f2> [#uses=1]
+ %q = load %f2, %f2* %Q ; <%f2> [#uses=1]
%R = fadd %f2 %p, %q ; <%f2> [#uses=1]
store %f2 %R, %f2* %S
ret void
}
define void @test_f4(%f4* %P, %f4* %Q, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
- %q = load %f4* %Q ; <%f4> [#uses=1]
+ %p = load %f4, %f4* %P ; <%f4> [#uses=1]
+ %q = load %f4, %f4* %Q ; <%f4> [#uses=1]
%R = fadd %f4 %p, %q ; <%f4> [#uses=1]
store %f4 %R, %f4* %S
ret void
}
define void @test_f8(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
+ %p = load %f8, %f8* %P ; <%f8> [#uses=1]
+ %q = load %f8, %f8* %Q ; <%f8> [#uses=1]
%R = fadd %f8 %p, %q ; <%f8> [#uses=1]
store %f8 %R, %f8* %S
ret void
}
define void @test_fmul(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
+ %p = load %f8, %f8* %P ; <%f8> [#uses=1]
+ %q = load %f8, %f8* %Q ; <%f8> [#uses=1]
%R = fmul %f8 %p, %q ; <%f8> [#uses=1]
store %f8 %R, %f8* %S
ret void
}
define void @test_div(%f8* %P, %f8* %Q, %f8* %S) {
- %p = load %f8* %P ; <%f8> [#uses=1]
- %q = load %f8* %Q ; <%f8> [#uses=1]
+ %p = load %f8, %f8* %P ; <%f8> [#uses=1]
+ %q = load %f8, %f8* %Q ; <%f8> [#uses=1]
%R = fdiv %f8 %p, %q ; <%f8> [#uses=1]
store %f8 %R, %f8* %S
ret void
@@ -63,21 +63,21 @@ define void @test_div(%f8* %P, %f8* %Q, %f8* %S) {
;;; TEST VECTOR CONSTRUCTS
define void @test_cst(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
+ %p = load %f4, %f4* %P ; <%f4> [#uses=1]
%R = fadd %f4 %p, < float 0x3FB99999A0000000, float 1.000000e+00, float 2.000000e+00, float 4.500000e+00 > ; <%f4> [#uses=1]
store %f4 %R, %f4* %S
ret void
}
define void @test_zero(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
+ %p = load %f4, %f4* %P ; <%f4> [#uses=1]
%R = fadd %f4 %p, zeroinitializer ; <%f4> [#uses=1]
store %f4 %R, %f4* %S
ret void
}
define void @test_undef(%f4* %P, %f4* %S) {
- %p = load %f4* %P ; <%f4> [#uses=1]
+ %p = load %f4, %f4* %P ; <%f4> [#uses=1]
%R = fadd %f4 %p, undef ; <%f4> [#uses=1]
store %f4 %R, %f4* %S
ret void
@@ -102,19 +102,19 @@ define void @test_scalar_to_vector(float %F, %f4* %S) {
}
define float @test_extract_elt(%f8* %P) {
- %p = load %f8* %P ; <%f8> [#uses=1]
+ %p = load %f8, %f8* %P ; <%f8> [#uses=1]
%R = extractelement %f8 %p, i32 3 ; <float> [#uses=1]
ret float %R
}
define double @test_extract_elt2(%d8* %P) {
- %p = load %d8* %P ; <%d8> [#uses=1]
+ %p = load %d8, %d8* %P ; <%d8> [#uses=1]
%R = extractelement %d8 %p, i32 3 ; <double> [#uses=1]
ret double %R
}
define void @test_cast_1(%f4* %b, %i4* %a) {
- %tmp = load %f4* %b ; <%f4> [#uses=1]
+ %tmp = load %f4, %f4* %b ; <%f4> [#uses=1]
%tmp2 = fadd %f4 %tmp, < float 1.000000e+00, float 2.000000e+00, float 3.000000e+00, float 4.000000e+00 > ; <%f4> [#uses=1]
%tmp3 = bitcast %f4 %tmp2 to %i4 ; <%i4> [#uses=1]
%tmp4 = add %i4 %tmp3, < i32 1, i32 2, i32 3, i32 4 > ; <%i4> [#uses=1]
@@ -123,7 +123,7 @@ define void @test_cast_1(%f4* %b, %i4* %a) {
}
define void @test_cast_2(%f8* %a, <8 x i32>* %b) {
- %T = load %f8* %a ; <%f8> [#uses=1]
+ %T = load %f8, %f8* %a ; <%f8> [#uses=1]
%T2 = bitcast %f8 %T to <8 x i32> ; <<8 x i32>> [#uses=1]
store <8 x i32> %T2, <8 x i32>* %b
ret void
@@ -137,7 +137,7 @@ define void @splat(%f4* %P, %f4* %Q, float %X) {
%tmp2 = insertelement %f4 %tmp, float %X, i32 1 ; <%f4> [#uses=1]
%tmp4 = insertelement %f4 %tmp2, float %X, i32 2 ; <%f4> [#uses=1]
%tmp6 = insertelement %f4 %tmp4, float %X, i32 3 ; <%f4> [#uses=1]
- %q = load %f4* %Q ; <%f4> [#uses=1]
+ %q = load %f4, %f4* %Q ; <%f4> [#uses=1]
%R = fadd %f4 %q, %tmp6 ; <%f4> [#uses=1]
store %f4 %R, %f4* %P
ret void
@@ -148,7 +148,7 @@ define void @splat_i4(%i4* %P, %i4* %Q, i32 %X) {
%tmp2 = insertelement %i4 %tmp, i32 %X, i32 1 ; <%i4> [#uses=1]
%tmp4 = insertelement %i4 %tmp2, i32 %X, i32 2 ; <%i4> [#uses=1]
%tmp6 = insertelement %i4 %tmp4, i32 %X, i32 3 ; <%i4> [#uses=1]
- %q = load %i4* %Q ; <%i4> [#uses=1]
+ %q = load %i4, %i4* %Q ; <%i4> [#uses=1]
%R = add %i4 %q, %tmp6 ; <%i4> [#uses=1]
store %i4 %R, %i4* %P
ret void
diff --git a/llvm/test/CodeGen/X86/viabs.ll b/llvm/test/CodeGen/X86/viabs.ll
index c00923594e7..fe528fd4ea2 100644
--- a/llvm/test/CodeGen/X86/viabs.ll
+++ b/llvm/test/CodeGen/X86/viabs.ll
@@ -262,7 +262,7 @@ define <8 x i64> @test13(<8 x i64>* %a.ptr) nounwind {
; AVX512-LABEL: test13:
; AVX512: vpabsq (%
; AVX512-NEXT: ret
- %a = load <8 x i64>* %a.ptr, align 8
+ %a = load <8 x i64>, <8 x i64>* %a.ptr, align 8
%tmp1neg = sub <8 x i64> zeroinitializer, %a
%b = icmp sle <8 x i64> %a, zeroinitializer
%abs = select <8 x i1> %b, <8 x i64> %tmp1neg, <8 x i64> %a
diff --git a/llvm/test/CodeGen/X86/visibility2.ll b/llvm/test/CodeGen/X86/visibility2.ll
index 72ea7338de6..48a0ac69219 100644
--- a/llvm/test/CodeGen/X86/visibility2.ll
+++ b/llvm/test/CodeGen/X86/visibility2.ll
@@ -8,7 +8,7 @@
define void @foo1() nounwind ssp {
entry:
- %tmp = load i8** @foo_private_extern_str, align 8
+ %tmp = load i8*, i8** @foo_private_extern_str, align 8
call void @foo3(i8* %tmp)
ret void
}
diff --git a/llvm/test/CodeGen/X86/volatile.ll b/llvm/test/CodeGen/X86/volatile.ll
index 1a82014536e..8d521b46f7c 100644
--- a/llvm/test/CodeGen/X86/volatile.ll
+++ b/llvm/test/CodeGen/X86/volatile.ll
@@ -4,14 +4,14 @@
@x = external global double
define void @foo() nounwind {
- %a = load volatile double* @x
+ %a = load volatile double, double* @x
store volatile double 0.0, double* @x
store volatile double 0.0, double* @x
- %b = load volatile double* @x
+ %b = load volatile double, double* @x
ret void
}
define void @bar() nounwind {
- %c = load volatile double* @x
+ %c = load volatile double, double* @x
ret void
}
diff --git a/llvm/test/CodeGen/X86/vselect-avx.ll b/llvm/test/CodeGen/X86/vselect-avx.ll
index c5e2a6b6ba3..ff26aebaac6 100644
--- a/llvm/test/CodeGen/X86/vselect-avx.ll
+++ b/llvm/test/CodeGen/X86/vselect-avx.ll
@@ -43,7 +43,7 @@ body:
define void @test2(double** %call1559, i64 %indvars.iv4198, <4 x i1> %tmp1895) {
bb:
%arrayidx1928 = getelementptr inbounds double*, double** %call1559, i64 %indvars.iv4198
- %tmp1888 = load double** %arrayidx1928, align 8
+ %tmp1888 = load double*, double** %arrayidx1928, align 8
%predphi.v.v = select <4 x i1> %tmp1895, <4 x double> <double -5.000000e-01, double -5.000000e-01, double -5.000000e-01, double -5.000000e-01>, <4 x double> <double 5.000000e-01, double 5.000000e-01, double 5.000000e-01, double 5.000000e-01>
%tmp1900 = bitcast double* %tmp1888 to <4 x double>*
store <4 x double> %predphi.v.v, <4 x double>* %tmp1900, align 8
diff --git a/llvm/test/CodeGen/X86/vselect-minmax.ll b/llvm/test/CodeGen/X86/vselect-minmax.ll
index feacf089f47..5ed687f5057 100644
--- a/llvm/test/CodeGen/X86/vselect-minmax.ll
+++ b/llvm/test/CodeGen/X86/vselect-minmax.ll
@@ -15,8 +15,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp slt <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -50,8 +50,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp sle <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -85,8 +85,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp sgt <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -120,8 +120,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp sge <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -155,8 +155,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp ult <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -190,8 +190,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp ule <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -225,8 +225,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp ugt <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -260,8 +260,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp uge <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.a, <16 x i8> %load.b
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -295,8 +295,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp slt <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -330,8 +330,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp sle <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -365,8 +365,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp sgt <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -400,8 +400,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp sge <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -435,8 +435,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp ult <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -470,8 +470,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp ule <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -505,8 +505,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp ugt <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -540,8 +540,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp uge <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.a, <8 x i16> %load.b
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -575,8 +575,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp slt <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -610,8 +610,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp sle <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -645,8 +645,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp sgt <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -680,8 +680,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp sge <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -715,8 +715,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp ult <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -750,8 +750,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp ule <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -785,8 +785,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp ugt <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -820,8 +820,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp uge <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.a, <4 x i32> %load.b
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -855,8 +855,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp slt <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -884,8 +884,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp sle <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -913,8 +913,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp sgt <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -942,8 +942,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp sge <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -971,8 +971,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp ult <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -1000,8 +1000,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp ule <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -1029,8 +1029,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp ugt <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -1058,8 +1058,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp uge <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.a, <32 x i8> %load.b
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -1087,8 +1087,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp slt <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -1116,8 +1116,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp sle <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -1145,8 +1145,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp sgt <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -1174,8 +1174,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp sge <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -1203,8 +1203,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp ult <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -1232,8 +1232,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp ule <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -1261,8 +1261,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp ugt <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -1290,8 +1290,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp uge <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.a, <16 x i16> %load.b
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -1319,8 +1319,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp slt <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -1348,8 +1348,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp sle <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -1377,8 +1377,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp sgt <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -1406,8 +1406,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp sge <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -1435,8 +1435,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp ult <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -1464,8 +1464,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp ule <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -1493,8 +1493,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp ugt <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -1522,8 +1522,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp uge <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.a, <8 x i32> %load.b
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -1551,8 +1551,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp slt <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -1586,8 +1586,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp sle <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -1621,8 +1621,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp sgt <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -1656,8 +1656,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp sge <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -1691,8 +1691,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp ult <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -1726,8 +1726,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp ule <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -1761,8 +1761,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp ugt <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -1796,8 +1796,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
- %load.a = load <16 x i8>* %ptr.a, align 2
- %load.b = load <16 x i8>* %ptr.b, align 2
+ %load.a = load <16 x i8>, <16 x i8>* %ptr.a, align 2
+ %load.b = load <16 x i8>, <16 x i8>* %ptr.b, align 2
%cmp = icmp uge <16 x i8> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i8> %load.b, <16 x i8> %load.a
store <16 x i8> %sel, <16 x i8>* %ptr.a, align 2
@@ -1831,8 +1831,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp slt <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -1866,8 +1866,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp sle <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -1901,8 +1901,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp sgt <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -1936,8 +1936,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp sge <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -1971,8 +1971,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp ult <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -2006,8 +2006,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp ule <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -2041,8 +2041,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp ugt <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -2076,8 +2076,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
- %load.a = load <8 x i16>* %ptr.a, align 2
- %load.b = load <8 x i16>* %ptr.b, align 2
+ %load.a = load <8 x i16>, <8 x i16>* %ptr.a, align 2
+ %load.b = load <8 x i16>, <8 x i16>* %ptr.b, align 2
%cmp = icmp uge <8 x i16> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i16> %load.b, <8 x i16> %load.a
store <8 x i16> %sel, <8 x i16>* %ptr.a, align 2
@@ -2111,8 +2111,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp slt <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -2146,8 +2146,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp sle <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -2181,8 +2181,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp sgt <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -2216,8 +2216,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp sge <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -2251,8 +2251,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp ult <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -2286,8 +2286,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp ule <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -2321,8 +2321,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp ugt <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -2356,8 +2356,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
- %load.a = load <4 x i32>* %ptr.a, align 2
- %load.b = load <4 x i32>* %ptr.b, align 2
+ %load.a = load <4 x i32>, <4 x i32>* %ptr.a, align 2
+ %load.b = load <4 x i32>, <4 x i32>* %ptr.b, align 2
%cmp = icmp uge <4 x i32> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i32> %load.b, <4 x i32> %load.a
store <4 x i32> %sel, <4 x i32>* %ptr.a, align 2
@@ -2391,8 +2391,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp slt <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -2420,8 +2420,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp sle <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -2449,8 +2449,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp sgt <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -2478,8 +2478,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp sge <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -2507,8 +2507,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp ult <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -2536,8 +2536,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp ule <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -2565,8 +2565,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp ugt <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -2594,8 +2594,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
- %load.a = load <32 x i8>* %ptr.a, align 2
- %load.b = load <32 x i8>* %ptr.b, align 2
+ %load.a = load <32 x i8>, <32 x i8>* %ptr.a, align 2
+ %load.b = load <32 x i8>, <32 x i8>* %ptr.b, align 2
%cmp = icmp uge <32 x i8> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i8> %load.b, <32 x i8> %load.a
store <32 x i8> %sel, <32 x i8>* %ptr.a, align 2
@@ -2623,8 +2623,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp slt <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -2652,8 +2652,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp sle <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -2681,8 +2681,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp sgt <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -2710,8 +2710,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp sge <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -2739,8 +2739,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp ult <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -2768,8 +2768,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp ule <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -2797,8 +2797,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp ugt <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -2826,8 +2826,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
- %load.a = load <16 x i16>* %ptr.a, align 2
- %load.b = load <16 x i16>* %ptr.b, align 2
+ %load.a = load <16 x i16>, <16 x i16>* %ptr.a, align 2
+ %load.b = load <16 x i16>, <16 x i16>* %ptr.b, align 2
%cmp = icmp uge <16 x i16> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i16> %load.b, <16 x i16> %load.a
store <16 x i16> %sel, <16 x i16>* %ptr.a, align 2
@@ -2855,8 +2855,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp slt <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -2884,8 +2884,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp sle <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -2913,8 +2913,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp sgt <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -2942,8 +2942,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp sge <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -2971,8 +2971,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp ult <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -3000,8 +3000,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp ule <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -3029,8 +3029,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp ugt <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -3058,8 +3058,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
- %load.a = load <8 x i32>* %ptr.a, align 2
- %load.b = load <8 x i32>* %ptr.b, align 2
+ %load.a = load <8 x i32>, <8 x i32>* %ptr.a, align 2
+ %load.b = load <8 x i32>, <8 x i32>* %ptr.b, align 2
%cmp = icmp uge <8 x i32> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i32> %load.b, <8 x i32> %load.a
store <8 x i32> %sel, <8 x i32>* %ptr.a, align 2
@@ -3089,8 +3089,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp slt <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -3115,8 +3115,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp sle <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -3141,8 +3141,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp sgt <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -3167,8 +3167,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp sge <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -3193,8 +3193,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp ult <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -3219,8 +3219,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp ule <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -3245,8 +3245,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp ugt <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -3271,8 +3271,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp uge <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.a, <64 x i8> %load.b
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -3297,8 +3297,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp slt <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -3323,8 +3323,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp sle <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -3349,8 +3349,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp sgt <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -3375,8 +3375,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp sge <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -3401,8 +3401,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp ult <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -3427,8 +3427,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp ule <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -3453,8 +3453,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp ugt <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -3479,8 +3479,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp uge <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.a, <32 x i16> %load.b
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -3505,8 +3505,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp slt <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -3531,8 +3531,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp sle <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -3557,8 +3557,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp sgt <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -3583,8 +3583,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp sge <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -3609,8 +3609,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp ult <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -3635,8 +3635,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp ule <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -3661,8 +3661,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp ugt <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -3687,8 +3687,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp uge <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.a, <16 x i32> %load.b
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -3713,8 +3713,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp slt <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -3739,8 +3739,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp sle <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -3765,8 +3765,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp sgt <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -3791,8 +3791,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp sge <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -3817,8 +3817,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp ult <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -3843,8 +3843,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp ule <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -3869,8 +3869,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp ugt <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -3895,8 +3895,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp uge <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.a, <8 x i64> %load.b
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -3921,8 +3921,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp slt <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -3947,8 +3947,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp sle <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -3973,8 +3973,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp sgt <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -3999,8 +3999,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp sge <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -4025,8 +4025,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp ult <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -4051,8 +4051,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp ule <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -4077,8 +4077,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp ugt <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -4103,8 +4103,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
- %load.a = load <64 x i8>* %ptr.a, align 2
- %load.b = load <64 x i8>* %ptr.b, align 2
+ %load.a = load <64 x i8>, <64 x i8>* %ptr.a, align 2
+ %load.b = load <64 x i8>, <64 x i8>* %ptr.b, align 2
%cmp = icmp uge <64 x i8> %load.a, %load.b
%sel = select <64 x i1> %cmp, <64 x i8> %load.b, <64 x i8> %load.a
store <64 x i8> %sel, <64 x i8>* %ptr.a, align 2
@@ -4129,8 +4129,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp slt <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -4155,8 +4155,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp sle <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -4181,8 +4181,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp sgt <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -4207,8 +4207,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp sge <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -4233,8 +4233,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp ult <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -4259,8 +4259,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp ule <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -4285,8 +4285,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp ugt <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -4311,8 +4311,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
- %load.a = load <32 x i16>* %ptr.a, align 2
- %load.b = load <32 x i16>* %ptr.b, align 2
+ %load.a = load <32 x i16>, <32 x i16>* %ptr.a, align 2
+ %load.b = load <32 x i16>, <32 x i16>* %ptr.b, align 2
%cmp = icmp uge <32 x i16> %load.a, %load.b
%sel = select <32 x i1> %cmp, <32 x i16> %load.b, <32 x i16> %load.a
store <32 x i16> %sel, <32 x i16>* %ptr.a, align 2
@@ -4337,8 +4337,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp slt <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -4363,8 +4363,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp sle <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -4389,8 +4389,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp sgt <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -4415,8 +4415,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp sge <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -4441,8 +4441,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp ult <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -4467,8 +4467,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp ule <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -4493,8 +4493,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp ugt <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -4519,8 +4519,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
- %load.a = load <16 x i32>* %ptr.a, align 2
- %load.b = load <16 x i32>* %ptr.b, align 2
+ %load.a = load <16 x i32>, <16 x i32>* %ptr.a, align 2
+ %load.b = load <16 x i32>, <16 x i32>* %ptr.b, align 2
%cmp = icmp uge <16 x i32> %load.a, %load.b
%sel = select <16 x i1> %cmp, <16 x i32> %load.b, <16 x i32> %load.a
store <16 x i32> %sel, <16 x i32>* %ptr.a, align 2
@@ -4547,8 +4547,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp slt <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -4573,8 +4573,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp sle <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -4599,8 +4599,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp sgt <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -4625,8 +4625,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp sge <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -4651,8 +4651,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp ult <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -4677,8 +4677,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp ule <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -4703,8 +4703,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp ugt <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -4729,8 +4729,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
- %load.a = load <8 x i64>* %ptr.a, align 2
- %load.b = load <8 x i64>* %ptr.b, align 2
+ %load.a = load <8 x i64>, <8 x i64>* %ptr.a, align 2
+ %load.b = load <8 x i64>, <8 x i64>* %ptr.b, align 2
%cmp = icmp uge <8 x i64> %load.a, %load.b
%sel = select <8 x i1> %cmp, <8 x i64> %load.b, <8 x i64> %load.a
store <8 x i64> %sel, <8 x i64>* %ptr.a, align 2
@@ -4755,8 +4755,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp slt <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -4781,8 +4781,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp sle <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -4807,8 +4807,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp sgt <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -4833,8 +4833,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp sge <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -4859,8 +4859,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp ult <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -4885,8 +4885,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp ule <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -4911,8 +4911,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp ugt <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -4937,8 +4937,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp uge <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.a, <4 x i64> %load.b
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -4963,8 +4963,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp slt <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -4989,8 +4989,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp sle <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -5015,8 +5015,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp sgt <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -5041,8 +5041,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp sge <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -5067,8 +5067,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp ult <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -5093,8 +5093,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp ule <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -5119,8 +5119,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp ugt <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -5145,8 +5145,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
- %load.a = load <4 x i64>* %ptr.a, align 2
- %load.b = load <4 x i64>* %ptr.b, align 2
+ %load.a = load <4 x i64>, <4 x i64>* %ptr.a, align 2
+ %load.b = load <4 x i64>, <4 x i64>* %ptr.b, align 2
%cmp = icmp uge <4 x i64> %load.a, %load.b
%sel = select <4 x i1> %cmp, <4 x i64> %load.b, <4 x i64> %load.a
store <4 x i64> %sel, <4 x i64>* %ptr.a, align 2
@@ -5171,8 +5171,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp slt <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5197,8 +5197,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp sle <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5223,8 +5223,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp sgt <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5249,8 +5249,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp sge <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5275,8 +5275,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp ult <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5301,8 +5301,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp ule <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5327,8 +5327,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp ugt <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5353,8 +5353,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp uge <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.a, <2 x i64> %load.b
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5379,8 +5379,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp slt <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5405,8 +5405,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp sle <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5431,8 +5431,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp sgt <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5457,8 +5457,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp sge <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5483,8 +5483,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp ult <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5509,8 +5509,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp ule <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5535,8 +5535,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp ugt <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
@@ -5561,8 +5561,8 @@ vector.body: ; preds = %vector.body, %vecto
%gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
- %load.a = load <2 x i64>* %ptr.a, align 2
- %load.b = load <2 x i64>* %ptr.b, align 2
+ %load.a = load <2 x i64>, <2 x i64>* %ptr.a, align 2
+ %load.b = load <2 x i64>, <2 x i64>* %ptr.b, align 2
%cmp = icmp uge <2 x i64> %load.a, %load.b
%sel = select <2 x i1> %cmp, <2 x i64> %load.b, <2 x i64> %load.a
store <2 x i64> %sel, <2 x i64>* %ptr.a, align 2
diff --git a/llvm/test/CodeGen/X86/vshift-5.ll b/llvm/test/CodeGen/X86/vshift-5.ll
index 562e520c552..a6ae8d54bef 100644
--- a/llvm/test/CodeGen/X86/vshift-5.ll
+++ b/llvm/test/CodeGen/X86/vshift-5.ll
@@ -7,7 +7,7 @@ entry:
; CHECK-LABEL: shift5a:
; CHECK: movd
; CHECK: pslld
- %amt = load i32* %pamt
+ %amt = load i32, i32* %pamt
%tmp0 = insertelement <4 x i32> undef, i32 %amt, i32 0
%shamt = shufflevector <4 x i32> %tmp0, <4 x i32> undef, <4 x i32> zeroinitializer
%shl = shl <4 x i32> %val, %shamt
@@ -21,7 +21,7 @@ entry:
; CHECK-LABEL: shift5b:
; CHECK: movd
; CHECK: psrad
- %amt = load i32* %pamt
+ %amt = load i32, i32* %pamt
%tmp0 = insertelement <4 x i32> undef, i32 %amt, i32 0
%shamt = shufflevector <4 x i32> %tmp0, <4 x i32> undef, <4 x i32> zeroinitializer
%shr = ashr <4 x i32> %val, %shamt
diff --git a/llvm/test/CodeGen/X86/vshift-6.ll b/llvm/test/CodeGen/X86/vshift-6.ll
index 175b649f357..551a1385003 100644
--- a/llvm/test/CodeGen/X86/vshift-6.ll
+++ b/llvm/test/CodeGen/X86/vshift-6.ll
@@ -25,7 +25,7 @@
define <16 x i8> @do_not_crash(i8*, i32*, i64*, i32, i64, i8) {
entry:
store i8 %5, i8* %0
- %L5 = load i8* %0
+ %L5 = load i8, i8* %0
%I8 = insertelement <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, i8 %L5, i32 7
%B51 = shl <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, %I8
ret <16 x i8> %B51
diff --git a/llvm/test/CodeGen/X86/weak_def_can_be_hidden.ll b/llvm/test/CodeGen/X86/weak_def_can_be_hidden.ll
index b17f372afed..8e6d34c89d8 100644
--- a/llvm/test/CodeGen/X86/weak_def_can_be_hidden.ll
+++ b/llvm/test/CodeGen/X86/weak_def_can_be_hidden.ll
@@ -12,7 +12,7 @@
; CHECK-D89: .weak_definition _v1
define i32 @f1() {
- %x = load i32 * @v1
+ %x = load i32 , i32 * @v1
ret i32 %x
}
@@ -46,6 +46,6 @@ define i32* @f3() {
; CHECK-D89: .weak_definition _v4
define i32 @f4() {
- %x = load i32 * @v4
+ %x = load i32 , i32 * @v4
ret i32 %x
}
diff --git a/llvm/test/CodeGen/X86/widen_arith-1.ll b/llvm/test/CodeGen/X86/widen_arith-1.ll
index 03459d506c4..5663b8b4094 100644
--- a/llvm/test/CodeGen/X86/widen_arith-1.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-1.ll
@@ -16,25 +16,25 @@ entry:
br label %forcond
forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
+ %tmp = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %n.addr ; <i32> [#uses=1]
%cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
br i1 %cmp, label %forbody, label %afterfor
forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load <3 x i8>** %dst.addr ; <<3 x i8>*> [#uses=1]
+ %tmp2 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp3 = load <3 x i8>*, <3 x i8>** %dst.addr ; <<3 x i8>*> [#uses=1]
%arrayidx = getelementptr <3 x i8>, <3 x i8>* %tmp3, i32 %tmp2 ; <<3 x i8>*> [#uses=1]
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load <3 x i8>** %src.addr ; <<3 x i8>*> [#uses=1]
+ %tmp4 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp5 = load <3 x i8>*, <3 x i8>** %src.addr ; <<3 x i8>*> [#uses=1]
%arrayidx6 = getelementptr <3 x i8>, <3 x i8>* %tmp5, i32 %tmp4 ; <<3 x i8>*> [#uses=1]
- %tmp7 = load <3 x i8>* %arrayidx6 ; <<3 x i8>> [#uses=1]
+ %tmp7 = load <3 x i8>, <3 x i8>* %arrayidx6 ; <<3 x i8>> [#uses=1]
%add = add <3 x i8> %tmp7, < i8 1, i8 1, i8 1 > ; <<3 x i8>> [#uses=1]
store <3 x i8> %add, <3 x i8>* %arrayidx
br label %forinc
forinc: ; preds = %forbody
- %tmp8 = load i32* %i ; <i32> [#uses=1]
+ %tmp8 = load i32, i32* %i ; <i32> [#uses=1]
%inc = add i32 %tmp8, 1 ; <i32> [#uses=1]
store i32 %inc, i32* %i
br label %forcond
diff --git a/llvm/test/CodeGen/X86/widen_arith-2.ll b/llvm/test/CodeGen/X86/widen_arith-2.ll
index fb775a5ad8d..6c219c1720e 100644
--- a/llvm/test/CodeGen/X86/widen_arith-2.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-2.ll
@@ -19,36 +19,36 @@ entry:
br label %forcond
forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
+ %tmp = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %n.addr ; <i32> [#uses=1]
%cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
br i1 %cmp, label %forbody, label %afterfor
forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load i64** %dst_i.addr ; <i64*> [#uses=1]
+ %tmp2 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp3 = load i64*, i64** %dst_i.addr ; <i64*> [#uses=1]
%arrayidx = getelementptr i64, i64* %tmp3, i32 %tmp2 ; <i64*> [#uses=1]
%conv = bitcast i64* %arrayidx to <8 x i8>* ; <<8 x i8>*> [#uses=1]
store <8 x i8>* %conv, <8 x i8>** %dst
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load i64** %src_i.addr ; <i64*> [#uses=1]
+ %tmp4 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp5 = load i64*, i64** %src_i.addr ; <i64*> [#uses=1]
%arrayidx6 = getelementptr i64, i64* %tmp5, i32 %tmp4 ; <i64*> [#uses=1]
%conv7 = bitcast i64* %arrayidx6 to <8 x i8>* ; <<8 x i8>*> [#uses=1]
store <8 x i8>* %conv7, <8 x i8>** %src
- %tmp8 = load i32* %i ; <i32> [#uses=1]
- %tmp9 = load <8 x i8>** %dst ; <<8 x i8>*> [#uses=1]
+ %tmp8 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp9 = load <8 x i8>*, <8 x i8>** %dst ; <<8 x i8>*> [#uses=1]
%arrayidx10 = getelementptr <8 x i8>, <8 x i8>* %tmp9, i32 %tmp8 ; <<8 x i8>*> [#uses=1]
- %tmp11 = load i32* %i ; <i32> [#uses=1]
- %tmp12 = load <8 x i8>** %src ; <<8 x i8>*> [#uses=1]
+ %tmp11 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp12 = load <8 x i8>*, <8 x i8>** %src ; <<8 x i8>*> [#uses=1]
%arrayidx13 = getelementptr <8 x i8>, <8 x i8>* %tmp12, i32 %tmp11 ; <<8 x i8>*> [#uses=1]
- %tmp14 = load <8 x i8>* %arrayidx13 ; <<8 x i8>> [#uses=1]
+ %tmp14 = load <8 x i8>, <8 x i8>* %arrayidx13 ; <<8 x i8>> [#uses=1]
%add = add <8 x i8> %tmp14, < i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1 > ; <<8 x i8>> [#uses=1]
%and = and <8 x i8> %add, < i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4 > ; <<8 x i8>> [#uses=1]
store <8 x i8> %and, <8 x i8>* %arrayidx10
br label %forinc
forinc: ; preds = %forbody
- %tmp15 = load i32* %i ; <i32> [#uses=1]
+ %tmp15 = load i32, i32* %i ; <i32> [#uses=1]
%inc = add i32 %tmp15, 1 ; <i32> [#uses=1]
store i32 %inc, i32* %i
br label %forcond
diff --git a/llvm/test/CodeGen/X86/widen_arith-3.ll b/llvm/test/CodeGen/X86/widen_arith-3.ll
index 1574bc07603..aea7975a045 100644
--- a/llvm/test/CodeGen/X86/widen_arith-3.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-3.ll
@@ -21,25 +21,25 @@ entry:
br label %forcond
forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
+ %tmp = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %n.addr ; <i32> [#uses=1]
%cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
br i1 %cmp, label %forbody, label %afterfor
forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load <3 x i16>** %dst.addr ; <<3 x i16>*> [#uses=1]
+ %tmp2 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp3 = load <3 x i16>*, <3 x i16>** %dst.addr ; <<3 x i16>*> [#uses=1]
%arrayidx = getelementptr <3 x i16>, <3 x i16>* %tmp3, i32 %tmp2 ; <<3 x i16>*> [#uses=1]
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load <3 x i16>** %src.addr ; <<3 x i16>*> [#uses=1]
+ %tmp4 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp5 = load <3 x i16>*, <3 x i16>** %src.addr ; <<3 x i16>*> [#uses=1]
%arrayidx6 = getelementptr <3 x i16>, <3 x i16>* %tmp5, i32 %tmp4 ; <<3 x i16>*> [#uses=1]
- %tmp7 = load <3 x i16>* %arrayidx6 ; <<3 x i16>> [#uses=1]
+ %tmp7 = load <3 x i16>, <3 x i16>* %arrayidx6 ; <<3 x i16>> [#uses=1]
%add = add <3 x i16> %tmp7, < i16 1, i16 1, i16 1 > ; <<3 x i16>> [#uses=1]
store <3 x i16> %add, <3 x i16>* %arrayidx
br label %forinc
forinc: ; preds = %forbody
- %tmp8 = load i32* %i ; <i32> [#uses=1]
+ %tmp8 = load i32, i32* %i ; <i32> [#uses=1]
%inc = add i32 %tmp8, 1 ; <i32> [#uses=1]
store i32 %inc, i32* %i
br label %forcond
diff --git a/llvm/test/CodeGen/X86/widen_arith-4.ll b/llvm/test/CodeGen/X86/widen_arith-4.ll
index f50fcab99de..5dba063d8af 100644
--- a/llvm/test/CodeGen/X86/widen_arith-4.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-4.ll
@@ -19,26 +19,26 @@ entry:
br label %forcond
forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
+ %tmp = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %n.addr ; <i32> [#uses=1]
%cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
br i1 %cmp, label %forbody, label %afterfor
forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load <5 x i16>** %dst.addr ; <<5 x i16>*> [#uses=1]
+ %tmp2 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp3 = load <5 x i16>*, <5 x i16>** %dst.addr ; <<5 x i16>*> [#uses=1]
%arrayidx = getelementptr <5 x i16>, <5 x i16>* %tmp3, i32 %tmp2 ; <<5 x i16>*> [#uses=1]
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load <5 x i16>** %src.addr ; <<5 x i16>*> [#uses=1]
+ %tmp4 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp5 = load <5 x i16>*, <5 x i16>** %src.addr ; <<5 x i16>*> [#uses=1]
%arrayidx6 = getelementptr <5 x i16>, <5 x i16>* %tmp5, i32 %tmp4 ; <<5 x i16>*> [#uses=1]
- %tmp7 = load <5 x i16>* %arrayidx6 ; <<5 x i16>> [#uses=1]
+ %tmp7 = load <5 x i16>, <5 x i16>* %arrayidx6 ; <<5 x i16>> [#uses=1]
%sub = sub <5 x i16> %tmp7, < i16 271, i16 271, i16 271, i16 271, i16 271 > ; <<5 x i16>> [#uses=1]
%mul = mul <5 x i16> %sub, < i16 2, i16 4, i16 2, i16 2, i16 2 > ; <<5 x i16>> [#uses=1]
store <5 x i16> %mul, <5 x i16>* %arrayidx
br label %forinc
forinc: ; preds = %forbody
- %tmp8 = load i32* %i ; <i32> [#uses=1]
+ %tmp8 = load i32, i32* %i ; <i32> [#uses=1]
%inc = add i32 %tmp8, 1 ; <i32> [#uses=1]
store i32 %inc, i32* %i
br label %forcond
diff --git a/llvm/test/CodeGen/X86/widen_arith-5.ll b/llvm/test/CodeGen/X86/widen_arith-5.ll
index cdb0878f7a9..04c9ec2def2 100644
--- a/llvm/test/CodeGen/X86/widen_arith-5.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-5.ll
@@ -20,26 +20,26 @@ entry:
br label %forcond
forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
+ %tmp = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %n.addr ; <i32> [#uses=1]
%cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
br i1 %cmp, label %forbody, label %afterfor
forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load <3 x i32>** %dst.addr ; <<3 x i32>*> [#uses=1]
+ %tmp2 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp3 = load <3 x i32>*, <3 x i32>** %dst.addr ; <<3 x i32>*> [#uses=1]
%arrayidx = getelementptr <3 x i32>, <3 x i32>* %tmp3, i32 %tmp2 ; <<3 x i32>*> [#uses=1]
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load <3 x i32>** %src.addr ; <<3 x i32>*> [#uses=1]
+ %tmp4 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp5 = load <3 x i32>*, <3 x i32>** %src.addr ; <<3 x i32>*> [#uses=1]
%arrayidx6 = getelementptr <3 x i32>, <3 x i32>* %tmp5, i32 %tmp4 ; <<3 x i32>*> [#uses=1]
- %tmp7 = load <3 x i32>* %arrayidx6 ; <<3 x i32>> [#uses=1]
+ %tmp7 = load <3 x i32>, <3 x i32>* %arrayidx6 ; <<3 x i32>> [#uses=1]
%mul = mul <3 x i32> %tmp7, < i32 4, i32 4, i32 4 > ; <<3 x i32>> [#uses=1]
%sub = sub <3 x i32> %mul, < i32 3, i32 3, i32 3 > ; <<3 x i32>> [#uses=1]
store <3 x i32> %sub, <3 x i32>* %arrayidx
br label %forinc
forinc: ; preds = %forbody
- %tmp8 = load i32* %i ; <i32> [#uses=1]
+ %tmp8 = load i32, i32* %i ; <i32> [#uses=1]
%inc = add i32 %tmp8, 1 ; <i32> [#uses=1]
store i32 %inc, i32* %i
br label %forcond
diff --git a/llvm/test/CodeGen/X86/widen_arith-6.ll b/llvm/test/CodeGen/X86/widen_arith-6.ll
index 65ffeb5afba..09998a027d2 100644
--- a/llvm/test/CodeGen/X86/widen_arith-6.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-6.ll
@@ -19,27 +19,27 @@ entry:
br label %forcond
forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
+ %tmp = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %n.addr ; <i32> [#uses=1]
%cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
br i1 %cmp, label %forbody, label %afterfor
forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load <3 x float>** %dst.addr ; <<3 x float>*> [#uses=1]
+ %tmp2 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp3 = load <3 x float>*, <3 x float>** %dst.addr ; <<3 x float>*> [#uses=1]
%arrayidx = getelementptr <3 x float>, <3 x float>* %tmp3, i32 %tmp2 ; <<3 x float>*> [#uses=1]
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load <3 x float>** %src.addr ; <<3 x float>*> [#uses=1]
+ %tmp4 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp5 = load <3 x float>*, <3 x float>** %src.addr ; <<3 x float>*> [#uses=1]
%arrayidx6 = getelementptr <3 x float>, <3 x float>* %tmp5, i32 %tmp4 ; <<3 x float>*> [#uses=1]
- %tmp7 = load <3 x float>* %arrayidx6 ; <<3 x float>> [#uses=1]
- %tmp8 = load <3 x float>* %v ; <<3 x float>> [#uses=1]
+ %tmp7 = load <3 x float>, <3 x float>* %arrayidx6 ; <<3 x float>> [#uses=1]
+ %tmp8 = load <3 x float>, <3 x float>* %v ; <<3 x float>> [#uses=1]
%mul = fmul <3 x float> %tmp7, %tmp8 ; <<3 x float>> [#uses=1]
%add = fadd <3 x float> %mul, < float 0x409EE02900000000, float 0x409EE02900000000, float 0x409EE02900000000 > ; <<3 x float>> [#uses=1]
store <3 x float> %add, <3 x float>* %arrayidx
br label %forinc
forinc: ; preds = %forbody
- %tmp9 = load i32* %i ; <i32> [#uses=1]
+ %tmp9 = load i32, i32* %i ; <i32> [#uses=1]
%inc = add i32 %tmp9, 1 ; <i32> [#uses=1]
store i32 %inc, i32* %i
br label %forcond
diff --git a/llvm/test/CodeGen/X86/widen_cast-1.ll b/llvm/test/CodeGen/X86/widen_cast-1.ll
index 206d5a40a9a..6b7f489bf85 100644
--- a/llvm/test/CodeGen/X86/widen_cast-1.ll
+++ b/llvm/test/CodeGen/X86/widen_cast-1.ll
@@ -23,25 +23,25 @@ entry:
br label %forcond
forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
+ %tmp = load i32, i32* %i ; <i32> [#uses=1]
%cmp = icmp slt i32 %tmp, 4 ; <i1> [#uses=1]
br i1 %cmp, label %forbody, label %afterfor
forbody: ; preds = %forcond
- %tmp1 = load i32* %i ; <i32> [#uses=1]
- %tmp2 = load <2 x i32>** %dst.addr ; <<2 x i32>*> [#uses=1]
+ %tmp1 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp2 = load <2 x i32>*, <2 x i32>** %dst.addr ; <<2 x i32>*> [#uses=1]
%arrayidx = getelementptr <2 x i32>, <2 x i32>* %tmp2, i32 %tmp1 ; <<2 x i32>*> [#uses=1]
- %tmp3 = load i32* %i ; <i32> [#uses=1]
- %tmp4 = load <4 x i16>** %src.addr ; <<4 x i16>*> [#uses=1]
+ %tmp3 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp4 = load <4 x i16>*, <4 x i16>** %src.addr ; <<4 x i16>*> [#uses=1]
%arrayidx5 = getelementptr <4 x i16>, <4 x i16>* %tmp4, i32 %tmp3 ; <<4 x i16>*> [#uses=1]
- %tmp6 = load <4 x i16>* %arrayidx5 ; <<4 x i16>> [#uses=1]
+ %tmp6 = load <4 x i16>, <4 x i16>* %arrayidx5 ; <<4 x i16>> [#uses=1]
%add = add <4 x i16> %tmp6, < i16 1, i16 1, i16 1, i16 1 > ; <<4 x i16>> [#uses=1]
%conv = bitcast <4 x i16> %add to <2 x i32> ; <<2 x i32>> [#uses=1]
store <2 x i32> %conv, <2 x i32>* %arrayidx
br label %forinc
forinc: ; preds = %forbody
- %tmp7 = load i32* %i ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* %i ; <i32> [#uses=1]
%inc = add i32 %tmp7, 1 ; <i32> [#uses=1]
store i32 %inc, i32* %i
br label %forcond
diff --git a/llvm/test/CodeGen/X86/widen_cast-2.ll b/llvm/test/CodeGen/X86/widen_cast-2.ll
index 95202501e02..5a9acbd52f2 100644
--- a/llvm/test/CodeGen/X86/widen_cast-2.ll
+++ b/llvm/test/CodeGen/X86/widen_cast-2.ll
@@ -18,25 +18,25 @@ entry:
br label %forcond
forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
+ %tmp = load i32, i32* %i ; <i32> [#uses=1]
%cmp = icmp slt i32 %tmp, 4 ; <i1> [#uses=1]
br i1 %cmp, label %forbody, label %afterfor
forbody: ; preds = %forcond
- %tmp1 = load i32* %i ; <i32> [#uses=1]
- %tmp2 = load <7 x i32>** %dst.addr ; <<2 x i32>*> [#uses=1]
+ %tmp1 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp2 = load <7 x i32>*, <7 x i32>** %dst.addr ; <<2 x i32>*> [#uses=1]
%arrayidx = getelementptr <7 x i32>, <7 x i32>* %tmp2, i32 %tmp1 ; <<7 x i32>*> [#uses=1]
- %tmp3 = load i32* %i ; <i32> [#uses=1]
- %tmp4 = load <14 x i16>** %src.addr ; <<4 x i16>*> [#uses=1]
+ %tmp3 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp4 = load <14 x i16>*, <14 x i16>** %src.addr ; <<4 x i16>*> [#uses=1]
%arrayidx5 = getelementptr <14 x i16>, <14 x i16>* %tmp4, i32 %tmp3 ; <<4 x i16>*> [#uses=1]
- %tmp6 = load <14 x i16>* %arrayidx5 ; <<4 x i16>> [#uses=1]
+ %tmp6 = load <14 x i16>, <14 x i16>* %arrayidx5 ; <<4 x i16>> [#uses=1]
%add = add <14 x i16> %tmp6, < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1 > ; <<4 x i16>> [#uses=1]
%conv = bitcast <14 x i16> %add to <7 x i32> ; <<7 x i32>> [#uses=1]
store <7 x i32> %conv, <7 x i32>* %arrayidx
br label %forinc
forinc: ; preds = %forbody
- %tmp7 = load i32* %i ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* %i ; <i32> [#uses=1]
%inc = add i32 %tmp7, 1 ; <i32> [#uses=1]
store i32 %inc, i32* %i
br label %forcond
diff --git a/llvm/test/CodeGen/X86/widen_cast-4.ll b/llvm/test/CodeGen/X86/widen_cast-4.ll
index a8994413634..060dfb1011b 100644
--- a/llvm/test/CodeGen/X86/widen_cast-4.ll
+++ b/llvm/test/CodeGen/X86/widen_cast-4.ll
@@ -18,29 +18,29 @@ entry:
br label %forcond
forcond: ; preds = %forinc, %entry
- %tmp = load i32* %i ; <i32> [#uses=1]
- %tmp1 = load i32* %n.addr ; <i32> [#uses=1]
+ %tmp = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %n.addr ; <i32> [#uses=1]
%cmp = icmp slt i32 %tmp, %tmp1 ; <i1> [#uses=1]
br i1 %cmp, label %forbody, label %afterfor
forbody: ; preds = %forcond
- %tmp2 = load i32* %i ; <i32> [#uses=1]
- %tmp3 = load i64** %dst_i.addr ; <i64*> [#uses=1]
+ %tmp2 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp3 = load i64*, i64** %dst_i.addr ; <i64*> [#uses=1]
%arrayidx = getelementptr i64, i64* %tmp3, i32 %tmp2 ; <i64*> [#uses=1]
%conv = bitcast i64* %arrayidx to <8 x i8>* ; <<8 x i8>*> [#uses=1]
store <8 x i8>* %conv, <8 x i8>** %dst
- %tmp4 = load i32* %i ; <i32> [#uses=1]
- %tmp5 = load i64** %src_i.addr ; <i64*> [#uses=1]
+ %tmp4 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp5 = load i64*, i64** %src_i.addr ; <i64*> [#uses=1]
%arrayidx6 = getelementptr i64, i64* %tmp5, i32 %tmp4 ; <i64*> [#uses=1]
%conv7 = bitcast i64* %arrayidx6 to <8 x i8>* ; <<8 x i8>*> [#uses=1]
store <8 x i8>* %conv7, <8 x i8>** %src
- %tmp8 = load i32* %i ; <i32> [#uses=1]
- %tmp9 = load <8 x i8>** %dst ; <<8 x i8>*> [#uses=1]
+ %tmp8 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp9 = load <8 x i8>*, <8 x i8>** %dst ; <<8 x i8>*> [#uses=1]
%arrayidx10 = getelementptr <8 x i8>, <8 x i8>* %tmp9, i32 %tmp8 ; <<8 x i8>*> [#uses=1]
- %tmp11 = load i32* %i ; <i32> [#uses=1]
- %tmp12 = load <8 x i8>** %src ; <<8 x i8>*> [#uses=1]
+ %tmp11 = load i32, i32* %i ; <i32> [#uses=1]
+ %tmp12 = load <8 x i8>*, <8 x i8>** %src ; <<8 x i8>*> [#uses=1]
%arrayidx13 = getelementptr <8 x i8>, <8 x i8>* %tmp12, i32 %tmp11 ; <<8 x i8>*> [#uses=1]
- %tmp14 = load <8 x i8>* %arrayidx13 ; <<8 x i8>> [#uses=1]
+ %tmp14 = load <8 x i8>, <8 x i8>* %arrayidx13 ; <<8 x i8>> [#uses=1]
%add = add <8 x i8> %tmp14, < i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1 > ; <<8 x i8>> [#uses=1]
%shr = ashr <8 x i8> %add, < i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2 > ; <<8 x i8>> [#uses=1]
store <8 x i8> %shr, <8 x i8>* %arrayidx10
@@ -67,7 +67,7 @@ forbody: ; preds = %forcond
; CHECK-WIDE-NEXT: movd
forinc: ; preds = %forbody
- %tmp15 = load i32* %i ; <i32> [#uses=1]
+ %tmp15 = load i32, i32* %i ; <i32> [#uses=1]
%inc = add i32 %tmp15, 1 ; <i32> [#uses=1]
store i32 %inc, i32* %i
br label %forcond
diff --git a/llvm/test/CodeGen/X86/widen_conversions.ll b/llvm/test/CodeGen/X86/widen_conversions.ll
index fa85400e6b9..dd75097a251 100644
--- a/llvm/test/CodeGen/X86/widen_conversions.ll
+++ b/llvm/test/CodeGen/X86/widen_conversions.ll
@@ -12,7 +12,7 @@ define <4 x i32> @zext_v4i8_to_v4i32(<4 x i8>* %ptr) {
; CHECK-NEXT: punpcklwd %[[Z]], %[[X]]
; CHECK-NEXT: ret
- %val = load <4 x i8>* %ptr
+ %val = load <4 x i8>, <4 x i8>* %ptr
%ext = zext <4 x i8> %val to <4 x i32>
ret <4 x i32> %ext
}
diff --git a/llvm/test/CodeGen/X86/widen_load-0.ll b/llvm/test/CodeGen/X86/widen_load-0.ll
index 768a1be4edb..edaaa77c145 100644
--- a/llvm/test/CodeGen/X86/widen_load-0.ll
+++ b/llvm/test/CodeGen/X86/widen_load-0.ll
@@ -10,8 +10,8 @@
define void @short2_int_swap(<2 x i16>* nocapture %b, i32* nocapture %c) nounwind {
entry:
- %0 = load <2 x i16>* %b, align 2 ; <<2 x i16>> [#uses=1]
- %1 = load i32* %c, align 4 ; <i32> [#uses=1]
+ %0 = load <2 x i16>, <2 x i16>* %b, align 2 ; <<2 x i16>> [#uses=1]
+ %1 = load i32, i32* %c, align 4 ; <i32> [#uses=1]
%tmp1 = bitcast i32 %1 to <2 x i16> ; <<2 x i16>> [#uses=1]
store <2 x i16> %tmp1, <2 x i16>* %b, align 2
%tmp5 = bitcast <2 x i16> %0 to <1 x i32> ; <<1 x i32>> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/widen_load-1.ll b/llvm/test/CodeGen/X86/widen_load-1.ll
index 19107984afc..849f9b9be26 100644
--- a/llvm/test/CodeGen/X86/widen_load-1.ll
+++ b/llvm/test/CodeGen/X86/widen_load-1.ll
@@ -26,12 +26,12 @@ define void @reset(<2 x float>* noalias %garbage1) {
store i32 0, i32* %changed, align 4
%r2 = getelementptr float, float* bitcast ([20 x i64]* @compl to float*), i64 32 ; <float*> [#uses=1]
%r3 = bitcast float* %r2 to <2 x float>* ; <<2 x float>*> [#uses=1]
- %r4 = load <2 x float>* %r3, align 4 ; <<2 x float>> [#uses=1]
+ %r4 = load <2 x float>, <2 x float>* %r3, align 4 ; <<2 x float>> [#uses=1]
call void @killcommon(i32* %changed)
br label %"file complex.c, line 34, bb4"
"file complex.c, line 34, bb4": ; preds = %"file complex.c, line 27, bb13"
- %r5 = load i32* %changed, align 4 ; <i32> [#uses=1]
+ %r5 = load i32, i32* %changed, align 4 ; <i32> [#uses=1]
%r6 = icmp eq i32 %r5, 0 ; <i1> [#uses=1]
%r7 = zext i1 %r6 to i32 ; <i32> [#uses=1]
%r8 = icmp ne i32 %r7, 0 ; <i1> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/widen_load-2.ll b/llvm/test/CodeGen/X86/widen_load-2.ll
index c6bd96421d7..f5ddc0eacc6 100644
--- a/llvm/test/CodeGen/X86/widen_load-2.ll
+++ b/llvm/test/CodeGen/X86/widen_load-2.ll
@@ -10,8 +10,8 @@ define void @add3i32(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
; CHECK-NEXT: paddd (%{{.*}}), %[[R0]]
; CHECK-NEXT: pextrd $2, %[[R0]], 8(%{{.*}})
; CHECK-NEXT: movq %[[R0]], (%{{.*}})
- %a = load %i32vec3* %ap, align 16
- %b = load %i32vec3* %bp, align 16
+ %a = load %i32vec3, %i32vec3* %ap, align 16
+ %b = load %i32vec3, %i32vec3* %bp, align 16
%x = add %i32vec3 %a, %b
store %i32vec3 %x, %i32vec3* %ret, align 16
ret void
@@ -26,8 +26,8 @@ define void @add3i32_2(%i32vec3* sret %ret, %i32vec3* %ap, %i32vec3* %bp) {
; CHECK-NEXT: paddd %[[R0]], %[[R1]]
; CHECK-NEXT: pextrd $2, %[[R1]], 8(%{{.*}})
; CHECK-NEXT: movq %[[R1]], (%{{.*}})
- %a = load %i32vec3* %ap, align 8
- %b = load %i32vec3* %bp, align 8
+ %a = load %i32vec3, %i32vec3* %ap, align 8
+ %b = load %i32vec3, %i32vec3* %bp, align 8
%x = add %i32vec3 %a, %b
store %i32vec3 %x, %i32vec3* %ret, align 8
ret void
@@ -43,8 +43,8 @@ define void @add7i32(%i32vec7* sret %ret, %i32vec7* %ap, %i32vec7* %bp) {
; CHECK-NEXT: pextrd $2, %[[R1]], 24(%{{.*}})
; CHECK-NEXT: movq %[[R1]], 16(%{{.*}})
; CHECK-NEXT: movdqa %[[R0]], (%{{.*}})
- %a = load %i32vec7* %ap, align 16
- %b = load %i32vec7* %bp, align 16
+ %a = load %i32vec7, %i32vec7* %ap, align 16
+ %b = load %i32vec7, %i32vec7* %bp, align 16
%x = add %i32vec7 %a, %b
store %i32vec7 %x, %i32vec7* %ret, align 16
ret void
@@ -62,8 +62,8 @@ define void @add12i32(%i32vec12* sret %ret, %i32vec12* %ap, %i32vec12* %bp) {
; CHECK-NEXT: movdqa %[[R2]], 32(%{{.*}})
; CHECK-NEXT: movdqa %[[R1]], 16(%{{.*}})
; CHECK-NEXT: movdqa %[[R0]], (%{{.*}})
- %a = load %i32vec12* %ap, align 16
- %b = load %i32vec12* %bp, align 16
+ %a = load %i32vec12, %i32vec12* %ap, align 16
+ %b = load %i32vec12, %i32vec12* %bp, align 16
%x = add %i32vec12 %a, %b
store %i32vec12 %x, %i32vec12* %ret, align 16
ret void
@@ -80,8 +80,8 @@ define void @add3i16(%i16vec3* nocapture sret %ret, %i16vec3* %ap, %i16vec3* %bp
; CHECK-NEXT: pshufb {{.*}}, %[[R1]]
; CHECK-NEXT: pmovzxdq %[[R1]], %[[R0]]
; CHECK-NEXT: movd %[[R0]], (%{{.*}})
- %a = load %i16vec3* %ap, align 16
- %b = load %i16vec3* %bp, align 16
+ %a = load %i16vec3, %i16vec3* %ap, align 16
+ %b = load %i16vec3, %i16vec3* %bp, align 16
%x = add %i16vec3 %a, %b
store %i16vec3 %x, %i16vec3* %ret, align 16
ret void
@@ -94,8 +94,8 @@ define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp
; CHECK-NEXT: movq (%{{.*}}), %[[R1:xmm[0-9]+]]
; CHECK-NEXT: paddw %[[R0]], %[[R1]]
; CHECK-NEXT: movq %[[R1]], (%{{.*}})
- %a = load %i16vec4* %ap, align 16
- %b = load %i16vec4* %bp, align 16
+ %a = load %i16vec4, %i16vec4* %ap, align 16
+ %b = load %i16vec4, %i16vec4* %bp, align 16
%x = add %i16vec4 %a, %b
store %i16vec4 %x, %i16vec4* %ret, align 16
ret void
@@ -110,8 +110,8 @@ define void @add12i16(%i16vec12* nocapture sret %ret, %i16vec12* %ap, %i16vec12*
; CHECK-NEXT: paddw 16(%{{.*}}), %[[R1]]
; CHECK-NEXT: movq %[[R1]], 16(%{{.*}})
; CHECK-NEXT: movdqa %[[R0]], (%{{.*}})
- %a = load %i16vec12* %ap, align 16
- %b = load %i16vec12* %bp, align 16
+ %a = load %i16vec12, %i16vec12* %ap, align 16
+ %b = load %i16vec12, %i16vec12* %bp, align 16
%x = add %i16vec12 %a, %b
store %i16vec12 %x, %i16vec12* %ret, align 16
ret void
@@ -129,8 +129,8 @@ define void @add18i16(%i16vec18* nocapture sret %ret, %i16vec18* %ap, %i16vec18*
; CHECK-NEXT: movd %[[R2]], 32(%{{.*}})
; CHECK-NEXT: movdqa %[[R1]], 16(%{{.*}})
; CHECK-NEXT: movdqa %[[R0]], (%{{.*}})
- %a = load %i16vec18* %ap, align 16
- %b = load %i16vec18* %bp, align 16
+ %a = load %i16vec18, %i16vec18* %ap, align 16
+ %b = load %i16vec18, %i16vec18* %bp, align 16
%x = add %i16vec18 %a, %b
store %i16vec18 %x, %i16vec18* %ret, align 16
ret void
@@ -148,8 +148,8 @@ define void @add3i8(%i8vec3* nocapture sret %ret, %i8vec3* %ap, %i8vec3* %bp) no
; CHECK-NEXT: pmovzxwq %[[R1]], %[[R0]]
; CHECK-NEXT: movd %[[R0]], %e[[R2:[abcd]]]x
; CHECK-NEXT: movw %[[R2]]x, (%{{.*}})
- %a = load %i8vec3* %ap, align 16
- %b = load %i8vec3* %bp, align 16
+ %a = load %i8vec3, %i8vec3* %ap, align 16
+ %b = load %i8vec3, %i8vec3* %bp, align 16
%x = add %i8vec3 %a, %b
store %i8vec3 %x, %i8vec3* %ret, align 16
ret void
@@ -167,8 +167,8 @@ define void @add31i8(%i8vec31* nocapture sret %ret, %i8vec31* %ap, %i8vec31* %bp
; CHECK-NEXT: pextrd $2, %[[R1]], 24(%{{.*}})
; CHECK-NEXT: movq %[[R1]], 16(%{{.*}})
; CHECK-NEXT: movdqa %[[R0]], (%{{.*}})
- %a = load %i8vec31* %ap, align 16
- %b = load %i8vec31* %bp, align 16
+ %a = load %i8vec31, %i8vec31* %ap, align 16
+ %b = load %i8vec31, %i8vec31* %bp, align 16
%x = add %i8vec31 %a, %b
store %i8vec31 %x, %i8vec31* %ret, align 16
ret void
@@ -216,9 +216,9 @@ entry:
store <3 x i8> <i8 -98, i8 -98, i8 -98>, <3 x i8>* %storetmp
%storetmp1 = bitcast %i8vec3pack* %rot to <3 x i8>*
store <3 x i8> <i8 1, i8 1, i8 1>, <3 x i8>* %storetmp1
- %tmp = load %i8vec3pack* %X
+ %tmp = load %i8vec3pack, %i8vec3pack* %X
%extractVec = extractvalue %i8vec3pack %tmp, 0
- %tmp2 = load %i8vec3pack* %rot
+ %tmp2 = load %i8vec3pack, %i8vec3pack* %rot
%extractVec3 = extractvalue %i8vec3pack %tmp2, 0
%shr = lshr <3 x i8> %extractVec, %extractVec3
%storetmp4 = bitcast %i8vec3pack* %result to <3 x i8>*
diff --git a/llvm/test/CodeGen/X86/win32_sret.ll b/llvm/test/CodeGen/X86/win32_sret.ll
index 0b10a671b8b..ca01d3b4331 100644
--- a/llvm/test/CodeGen/X86/win32_sret.ll
+++ b/llvm/test/CodeGen/X86/win32_sret.ll
@@ -106,7 +106,7 @@ define x86_thiscallcc void @"\01?foo@C5@@QAE?AUS5@@XZ"(%struct.S5* noalias sret
entry:
%this.addr = alloca %class.C5*, align 4
store %class.C5* %this, %class.C5** %this.addr, align 4
- %this1 = load %class.C5** %this.addr
+ %this1 = load %class.C5*, %class.C5** %this.addr
%x = getelementptr inbounds %struct.S5, %struct.S5* %agg.result, i32 0, i32 0
store i32 42, i32* %x, align 4
ret void
@@ -211,7 +211,7 @@ define void @test7_f(%struct.test7* %x) nounwind {
define x86_thiscallcc void @test7_g(%struct.test7* %in, %struct.test7* sret %out) {
%s = getelementptr %struct.test7, %struct.test7* %in, i32 0, i32 0
%d = getelementptr %struct.test7, %struct.test7* %out, i32 0, i32 0
- %v = load i32* %s
+ %v = load i32, i32* %s
store i32 %v, i32* %d
call void @clobber_eax()
ret void
diff --git a/llvm/test/CodeGen/X86/win64_eh.ll b/llvm/test/CodeGen/X86/win64_eh.ll
index b67ad58e0d0..d668f43c895 100644
--- a/llvm/test/CodeGen/X86/win64_eh.ll
+++ b/llvm/test/CodeGen/X86/win64_eh.ll
@@ -62,21 +62,21 @@ entry:
store i32 %d_arg, i32* %d
store i32 %e_arg, i32* %e
store i32 %f_arg, i32* %f
- %tmp = load i32* %a
+ %tmp = load i32, i32* %a
%tmp1 = mul i32 %tmp, 2
- %tmp2 = load i32* %b
+ %tmp2 = load i32, i32* %b
%tmp3 = mul i32 %tmp2, 3
%tmp4 = add i32 %tmp1, %tmp3
- %tmp5 = load i32* %c
+ %tmp5 = load i32, i32* %c
%tmp6 = mul i32 %tmp5, 5
%tmp7 = add i32 %tmp4, %tmp6
- %tmp8 = load i32* %d
+ %tmp8 = load i32, i32* %d
%tmp9 = mul i32 %tmp8, 7
%tmp10 = add i32 %tmp7, %tmp9
- %tmp11 = load i32* %e
+ %tmp11 = load i32, i32* %e
%tmp12 = mul i32 %tmp11, 11
%tmp13 = add i32 %tmp10, %tmp12
- %tmp14 = load i32* %f
+ %tmp14 = load i32, i32* %f
%tmp15 = mul i32 %tmp14, 13
%tmp16 = add i32 %tmp13, %tmp15
ret i32 %tmp16
@@ -105,7 +105,7 @@ define i32 @foo4() #0 {
entry:
%step = alloca i32, align 4
store i32 0, i32* %step
- %tmp = load i32* %step
+ %tmp = load i32, i32* %step
%tmp1 = invoke i32 @bar()
to label %finally unwind label %landingpad
@@ -123,7 +123,7 @@ landingpad:
unreachable
endtryfinally:
- %tmp10 = load i32* %step
+ %tmp10 = load i32, i32* %step
ret i32 %tmp10
}
; WIN64-LABEL: foo4:
diff --git a/llvm/test/CodeGen/X86/win_eh_prepare.ll b/llvm/test/CodeGen/X86/win_eh_prepare.ll
index f96fed5095f..b457a41e452 100644
--- a/llvm/test/CodeGen/X86/win_eh_prepare.ll
+++ b/llvm/test/CodeGen/X86/win_eh_prepare.ll
@@ -36,7 +36,7 @@ eh.resume:
}
define internal i32 @filt_g(i8*, i8*) {
- %g = load i32* @g
+ %g = load i32, i32* @g
ret i32 %g
}
diff --git a/llvm/test/CodeGen/X86/x32-function_pointer-1.ll b/llvm/test/CodeGen/X86/x32-function_pointer-1.ll
index 2baf92a9979..952add91d52 100644
--- a/llvm/test/CodeGen/X86/x32-function_pointer-1.ll
+++ b/llvm/test/CodeGen/X86/x32-function_pointer-1.ll
@@ -8,11 +8,11 @@
define void @bar(i8* %h) nounwind uwtable {
entry:
- %0 = load void (i8*)** @foo1, align 4
+ %0 = load void (i8*)*, void (i8*)** @foo1, align 4
; CHECK: movl foo1(%rip), %e{{[^,]*}}
tail call void %0(i8* %h) nounwind
; CHECK: callq *%r{{[^,]*}}
- %1 = load void (i8*)** @foo2, align 4
+ %1 = load void (i8*)*, void (i8*)** @foo2, align 4
; CHECK: movl foo2(%rip), %e{{[^,]*}}
tail call void %1(i8* %h) nounwind
; CHECK: jmpq *%r{{[^,]*}}
diff --git a/llvm/test/CodeGen/X86/x86-64-gv-offset.ll b/llvm/test/CodeGen/X86/x86-64-gv-offset.ll
index 365e4af63fc..e179146f647 100644
--- a/llvm/test/CodeGen/X86/x86-64-gv-offset.ll
+++ b/llvm/test/CodeGen/X86/x86-64-gv-offset.ll
@@ -5,8 +5,8 @@
define i32 @main() nounwind {
entry:
- %tmp2 = load float* getelementptr (%struct.x* @X, i32 0, i32 0), align 16 ; <float> [#uses=1]
- %tmp4 = load double* getelementptr (%struct.x* @X, i32 0, i32 1), align 8 ; <double> [#uses=1]
+ %tmp2 = load float, float* getelementptr (%struct.x* @X, i32 0, i32 0), align 16 ; <float> [#uses=1]
+ %tmp4 = load double, double* getelementptr (%struct.x* @X, i32 0, i32 1), align 8 ; <double> [#uses=1]
tail call void @t( float %tmp2, double %tmp4 ) nounwind
ret i32 0
}
diff --git a/llvm/test/CodeGen/X86/x86-64-jumps.ll b/llvm/test/CodeGen/X86/x86-64-jumps.ll
index 7e5cd706b96..846660eadf6 100644
--- a/llvm/test/CodeGen/X86/x86-64-jumps.ll
+++ b/llvm/test/CodeGen/X86/x86-64-jumps.ll
@@ -22,10 +22,10 @@ define void @test2(i32 %i) nounwind ssp {
entry:
%i.addr = alloca i32 ; <i32*> [#uses=2]
store i32 %i, i32* %i.addr
- %tmp = load i32* %i.addr ; <i32> [#uses=1]
+ %tmp = load i32, i32* %i.addr ; <i32> [#uses=1]
%idxprom = sext i32 %tmp to i64 ; <i64> [#uses=1]
%arrayidx = getelementptr inbounds i32, i32* getelementptr inbounds ([3 x i32]* @test.array, i32 0, i32 0), i64 %idxprom ; <i32*> [#uses=1]
- %tmp1 = load i32* %arrayidx ; <i32> [#uses=1]
+ %tmp1 = load i32, i32* %arrayidx ; <i32> [#uses=1]
%idx.ext = sext i32 %tmp1 to i64 ; <i64> [#uses=1]
%add.ptr = getelementptr i8, i8* blockaddress(@test2, %foo), i64 %idx.ext ; <i8*> [#uses=1]
br label %indirectgoto
diff --git a/llvm/test/CodeGen/X86/x86-64-mem.ll b/llvm/test/CodeGen/X86/x86-64-mem.ll
index d15f516cdde..c7a298e2f05 100644
--- a/llvm/test/CodeGen/X86/x86-64-mem.ll
+++ b/llvm/test/CodeGen/X86/x86-64-mem.ll
@@ -17,7 +17,7 @@
@bdst = internal global [500000 x i32] zeroinitializer, align 32 ; <[500000 x i32]*> [#uses=0]
define void @test1() nounwind {
- %tmp = load i32* getelementptr ([0 x i32]* @src, i32 0, i32 0) ; <i32> [#uses=1]
+ %tmp = load i32, i32* getelementptr ([0 x i32]* @src, i32 0, i32 0) ; <i32> [#uses=1]
store i32 %tmp, i32* getelementptr ([0 x i32]* @dst, i32 0, i32 0)
ret void
}
diff --git a/llvm/test/CodeGen/X86/x86-64-pic-4.ll b/llvm/test/CodeGen/X86/x86-64-pic-4.ll
index 33b08c4b4b0..42d08cc2057 100644
--- a/llvm/test/CodeGen/X86/x86-64-pic-4.ll
+++ b/llvm/test/CodeGen/X86/x86-64-pic-4.ll
@@ -5,6 +5,6 @@
define i32 @get_a() {
entry:
- %tmp1 = load i32* @a, align 4
+ %tmp1 = load i32, i32* @a, align 4
ret i32 %tmp1
}
diff --git a/llvm/test/CodeGen/X86/x86-64-pic-5.ll b/llvm/test/CodeGen/X86/x86-64-pic-5.ll
index 234bc0d2f4f..d217a5c47df 100644
--- a/llvm/test/CodeGen/X86/x86-64-pic-5.ll
+++ b/llvm/test/CodeGen/X86/x86-64-pic-5.ll
@@ -6,6 +6,6 @@
define i32 @get_a() {
entry:
- %tmp1 = load i32* @a, align 4
+ %tmp1 = load i32, i32* @a, align 4
ret i32 %tmp1
}
diff --git a/llvm/test/CodeGen/X86/x86-64-pic-6.ll b/llvm/test/CodeGen/X86/x86-64-pic-6.ll
index ae5b5835928..8671023daa8 100644
--- a/llvm/test/CodeGen/X86/x86-64-pic-6.ll
+++ b/llvm/test/CodeGen/X86/x86-64-pic-6.ll
@@ -6,6 +6,6 @@
define i32 @get_a() nounwind {
entry:
- %tmp1 = load i32* @a, align 4
+ %tmp1 = load i32, i32* @a, align 4
ret i32 %tmp1
}
diff --git a/llvm/test/CodeGen/X86/x86-64-ptr-arg-simple.ll b/llvm/test/CodeGen/X86/x86-64-ptr-arg-simple.ll
index 6d466639890..11dfc802d51 100644
--- a/llvm/test/CodeGen/X86/x86-64-ptr-arg-simple.ll
+++ b/llvm/test/CodeGen/X86/x86-64-ptr-arg-simple.ll
@@ -22,7 +22,7 @@ entry:
define void @bar(i32* nocapture %pOut, i32* nocapture %pIn) nounwind {
entry:
- %0 = load i32* %pIn, align 4
+ %0 = load i32, i32* %pIn, align 4
store i32 %0, i32* %pOut, align 4
ret void
}
diff --git a/llvm/test/CodeGen/X86/x86-64-sret-return.ll b/llvm/test/CodeGen/X86/x86-64-sret-return.ll
index bc98fd645a2..a0c43488db1 100644
--- a/llvm/test/CodeGen/X86/x86-64-sret-return.ll
+++ b/llvm/test/CodeGen/X86/x86-64-sret-return.ll
@@ -17,42 +17,42 @@ entry:
%memtmp = alloca %struct.foo, align 8 ; <%struct.foo*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store %struct.foo* %d, %struct.foo** %d_addr
- %tmp = load %struct.foo** %d_addr, align 8 ; <%struct.foo*> [#uses=1]
+ %tmp = load %struct.foo*, %struct.foo** %d_addr, align 8 ; <%struct.foo*> [#uses=1]
%tmp1 = getelementptr %struct.foo, %struct.foo* %agg.result, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
%tmp2 = getelementptr %struct.foo, %struct.foo* %tmp, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
%tmp3 = getelementptr [4 x i64], [4 x i64]* %tmp1, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp4 = getelementptr [4 x i64], [4 x i64]* %tmp2, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp5 = load i64* %tmp4, align 8 ; <i64> [#uses=1]
+ %tmp5 = load i64, i64* %tmp4, align 8 ; <i64> [#uses=1]
store i64 %tmp5, i64* %tmp3, align 8
%tmp6 = getelementptr [4 x i64], [4 x i64]* %tmp1, i32 0, i32 1 ; <i64*> [#uses=1]
%tmp7 = getelementptr [4 x i64], [4 x i64]* %tmp2, i32 0, i32 1 ; <i64*> [#uses=1]
- %tmp8 = load i64* %tmp7, align 8 ; <i64> [#uses=1]
+ %tmp8 = load i64, i64* %tmp7, align 8 ; <i64> [#uses=1]
store i64 %tmp8, i64* %tmp6, align 8
%tmp9 = getelementptr [4 x i64], [4 x i64]* %tmp1, i32 0, i32 2 ; <i64*> [#uses=1]
%tmp10 = getelementptr [4 x i64], [4 x i64]* %tmp2, i32 0, i32 2 ; <i64*> [#uses=1]
- %tmp11 = load i64* %tmp10, align 8 ; <i64> [#uses=1]
+ %tmp11 = load i64, i64* %tmp10, align 8 ; <i64> [#uses=1]
store i64 %tmp11, i64* %tmp9, align 8
%tmp12 = getelementptr [4 x i64], [4 x i64]* %tmp1, i32 0, i32 3 ; <i64*> [#uses=1]
%tmp13 = getelementptr [4 x i64], [4 x i64]* %tmp2, i32 0, i32 3 ; <i64*> [#uses=1]
- %tmp14 = load i64* %tmp13, align 8 ; <i64> [#uses=1]
+ %tmp14 = load i64, i64* %tmp13, align 8 ; <i64> [#uses=1]
store i64 %tmp14, i64* %tmp12, align 8
%tmp15 = getelementptr %struct.foo, %struct.foo* %memtmp, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
%tmp16 = getelementptr %struct.foo, %struct.foo* %agg.result, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
%tmp17 = getelementptr [4 x i64], [4 x i64]* %tmp15, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp18 = getelementptr [4 x i64], [4 x i64]* %tmp16, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp19 = load i64* %tmp18, align 8 ; <i64> [#uses=1]
+ %tmp19 = load i64, i64* %tmp18, align 8 ; <i64> [#uses=1]
store i64 %tmp19, i64* %tmp17, align 8
%tmp20 = getelementptr [4 x i64], [4 x i64]* %tmp15, i32 0, i32 1 ; <i64*> [#uses=1]
%tmp21 = getelementptr [4 x i64], [4 x i64]* %tmp16, i32 0, i32 1 ; <i64*> [#uses=1]
- %tmp22 = load i64* %tmp21, align 8 ; <i64> [#uses=1]
+ %tmp22 = load i64, i64* %tmp21, align 8 ; <i64> [#uses=1]
store i64 %tmp22, i64* %tmp20, align 8
%tmp23 = getelementptr [4 x i64], [4 x i64]* %tmp15, i32 0, i32 2 ; <i64*> [#uses=1]
%tmp24 = getelementptr [4 x i64], [4 x i64]* %tmp16, i32 0, i32 2 ; <i64*> [#uses=1]
- %tmp25 = load i64* %tmp24, align 8 ; <i64> [#uses=1]
+ %tmp25 = load i64, i64* %tmp24, align 8 ; <i64> [#uses=1]
store i64 %tmp25, i64* %tmp23, align 8
%tmp26 = getelementptr [4 x i64], [4 x i64]* %tmp15, i32 0, i32 3 ; <i64*> [#uses=1]
%tmp27 = getelementptr [4 x i64], [4 x i64]* %tmp16, i32 0, i32 3 ; <i64*> [#uses=1]
- %tmp28 = load i64* %tmp27, align 8 ; <i64> [#uses=1]
+ %tmp28 = load i64, i64* %tmp27, align 8 ; <i64> [#uses=1]
store i64 %tmp28, i64* %tmp26, align 8
br label %return
diff --git a/llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll b/llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll
index 978abd7e030..5da3a470503 100644
--- a/llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll
+++ b/llvm/test/CodeGen/X86/x86-64-static-relo-movl.ll
@@ -12,7 +12,7 @@ define void @setup() {
%t = bitcast %struct.MatchInfo* %pending to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %t, i8* bitcast (%struct.MatchInfo* @NO_MATCH to i8*), i64 512, i32 8, i1 false)
%u = getelementptr inbounds %struct.MatchInfo, %struct.MatchInfo* %pending, i32 0, i32 2
- %v = load i64* %u, align 8
+ %v = load i64, i64* %u, align 8
br label %done
done:
ret void
diff --git a/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll b/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
index fcf7eaec054..8892a69abf0 100644
--- a/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
+++ b/llvm/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
@@ -10,8 +10,8 @@ define void @test1(i1 %cmp) align 2 {
%1 = alloca <2 x double>, align 16
%2 = alloca <2 x double>, align 8
- %val = load <2 x double>* %1, align 16
- %val2 = load <2 x double>* %2, align 8
+ %val = load <2 x double>, <2 x double>* %1, align 16
+ %val2 = load <2 x double>, <2 x double>* %2, align 8
%val3 = select i1 %cmp, <2 x double> %val, <2 x double> %val2
call void @sink(<2 x double> %val3)
ret void
@@ -24,8 +24,8 @@ define void @test2(i1 %cmp) align 2 {
%1 = alloca <2 x double>, align 16
%2 = alloca <2 x double>, align 8
- %val = load <2 x double>* %1, align 16
- %val2 = load <2 x double>* %2, align 16
+ %val = load <2 x double>, <2 x double>* %1, align 16
+ %val2 = load <2 x double>, <2 x double>* %2, align 16
%val3 = select i1 %cmp, <2 x double> %val, <2 x double> %val2
call void @sink(<2 x double> %val3)
ret void
diff --git a/llvm/test/CodeGen/X86/xop-intrinsics-x86_64.ll b/llvm/test/CodeGen/X86/xop-intrinsics-x86_64.ll
index e154e4a159b..2516116f769 100644
--- a/llvm/test/CodeGen/X86/xop-intrinsics-x86_64.ll
+++ b/llvm/test/CodeGen/X86/xop-intrinsics-x86_64.ll
@@ -8,14 +8,14 @@ define <2 x double> @test_int_x86_xop_vpermil2pd(<2 x double> %a0, <2 x double>
define <2 x double> @test_int_x86_xop_vpermil2pd_mr(<2 x double> %a0, <2 x double>* %a1, <2 x double> %a2) {
; CHECK-NOT: vmovaps
; CHECK: vpermil2pd
- %vec = load <2 x double>* %a1
+ %vec = load <2 x double>, <2 x double>* %a1
%res = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a0, <2 x double> %vec, <2 x double> %a2, i8 1) ; [#uses=1]
ret <2 x double> %res
}
define <2 x double> @test_int_x86_xop_vpermil2pd_rm(<2 x double> %a0, <2 x double> %a1, <2 x double>* %a2) {
; CHECK-NOT: vmovaps
; CHECK: vpermil2pd
- %vec = load <2 x double>* %a2
+ %vec = load <2 x double>, <2 x double>* %a2
%res = call <2 x double> @llvm.x86.xop.vpermil2pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %vec, i8 1) ; [#uses=1]
ret <2 x double> %res
}
@@ -31,7 +31,7 @@ define <4 x double> @test_int_x86_xop_vpermil2pd_256_mr(<4 x double> %a0, <4 x d
; CHECK-NOT: vmovaps
; CHECK: vpermil2pd
; CHECK: ymm
- %vec = load <4 x double>* %a1
+ %vec = load <4 x double>, <4 x double>* %a1
%res = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a0, <4 x double> %vec, <4 x double> %a2, i8 2) ;
ret <4 x double> %res
}
@@ -39,7 +39,7 @@ define <4 x double> @test_int_x86_xop_vpermil2pd_256_rm(<4 x double> %a0, <4 x d
; CHECK-NOT: vmovaps
; CHECK: vpermil2pd
; CHECK: ymm
- %vec = load <4 x double>* %a2
+ %vec = load <4 x double>, <4 x double>* %a2
%res = call <4 x double> @llvm.x86.xop.vpermil2pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %vec, i8 2) ;
ret <4 x double> %res
}
@@ -77,7 +77,7 @@ define <4 x i64> @test_int_x86_xop_vpcmov_256_mr(<4 x i64> %a0, <4 x i64>* %a1,
; CHECK-NOT: vmovaps
; CHECK: vpcmov
; CHECK: ymm
- %vec = load <4 x i64>* %a1
+ %vec = load <4 x i64>, <4 x i64>* %a1
%res = call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %a0, <4 x i64> %vec, <4 x i64> %a2) ;
ret <4 x i64> %res
}
@@ -85,7 +85,7 @@ define <4 x i64> @test_int_x86_xop_vpcmov_256_rm(<4 x i64> %a0, <4 x i64> %a1, <
; CHECK-NOT: vmovaps
; CHECK: vpcmov
; CHECK: ymm
- %vec = load <4 x i64>* %a2
+ %vec = load <4 x i64>, <4 x i64>* %a2
%res = call <4 x i64> @llvm.x86.xop.vpcmov.256(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> %vec) ;
ret <4 x i64> %res
}
@@ -99,7 +99,7 @@ define <16 x i8> @test_int_x86_xop_vpcomeqb(<16 x i8> %a0, <16 x i8> %a1) {
define <16 x i8> @test_int_x86_xop_vpcomeqb_mem(<16 x i8> %a0, <16 x i8>* %a1) {
; CHECK-NOT: vmovaps
; CHECK:vpcomeqb
- %vec = load <16 x i8>* %a1
+ %vec = load <16 x i8>, <16 x i8>* %a1
%res = call <16 x i8> @llvm.x86.xop.vpcomeqb(<16 x i8> %a0, <16 x i8> %vec) ;
ret <16 x i8> %res
}
@@ -645,7 +645,7 @@ define <2 x i64> @test_int_x86_xop_vphsubdq(<4 x i32> %a0) {
define <2 x i64> @test_int_x86_xop_vphsubdq_mem(<4 x i32>* %a0) {
; CHECK-NOT: vmovaps
; CHECK: vphsubdq
- %vec = load <4 x i32>* %a0
+ %vec = load <4 x i32>, <4 x i32>* %a0
%res = call <2 x i64> @llvm.x86.xop.vphsubdq(<4 x i32> %vec) ;
ret <2 x i64> %res
}
@@ -659,7 +659,7 @@ define <4 x i32> @test_int_x86_xop_vphsubwd(<8 x i16> %a0) {
define <4 x i32> @test_int_x86_xop_vphsubwd_mem(<8 x i16>* %a0) {
; CHECK-NOT: vmovaps
; CHECK: vphsubwd
- %vec = load <8 x i16>* %a0
+ %vec = load <8 x i16>, <8 x i16>* %a0
%res = call <4 x i32> @llvm.x86.xop.vphsubwd(<8 x i16> %vec) ;
ret <4 x i32> %res
}
@@ -750,7 +750,7 @@ define <4 x i32> @test_int_x86_xop_vpmadcswd(<8 x i16> %a0, <8 x i16> %a1, <4 x
define <4 x i32> @test_int_x86_xop_vpmadcswd_mem(<8 x i16> %a0, <8 x i16>* %a1, <4 x i32> %a2) {
; CHECK-NOT: vmovaps
; CHECK: vpmadcswd
- %vec = load <8 x i16>* %a1
+ %vec = load <8 x i16>, <8 x i16>* %a1
%res = call <4 x i32> @llvm.x86.xop.vpmadcswd(<8 x i16> %a0, <8 x i16> %vec, <4 x i32> %a2) ;
ret <4 x i32> %res
}
@@ -764,14 +764,14 @@ define <16 x i8> @test_int_x86_xop_vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8
define <16 x i8> @test_int_x86_xop_vpperm_rm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8>* %a2) {
; CHECK-NOT: vmovaps
; CHECK: vpperm
- %vec = load <16 x i8>* %a2
+ %vec = load <16 x i8>, <16 x i8>* %a2
%res = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %vec) ;
ret <16 x i8> %res
}
define <16 x i8> @test_int_x86_xop_vpperm_mr(<16 x i8> %a0, <16 x i8>* %a1, <16 x i8> %a2) {
; CHECK-NOT: vmovaps
; CHECK: vpperm
- %vec = load <16 x i8>* %a1
+ %vec = load <16 x i8>, <16 x i8>* %a1
%res = call <16 x i8> @llvm.x86.xop.vpperm(<16 x i8> %a0, <16 x i8> %vec, <16 x i8> %a2) ;
ret <16 x i8> %res
}
@@ -862,14 +862,14 @@ define <8 x i16> @test_int_x86_xop_vpshlw(<8 x i16> %a0, <8 x i16> %a1) {
define <8 x i16> @test_int_x86_xop_vpshlw_rm(<8 x i16> %a0, <8 x i16>* %a1) {
; CHECK-NOT: vmovaps
; CHECK: vpshlw
- %vec = load <8 x i16>* %a1
+ %vec = load <8 x i16>, <8 x i16>* %a1
%res = call <8 x i16> @llvm.x86.xop.vpshlw(<8 x i16> %a0, <8 x i16> %vec) ;
ret <8 x i16> %res
}
define <8 x i16> @test_int_x86_xop_vpshlw_mr(<8 x i16>* %a0, <8 x i16> %a1) {
; CHECK-NOT: vmovaps
; CHECK: vpshlw
- %vec = load <8 x i16>* %a0
+ %vec = load <8 x i16>, <8 x i16>* %a0
%res = call <8 x i16> @llvm.x86.xop.vpshlw(<8 x i16> %vec, <8 x i16> %a1) ;
ret <8 x i16> %res
}
@@ -884,7 +884,7 @@ define <4 x float> @test_int_x86_xop_vfrcz_ss(<4 x float> %a0) {
define <4 x float> @test_int_x86_xop_vfrcz_ss_mem(float* %a0) {
; CHECK-NOT: mov
; CHECK: vfrczss
- %elem = load float* %a0
+ %elem = load float, float* %a0
%vec = insertelement <4 x float> undef, float %elem, i32 0
%res = call <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float> %vec) ;
ret <4 x float> %res
@@ -900,7 +900,7 @@ define <2 x double> @test_int_x86_xop_vfrcz_sd(<2 x double> %a0) {
define <2 x double> @test_int_x86_xop_vfrcz_sd_mem(double* %a0) {
; CHECK-NOT: mov
; CHECK: vfrczsd
- %elem = load double* %a0
+ %elem = load double, double* %a0
%vec = insertelement <2 x double> undef, double %elem, i32 0
%res = call <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double> %vec) ;
ret <2 x double> %res
@@ -915,7 +915,7 @@ define <2 x double> @test_int_x86_xop_vfrcz_pd(<2 x double> %a0) {
define <2 x double> @test_int_x86_xop_vfrcz_pd_mem(<2 x double>* %a0) {
; CHECK-NOT: vmovaps
; CHECK: vfrczpd
- %vec = load <2 x double>* %a0
+ %vec = load <2 x double>, <2 x double>* %a0
%res = call <2 x double> @llvm.x86.xop.vfrcz.pd(<2 x double> %vec) ;
ret <2 x double> %res
}
@@ -931,7 +931,7 @@ define <4 x double> @test_int_x86_xop_vfrcz_pd_256_mem(<4 x double>* %a0) {
; CHECK-NOT: vmovaps
; CHECK: vfrczpd
; CHECK: ymm
- %vec = load <4 x double>* %a0
+ %vec = load <4 x double>, <4 x double>* %a0
%res = call <4 x double> @llvm.x86.xop.vfrcz.pd.256(<4 x double> %vec) ;
ret <4 x double> %res
}
@@ -945,7 +945,7 @@ define <4 x float> @test_int_x86_xop_vfrcz_ps(<4 x float> %a0) {
define <4 x float> @test_int_x86_xop_vfrcz_ps_mem(<4 x float>* %a0) {
; CHECK-NOT: vmovaps
; CHECK: vfrczps
- %vec = load <4 x float>* %a0
+ %vec = load <4 x float>, <4 x float>* %a0
%res = call <4 x float> @llvm.x86.xop.vfrcz.ps(<4 x float> %vec) ;
ret <4 x float> %res
}
@@ -961,7 +961,7 @@ define <8 x float> @test_int_x86_xop_vfrcz_ps_256_mem(<8 x float>* %a0) {
; CHECK-NOT: vmovaps
; CHECK: vfrczps
; CHECK: ymm
- %vec = load <8 x float>* %a0
+ %vec = load <8 x float>, <8 x float>* %a0
%res = call <8 x float> @llvm.x86.xop.vfrcz.ps.256(<8 x float> %vec) ;
ret <8 x float> %res
}
diff --git a/llvm/test/CodeGen/X86/zext-extract_subreg.ll b/llvm/test/CodeGen/X86/zext-extract_subreg.ll
index 43e79c77acc..9e34abb69b3 100644
--- a/llvm/test/CodeGen/X86/zext-extract_subreg.ll
+++ b/llvm/test/CodeGen/X86/zext-extract_subreg.ll
@@ -6,7 +6,7 @@ entry:
br i1 undef, label %return, label %if.end.i
if.end.i: ; preds = %entry
- %tmp7.i = load i32* undef, align 4
+ %tmp7.i = load i32, i32* undef, align 4
br i1 undef, label %return, label %if.end
if.end: ; preds = %if.end.i
diff --git a/llvm/test/CodeGen/X86/zext-sext.ll b/llvm/test/CodeGen/X86/zext-sext.ll
index 7b37d40f747..2758bff8024 100644
--- a/llvm/test/CodeGen/X86/zext-sext.ll
+++ b/llvm/test/CodeGen/X86/zext-sext.ll
@@ -9,14 +9,14 @@
define void @func([40 x i16]* %a, i32* %b, i16** %c, i64* %d) nounwind {
entry:
%tmp103 = getelementptr inbounds [40 x i16], [40 x i16]* %a, i64 0, i64 4
- %tmp104 = load i16* %tmp103, align 2
+ %tmp104 = load i16, i16* %tmp103, align 2
%tmp105 = sext i16 %tmp104 to i32
- %tmp106 = load i32* %b, align 4
+ %tmp106 = load i32, i32* %b, align 4
%tmp107 = sub nsw i32 4, %tmp106
- %tmp108 = load i16** %c, align 8
+ %tmp108 = load i16*, i16** %c, align 8
%tmp109 = sext i32 %tmp107 to i64
%tmp110 = getelementptr inbounds i16, i16* %tmp108, i64 %tmp109
- %tmp111 = load i16* %tmp110, align 1
+ %tmp111 = load i16, i16* %tmp110, align 1
%tmp112 = sext i16 %tmp111 to i32
%tmp = mul i32 355244649, %tmp112
%tmp1 = mul i32 %tmp, %tmp105
@@ -49,7 +49,7 @@ entry:
%tmp19 = sub i64 %tmp18, 5386586244038704851
%tmp20 = add i64 %tmp19, -1368057358110947217
%tmp21 = mul i64 %tmp20, -422037402840850817
- %tmp115 = load i64* %d, align 8
+ %tmp115 = load i64, i64* %d, align 8
%alphaX = mul i64 468858157810230901, %tmp21
%alphaXbetaY = add i64 %alphaX, %tmp115
%transformed = add i64 %alphaXbetaY, 9040145182981852475
diff --git a/llvm/test/CodeGen/X86/zlib-longest-match.ll b/llvm/test/CodeGen/X86/zlib-longest-match.ll
index 90c93991624..bd0b68e9062 100644
--- a/llvm/test/CodeGen/X86/zlib-longest-match.ll
+++ b/llvm/test/CodeGen/X86/zlib-longest-match.ll
@@ -29,45 +29,45 @@ target triple = "x86_64-apple-macosx10.9.0"
define i32 @longest_match(%struct.internal_state* nocapture %s, i32 %cur_match) nounwind {
entry:
%max_chain_length = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 31
- %0 = load i32* %max_chain_length, align 4
+ %0 = load i32, i32* %max_chain_length, align 4
%window = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 14
- %1 = load i8** %window, align 8
+ %1 = load i8*, i8** %window, align 8
%strstart = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 27
- %2 = load i32* %strstart, align 4
+ %2 = load i32, i32* %strstart, align 4
%idx.ext = zext i32 %2 to i64
%add.ptr = getelementptr inbounds i8, i8* %1, i64 %idx.ext
%prev_length = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 30
- %3 = load i32* %prev_length, align 4
+ %3 = load i32, i32* %prev_length, align 4
%nice_match1 = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 36
- %4 = load i32* %nice_match1, align 4
+ %4 = load i32, i32* %nice_match1, align 4
%w_size = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 11
- %5 = load i32* %w_size, align 4
+ %5 = load i32, i32* %w_size, align 4
%sub = add i32 %5, -262
%cmp = icmp ugt i32 %2, %sub
%sub6 = sub i32 %2, %sub
%sub6. = select i1 %cmp, i32 %sub6, i32 0
%prev7 = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 16
- %6 = load i16** %prev7, align 8
+ %6 = load i16*, i16** %prev7, align 8
%w_mask = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 13
- %7 = load i32* %w_mask, align 4
+ %7 = load i32, i32* %w_mask, align 4
%add.ptr11.sum = add i64 %idx.ext, 258
%add.ptr12 = getelementptr inbounds i8, i8* %1, i64 %add.ptr11.sum
%sub13 = add nsw i32 %3, -1
%idxprom = sext i32 %sub13 to i64
%add.ptr.sum = add i64 %idxprom, %idx.ext
%arrayidx = getelementptr inbounds i8, i8* %1, i64 %add.ptr.sum
- %8 = load i8* %arrayidx, align 1
+ %8 = load i8, i8* %arrayidx, align 1
%idxprom14 = sext i32 %3 to i64
%add.ptr.sum213 = add i64 %idxprom14, %idx.ext
%arrayidx15 = getelementptr inbounds i8, i8* %1, i64 %add.ptr.sum213
- %9 = load i8* %arrayidx15, align 1
+ %9 = load i8, i8* %arrayidx15, align 1
%good_match = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 35
- %10 = load i32* %good_match, align 4
+ %10 = load i32, i32* %good_match, align 4
%cmp17 = icmp ult i32 %3, %10
%shr = lshr i32 %0, 2
%chain_length.0 = select i1 %cmp17, i32 %0, i32 %shr
%lookahead = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 29
- %11 = load i32* %lookahead, align 4
+ %11 = load i32, i32* %lookahead, align 4
%cmp18 = icmp ugt i32 %4, %11
%. = select i1 %cmp18, i32 %11, i32 %4
%match_start = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 28
@@ -89,7 +89,7 @@ do.body: ; preds = %land.rhs131, %entry
%idxprom25 = sext i32 %best_len.0 to i64
%add.ptr24.sum = add i64 %idx.ext23, %idxprom25
%arrayidx26 = getelementptr inbounds i8, i8* %1, i64 %add.ptr24.sum
- %12 = load i8* %arrayidx26, align 1
+ %12 = load i8, i8* %arrayidx26, align 1
%cmp28 = icmp eq i8 %12, %scan_end.0
br i1 %cmp28, label %lor.lhs.false, label %do.cond125
@@ -98,21 +98,21 @@ lor.lhs.false: ; preds = %do.body
%idxprom31 = sext i32 %sub30 to i64
%add.ptr24.sum214 = add i64 %idx.ext23, %idxprom31
%arrayidx32 = getelementptr inbounds i8, i8* %1, i64 %add.ptr24.sum214
- %13 = load i8* %arrayidx32, align 1
+ %13 = load i8, i8* %arrayidx32, align 1
%cmp35 = icmp eq i8 %13, %scan_end1.0
br i1 %cmp35, label %lor.lhs.false37, label %do.cond125
lor.lhs.false37: ; preds = %lor.lhs.false
- %14 = load i8* %add.ptr24, align 1
- %15 = load i8* %add.ptr, align 1
+ %14 = load i8, i8* %add.ptr24, align 1
+ %15 = load i8, i8* %add.ptr, align 1
%cmp40 = icmp eq i8 %14, %15
br i1 %cmp40, label %lor.lhs.false42, label %do.cond125
lor.lhs.false42: ; preds = %lor.lhs.false37
%add.ptr24.sum215 = add i64 %idx.ext23, 1
%incdec.ptr = getelementptr inbounds i8, i8* %1, i64 %add.ptr24.sum215
- %16 = load i8* %incdec.ptr, align 1
- %17 = load i8* %arrayidx44, align 1
+ %16 = load i8, i8* %incdec.ptr, align 1
+ %17 = load i8, i8* %arrayidx44, align 1
%cmp46 = icmp eq i8 %16, %17
br i1 %cmp46, label %if.end49, label %do.cond125
@@ -125,65 +125,65 @@ do.cond: ; preds = %land.lhs.true100, %
%match.0 = phi i8* [ %incdec.ptr51, %if.end49 ], [ %incdec.ptr103, %land.lhs.true100 ]
%scan.1 = phi i8* [ %add.ptr50, %if.end49 ], [ %incdec.ptr101, %land.lhs.true100 ]
%incdec.ptr53 = getelementptr inbounds i8, i8* %scan.1, i64 1
- %18 = load i8* %incdec.ptr53, align 1
+ %18 = load i8, i8* %incdec.ptr53, align 1
%incdec.ptr55 = getelementptr inbounds i8, i8* %match.0, i64 1
- %19 = load i8* %incdec.ptr55, align 1
+ %19 = load i8, i8* %incdec.ptr55, align 1
%cmp57 = icmp eq i8 %18, %19
br i1 %cmp57, label %land.lhs.true, label %do.end
land.lhs.true: ; preds = %do.cond
%incdec.ptr59 = getelementptr inbounds i8, i8* %scan.1, i64 2
- %20 = load i8* %incdec.ptr59, align 1
+ %20 = load i8, i8* %incdec.ptr59, align 1
%incdec.ptr61 = getelementptr inbounds i8, i8* %match.0, i64 2
- %21 = load i8* %incdec.ptr61, align 1
+ %21 = load i8, i8* %incdec.ptr61, align 1
%cmp63 = icmp eq i8 %20, %21
br i1 %cmp63, label %land.lhs.true65, label %do.end
land.lhs.true65: ; preds = %land.lhs.true
%incdec.ptr66 = getelementptr inbounds i8, i8* %scan.1, i64 3
- %22 = load i8* %incdec.ptr66, align 1
+ %22 = load i8, i8* %incdec.ptr66, align 1
%incdec.ptr68 = getelementptr inbounds i8, i8* %match.0, i64 3
- %23 = load i8* %incdec.ptr68, align 1
+ %23 = load i8, i8* %incdec.ptr68, align 1
%cmp70 = icmp eq i8 %22, %23
br i1 %cmp70, label %land.lhs.true72, label %do.end
land.lhs.true72: ; preds = %land.lhs.true65
%incdec.ptr73 = getelementptr inbounds i8, i8* %scan.1, i64 4
- %24 = load i8* %incdec.ptr73, align 1
+ %24 = load i8, i8* %incdec.ptr73, align 1
%incdec.ptr75 = getelementptr inbounds i8, i8* %match.0, i64 4
- %25 = load i8* %incdec.ptr75, align 1
+ %25 = load i8, i8* %incdec.ptr75, align 1
%cmp77 = icmp eq i8 %24, %25
br i1 %cmp77, label %land.lhs.true79, label %do.end
land.lhs.true79: ; preds = %land.lhs.true72
%incdec.ptr80 = getelementptr inbounds i8, i8* %scan.1, i64 5
- %26 = load i8* %incdec.ptr80, align 1
+ %26 = load i8, i8* %incdec.ptr80, align 1
%incdec.ptr82 = getelementptr inbounds i8, i8* %match.0, i64 5
- %27 = load i8* %incdec.ptr82, align 1
+ %27 = load i8, i8* %incdec.ptr82, align 1
%cmp84 = icmp eq i8 %26, %27
br i1 %cmp84, label %land.lhs.true86, label %do.end
land.lhs.true86: ; preds = %land.lhs.true79
%incdec.ptr87 = getelementptr inbounds i8, i8* %scan.1, i64 6
- %28 = load i8* %incdec.ptr87, align 1
+ %28 = load i8, i8* %incdec.ptr87, align 1
%incdec.ptr89 = getelementptr inbounds i8, i8* %match.0, i64 6
- %29 = load i8* %incdec.ptr89, align 1
+ %29 = load i8, i8* %incdec.ptr89, align 1
%cmp91 = icmp eq i8 %28, %29
br i1 %cmp91, label %land.lhs.true93, label %do.end
land.lhs.true93: ; preds = %land.lhs.true86
%incdec.ptr94 = getelementptr inbounds i8, i8* %scan.1, i64 7
- %30 = load i8* %incdec.ptr94, align 1
+ %30 = load i8, i8* %incdec.ptr94, align 1
%incdec.ptr96 = getelementptr inbounds i8, i8* %match.0, i64 7
- %31 = load i8* %incdec.ptr96, align 1
+ %31 = load i8, i8* %incdec.ptr96, align 1
%cmp98 = icmp eq i8 %30, %31
br i1 %cmp98, label %land.lhs.true100, label %do.end
land.lhs.true100: ; preds = %land.lhs.true93
%incdec.ptr101 = getelementptr inbounds i8, i8* %scan.1, i64 8
- %32 = load i8* %incdec.ptr101, align 1
+ %32 = load i8, i8* %incdec.ptr101, align 1
%incdec.ptr103 = getelementptr inbounds i8, i8* %match.0, i64 8
- %33 = load i8* %incdec.ptr103, align 1
+ %33 = load i8, i8* %incdec.ptr103, align 1
%cmp105 = icmp eq i8 %32, %33
%cmp107 = icmp ult i8* %incdec.ptr101, %add.ptr12
%or.cond = and i1 %cmp105, %cmp107
@@ -208,11 +208,11 @@ if.end118: ; preds = %if.then114
%idxprom120 = sext i32 %sub119 to i64
%add.ptr111.sum = add i64 %idxprom120, %idx.ext
%arrayidx121 = getelementptr inbounds i8, i8* %1, i64 %add.ptr111.sum
- %34 = load i8* %arrayidx121, align 1
+ %34 = load i8, i8* %arrayidx121, align 1
%idxprom122 = sext i32 %sub110 to i64
%add.ptr111.sum216 = add i64 %idxprom122, %idx.ext
%arrayidx123 = getelementptr inbounds i8, i8* %1, i64 %add.ptr111.sum216
- %35 = load i8* %arrayidx123, align 1
+ %35 = load i8, i8* %arrayidx123, align 1
br label %do.cond125
do.cond125: ; preds = %if.end118, %do.end, %lor.lhs.false42, %lor.lhs.false37, %lor.lhs.false, %do.body
@@ -222,7 +222,7 @@ do.cond125: ; preds = %if.end118, %do.end,
%and = and i32 %cur_match.addr.0, %7
%idxprom126 = zext i32 %and to i64
%arrayidx127 = getelementptr inbounds i16, i16* %6, i64 %idxprom126
- %36 = load i16* %arrayidx127, align 2
+ %36 = load i16, i16* %arrayidx127, align 2
%conv128 = zext i16 %36 to i32
%cmp129 = icmp ugt i32 %conv128, %sub6.
br i1 %cmp129, label %land.rhs131, label %do.end135