| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-01-30 23:09:28 +0000 | 
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-01-30 23:09:28 +0000 | 
| commit | 547a83b4ebd1cbbe90b092634bf1d909ded48555 (patch) | |
| tree | cd7c41cef8fc587a95986659b0f2cd684767eb4e | |
| parent | 10f59405ae50568308b713d9dd20eb30625470bd (diff) | |
| download | bcm5719-llvm-547a83b4ebd1cbbe90b092634bf1d909ded48555.tar.gz bcm5719-llvm-547a83b4ebd1cbbe90b092634bf1d909ded48555.zip  | |
MIR: Reject non-power-of-2 alignments in MMO parsing
llvm-svn: 352686
22 files changed, 167 insertions, 151 deletions
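The code change is small: after lexing the integer literal that follows `align` in a machine memory operand, the parser now rejects any value that is not a power of two, which also rules out the stale `align 0` operands scrubbed from the tests below. As a minimal sketch (not the parser itself), the predicate behind the new check is `llvm::isPowerOf2_32` from `llvm/Support/MathExtras.h`; the `main` harness here is purely illustrative:

```cpp
// Minimal sketch of which 'align' literals now parse; the MIParser plumbing
// (lexing, error reporting) is omitted. isPowerOf2_32 lives in
// llvm/Support/MathExtras.h and returns false for zero.
#include "llvm/Support/MathExtras.h"
#include <cassert>

int main() {
  assert(!llvm::isPowerOf2_32(0));  // 'align 0' is now a parse error
  assert(!llvm::isPowerOf2_32(3));  // so is any non-power-of-2 literal
  assert(llvm::isPowerOf2_32(4));   // 'align 4' still parses
  assert(llvm::isPowerOf2_32(16));  // 'align 16' still parses
  return 0;
}
```

The new X86 test added in this commit exercises the resulting error path; the remaining test updates replace `align 0` with the alignment each MMO actually carries.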
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index c5fecfb345b..c5db9cc6c2e 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -2324,6 +2324,10 @@ bool MIParser::parseAlignment(unsigned &Alignment) {
   if (getUnsigned(Alignment))
     return true;
   lex();
+
+  if (!isPowerOf2_32(Alignment))
+    return error("expected a power-of-2 literal after 'align'");
+
   return false;
 }
diff --git a/llvm/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir b/llvm/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
index 281218192a1..aa0a2257f3d 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
@@ -474,7 +474,7 @@ fixedStack:
 body:             |
   bb.0:
     %0(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1(s32) = G_LOAD %0(p0) :: (load 4 from %fixed-stack.0, align 0)
+    %1(s32) = G_LOAD %0(p0) :: (load 4 from %fixed-stack.0, align 4)
     %2(p0) = COPY $sp
     %3(s32) = G_CONSTANT i32 8
diff --git a/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir
index 6d27d9619c1..049a06dfb07 100644
--- a/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir
+++ b/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir
@@ -82,11 +82,11 @@ constants:
 #CHECK:    CONSTPOOL_ENTRY 1, %const{{.*}}, 2
 #
 # We don't want to decrease alignment if the block already has been
-# aligned; this can e.g. be an existing CPE that has been carefully 
-# aligned. Here BB.1.LA has already an 8-byte alignment, and we are 
+# aligned; this can e.g. be an existing CPE that has been carefully
+# aligned. Here BB.1.LA has already an 8-byte alignment, and we are
 # checking we don't set it to 4:
 #
-#CHECK:  bb.{{.*}}.LA (align 3):
+#CHECK:  bb.{{.*}}.LA (align 8):
 
 body:             |
   bb.0.entry:
@@ -100,7 +100,7 @@ body:             |
     FMSTAT 14, $noreg, implicit-def $cpsr, implicit killed $fpscr_nzcv
     Bcc %bb.2, 0, killed $cpsr
 
-  bb.1.LA (align 3):
+  bb.1.LA (align 8):
     successors: %bb.2(0x80000000)
 
     dead renamable $r0 = SPACE 1000, undef renamable $r0
diff --git a/llvm/test/CodeGen/MIR/X86/expected-power-of-2-after-align.mir b/llvm/test/CodeGen/MIR/X86/expected-power-of-2-after-align.mir
new file mode 100644
index 00000000000..0842d96cac2
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/X86/expected-power-of-2-after-align.mir
@@ -0,0 +1,12 @@
+# RUN: not llc -march=x86-64 -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
+
+---
+name: align_0
+body: |
+  bb.0:
+
+    %0:_(p0) = IMPLICIT_DEF
+    ; CHECK: [[@LINE+1]]:50: expected a power-of-2 literal after 'align'
+    %1:_(s64) = G_LOAD %0(p0) :: (load 8, align 0)
+...
+
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
index 9f469ea8913..b5dce811994 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
@@ -44,7 +44,7 @@ body:             |
     ; MIPS32-LABEL: name: ptr_arg_on_stack
     ; MIPS32: liveins: $a0, $a1, $a2, $a3
     ; MIPS32: [[ADDiu:%[0-9]+]]:gpr32 = ADDiu %fixed-stack.0, 0
-    ; MIPS32: [[LW:%[0-9]+]]:gpr32 = LW [[ADDiu]], 0 :: (load 4 from %fixed-stack.0, align 0)
+    ; MIPS32: [[LW:%[0-9]+]]:gpr32 = LW [[ADDiu]], 0 :: (load 4 from %fixed-stack.0, align 8)
     ; MIPS32: [[LW1:%[0-9]+]]:gpr32 = LW [[LW]], 0 :: (load 4 from %ir.p)
     ; MIPS32: $v0 = COPY [[LW1]]
     ; MIPS32: RetRA implicit $v0
@@ -53,7 +53,7 @@ body:             |
     %2:gprb(s32) = COPY $a2
     %3:gprb(s32) = COPY $a3
     %5:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
-    %4:gprb(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+    %4:gprb(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 8)
     %6:gprb(s32) = G_LOAD %4(p0) :: (load 4 from %ir.p)
     $v0 = COPY %6(s32)
     RetRA implicit $v0
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/stack_args.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/stack_args.mir
index f93be8db703..114432a8aa9 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/stack_args.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/stack_args.mir
@@ -25,7 +25,7 @@ body:             |
     ; MIPS32: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
     ; MIPS32: [[COPY3:%[0-9]+]]:gpr32 = COPY $a3
     ; MIPS32: [[ADDiu:%[0-9]+]]:gpr32 = ADDiu %fixed-stack.0, 0
-    ; MIPS32: [[LW:%[0-9]+]]:gpr32 = LW [[ADDiu]], 0 :: (load 4 from %fixed-stack.0, align 0)
+    ; MIPS32: [[LW:%[0-9]+]]:gpr32 = LW [[ADDiu]], 0 :: (load 4 from %fixed-stack.0, align 8)
     ; MIPS32: ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
     ; MIPS32: $a0 = COPY [[COPY]]
     ; MIPS32: $a1 = COPY [[COPY1]]
@@ -35,7 +35,7 @@ body:             |
     ; MIPS32: [[LUi:%[0-9]+]]:gpr32 = LUi 0
     ; MIPS32: [[ORi:%[0-9]+]]:gpr32 = ORi [[LUi]], 16
     ; MIPS32: [[ADDu:%[0-9]+]]:gpr32 = ADDu [[COPY4]], [[ORi]]
-    ; MIPS32: SW [[LW]], [[ADDu]], 0 :: (store 4 into stack + 16, align 0)
+    ; MIPS32: SW [[LW]], [[ADDu]], 0 :: (store 4 into stack + 16)
     ; MIPS32: JAL @f, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
     ; MIPS32: [[COPY5:%[0-9]+]]:gpr32 = COPY $v0
     ; MIPS32: ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
@@ -46,7 +46,7 @@ body:             |
     %2:gprb(s32) = COPY $a2
     %3:gprb(s32) = COPY $a3
     %5:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
-    %4:gprb(s32) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+    %4:gprb(s32) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 8)
     ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
     $a0 = COPY %0(s32)
     $a1 = COPY %1(s32)
@@ -55,7 +55,7 @@ body:             |
     %7:gprb(p0) = COPY $sp
     %8:gprb(s32) = G_CONSTANT i32 16
     %9:gprb(p0) = G_GEP %7, %8(s32)
-    G_STORE %4(s32), %9(p0) :: (store 4 into stack + 16, align 0)
+    G_STORE %4(s32), %9(p0) :: (store 4 into stack + 16, align 4)
     JAL @f, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
     %6:gprb(s32) = COPY $v0
     ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
index c90b8e6d86c..72be67734fd 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
@@ -52,8 +52,8 @@ body:             |
     ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]]
     ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
-    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]]
-    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
     ; MIPS32: $v0 = COPY [[ASHR]](s32)
     ; MIPS32: RetRA implicit $v0
     %2:_(s32) = COPY $a0
@@ -141,8 +141,8 @@ body:             |
     ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]]
     ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
-    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]]
-    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
     ; MIPS32: $v0 = COPY [[ASHR]](s32)
     ; MIPS32: RetRA implicit $v0
     %2:_(s32) = COPY $a0
@@ -275,13 +275,13 @@ body:             |
     ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
     ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
     ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+    ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0)
     ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1, align 0)
+    ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1)
     ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-    ; MIPS32: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 0)
+    ; MIPS32: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2)
     ; MIPS32: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-    ; MIPS32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (load 4 from %fixed-stack.3, align 0)
+    ; MIPS32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (load 4 from %fixed-stack.3)
     ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[COPY]]
     ; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -317,13 +317,13 @@ body:             |
     %5:_(s32) = COPY $a3
     %0:_(s128) = G_MERGE_VALUES %2(s32), %3(s32), %4(s32), %5(s32)
     %10:_(p0) = G_FRAME_INDEX %fixed-stack.3
-    %6:_(s32) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.3, align 0)
+    %6:_(s32) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.3, align 4)
     %11:_(p0) = G_FRAME_INDEX %fixed-stack.2
-    %7:_(s32) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.2, align 0)
+    %7:_(s32) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.2, align 4)
     %12:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    %8:_(s32) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.1, align 0)
+    %8:_(s32) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.1, align 4)
     %13:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %9:_(s32) = G_LOAD %13(p0) :: (load 4 from %fixed-stack.0, align 0)
+    %9:_(s32) = G_LOAD %13(p0) :: (load 4 from %fixed-stack.0, align 4)
     %1:_(s128) = G_MERGE_VALUES %6(s32), %7(s32), %8(s32), %9(s32)
     %14:_(s128) = G_ADD %1, %0
     %15:_(s32), %16:_(s32), %17:_(s32), %18:_(s32) = G_UNMERGE_VALUES %14(s128)
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir
index 0dbeb55108a..bef66149344 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir
@@ -44,7 +44,7 @@ body:             |
     ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
     ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
     ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; MIPS32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+    ; MIPS32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0)
     ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.p)
     ; MIPS32: $v0 = COPY [[LOAD1]](s32)
     ; MIPS32: RetRA implicit $v0
@@ -53,7 +53,7 @@ body:             |
     %2:_(s32) = COPY $a2
     %3:_(s32) = COPY $a3
     %5:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %4:_(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+    %4:_(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 4)
     %6:_(s32) = G_LOAD %4(p0) :: (load 4 from %ir.p)
     $v0 = COPY %6(s32)
     RetRA implicit $v0
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/stack_args.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/stack_args.mir
index f9d61297358..2ba722d1ea3 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/stack_args.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/stack_args.mir
@@ -23,7 +23,7 @@ body:             |
     ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
     ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
     ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+    ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
     ; MIPS32: ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
     ; MIPS32: $a0 = COPY [[COPY]](s32)
     ; MIPS32: $a1 = COPY [[COPY1]](s32)
@@ -32,7 +32,7 @@ body:             |
     ; MIPS32: [[COPY4:%[0-9]+]]:_(p0) = COPY $sp
     ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; MIPS32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY4]], [[C]](s32)
-    ; MIPS32: G_STORE [[LOAD]](s32), [[GEP]](p0) :: (store 4 into stack + 16, align 0)
+    ; MIPS32: G_STORE [[LOAD]](s32), [[GEP]](p0) :: (store 4 into stack + 16)
     ; MIPS32: JAL @f, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
     ; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY $v0
     ; MIPS32: ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
@@ -43,7 +43,7 @@ body:             |
     %2:_(s32) = COPY $a2
     %3:_(s32) = COPY $a3
     %5:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %4:_(s32) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+    %4:_(s32) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 8)
     ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
     $a0 = COPY %0(s32)
     $a1 = COPY %1(s32)
@@ -52,7 +52,7 @@ body:             |
     %7:_(p0) = COPY $sp
     %8:_(s32) = G_CONSTANT i32 16
     %9:_(p0) = G_GEP %7, %8(s32)
-    G_STORE %4(s32), %9(p0) :: (store 4 into stack + 16, align 0)
+    G_STORE %4(s32), %9(p0) :: (store 4 into stack + 16, align 4)
     JAL @f, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
     %6:_(s32) = COPY $v0
     ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub.mir
index aabb90705f2..9f84e4d0501 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub.mir
@@ -52,8 +52,8 @@ body:             |
     ; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY2]], [[COPY3]]
     ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
     ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
-    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]]
-    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
     ; MIPS32: $v0 = COPY [[ASHR]](s32)
     ; MIPS32: RetRA implicit $v0
     %2:_(s32) = COPY $a0
@@ -141,8 +141,8 @@ body:             |
     ; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY2]], [[COPY3]]
     ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
-    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]]
-    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+    ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+    ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
     ; MIPS32: $v0 = COPY [[ASHR]](s32)
     ; MIPS32: RetRA implicit $v0
     %2:_(s32) = COPY $a0
@@ -270,13 +270,13 @@ body:             |
     ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
     ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
     ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+    ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
     ; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1, align 0)
+    ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1)
     ; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-    ; MIPS32: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 0)
+    ; MIPS32: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 8)
     ; MIPS32: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-    ; MIPS32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (load 4 from %fixed-stack.3, align 0)
+    ; MIPS32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (load 4 from %fixed-stack.3)
     ; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LOAD]], [[COPY]]
     ; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[LOAD]](s32), [[COPY]]
     ; MIPS32: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
@@ -323,13 +323,13 @@ body:             |
     %5:_(s32) = COPY $a3
     %0:_(s128) = G_MERGE_VALUES %2(s32), %3(s32), %4(s32), %5(s32)
     %10:_(p0) = G_FRAME_INDEX %fixed-stack.3
-    %6:_(s32) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.3, align 0)
+    %6:_(s32) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.3, align 8)
     %11:_(p0) = G_FRAME_INDEX %fixed-stack.2
-    %7:_(s32) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.2, align 0)
+    %7:_(s32) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.2, align 4)
     %12:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    %8:_(s32) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.1, align 0)
+    %8:_(s32) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.1, align 8)
     %13:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %9:_(s32) = G_LOAD %13(p0) :: (load 4 from %fixed-stack.0, align 0)
+    %9:_(s32) = G_LOAD %13(p0) :: (load 4 from %fixed-stack.0, align 4)
     %1:_(s128) = G_MERGE_VALUES %6(s32), %7(s32), %8(s32), %9(s32)
     %14:_(s128) = G_SUB %1, %0
     %15:_(s32), %16:_(s32), %17:_(s32), %18:_(s32) = G_UNMERGE_VALUES %14(s128)
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir
index 7c8dc0a1949..52a67aea7e2 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir
@@ -46,7 +46,7 @@ body:             |
     ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
     ; MIPS32: [[COPY3:%[0-9]+]]:gprb(s32) = COPY $a3
     ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+    ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
     ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.p)
     ; MIPS32: $v0 = COPY [[LOAD1]](s32)
     ; MIPS32: RetRA implicit $v0
@@ -55,7 +55,7 @@ body:             |
     %2:_(s32) = COPY $a2
     %3:_(s32) = COPY $a3
     %5:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %4:_(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+    %4:_(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 8)
     %6:_(s32) = G_LOAD %4(p0) :: (load 4 from %ir.p)
     $v0 = COPY %6(s32)
     RetRA implicit $v0
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/stack_args.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/stack_args.mir
index c3de383ff99..43caa5e5bd7 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/stack_args.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/stack_args.mir
@@ -24,7 +24,7 @@ body:             |
     ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
     ; MIPS32: [[COPY3:%[0-9]+]]:gprb(s32) = COPY $a3
     ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+    ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
     ; MIPS32: ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
     ; MIPS32: $a0 = COPY [[COPY]](s32)
     ; MIPS32: $a1 = COPY [[COPY1]](s32)
@@ -33,7 +33,7 @@ body:             |
     ; MIPS32: [[COPY4:%[0-9]+]]:gprb(p0) = COPY $sp
     ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 16
     ; MIPS32: [[GEP:%[0-9]+]]:gprb(p0) = G_GEP [[COPY4]], [[C]](s32)
-    ; MIPS32: G_STORE [[LOAD]](s32), [[GEP]](p0) :: (store 4 into stack + 16, align 0)
+    ; MIPS32: G_STORE [[LOAD]](s32), [[GEP]](p0) :: (store 4 into stack + 16)
     ; MIPS32: JAL @f, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
     ; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY $v0
     ; MIPS32: ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
@@ -44,7 +44,7 @@ body:             |
     %2:_(s32) = COPY $a2
     %3:_(s32) = COPY $a3
     %5:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %4:_(s32) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+    %4:_(s32) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 8)
     ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
     $a0 = COPY %0(s32)
     $a1 = COPY %1(s32)
@@ -53,7 +53,7 @@ body:             |
     %7:_(p0) = COPY $sp
     %8:_(s32) = G_CONSTANT i32 16
     %9:_(p0) = G_GEP %7, %8(s32)
-    G_STORE %4(s32), %9(p0) :: (store 4 into stack + 16, align 0)
+    G_STORE %4(s32), %9(p0) :: (store 4 into stack + 16, align 4)
     JAL @f, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
     %6:_(s32) = COPY $v0
     ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
index bf03d1d2caa..e940a462f07 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
@@ -57,12 +57,12 @@ fixedStack:
 body:             |
   bb.1 (%ir-block.0):
     ; ALL-LABEL: name: test_load_i8
-    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
     ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
     ; ALL: $al = COPY [[MOV8rm]]
     ; ALL: RET 0, implicit $al
     %1(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1)
     $al = COPY %2(s8)
     RET 0, implicit $al
@@ -82,12 +82,12 @@ fixedStack:
 body:             |
   bb.1 (%ir-block.0):
     ; ALL-LABEL: name: test_load_i16
-    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
     ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
     ; ALL: $ax = COPY [[MOV16rm]]
     ; ALL: RET 0, implicit $ax
     %1(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1)
     $ax = COPY %2(s16)
     RET 0, implicit $ax
@@ -107,12 +107,12 @@ fixedStack:
 body:             |
   bb.1 (%ir-block.0):
     ; ALL-LABEL: name: test_load_i32
-    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
     ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
     ; ALL: $eax = COPY [[MOV32rm1]]
     ; ALL: RET 0, implicit $eax
     %1(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
     $eax = COPY %2(s32)
     RET 0, implicit $eax
@@ -134,15 +134,15 @@ fixedStack:
 body:             |
   bb.1 (%ir-block.0):
     ; ALL-LABEL: name: test_store_i8
-    ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
-    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+    ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 16)
+    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
     ; ALL: MOV8mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV8rm]] :: (store 1 into %ir.p1)
     ; ALL: $eax = COPY [[MOV32rm]]
     ; ALL: RET 0, implicit $eax
     %2(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+    %0(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
     %3(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
     G_STORE %0(s8), %1(p0) :: (store 1 into %ir.p1)
     $eax = COPY %1(p0)
     RET 0, implicit $eax
@@ -164,15 +164,15 @@ fixedStack:
 body:             |
   bb.1 (%ir-block.0):
     ; ALL-LABEL: name: test_store_i16
-    ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
-    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+    ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 16)
+    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
     ; ALL: MOV16mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV16rm]] :: (store 2 into %ir.p1)
     ; ALL: $eax = COPY [[MOV32rm]]
     ; ALL: RET 0, implicit $eax
     %2(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+    %0(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
     %3(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
     G_STORE %0(s16), %1(p0) :: (store 2 into %ir.p1)
     $eax = COPY %1(p0)
     RET 0, implicit $eax
@@ -194,15 +194,15 @@ fixedStack:
 body:             |
   bb.1 (%ir-block.0):
     ; ALL-LABEL: name: test_store_i32
-    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
-    ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
+    ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
     ; ALL: MOV32mr [[MOV32rm1]], 1, $noreg, 0, $noreg, [[MOV32rm]] :: (store 4 into %ir.p1)
     ; ALL: $eax = COPY [[MOV32rm1]]
     ; ALL: RET 0, implicit $eax
     %2(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+    %0(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
     %3(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
     G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
     $eax = COPY %1(p0)
     RET 0, implicit $eax
@@ -222,12 +222,12 @@ fixedStack:
 body:             |
   bb.1 (%ir-block.0):
     ; ALL-LABEL: name: test_load_ptr
-    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
     ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr1)
     ; ALL: $eax = COPY [[MOV32rm1]]
     ; ALL: RET 0, implicit $eax
     %1(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2(p0) = G_LOAD %0(p0) :: (load 4 from %ir.ptr1)
     $eax = COPY %2(p0)
     RET 0, implicit $eax
@@ -249,14 +249,14 @@ fixedStack:
 body:             |
   bb.1 (%ir-block.0):
     ; ALL-LABEL: name: test_store_ptr
-    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
-    ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+    ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
+    ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
     ; ALL: MOV32mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV32rm1]] :: (store 4 into %ir.ptr1)
     ; ALL: RET 0
     %2(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0(p0) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+    %0(p0) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
     %3(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
     G_STORE %1(p0), %0(p0) :: (store 4 into %ir.ptr1)
     RET 0
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir
index 06759ae035a..be1e852248d 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir
@@ -26,12 +26,12 @@ body:             |
   bb.1.entry:
     ; CHECK-LABEL: name: inttoptr_p0_s32
     ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     ; CHECK: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[LOAD]](s32)
     ; CHECK: $eax = COPY [[INTTOPTR]](p0)
     ; CHECK: RET 0, implicit $eax
     %1:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0:_(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0:_(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2:_(p0) = G_INTTOPTR %0(s32)
     $eax = COPY %2(p0)
     RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir
index fec474d3bad..60d876e32d9 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir
@@ -45,13 +45,13 @@ body:             |
   bb.1.entry:
     ; CHECK-LABEL: name: ptrtoint_s1_p0
     ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s1) = G_PTRTOINT [[LOAD]](p0)
     ; CHECK: [[ANYEXT:%[0-9]+]]:_(s8) = G_ANYEXT [[PTRTOINT]](s1)
     ; CHECK: $al = COPY [[ANYEXT]](s8)
     ; CHECK: RET 0, implicit $al
     %1:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2:_(s1) = G_PTRTOINT %0(p0)
     %3:_(s8) = G_ANYEXT %2(s1)
     $al = COPY %3(s8)
@@ -74,12 +74,12 @@ body:             |
   bb.1.entry:
     ; CHECK-LABEL: name: ptrtoint_s8_p0
     ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s8) = G_PTRTOINT [[LOAD]](p0)
     ; CHECK: $al = COPY [[PTRTOINT]](s8)
     ; CHECK: RET 0, implicit $al
     %1:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2:_(s8) = G_PTRTOINT %0(p0)
     $al = COPY %2(s8)
     RET 0, implicit $al
@@ -101,12 +101,12 @@ body:             |
   bb.1.entry:
     ; CHECK-LABEL: name: ptrtoint_s16_p0
     ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s16) = G_PTRTOINT [[LOAD]](p0)
     ; CHECK: $ax = COPY [[PTRTOINT]](s16)
     ; CHECK: RET 0, implicit $ax
     %1:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2:_(s16) = G_PTRTOINT %0(p0)
     $ax = COPY %2(s16)
     RET 0, implicit $ax
@@ -128,12 +128,12 @@ body:             |
   bb.1.entry:
     ; CHECK-LABEL: name: ptrtoint_s32_p0
     ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     ; CHECK: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[LOAD]](p0)
     ; CHECK: $eax = COPY [[PTRTOINT]](s32)
     ; CHECK: RET 0, implicit $eax
     %1:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2:_(s32) = G_PTRTOINT %0(p0)
     $eax = COPY %2(s32)
     RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir
index b0f74cf9bc5..777536a646b 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir
@@ -70,16 +70,16 @@ body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_srem_i8
     ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.0, align 16)
     ; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+    ; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1, align 4)
     ; CHECK: [[SREM:%[0-9]+]]:_(s8) = G_SREM [[LOAD]], [[LOAD1]]
     ; CHECK: $al = COPY [[SREM]](s8)
     ; CHECK: RET 0, implicit $al
     %2:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:_(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+    %0:_(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
     %3:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:_(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+    %1:_(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
     %4:_(s8) = G_SREM %0, %1
     $al = COPY %4(s8)
     RET 0, implicit $al
@@ -132,16 +132,16 @@ body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_srem_i16
     ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.0, align 16)
     ; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+    ; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 2 from %fixed-stack.1, align 4)
     ; CHECK: [[SREM:%[0-9]+]]:_(s16) = G_SREM [[LOAD]], [[LOAD1]]
     ; CHECK: $ax = COPY [[SREM]](s16)
     ; CHECK: RET 0, implicit $ax
     %2:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:_(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+    %0:_(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
     %3:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:_(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+    %1:_(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
     %4:_(s16) = G_SREM %0, %1
     $ax = COPY %4(s16)
     RET 0, implicit $ax
@@ -194,16 +194,16 @@ body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_srem_i32
     ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     ; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+    ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1)
     ; CHECK: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[LOAD]], [[LOAD1]]
     ; CHECK: $eax = COPY [[SREM]](s32)
     ; CHECK: RET 0, implicit $eax
     %2:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:_(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+    %0:_(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
     %3:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:_(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %1:_(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
     %4:_(s32) = G_SREM %0, %1
     $eax = COPY %4(s32)
     RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir
index 5fd01532066..b9c46b44abb 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir
@@ -70,16 +70,16 @@ body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_urem_i8
     ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.0, align 16)
     ; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+    ; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1, align 4)
     ; CHECK: [[UREM:%[0-9]+]]:_(s8) = G_UREM [[LOAD]], [[LOAD1]]
     ; CHECK: $al = COPY [[UREM]](s8)
     ; CHECK: RET 0, implicit $al
     %2:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:_(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+    %0:_(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
     %3:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:_(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+    %1:_(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
     %4:_(s8) = G_UREM %0, %1
     $al = COPY %4(s8)
     RET 0, implicit $al
@@ -132,16 +132,16 @@ body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_urem_i16
     ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.0, align 16)
     ; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+    ; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 2 from %fixed-stack.1, align 4)
     ; CHECK: [[UREM:%[0-9]+]]:_(s16) = G_UREM [[LOAD]], [[LOAD1]]
     ; CHECK: $ax = COPY [[UREM]](s16)
     ; CHECK: RET 0, implicit $ax
     %2:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:_(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+    %0:_(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
     %3:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:_(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+    %1:_(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
     %4:_(s16) = G_UREM %0, %1
     $ax = COPY %4(s16)
     RET 0, implicit $ax
@@ -194,16 +194,16 @@ body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_urem_i32
     ; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     ; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+    ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1)
     ; CHECK: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[LOAD]], [[LOAD1]]
     ; CHECK: $eax = COPY [[UREM]](s32)
     ; CHECK: RET 0, implicit $eax
     %2:_(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:_(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+    %0:_(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
     %3:_(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:_(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %1:_(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
     %4:_(s32) = G_UREM %0, %1
     $eax = COPY %4(s32)
     RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir
index 521d1997fb3..29cc1c4a102 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir
@@ -27,11 +27,11 @@ fixedStack:
 body:             |
   bb.1.entry:
     ; CHECK-LABEL: name: inttoptr_p0_s32
-    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
     ; CHECK: $eax = COPY [[MOV32rm]]
     ; CHECK: RET 0, implicit $eax
     %1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0:gpr(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0:gpr(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2:gpr(p0) = G_INTTOPTR %0(s32)
     $eax = COPY %2(p0)
     RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir
index 901633bf6fe..43e8619ddaa 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir
@@ -46,12 +46,12 @@ fixedStack:
 body:             |
   bb.1.entry:
     ; CHECK-LABEL: name: ptrtoint_s1_p0
-    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32_abcd = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32_abcd = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
     ; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY [[MOV32rm]].sub_8bit
     ; CHECK: $al = COPY [[COPY]]
     ; CHECK: RET 0, implicit $al
     %1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2:gpr(s1) = G_PTRTOINT %0(p0)
     %3:gpr(s8) = G_ANYEXT %2(s1)
     $al = COPY %3(s8)
@@ -75,12 +75,12 @@ fixedStack:
 body:             |
   bb.1.entry:
     ; CHECK-LABEL: name: ptrtoint_s8_p0
-    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32_abcd = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32_abcd = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
     ; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY [[MOV32rm]].sub_8bit
     ; CHECK: $al = COPY [[COPY]]
     ; CHECK: RET 0, implicit $al
     %1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2:gpr(s8) = G_PTRTOINT %0(p0)
     $al = COPY %2(s8)
     RET 0, implicit $al
@@ -103,12 +103,12 @@ fixedStack:
 body:             |
   bb.1.entry:
     ; CHECK-LABEL: name: ptrtoint_s16_p0
-    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
     ; CHECK: [[COPY:%[0-9]+]]:gr16 = COPY [[MOV32rm]].sub_16bit
     ; CHECK: $ax = COPY [[COPY]]
     ; CHECK: RET 0, implicit $ax
     %1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2:gpr(s16) = G_PTRTOINT %0(p0)
     $ax = COPY %2(s16)
     RET 0, implicit $ax
@@ -131,11 +131,11 @@ fixedStack:
 body:             |
   bb.1.entry:
     ; CHECK-LABEL: name: ptrtoint_s32_p0
-    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
     ; CHECK: $eax = COPY [[MOV32rm]]
     ; CHECK: RET 0, implicit $eax
     %1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %2:gpr(s32) = G_PTRTOINT %0(p0)
     $eax = COPY %2(s32)
     RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-srem.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-srem.mir
index 29600a9dd2d..50ebc9faaeb 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-srem.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-srem.mir
@@ -69,17 +69,17 @@ constants:
 body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_srem_i8
-    ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
-    ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 0)
+    ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 16)
+    ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 4)
     ; CHECK: $ax = MOVSX16rr8 [[MOV8rm]]
     ; CHECK: IDIV8r [[MOV8rm1]], implicit-def $al, implicit-def $ah, implicit-def $eflags, implicit $ax
     ; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $ah
     ; CHECK: $al = COPY [[COPY]]
     ; CHECK: RET 0, implicit $al
     %2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+    %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
     %3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+    %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
     %4:gpr(s8) = G_SREM %0, %1
     $al = COPY %4(s8)
     RET 0, implicit $al
@@ -131,8 +131,8 @@ constants:
 body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_srem_i16
-    ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
-    ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 0)
+    ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 16)
+    ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 4)
     ; CHECK: $ax = COPY [[MOV16rm]]
     ; CHECK: CWD implicit-def $ax, implicit-def $dx, implicit $ax
     ; CHECK: IDIV16r [[MOV16rm1]], implicit-def $ax, implicit-def $dx, implicit-def $eflags, implicit $ax, implicit $dx
@@ -140,9 +140,9 @@ body:             |
     ; CHECK: $ax = COPY [[COPY]]
     ; CHECK: RET 0, implicit $ax
     %2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+    %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
     %3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+    %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
     %4:gpr(s16) = G_SREM %0, %1
     $ax = COPY %4(s16)
     RET 0, implicit $ax
@@ -194,8 +194,8 @@ constants:
 body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_srem_i32
-    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
-    ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
+    ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
     ; CHECK: $eax = COPY [[MOV32rm]]
     ; CHECK: CDQ implicit-def $eax, implicit-def $edx, implicit $eax
     ; CHECK: IDIV32r [[MOV32rm1]], implicit-def $eax, implicit-def $edx, implicit-def $eflags, implicit $eax, implicit $edx
@@ -203,9 +203,9 @@ body:             |
     ; CHECK: $eax = COPY [[COPY]]
     ; CHECK: RET 0, implicit $eax
     %2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+    %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
     %3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
     %4:gpr(s32) = G_SREM %0, %1
     $eax = COPY %4(s32)
     RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir
index 4b8766160c7..afbbab73f51 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir
@@ -69,17 +69,17 @@ constants:
 body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_udiv_i8
-    ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
-    ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 0)
+    ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 16)
+    ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 4)
     ; CHECK: $ax = MOVZX16rr8 [[MOV8rm]]
     ; CHECK: DIV8r [[MOV8rm1]], implicit-def $al, implicit-def $ah, implicit-def $eflags, implicit $ax
     ; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $al
     ; CHECK: $al = COPY [[COPY]]
     ; CHECK: RET 0, implicit $al
     %2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+    %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
     %3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+    %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
     %4:gpr(s8) = G_UDIV %0, %1
     $al = COPY %4(s8)
     RET 0, implicit $al
@@ -131,8 +131,8 @@ constants:
 body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_udiv_i16
-    ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
-    ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 0)
+    ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 16)
+    ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 4)
     ; CHECK: $ax = COPY [[MOV16rm]]
     ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
     ; CHECK: $dx = COPY [[MOV32r0_]].sub_16bit
@@ -141,9 +141,9 @@ body:             |
     ; CHECK: $ax = COPY [[COPY]]
     ; CHECK: RET 0, implicit $ax
     %2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+    %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
     %3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+    %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
     %4:gpr(s16) = G_UDIV %0, %1
     $ax = COPY %4(s16)
     RET 0, implicit $ax
@@ -195,8 +195,8 @@ constants:
 body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_udiv_i32
-    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
-    ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0)
+    ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 16)
     ; CHECK: $eax = COPY [[MOV32rm]]
     ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
     ; CHECK: $edx = COPY [[MOV32r0_]]
@@ -205,9 +205,9 @@ body:             |
     ; CHECK: $eax = COPY [[COPY]]
     ; CHECK: RET 0, implicit $eax
     %2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+    %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 4)
     %3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
     %4:gpr(s32) = G_UDIV %0, %1
     $eax = COPY %4(s32)
     RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-urem.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-urem.mir
index 08f7df4cae3..1ca33657d7d 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-urem.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-urem.mir
@@ -69,17 +69,17 @@ constants:
 body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_urem_i8
-    ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
-    ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 0)
+    ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 16)
+    ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 4)
     ; CHECK: $ax = MOVZX16rr8 [[MOV8rm]]
     ; CHECK: DIV8r [[MOV8rm1]], implicit-def $al, implicit-def $ah, implicit-def $eflags, implicit $ax
     ; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $ah
     ; CHECK: $al = COPY [[COPY]]
     ; CHECK: RET 0, implicit $al
     %2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+    %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
     %3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+    %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
     %4:gpr(s8) = G_UREM %0, %1
     $al = COPY %4(s8)
     RET 0, implicit $al
@@ -131,8 +131,8 @@ constants:
 body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_urem_i16
-    ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
-    ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 0)
+    ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 16)
+    ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 4)
     ; CHECK: $ax = COPY [[MOV16rm]]
     ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
     ; CHECK: $dx = COPY [[MOV32r0_]].sub_16bit
@@ -141,9 +141,9 @@ body:             |
     ; CHECK: $ax = COPY [[COPY]]
     ; CHECK: RET 0, implicit $ax
     %2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+    %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
     %3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+    %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
     %4:gpr(s16) = G_UREM %0, %1
     $ax = COPY %4(s16)
     RET 0, implicit $ax
@@ -195,8 +195,8 @@ constants:
 body:             |
   bb.1 (%ir-block.0):
     ; CHECK-LABEL: name: test_urem_i32
-    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
-    ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+    ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
+    ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
     ; CHECK: $eax = COPY [[MOV32rm]]
     ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
     ; CHECK: $edx = COPY [[MOV32r0_]]
@@ -205,9 +205,9 @@ body:             |
     ; CHECK: $eax = COPY [[COPY]]
     ; CHECK: RET 0, implicit $eax
     %2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
-    %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+    %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
     %3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
-    %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+    %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
     %4:gpr(s32) = G_UREM %0, %1
     $eax = COPY %4(s32)
     RET 0, implicit $eax

