author    Matt Arsenault <Matthew.Arsenault@amd.com>  2019-01-30 23:09:28 +0000
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2019-01-30 23:09:28 +0000
commit    547a83b4ebd1cbbe90b092634bf1d909ded48555 (patch)
tree      cd7c41cef8fc587a95986659b0f2cd684767eb4e /llvm/test/CodeGen/X86
parent    10f59405ae50568308b713d9dd20eb30625470bd (diff)
MIR: Reject non-power-of-2 alignments in MMO parsing
llvm-svn: 352686
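
This commit makes the MIR parser reject MachineMemOperand (MMO) alignments that are not a power of two. The tests below had been using the placeholder "align 0", which is no longer accepted, so each MMO is updated to carry the slot's actual alignment; judging by the replacements, the incoming x86-32 argument area is 16-byte aligned, so the slot at its base gets "align 16" and subsequent 4-byte slots get "align 4" (or no align clause when the alignment matches the access size). A minimal before/after sketch, reusing a G_LOAD line from the diffs that follow:

  ; Rejected after this change: 0 is not a power of two.
  %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)

  ; Accepted: a power-of-two alignment, here the 16-byte alignment of the
  ; first fixed stack slot.
  %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)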
Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir | 48
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir   |  4
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir   | 16
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir       | 24
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir       | 24
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir     |  4
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir     | 16
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/x86-select-srem.mir         | 24
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir         | 24
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/x86-select-urem.mir         | 24
10 files changed, 104 insertions(+), 104 deletions(-)
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
index bf03d1d2caa..e940a462f07 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
@@ -57,12 +57,12 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i8
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
; ALL: $al = COPY [[MOV8rm]]
; ALL: RET 0, implicit $al
%1(p0) = G_FRAME_INDEX %fixed-stack.0
- %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1)
$al = COPY %2(s8)
RET 0, implicit $al
@@ -82,12 +82,12 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i16
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
; ALL: $ax = COPY [[MOV16rm]]
; ALL: RET 0, implicit $ax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
- %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1)
$ax = COPY %2(s16)
RET 0, implicit $ax
@@ -107,12 +107,12 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i32
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
; ALL: $eax = COPY [[MOV32rm1]]
; ALL: RET 0, implicit $eax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
- %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
$eax = COPY %2(s32)
RET 0, implicit $eax
@@ -134,15 +134,15 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i8
- ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 16)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
; ALL: MOV8mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV8rm]] :: (store 1 into %ir.p1)
; ALL: $eax = COPY [[MOV32rm]]
; ALL: RET 0, implicit $eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
- %0(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ %0(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
- %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
G_STORE %0(s8), %1(p0) :: (store 1 into %ir.p1)
$eax = COPY %1(p0)
RET 0, implicit $eax
@@ -164,15 +164,15 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i16
- ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 16)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
; ALL: MOV16mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV16rm]] :: (store 2 into %ir.p1)
; ALL: $eax = COPY [[MOV32rm]]
; ALL: RET 0, implicit $eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
- %0(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ %0(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
- %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
G_STORE %0(s16), %1(p0) :: (store 2 into %ir.p1)
$eax = COPY %1(p0)
RET 0, implicit $eax
@@ -194,15 +194,15 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i32
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
; ALL: MOV32mr [[MOV32rm1]], 1, $noreg, 0, $noreg, [[MOV32rm]] :: (store 4 into %ir.p1)
; ALL: $eax = COPY [[MOV32rm1]]
; ALL: RET 0, implicit $eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
- %0(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
- %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
$eax = COPY %1(p0)
RET 0, implicit $eax
@@ -222,12 +222,12 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_ptr
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr1)
; ALL: $eax = COPY [[MOV32rm1]]
; ALL: RET 0, implicit $eax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
- %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2(p0) = G_LOAD %0(p0) :: (load 4 from %ir.ptr1)
$eax = COPY %2(p0)
RET 0, implicit $eax
@@ -249,14 +249,14 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_ptr
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
; ALL: MOV32mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV32rm1]] :: (store 4 into %ir.ptr1)
; ALL: RET 0
%2(p0) = G_FRAME_INDEX %fixed-stack.1
- %0(p0) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0(p0) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
- %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
G_STORE %1(p0), %0(p0) :: (store 4 into %ir.ptr1)
RET 0
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir
index 06759ae035a..be1e852248d 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir
@@ -26,12 +26,12 @@ body: |
bb.1.entry:
; CHECK-LABEL: name: inttoptr_p0_s32
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[LOAD]](s32)
; CHECK: $eax = COPY [[INTTOPTR]](p0)
; CHECK: RET 0, implicit $eax
%1:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:_(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:_(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:_(p0) = G_INTTOPTR %0(s32)
$eax = COPY %2(p0)
RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir
index fec474d3bad..60d876e32d9 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir
@@ -45,13 +45,13 @@ body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s1_p0
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[PTRTOINT:%[0-9]+]]:_(s1) = G_PTRTOINT [[LOAD]](p0)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s8) = G_ANYEXT [[PTRTOINT]](s1)
; CHECK: $al = COPY [[ANYEXT]](s8)
; CHECK: RET 0, implicit $al
%1:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:_(s1) = G_PTRTOINT %0(p0)
%3:_(s8) = G_ANYEXT %2(s1)
$al = COPY %3(s8)
@@ -74,12 +74,12 @@ body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s8_p0
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[PTRTOINT:%[0-9]+]]:_(s8) = G_PTRTOINT [[LOAD]](p0)
; CHECK: $al = COPY [[PTRTOINT]](s8)
; CHECK: RET 0, implicit $al
%1:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:_(s8) = G_PTRTOINT %0(p0)
$al = COPY %2(s8)
RET 0, implicit $al
@@ -101,12 +101,12 @@ body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s16_p0
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[PTRTOINT:%[0-9]+]]:_(s16) = G_PTRTOINT [[LOAD]](p0)
; CHECK: $ax = COPY [[PTRTOINT]](s16)
; CHECK: RET 0, implicit $ax
%1:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:_(s16) = G_PTRTOINT %0(p0)
$ax = COPY %2(s16)
RET 0, implicit $ax
@@ -128,12 +128,12 @@ body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s32_p0
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[LOAD]](p0)
; CHECK: $eax = COPY [[PTRTOINT]](s32)
; CHECK: RET 0, implicit $eax
%1:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:_(s32) = G_PTRTOINT %0(p0)
$eax = COPY %2(s32)
RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir
index b0f74cf9bc5..777536a646b 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir
@@ -70,16 +70,16 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_srem_i8
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.0, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ ; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1, align 4)
; CHECK: [[SREM:%[0-9]+]]:_(s8) = G_SREM [[LOAD]], [[LOAD1]]
; CHECK: $al = COPY [[SREM]](s8)
; CHECK: RET 0, implicit $al
%2:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:_(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ %0:_(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
%3:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:_(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ %1:_(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
%4:_(s8) = G_SREM %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
@@ -132,16 +132,16 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_srem_i16
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.0, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ ; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 2 from %fixed-stack.1, align 4)
; CHECK: [[SREM:%[0-9]+]]:_(s16) = G_SREM [[LOAD]], [[LOAD1]]
; CHECK: $ax = COPY [[SREM]](s16)
; CHECK: RET 0, implicit $ax
%2:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:_(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ %0:_(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
%3:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:_(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ %1:_(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
%4:_(s16) = G_SREM %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
@@ -194,16 +194,16 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_srem_i32
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1)
; CHECK: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[LOAD]], [[LOAD1]]
; CHECK: $eax = COPY [[SREM]](s32)
; CHECK: RET 0, implicit $eax
%2:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:_(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0:_(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
%3:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:_(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1:_(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
%4:_(s32) = G_SREM %0, %1
$eax = COPY %4(s32)
RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir
index 5fd01532066..b9c46b44abb 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir
@@ -70,16 +70,16 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_urem_i8
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.0, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ ; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1, align 4)
; CHECK: [[UREM:%[0-9]+]]:_(s8) = G_UREM [[LOAD]], [[LOAD1]]
; CHECK: $al = COPY [[UREM]](s8)
; CHECK: RET 0, implicit $al
%2:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:_(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ %0:_(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
%3:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:_(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ %1:_(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
%4:_(s8) = G_UREM %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
@@ -132,16 +132,16 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_urem_i16
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.0, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ ; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 2 from %fixed-stack.1, align 4)
; CHECK: [[UREM:%[0-9]+]]:_(s16) = G_UREM [[LOAD]], [[LOAD1]]
; CHECK: $ax = COPY [[UREM]](s16)
; CHECK: RET 0, implicit $ax
%2:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:_(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ %0:_(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
%3:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:_(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ %1:_(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
%4:_(s16) = G_UREM %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
@@ -194,16 +194,16 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_urem_i32
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1)
; CHECK: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[LOAD]], [[LOAD1]]
; CHECK: $eax = COPY [[UREM]](s32)
; CHECK: RET 0, implicit $eax
%2:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:_(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0:_(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
%3:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:_(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1:_(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
%4:_(s32) = G_UREM %0, %1
$eax = COPY %4(s32)
RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir
index 521d1997fb3..29cc1c4a102 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir
@@ -27,11 +27,11 @@ fixedStack:
body: |
bb.1.entry:
; CHECK-LABEL: name: inttoptr_p0_s32
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: $eax = COPY [[MOV32rm]]
; CHECK: RET 0, implicit $eax
%1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:gpr(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:gpr(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:gpr(p0) = G_INTTOPTR %0(s32)
$eax = COPY %2(p0)
RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir
index 901633bf6fe..43e8619ddaa 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir
@@ -46,12 +46,12 @@ fixedStack:
body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s1_p0
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32_abcd = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32_abcd = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY [[MOV32rm]].sub_8bit
; CHECK: $al = COPY [[COPY]]
; CHECK: RET 0, implicit $al
%1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:gpr(s1) = G_PTRTOINT %0(p0)
%3:gpr(s8) = G_ANYEXT %2(s1)
$al = COPY %3(s8)
@@ -75,12 +75,12 @@ fixedStack:
body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s8_p0
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32_abcd = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32_abcd = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY [[MOV32rm]].sub_8bit
; CHECK: $al = COPY [[COPY]]
; CHECK: RET 0, implicit $al
%1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:gpr(s8) = G_PTRTOINT %0(p0)
$al = COPY %2(s8)
RET 0, implicit $al
@@ -103,12 +103,12 @@ fixedStack:
body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s16_p0
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[COPY:%[0-9]+]]:gr16 = COPY [[MOV32rm]].sub_16bit
; CHECK: $ax = COPY [[COPY]]
; CHECK: RET 0, implicit $ax
%1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:gpr(s16) = G_PTRTOINT %0(p0)
$ax = COPY %2(s16)
RET 0, implicit $ax
@@ -131,11 +131,11 @@ fixedStack:
body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s32_p0
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: $eax = COPY [[MOV32rm]]
; CHECK: RET 0, implicit $eax
%1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:gpr(s32) = G_PTRTOINT %0(p0)
$eax = COPY %2(s32)
RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-srem.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-srem.mir
index 29600a9dd2d..50ebc9faaeb 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-srem.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-srem.mir
@@ -69,17 +69,17 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_srem_i8
- ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 4)
; CHECK: $ax = MOVSX16rr8 [[MOV8rm]]
; CHECK: IDIV8r [[MOV8rm1]], implicit-def $al, implicit-def $ah, implicit-def $eflags, implicit $ax
; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $ah
; CHECK: $al = COPY [[COPY]]
; CHECK: RET 0, implicit $al
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
%4:gpr(s8) = G_SREM %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
@@ -131,8 +131,8 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_srem_i16
- ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 4)
; CHECK: $ax = COPY [[MOV16rm]]
; CHECK: CWD implicit-def $ax, implicit-def $dx, implicit $ax
; CHECK: IDIV16r [[MOV16rm1]], implicit-def $ax, implicit-def $dx, implicit-def $eflags, implicit $ax, implicit $dx
@@ -140,9 +140,9 @@ body: |
; CHECK: $ax = COPY [[COPY]]
; CHECK: RET 0, implicit $ax
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
%4:gpr(s16) = G_SREM %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
@@ -194,8 +194,8 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_srem_i32
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
; CHECK: $eax = COPY [[MOV32rm]]
; CHECK: CDQ implicit-def $eax, implicit-def $edx, implicit $eax
; CHECK: IDIV32r [[MOV32rm1]], implicit-def $eax, implicit-def $edx, implicit-def $eflags, implicit $eax, implicit $edx
@@ -203,9 +203,9 @@ body: |
; CHECK: $eax = COPY [[COPY]]
; CHECK: RET 0, implicit $eax
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
%4:gpr(s32) = G_SREM %0, %1
$eax = COPY %4(s32)
RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir
index 4b8766160c7..afbbab73f51 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir
@@ -69,17 +69,17 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_udiv_i8
- ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 4)
; CHECK: $ax = MOVZX16rr8 [[MOV8rm]]
; CHECK: DIV8r [[MOV8rm1]], implicit-def $al, implicit-def $ah, implicit-def $eflags, implicit $ax
; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $al
; CHECK: $al = COPY [[COPY]]
; CHECK: RET 0, implicit $al
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
%4:gpr(s8) = G_UDIV %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
@@ -131,8 +131,8 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_udiv_i16
- ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 4)
; CHECK: $ax = COPY [[MOV16rm]]
; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
; CHECK: $dx = COPY [[MOV32r0_]].sub_16bit
@@ -141,9 +141,9 @@ body: |
; CHECK: $ax = COPY [[COPY]]
; CHECK: RET 0, implicit $ax
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
%4:gpr(s16) = G_UDIV %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
@@ -195,8 +195,8 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_udiv_i32
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0)
+ ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 16)
; CHECK: $eax = COPY [[MOV32rm]]
; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
; CHECK: $edx = COPY [[MOV32r0_]]
@@ -205,9 +205,9 @@ body: |
; CHECK: $eax = COPY [[COPY]]
; CHECK: RET 0, implicit $eax
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 4)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%4:gpr(s32) = G_UDIV %0, %1
$eax = COPY %4(s32)
RET 0, implicit $eax
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-select-urem.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-select-urem.mir
index 08f7df4cae3..1ca33657d7d 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-select-urem.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-select-urem.mir
@@ -69,17 +69,17 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_urem_i8
- ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 4)
; CHECK: $ax = MOVZX16rr8 [[MOV8rm]]
; CHECK: DIV8r [[MOV8rm1]], implicit-def $al, implicit-def $ah, implicit-def $eflags, implicit $ax
; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $ah
; CHECK: $al = COPY [[COPY]]
; CHECK: RET 0, implicit $al
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
%4:gpr(s8) = G_UREM %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
@@ -131,8 +131,8 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_urem_i16
- ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 4)
; CHECK: $ax = COPY [[MOV16rm]]
; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
; CHECK: $dx = COPY [[MOV32r0_]].sub_16bit
@@ -141,9 +141,9 @@ body: |
; CHECK: $ax = COPY [[COPY]]
; CHECK: RET 0, implicit $ax
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
%4:gpr(s16) = G_UREM %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
@@ -195,8 +195,8 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_urem_i32
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
; CHECK: $eax = COPY [[MOV32rm]]
; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
; CHECK: $edx = COPY [[MOV32r0_]]
@@ -205,9 +205,9 @@ body: |
; CHECK: $eax = COPY [[COPY]]
; CHECK: RET 0, implicit $eax
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
%4:gpr(s32) = G_UREM %0, %1
$eax = COPY %4(s32)
RET 0, implicit $eax