Diffstat (limited to 'llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir')
-rw-r--r--   llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir   364
1 file changed, 224 insertions, 140 deletions
diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
index d0d37f75261..e1d64558af7 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
@@ -257,8 +257,10 @@ alignment: 4
 legalized: true
 regBankSelected: true
 registers:
-  - { id: 0, class: gpr }
-  - { id: 1, class: gpr }
+  - { id: 0, class: gpr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+  - { id: 2, class: vecr, preferred-register: '' }
+  - { id: 3, class: vecr, preferred-register: '' }
 body: |
   bb.1 (%ir-block.0):
     liveins: $rdi
@@ -266,26 +268,36 @@ body: |
     ; SSE-LABEL: name: test_load_float
     ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
     ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
-    ; SSE: $xmm0 = COPY [[MOV32rm]]
+    ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
+    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+    ; SSE: $xmm0 = COPY [[COPY2]]
     ; SSE: RET 0, implicit $xmm0
     ; AVX-LABEL: name: test_load_float
     ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
     ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
-    ; AVX: $xmm0 = COPY [[MOV32rm]]
+    ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
+    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+    ; AVX: $xmm0 = COPY [[COPY2]]
     ; AVX: RET 0, implicit $xmm0
     ; AVX512F-LABEL: name: test_load_float
     ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
     ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
-    ; AVX512F: $xmm0 = COPY [[MOV32rm]]
+    ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
+    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+    ; AVX512F: $xmm0 = COPY [[COPY2]]
     ; AVX512F: RET 0, implicit $xmm0
     ; AVX512VL-LABEL: name: test_load_float
     ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
     ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
-    ; AVX512VL: $xmm0 = COPY [[MOV32rm]]
+    ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
+    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+    ; AVX512VL: $xmm0 = COPY [[COPY2]]
     ; AVX512VL: RET 0, implicit $xmm0
-    %0(p0) = COPY $rdi
-    %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
-    $xmm0 = COPY %1(s32)
+    %0:gpr(p0) = COPY $rdi
+    %1:gpr(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
+    %3:vecr(s32) = COPY %1(s32)
+    %2:vecr(s128) = G_ANYEXT %3(s32)
+    $xmm0 = COPY %2(s128)
     RET 0, implicit $xmm0
 
 ...
@@ -295,35 +307,47 @@ alignment: 4
 legalized: true
 regBankSelected: true
 registers:
-  - { id: 0, class: gpr }
-  - { id: 1, class: vecr }
+  - { id: 0, class: gpr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+  - { id: 2, class: vecr, preferred-register: '' }
+  - { id: 3, class: vecr, preferred-register: '' }
 body: |
   bb.1 (%ir-block.0):
     liveins: $rdi

     ; SSE-LABEL: name: test_load_float_vecreg
     ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-    ; SSE: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
-    ; SSE: $xmm0 = COPY [[MOVSSrm]]
+    ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+    ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
+    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+    ; SSE: $xmm0 = COPY [[COPY2]]
     ; SSE: RET 0, implicit $xmm0
     ; AVX-LABEL: name: test_load_float_vecreg
     ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX: [[VMOVSSrm:%[0-9]+]]:fr32 = VMOVSSrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
-    ; AVX: $xmm0 = COPY [[VMOVSSrm]]
+    ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+    ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
+    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+    ; AVX: $xmm0 = COPY [[COPY2]]
     ; AVX: RET 0, implicit $xmm0
     ; AVX512F-LABEL: name: test_load_float_vecreg
     ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX512F: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
-    ; AVX512F: $xmm0 = COPY [[VMOVSSZrm]]
+    ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+    ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
+    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+    ; AVX512F: $xmm0 = COPY [[COPY2]]
     ; AVX512F: RET 0, implicit $xmm0
     ; AVX512VL-LABEL: name: test_load_float_vecreg
     ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX512VL: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
-    ; AVX512VL: $xmm0 = COPY [[VMOVSSZrm]]
+    ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+    ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
+    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+    ; AVX512VL: $xmm0 = COPY [[COPY2]]
     ; AVX512VL: RET 0, implicit $xmm0
-    %0(p0) = COPY $rdi
-    %1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
-    $xmm0 = COPY %1(s32)
+    %0:gpr(p0) = COPY $rdi
+    %1:gpr(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
+    %3:vecr(s32) = COPY %1(s32)
+    %2:vecr(s128) = G_ANYEXT %3(s32)
+    $xmm0 = COPY %2(s128)
     RET 0, implicit $xmm0
 
 ...
@@ -333,8 +357,10 @@ alignment: 4
 legalized: true
 regBankSelected: true
 registers:
-  - { id: 0, class: gpr }
-  - { id: 1, class: gpr }
+  - { id: 0, class: gpr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+  - { id: 2, class: vecr, preferred-register: '' }
+  - { id: 3, class: vecr, preferred-register: '' }
 body: |
   bb.1 (%ir-block.0):
     liveins: $rdi
@@ -342,26 +368,36 @@ body: |
     ; SSE-LABEL: name: test_load_double
     ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
     ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
-    ; SSE: $xmm0 = COPY [[MOV64rm]]
+    ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
+    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+    ; SSE: $xmm0 = COPY [[COPY2]]
     ; SSE: RET 0, implicit $xmm0
     ; AVX-LABEL: name: test_load_double
     ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
     ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
-    ; AVX: $xmm0 = COPY [[MOV64rm]]
+    ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
+    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+    ; AVX: $xmm0 = COPY [[COPY2]]
     ; AVX: RET 0, implicit $xmm0
     ; AVX512F-LABEL: name: test_load_double
     ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
     ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
-    ; AVX512F: $xmm0 = COPY [[MOV64rm]]
+    ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
+    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+    ; AVX512F: $xmm0 = COPY [[COPY2]]
     ; AVX512F: RET 0, implicit $xmm0
     ; AVX512VL-LABEL: name: test_load_double
     ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
     ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
-    ; AVX512VL: $xmm0 = COPY [[MOV64rm]]
+    ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
+    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+    ; AVX512VL: $xmm0 = COPY [[COPY2]]
     ; AVX512VL: RET 0, implicit $xmm0
-    %0(p0) = COPY $rdi
-    %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
-    $xmm0 = COPY %1(s64)
+    %0:gpr(p0) = COPY $rdi
+    %1:gpr(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
+    %3:vecr(s64) = COPY %1(s64)
+    %2:vecr(s128) = G_ANYEXT %3(s64)
+    $xmm0 = COPY %2(s128)
     RET 0, implicit $xmm0
 
 ...
@@ -371,35 +407,47 @@ alignment: 4
 legalized: true
 regBankSelected: true
 registers:
-  - { id: 0, class: gpr }
-  - { id: 1, class: vecr }
+  - { id: 0, class: gpr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+  - { id: 2, class: vecr, preferred-register: '' }
+  - { id: 3, class: vecr, preferred-register: '' }
 body: |
   bb.1 (%ir-block.0):
     liveins: $rdi

     ; SSE-LABEL: name: test_load_double_vecreg
     ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-    ; SSE: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
-    ; SSE: $xmm0 = COPY [[MOVSDrm]]
+    ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+    ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
+    ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+    ; SSE: $xmm0 = COPY [[COPY2]]
     ; SSE: RET 0, implicit $xmm0
     ; AVX-LABEL: name: test_load_double_vecreg
     ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX: [[VMOVSDrm:%[0-9]+]]:fr64 = VMOVSDrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
-    ; AVX: $xmm0 = COPY [[VMOVSDrm]]
+    ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+    ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
+    ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
+    ; AVX: $xmm0 = COPY [[COPY2]]
     ; AVX: RET 0, implicit $xmm0
     ; AVX512F-LABEL: name: test_load_double_vecreg
     ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX512F: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
-    ; AVX512F: $xmm0 = COPY [[VMOVSDZrm]]
+    ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+    ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
+    ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+    ; AVX512F: $xmm0 = COPY [[COPY2]]
     ; AVX512F: RET 0, implicit $xmm0
     ; AVX512VL-LABEL: name: test_load_double_vecreg
     ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX512VL: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
-    ; AVX512VL: $xmm0 = COPY [[VMOVSDZrm]]
+    ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+    ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
+    ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
+    ; AVX512VL: $xmm0 = COPY [[COPY2]]
     ; AVX512VL: RET 0, implicit $xmm0
-    %0(p0) = COPY $rdi
-    %1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
-    $xmm0 = COPY %1(s64)
+    %0:gpr(p0) = COPY $rdi
+    %1:gpr(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
+    %3:vecr(s64) = COPY %1(s64)
+    %2:vecr(s128) = G_ANYEXT %3(s64)
+    $xmm0 = COPY %2(s128)
     RET 0, implicit $xmm0
 
 ...
@@ -495,45 +543,51 @@ alignment: 4
 legalized: true
 regBankSelected: true
 registers:
-  - { id: 0, class: vecr }
-  - { id: 1, class: gpr }
-  - { id: 2, class: gpr }
+  - { id: 0, class: vecr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+  - { id: 2, class: vecr, preferred-register: '' }
+  - { id: 3, class: gpr, preferred-register: '' }
 body: |
   bb.1 (%ir-block.0):
     liveins: $rdi, $xmm0

     ; SSE-LABEL: name: test_store_float
-    ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
-    ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; SSE: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
-    ; SSE: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
-    ; SSE: $rax = COPY [[COPY1]]
+    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+    ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+    ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; SSE: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+    ; SSE: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+    ; SSE: $rax = COPY [[COPY2]]
     ; SSE: RET 0, implicit $rax
     ; AVX-LABEL: name: test_store_float
-    ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
-    ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
-    ; AVX: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
-    ; AVX: $rax = COPY [[COPY1]]
+    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+    ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+    ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+    ; AVX: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+    ; AVX: $rax = COPY [[COPY2]]
     ; AVX: RET 0, implicit $rax
     ; AVX512F-LABEL: name: test_store_float
-    ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
-    ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX512F: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
-    ; AVX512F: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
-    ; AVX512F: $rax = COPY [[COPY1]]
+    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+    ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+    ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512F: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+    ; AVX512F: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+    ; AVX512F: $rax = COPY [[COPY2]]
     ; AVX512F: RET 0, implicit $rax
     ; AVX512VL-LABEL: name: test_store_float
-    ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
-    ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX512VL: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
-    ; AVX512VL: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
-    ; AVX512VL: $rax = COPY [[COPY1]]
+    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+    ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+    ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512VL: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+    ; AVX512VL: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+    ; AVX512VL: $rax = COPY [[COPY2]]
     ; AVX512VL: RET 0, implicit $rax
-    %0(s32) = COPY $xmm0
-    %1(p0) = COPY $rdi
-    %2(s32) = COPY %0(s32)
-    G_STORE %2(s32), %1(p0) :: (store 4 into %ir.p1)
+    %2:vecr(s128) = COPY $xmm0
+    %0:vecr(s32) = G_TRUNC %2(s128)
+    %1:gpr(p0) = COPY $rdi
+    %3:gpr(s32) = COPY %0(s32)
+    G_STORE %3(s32), %1(p0) :: (store 4 into %ir.p1)
     $rax = COPY %1(p0)
     RET 0, implicit $rax

@@ -544,39 +598,51 @@ alignment: 4
 legalized: true
 regBankSelected: true
 registers:
-  - { id: 0, class: vecr }
-  - { id: 1, class: gpr }
+  - { id: 0, class: vecr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+  - { id: 2, class: vecr, preferred-register: '' }
+  - { id: 3, class: gpr, preferred-register: '' }
 body: |
   bb.1 (%ir-block.0):
    liveins: $rdi, $xmm0

     ; SSE-LABEL: name: test_store_float_vec
-    ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
-    ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; SSE: MOVSSmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
-    ; SSE: $rax = COPY [[COPY1]]
+    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+    ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+    ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; SSE: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+    ; SSE: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+    ; SSE: $rax = COPY [[COPY2]]
     ; SSE: RET 0, implicit $rax
     ; AVX-LABEL: name: test_store_float_vec
-    ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
-    ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX: VMOVSSmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
-    ; AVX: $rax = COPY [[COPY1]]
+    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+    ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
+    ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+    ; AVX: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+    ; AVX: $rax = COPY [[COPY2]]
     ; AVX: RET 0, implicit $rax
     ; AVX512F-LABEL: name: test_store_float_vec
-    ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
-    ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX512F: VMOVSSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
-    ; AVX512F: $rax = COPY [[COPY1]]
+    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+    ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+    ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512F: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+    ; AVX512F: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+    ; AVX512F: $rax = COPY [[COPY2]]
     ; AVX512F: RET 0, implicit $rax
     ; AVX512VL-LABEL: name: test_store_float_vec
-    ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
-    ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX512VL: VMOVSSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
-    ; AVX512VL: $rax = COPY [[COPY1]]
+    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+    ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
+    ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512VL: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
+    ; AVX512VL: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 4 into %ir.p1)
+    ; AVX512VL: $rax = COPY [[COPY2]]
     ; AVX512VL: RET 0, implicit $rax
-    %0(s32) = COPY $xmm0
-    %1(p0) = COPY $rdi
-    G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
+    %2:vecr(s128) = COPY $xmm0
+    %0:vecr(s32) = G_TRUNC %2(s128)
+    %1:gpr(p0) = COPY $rdi
+    %3:gpr(s32) = COPY %0(s32)
+    G_STORE %3(s32), %1(p0) :: (store 4 into %ir.p1)
     $rax = COPY %1(p0)
     RET 0, implicit $rax

@@ -587,46 +653,52 @@ alignment: 4
 legalized: true
 regBankSelected: true
 registers:
-  - { id: 0, class: vecr }
-  - { id: 1, class: gpr }
-  - { id: 2, class: gpr }
+  - { id: 0, class: vecr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+  - { id: 2, class: vecr, preferred-register: '' }
+  - { id: 3, class: gpr, preferred-register: '' }
 # NO_AVX512X: %0:fr64 = COPY $xmm0
 body: |
   bb.1 (%ir-block.0):
     liveins: $rdi, $xmm0

     ; SSE-LABEL: name: test_store_double
-    ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
-    ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
-    ; SSE: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
-    ; SSE: $rax = COPY [[COPY1]]
+    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+    ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+    ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; SSE: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+    ; SSE: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+    ; SSE: $rax = COPY [[COPY2]]
     ; SSE: RET 0, implicit $rax
     ; AVX-LABEL: name: test_store_double
-    ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
-    ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
-    ; AVX: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
-    ; AVX: $rax = COPY [[COPY1]]
+    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+    ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+    ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+    ; AVX: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+    ; AVX: $rax = COPY [[COPY2]]
     ; AVX: RET 0, implicit $rax
     ; AVX512F-LABEL: name: test_store_double
-    ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
-    ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
-    ; AVX512F: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
-    ; AVX512F: $rax = COPY [[COPY1]]
+    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+    ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+    ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512F: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+    ; AVX512F: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+    ; AVX512F: $rax = COPY [[COPY2]]
     ; AVX512F: RET 0, implicit $rax
     ; AVX512VL-LABEL: name: test_store_double
-    ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
-    ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
-    ; AVX512VL: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
-    ; AVX512VL: $rax = COPY [[COPY1]]
+    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+    ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+    ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512VL: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+    ; AVX512VL: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+    ; AVX512VL: $rax = COPY [[COPY2]]
     ; AVX512VL: RET 0, implicit $rax
-    %0(s64) = COPY $xmm0
-    %1(p0) = COPY $rdi
-    %2(s64) = COPY %0(s64)
-    G_STORE %2(s64), %1(p0) :: (store 8 into %ir.p1)
+    %2:vecr(s128) = COPY $xmm0
+    %0:vecr(s64) = G_TRUNC %2(s128)
+    %1:gpr(p0) = COPY $rdi
+    %3:gpr(s64) = COPY %0(s64)
+    G_STORE %3(s64), %1(p0) :: (store 8 into %ir.p1)
     $rax = COPY %1(p0)
     RET 0, implicit $rax

@@ -637,39 +709,51 @@ alignment: 4
 legalized: true
 regBankSelected: true
 registers:
-  - { id: 0, class: vecr }
-  - { id: 1, class: gpr }
+  - { id: 0, class: vecr, preferred-register: '' }
+  - { id: 1, class: gpr, preferred-register: '' }
+  - { id: 2, class: vecr, preferred-register: '' }
+  - { id: 3, class: gpr, preferred-register: '' }
 body: |
   bb.1 (%ir-block.0):
     liveins: $rdi, $xmm0

     ; SSE-LABEL: name: test_store_double_vec
-    ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
-    ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; SSE: MOVSDmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
-    ; SSE: $rax = COPY [[COPY1]]
+    ; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+    ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+    ; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; SSE: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+    ; SSE: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+    ; SSE: $rax = COPY [[COPY2]]
     ; SSE: RET 0, implicit $rax
     ; AVX-LABEL: name: test_store_double_vec
-    ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
-    ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX: VMOVSDmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
-    ; AVX: $rax = COPY [[COPY1]]
+    ; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+    ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
+    ; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+    ; AVX: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+    ; AVX: $rax = COPY [[COPY2]]
     ; AVX: RET 0, implicit $rax
     ; AVX512F-LABEL: name: test_store_double_vec
-    ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
-    ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX512F: VMOVSDZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
-    ; AVX512F: $rax = COPY [[COPY1]]
+    ; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+    ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+    ; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512F: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+    ; AVX512F: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+    ; AVX512F: $rax = COPY [[COPY2]]
     ; AVX512F: RET 0, implicit $rax
     ; AVX512VL-LABEL: name: test_store_double_vec
-    ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
-    ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-    ; AVX512VL: VMOVSDZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
-    ; AVX512VL: $rax = COPY [[COPY1]]
+    ; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+    ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
+    ; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
+    ; AVX512VL: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
+    ; AVX512VL: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store 8 into %ir.p1)
+    ; AVX512VL: $rax = COPY [[COPY2]]
     ; AVX512VL: RET 0, implicit $rax
-    %0(s64) = COPY $xmm0
-    %1(p0) = COPY $rdi
-    G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
+    %2:vecr(s128) = COPY $xmm0
+    %0:vecr(s64) = G_TRUNC %2(s128)
+    %1:gpr(p0) = COPY $rdi
+    %3:gpr(s64) = COPY %0(s64)
+    G_STORE %3(s64), %1(p0) :: (store 8 into %ir.p1)
     $rax = COPY %1(p0)
     RET 0, implicit $rax

