| author | Craig Topper <craig.topper@intel.com> | 2018-06-15 04:42:55 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@intel.com> | 2018-06-15 04:42:55 +0000 |
| commit | 3b060daba56d0f0c531e0b59a83f78e28e8d0a58 (patch) | |
| tree | bc438578e29fe3554e2c4f862b249cd3878f0138 /llvm/test | |
| parent | f43807dd89da69e93acc2a05ffab1bee3e95f760 (diff) | |
[X86] Fix some checks to use X86 instead of X32.
These tests were recently updated, and it looks like the check-prefix rename went wrong in a few places.
llvm-svn: 334786
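Background for the fix: FileCheck only verifies check lines whose prefix is named on some RUN line, so once the RUN lines for the 32-bit runs were switched to the X86 prefix (X32 is conventionally reserved for the x32 ABI triple), any leftover `; X32-...` lines were silently skipped rather than checked. Below is a minimal sketch of how the prefix wiring works; the triples and `-mattr` flags are illustrative assumptions, not the file's actual RUN lines:

```llvm
; Hedged sketch of FileCheck prefix wiring; the real test's RUN lines and
; feature flags may differ. Each prefix is only checked by the RUN line
; that names it, so a stale X32 check would simply never run.
; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X64

define i32 @sample() {
; X86-LABEL: sample:
; X64-LABEL: sample:
entry:
  ret i32 0
}
```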
Diffstat (limited to 'llvm/test')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll | 192 |

1 file changed, 96 insertions(+), 96 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
index 3a1bdf1ccc1..227ecbfcf18 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
@@ -2563,12 +2563,12 @@ entry:
   ret <8 x i64> %2
 }
 define <4 x float> @test_mm_mask_add_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
-; X32-LABEL: test_mm_mask_add_ss:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vaddss %xmm2, %xmm1, %xmm0 {%k1}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_mask_add_ss:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddss %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_add_ss:
 ; X64:       # %bb.0: # %entry
@@ -2588,12 +2588,12 @@ entry:
 }
 
 define <4 x float> @test_mm_maskz_add_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
-; X32-LABEL: test_mm_maskz_add_ss:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vaddss %xmm1, %xmm0, %xmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_maskz_add_ss:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddss %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_add_ss:
 ; X64:       # %bb.0: # %entry
@@ -2612,12 +2612,12 @@ entry:
 }
 
 define <2 x double> @test_mm_mask_add_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
-; X32-LABEL: test_mm_mask_add_sd:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vaddsd %xmm2, %xmm1, %xmm0 {%k1}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_mask_add_sd:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddsd %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_add_sd:
 ; X64:       # %bb.0: # %entry
@@ -2637,12 +2637,12 @@ entry:
 }
 
 define <2 x double> @test_mm_maskz_add_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
-; X32-LABEL: test_mm_maskz_add_sd:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_maskz_add_sd:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_add_sd:
 ; X64:       # %bb.0: # %entry
@@ -2661,12 +2661,12 @@ entry:
 }
 
 define <4 x float> @test_mm_mask_sub_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
-; X32-LABEL: test_mm_mask_sub_ss:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vsubss %xmm2, %xmm1, %xmm0 {%k1}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_mask_sub_ss:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vsubss %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_sub_ss:
 ; X64:       # %bb.0: # %entry
@@ -2686,12 +2686,12 @@ entry:
 }
 
 define <4 x float> @test_mm_maskz_sub_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
-; X32-LABEL: test_mm_maskz_sub_ss:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vsubss %xmm1, %xmm0, %xmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_maskz_sub_ss:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vsubss %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_sub_ss:
 ; X64:       # %bb.0: # %entry
@@ -2710,12 +2710,12 @@ entry:
 }
 
 define <2 x double> @test_mm_mask_sub_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
-; X32-LABEL: test_mm_mask_sub_sd:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vsubsd %xmm2, %xmm1, %xmm0 {%k1}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_mask_sub_sd:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vsubsd %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_sub_sd:
 ; X64:       # %bb.0: # %entry
@@ -2735,12 +2735,12 @@ entry:
 }
 
 define <2 x double> @test_mm_maskz_sub_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
-; X32-LABEL: test_mm_maskz_sub_sd:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vsubsd %xmm1, %xmm0, %xmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_maskz_sub_sd:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vsubsd %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_sub_sd:
 ; X64:       # %bb.0: # %entry
@@ -2759,12 +2759,12 @@ entry:
 }
 
 define <4 x float> @test_mm_mask_mul_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
-; X32-LABEL: test_mm_mask_mul_ss:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vmulss %xmm2, %xmm1, %xmm0 {%k1}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_mask_mul_ss:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmulss %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_mul_ss:
 ; X64:       # %bb.0: # %entry
@@ -2784,12 +2784,12 @@ entry:
 }
 
 define <4 x float> @test_mm_maskz_mul_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
-; X32-LABEL: test_mm_maskz_mul_ss:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vmulss %xmm1, %xmm0, %xmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_maskz_mul_ss:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmulss %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_mul_ss:
 ; X64:       # %bb.0: # %entry
@@ -2808,12 +2808,12 @@ entry:
 }
 
 define <2 x double> @test_mm_mask_mul_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
-; X32-LABEL: test_mm_mask_mul_sd:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vmulsd %xmm2, %xmm1, %xmm0 {%k1}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_mask_mul_sd:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmulsd %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_mul_sd:
 ; X64:       # %bb.0: # %entry
@@ -2833,12 +2833,12 @@ entry:
 }
 
 define <2 x double> @test_mm_maskz_mul_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
-; X32-LABEL: test_mm_maskz_mul_sd:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_maskz_mul_sd:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_mul_sd:
 ; X64:       # %bb.0: # %entry
@@ -2857,12 +2857,12 @@ entry:
 }
 
 define <4 x float> @test_mm_mask_div_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
-; X32-LABEL: test_mm_mask_div_ss:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vdivss %xmm2, %xmm1, %xmm0 {%k1}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_mask_div_ss:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vdivss %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_div_ss:
 ; X64:       # %bb.0: # %entry
@@ -2882,12 +2882,12 @@ entry:
 }
 
 define <4 x float> @test_mm_maskz_div_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
-; X32-LABEL: test_mm_maskz_div_ss:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vdivss %xmm1, %xmm0, %xmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_maskz_div_ss:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vdivss %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_div_ss:
 ; X64:       # %bb.0: # %entry
@@ -2906,12 +2906,12 @@ entry:
 }
 
 define <2 x double> @test_mm_mask_div_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
-; X32-LABEL: test_mm_mask_div_sd:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vdivsd %xmm2, %xmm1, %xmm0 {%k1}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_mask_div_sd:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vdivsd %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_mask_div_sd:
 ; X64:       # %bb.0: # %entry
@@ -2931,12 +2931,12 @@ entry:
 }
 
 define <2 x double> @test_mm_maskz_div_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
-; X32-LABEL: test_mm_maskz_div_sd:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    kmovw %eax, %k1
-; X32-NEXT:    vdivsd %xmm1, %xmm0, %xmm0 {%k1} {z}
-; X32-NEXT:    retl
+; X86-LABEL: test_mm_maskz_div_sd:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vdivsd %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_mm_maskz_div_sd:
 ; X64:       # %bb.0: # %entry
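As a workflow note: check lines in tests like this are typically autogenerated by `llvm/utils/update_llc_test_checks.py` rather than edited by hand, so a fix like this usually amounts to rerunning that script after correcting the RUN lines, then verifying the single file with lit, e.g. `llvm-lit llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll` from a build tree (the exact invocation depends on the local build setup).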

