Diffstat (limited to 'llvm/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll | 298
1 file changed, 149 insertions, 149 deletions
diff --git a/llvm/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
index dc1dd08e6b4..78fd5d57e40 100644
--- a/llvm/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
@@ -3,7 +3,7 @@
 define <4 x float> @test_mask_andnot_ps_rr_128(<4 x float> %a, <4 x float> %b) {
 ; CHECK-LABEL: test_mask_andnot_ps_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandnps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x55,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.avx512.mask.andn.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 -1)
@@ -12,7 +12,7 @@ define <4 x float> @test_mask_andnot_ps_rr_128(<4 x float> %a, <4 x float> %b) {
 define <4 x float> @test_mask_andnot_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vandnps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x55,0xd1]
 ; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -23,7 +23,7 @@ define <4 x float> @test_mask_andnot_ps_rrk_128(<4 x float> %a, <4 x float> %b,
 define <4 x float> @test_mask_andnot_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vandnps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x55,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -33,7 +33,7 @@ define <4 x float> @test_mask_andnot_ps_rrkz_128(<4 x float> %a, <4 x float> %b,
 define <4 x float> @test_mask_andnot_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b) {
 ; CHECK-LABEL: test_mask_andnot_ps_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandnps (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x55,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <4 x float>, <4 x float>* %ptr_b
@@ -43,7 +43,7 @@ define <4 x float> @test_mask_andnot_ps_rm_128(<4 x float> %a, <4 x float>* %ptr
 define <4 x float> @test_mask_andnot_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b, <4 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandnps (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x55,0x0f]
 ; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -55,7 +55,7 @@ define <4 x float> @test_mask_andnot_ps_rmk_128(<4 x float> %a, <4 x float>* %pt
 define <4 x float> @test_mask_andnot_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandnps (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x55,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -66,7 +66,7 @@ define <4 x float> @test_mask_andnot_ps_rmkz_128(<4 x float> %a, <4 x float>* %p
 define <4 x float> @test_mask_andnot_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandnps (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x55,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load float, float* %ptr_b
@@ -78,7 +78,7 @@ define <4 x float> @test_mask_andnot_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
 define <4 x float> @test_mask_andnot_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandnps (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x55,0x0f]
 ; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -92,7 +92,7 @@ define <4 x float> @test_mask_andnot_ps_rmbk_128(<4 x float> %a, float* %ptr_b,
 define <4 x float> @test_mask_andnot_ps_rmbkz_128(<4 x float> %a, float* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandnps (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x55,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -107,7 +107,7 @@ declare <4 x float> @llvm.x86.avx512.mask.andn.ps.128(<4 x float>, <4 x float>,
 define <8 x float> @test_mask_andnot_ps_rr_256(<8 x float> %a, <8 x float> %b) {
 ; CHECK-LABEL: test_mask_andnot_ps_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandnps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x55,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx512.mask.andn.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> zeroinitializer, i8 -1)
@@ -116,7 +116,7 @@ define <8 x float> @test_mask_andnot_ps_rr_256(<8 x float> %a, <8 x float> %b) {
 define <8 x float> @test_mask_andnot_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vandnps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x55,0xd1]
 ; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -127,7 +127,7 @@ define <8 x float> @test_mask_andnot_ps_rrk_256(<8 x float> %a, <8 x float> %b,
 define <8 x float> @test_mask_andnot_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vandnps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x55,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -137,7 +137,7 @@ define <8 x float> @test_mask_andnot_ps_rrkz_256(<8 x float> %a, <8 x float> %b,
 define <8 x float> @test_mask_andnot_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b) {
 ; CHECK-LABEL: test_mask_andnot_ps_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandnps (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x55,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <8 x float>, <8 x float>* %ptr_b
@@ -147,7 +147,7 @@ define <8 x float> @test_mask_andnot_ps_rm_256(<8 x float> %a, <8 x float>* %ptr
 define <8 x float> @test_mask_andnot_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b, <8 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandnps (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x55,0x0f]
 ; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -159,7 +159,7 @@ define <8 x float> @test_mask_andnot_ps_rmk_256(<8 x float> %a, <8 x float>* %pt
 define <8 x float> @test_mask_andnot_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandnps (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x55,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -170,7 +170,7 @@ define <8 x float> @test_mask_andnot_ps_rmkz_256(<8 x float> %a, <8 x float>* %p
 define <8 x float> @test_mask_andnot_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandnps (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x55,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load float, float* %ptr_b
@@ -182,7 +182,7 @@ define <8 x float> @test_mask_andnot_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
 define <8 x float> @test_mask_andnot_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandnps (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x55,0x0f]
 ; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -196,7 +196,7 @@ define <8 x float> @test_mask_andnot_ps_rmbk_256(<8 x float> %a, float* %ptr_b,
 define <8 x float> @test_mask_andnot_ps_rmbkz_256(<8 x float> %a, float* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandnps (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x55,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -211,7 +211,7 @@ declare <8 x float> @llvm.x86.avx512.mask.andn.ps.256(<8 x float>, <8 x float>,
 define <16 x float> @test_mask_andnot_ps_rr_512(<16 x float> %a, <16 x float> %b) {
 ; CHECK-LABEL: test_mask_andnot_ps_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandnps %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x55,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <16 x float> @llvm.x86.avx512.mask.andn.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> zeroinitializer, i16 -1)
@@ -220,7 +220,7 @@ define <16 x float> @test_mask_andnot_ps_rr_512(<16 x float> %a, <16 x float> %b
 define <16 x float> @test_mask_andnot_ps_rrk_512(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vandnps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x55,0xd1]
 ; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
@@ -231,7 +231,7 @@ define <16 x float> @test_mask_andnot_ps_rrk_512(<16 x float> %a, <16 x float> %
 define <16 x float> @test_mask_andnot_ps_rrkz_512(<16 x float> %a, <16 x float> %b, i16 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vandnps %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x55,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -241,7 +241,7 @@ define <16 x float> @test_mask_andnot_ps_rrkz_512(<16 x float> %a, <16 x float>
 define <16 x float> @test_mask_andnot_ps_rm_512(<16 x float> %a, <16 x float>* %ptr_b) {
 ; CHECK-LABEL: test_mask_andnot_ps_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandnps (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x55,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <16 x float>, <16 x float>* %ptr_b
@@ -251,7 +251,7 @@ define <16 x float> @test_mask_andnot_ps_rm_512(<16 x float> %a, <16 x float>* %
 define <16 x float> @test_mask_andnot_ps_rmk_512(<16 x float> %a, <16 x float>* %ptr_b, <16 x float> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandnps (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x55,0x0f]
 ; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -263,7 +263,7 @@ define <16 x float> @test_mask_andnot_ps_rmk_512(<16 x float> %a, <16 x float>*
 define <16 x float> @test_mask_andnot_ps_rmkz_512(<16 x float> %a, <16 x float>* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandnps (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x55,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -274,7 +274,7 @@ define <16 x float> @test_mask_andnot_ps_rmkz_512(<16 x float> %a, <16 x float>*
 define <16 x float> @test_mask_andnot_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandnps (%rdi){1to16}, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x58,0x55,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load float, float* %ptr_b
@@ -286,7 +286,7 @@ define <16 x float> @test_mask_andnot_ps_rmb_512(<16 x float> %a, float* %ptr_b)
 define <16 x float> @test_mask_andnot_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <16 x float> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmbk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandnps (%rdi){1to16}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x59,0x55,0x0f]
 ; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -300,7 +300,7 @@ define <16 x float> @test_mask_andnot_ps_rmbk_512(<16 x float> %a, float* %ptr_b
 define <16 x float> @test_mask_andnot_ps_rmbkz_512(<16 x float> %a, float* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_andnot_ps_rmbkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandnps (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xd9,0x55,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -315,7 +315,7 @@ declare <16 x float> @llvm.x86.avx512.mask.andn.ps.512(<16 x float>, <16 x float
 define <4 x float> @test_mask_and_ps_rr_128(<4 x float> %a, <4 x float> %b) {
 ; CHECK-LABEL: test_mask_and_ps_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x54,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.avx512.mask.and.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 -1)
@@ -324,7 +324,7 @@ define <4 x float> @test_mask_and_ps_rr_128(<4 x float> %a, <4 x float> %b) {
 define <4 x float> @test_mask_and_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x54,0xd1]
 ; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -335,7 +335,7 @@ define <4 x float> @test_mask_and_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4
 define <4 x float> @test_mask_and_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vandps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x54,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -345,7 +345,7 @@ define <4 x float> @test_mask_and_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8
 define <4 x float> @test_mask_and_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b) {
 ; CHECK-LABEL: test_mask_and_ps_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandps (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x54,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <4 x float>, <4 x float>* %ptr_b
@@ -355,7 +355,7 @@ define <4 x float> @test_mask_and_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b)
 define <4 x float> @test_mask_and_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b, <4 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandps (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x54,0x0f]
 ; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -367,7 +367,7 @@ define <4 x float> @test_mask_and_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b
 define <4 x float> @test_mask_and_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandps (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x54,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -378,7 +378,7 @@ define <4 x float> @test_mask_and_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_
 define <4 x float> @test_mask_and_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
 ; CHECK-LABEL: test_mask_and_ps_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandps (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x54,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load float, float* %ptr_b
@@ -390,7 +390,7 @@ define <4 x float> @test_mask_and_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
 define <4 x float> @test_mask_and_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandps (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x54,0x0f]
 ; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -404,7 +404,7 @@ define <4 x float> @test_mask_and_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4
 define <4 x float> @test_mask_and_ps_rmbkz_128(<4 x float> %a, float* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandps (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x54,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -419,7 +419,7 @@ declare <4 x float> @llvm.x86.avx512.mask.and.ps.128(<4 x float>, <4 x float>, <
 define <8 x float> @test_mask_and_ps_rr_256(<8 x float> %a, <8 x float> %b) {
 ; CHECK-LABEL: test_mask_and_ps_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x54,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx512.mask.and.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> zeroinitializer, i8 -1)
@@ -428,7 +428,7 @@ define <8 x float> @test_mask_and_ps_rr_256(<8 x float> %a, <8 x float> %b) {
 define <8 x float> @test_mask_and_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vandps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x54,0xd1]
 ; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -439,7 +439,7 @@ define <8 x float> @test_mask_and_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8
 define <8 x float> @test_mask_and_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vandps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x54,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -449,7 +449,7 @@ define <8 x float> @test_mask_and_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8
 define <8 x float> @test_mask_and_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b) {
 ; CHECK-LABEL: test_mask_and_ps_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandps (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x54,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <8 x float>, <8 x float>* %ptr_b
@@ -459,7 +459,7 @@ define <8 x float> @test_mask_and_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b)
 define <8 x float> @test_mask_and_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b, <8 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandps (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x54,0x0f]
 ; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -471,7 +471,7 @@ define <8 x float> @test_mask_and_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b
 define <8 x float> @test_mask_and_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandps (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x54,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -482,7 +482,7 @@ define <8 x float> @test_mask_and_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_
 define <8 x float> @test_mask_and_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
 ; CHECK-LABEL: test_mask_and_ps_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandps (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x54,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load float, float* %ptr_b
@@ -494,7 +494,7 @@ define <8 x float> @test_mask_and_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
 define <8 x float> @test_mask_and_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandps (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x54,0x0f]
 ; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -508,7 +508,7 @@ define <8 x float> @test_mask_and_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8
 define <8 x float> @test_mask_and_ps_rmbkz_256(<8 x float> %a, float* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandps (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x54,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -523,7 +523,7 @@ declare <8 x float> @llvm.x86.avx512.mask.and.ps.256(<8 x float>, <8 x float>, <
 define <16 x float> @test_mask_and_ps_rr_512(<16 x float> %a, <16 x float> %b) {
 ; CHECK-LABEL: test_mask_and_ps_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandps %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x54,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <16 x float> @llvm.x86.avx512.mask.and.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> zeroinitializer, i16 -1)
@@ -532,7 +532,7 @@ define <16 x float> @test_mask_and_ps_rr_512(<16 x float> %a, <16 x float> %b) {
 define <16 x float> @test_mask_and_ps_rrk_512(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vandps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x54,0xd1]
 ; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
@@ -543,7 +543,7 @@ define <16 x float> @test_mask_and_ps_rrk_512(<16 x float> %a, <16 x float> %b,
 define <16 x float> @test_mask_and_ps_rrkz_512(<16 x float> %a, <16 x float> %b, i16 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vandps %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x54,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -553,7 +553,7 @@ define <16 x float> @test_mask_and_ps_rrkz_512(<16 x float> %a, <16 x float> %b,
 define <16 x float> @test_mask_and_ps_rm_512(<16 x float> %a, <16 x float>* %ptr_b) {
 ; CHECK-LABEL: test_mask_and_ps_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandps (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x54,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <16 x float>, <16 x float>* %ptr_b
@@ -563,7 +563,7 @@ define <16 x float> @test_mask_and_ps_rm_512(<16 x float> %a, <16 x float>* %ptr
 define <16 x float> @test_mask_and_ps_rmk_512(<16 x float> %a, <16 x float>* %ptr_b, <16 x float> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandps (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x54,0x0f]
 ; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -575,7 +575,7 @@ define <16 x float> @test_mask_and_ps_rmk_512(<16 x float> %a, <16 x float>* %pt
 define <16 x float> @test_mask_and_ps_rmkz_512(<16 x float> %a, <16 x float>* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandps (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x54,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -586,7 +586,7 @@ define <16 x float> @test_mask_and_ps_rmkz_512(<16 x float> %a, <16 x float>* %p
 define <16 x float> @test_mask_and_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
 ; CHECK-LABEL: test_mask_and_ps_rmb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vandps (%rdi){1to16}, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x58,0x54,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load float, float* %ptr_b
@@ -598,7 +598,7 @@ define <16 x float> @test_mask_and_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
 define <16 x float> @test_mask_and_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <16 x float> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rmbk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandps (%rdi){1to16}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x59,0x54,0x0f]
 ; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -612,7 +612,7 @@ define <16 x float> @test_mask_and_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <
 define <16 x float> @test_mask_and_ps_rmbkz_512(<16 x float> %a, float* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_and_ps_rmbkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vandps (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xd9,0x54,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -627,7 +627,7 @@ declare <16 x float> @llvm.x86.avx512.mask.and.ps.512(<16 x float>, <16 x float>
 define <4 x float> @test_mask_or_ps_rr_128(<4 x float> %a, <4 x float> %b) {
 ; CHECK-LABEL: test_mask_or_ps_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vorps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x56,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.avx512.mask.or.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 -1)
@@ -636,7 +636,7 @@ define <4 x float> @test_mask_or_ps_rr_128(<4 x float> %a, <4 x float> %b) {
 define <4 x float> @test_mask_or_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vorps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x56,0xd1]
 ; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -647,7 +647,7 @@ define <4 x float> @test_mask_or_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x
 define <4 x float> @test_mask_or_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vorps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x56,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -657,7 +657,7 @@ define <4 x float> @test_mask_or_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8
 define <4 x float> @test_mask_or_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b) {
 ; CHECK-LABEL: test_mask_or_ps_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vorps (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x56,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <4 x float>, <4 x float>* %ptr_b
@@ -667,7 +667,7 @@ define <4 x float> @test_mask_or_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b)
 define <4 x float> @test_mask_or_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b, <4 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vorps (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x56,0x0f]
 ; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -679,7 +679,7 @@ define <4 x float> @test_mask_or_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b,
 define <4 x float> @test_mask_or_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vorps (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x56,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -690,7 +690,7 @@ define <4 x float> @test_mask_or_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b
 define <4 x float> @test_mask_or_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
 ; CHECK-LABEL: test_mask_or_ps_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vorps (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x56,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load float, float* %ptr_b
@@ -702,7 +702,7 @@ define <4 x float> @test_mask_or_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
 define <4 x float> @test_mask_or_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vorps (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x56,0x0f]
 ; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -716,7 +716,7 @@ define <4 x float> @test_mask_or_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x
 define <4 x float> @test_mask_or_ps_rmbkz_128(<4 x float> %a, float* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vorps (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x56,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -731,7 +731,7 @@ declare <4 x float> @llvm.x86.avx512.mask.or.ps.128(<4 x float>, <4 x float>, <4
 define <8 x float> @test_mask_or_ps_rr_256(<8 x float> %a, <8 x float> %b) {
 ; CHECK-LABEL: test_mask_or_ps_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vorps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x56,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx512.mask.or.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> zeroinitializer, i8 -1)
@@ -740,7 +740,7 @@ define <8 x float> @test_mask_or_ps_rr_256(<8 x float> %a, <8 x float> %b) {
 define <8 x float> @test_mask_or_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vorps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x56,0xd1]
 ; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -751,7 +751,7 @@ define <8 x float> @test_mask_or_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x
 define <8 x float> @test_mask_or_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vorps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x56,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -761,7 +761,7 @@ define <8 x float> @test_mask_or_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8
 define <8 x float> @test_mask_or_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b) {
 ; CHECK-LABEL: test_mask_or_ps_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vorps (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x56,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <8 x float>, <8 x float>* %ptr_b
@@ -771,7 +771,7 @@ define <8 x float> @test_mask_or_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b)
 define <8 x float> @test_mask_or_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b, <8 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vorps (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x56,0x0f]
 ; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -783,7 +783,7 @@ define <8 x float> @test_mask_or_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b,
 define <8 x float> @test_mask_or_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vorps (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x56,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -794,7 +794,7 @@ define <8 x float> @test_mask_or_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b
 define <8 x float> @test_mask_or_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
 ; CHECK-LABEL: test_mask_or_ps_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vorps (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x56,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load float, float* %ptr_b
@@ -806,7 +806,7 @@ define <8 x float> @test_mask_or_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
 define <8 x float> @test_mask_or_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vorps (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x56,0x0f]
 ; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -820,7 +820,7 @@ define <8 x float> @test_mask_or_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x
 define <8 x float> @test_mask_or_ps_rmbkz_256(<8 x float> %a, float* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vorps (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x56,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -835,7 +835,7 @@ declare <8 x float> @llvm.x86.avx512.mask.or.ps.256(<8 x float>, <8 x float>, <8
 define <16 x float> @test_mask_or_ps_rr_512(<16 x float> %a, <16 x float> %b) {
 ; CHECK-LABEL: test_mask_or_ps_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vorps %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x56,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <16 x float> @llvm.x86.avx512.mask.or.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> zeroinitializer, i16 -1)
@@ -844,7 +844,7 @@ define <16 x float> @test_mask_or_ps_rr_512(<16 x float> %a, <16 x float> %b) {
 define <16 x float> @test_mask_or_ps_rrk_512(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vorps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x56,0xd1]
 ; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
@@ -855,7 +855,7 @@ define <16 x float> @test_mask_or_ps_rrk_512(<16 x float> %a, <16 x float> %b, <
 define <16 x float> @test_mask_or_ps_rrkz_512(<16 x float> %a, <16 x float> %b, i16 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vorps %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x56,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -865,7 +865,7 @@ define <16 x float> @test_mask_or_ps_rrkz_512(<16 x float> %a, <16 x float> %b,
 define <16 x float> @test_mask_or_ps_rm_512(<16 x float> %a, <16 x float>* %ptr_b) {
 ; CHECK-LABEL: test_mask_or_ps_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vorps (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x56,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <16 x float>, <16 x float>* %ptr_b
@@ -875,7 +875,7 @@ define <16 x float> @test_mask_or_ps_rm_512(<16 x float> %a, <16 x float>* %ptr_
 define <16 x float> @test_mask_or_ps_rmk_512(<16 x float> %a, <16 x float>* %ptr_b, <16 x float> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vorps (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x56,0x0f]
 ; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -887,7 +887,7 @@ define <16 x float> @test_mask_or_ps_rmk_512(<16 x float> %a, <16 x float>* %ptr
 define <16 x float> @test_mask_or_ps_rmkz_512(<16 x float> %a, <16 x float>* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vorps (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x56,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -898,7 +898,7 @@ define <16 x float> @test_mask_or_ps_rmkz_512(<16 x float> %a, <16 x float>* %pt
 define <16 x float> @test_mask_or_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
 ; CHECK-LABEL: test_mask_or_ps_rmb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vorps (%rdi){1to16}, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x58,0x56,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load float, float* %ptr_b
@@ -910,7 +910,7 @@ define <16 x float> @test_mask_or_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
 define <16 x float> @test_mask_or_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <16 x float> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rmbk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vorps (%rdi){1to16}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x59,0x56,0x0f]
 ; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -924,7 +924,7 @@ define <16 x float> @test_mask_or_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <1
 define <16 x float> @test_mask_or_ps_rmbkz_512(<16 x float> %a, float* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_or_ps_rmbkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vorps (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xd9,0x56,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -939,7 +939,7 @@ declare <16 x float> @llvm.x86.avx512.mask.or.ps.512(<16 x float>, <16 x float>,
 define <4 x float> @test_mask_xor_ps_rr_128(<4 x float> %a, <4 x float> %b) {
 ; CHECK-LABEL: test_mask_xor_ps_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vxorps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.avx512.mask.xor.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 -1)
@@ -948,7 +948,7 @@ define <4 x float> @test_mask_xor_ps_rr_128(<4 x float> %a, <4 x float> %b) {
 define <4 x float> @test_mask_xor_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vxorps %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x57,0xd1]
 ; CHECK-NEXT: vmovaps %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
@@ -959,7 +959,7 @@ define <4 x float> @test_mask_xor_ps_rrk_128(<4 x float> %a, <4 x float> %b, <4
 define <4 x float> @test_mask_xor_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rrkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vxorps %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x57,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -969,7 +969,7 @@ define <4 x float> @test_mask_xor_ps_rrkz_128(<4 x float> %a, <4 x float> %b, i8
 define <4 x float> @test_mask_xor_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b) {
 ; CHECK-LABEL: test_mask_xor_ps_rm_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vxorps (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x57,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <4 x float>, <4 x float>* %ptr_b
@@ -979,7 +979,7 @@ define <4 x float> @test_mask_xor_ps_rm_128(<4 x float> %a, <4 x float>* %ptr_b)
 define <4 x float> @test_mask_xor_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b, <4 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rmk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vxorps (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x57,0x0f]
 ; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -991,7 +991,7 @@ define <4 x float> @test_mask_xor_ps_rmk_128(<4 x float> %a, <4 x float>* %ptr_b
 define <4 x float> @test_mask_xor_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rmkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vxorps (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x57,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1002,7 +1002,7 @@ define <4 x float> @test_mask_xor_ps_rmkz_128(<4 x float> %a, <4 x float>* %ptr_
 define <4 x float> @test_mask_xor_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
 ; CHECK-LABEL: test_mask_xor_ps_rmb_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vxorps (%rdi){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x57,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load float, float* %ptr_b
@@ -1014,7 +1014,7 @@ define <4 x float> @test_mask_xor_ps_rmb_128(<4 x float> %a, float* %ptr_b) {
 define <4 x float> @test_mask_xor_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rmbk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vxorps (%rdi){1to4}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x19,0x57,0x0f]
 ; CHECK-NEXT: vmovaps %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
@@ -1028,7 +1028,7 @@ define <4 x float> @test_mask_xor_ps_rmbk_128(<4 x float> %a, float* %ptr_b, <4
 define <4 x float> @test_mask_xor_ps_rmbkz_128(<4 x float> %a, float* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rmbkz_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vxorps (%rdi){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x99,0x57,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1043,7 +1043,7 @@ declare <4 x float> @llvm.x86.avx512.mask.xor.ps.128(<4 x float>, <4 x float>, <
 define <8 x float> @test_mask_xor_ps_rr_256(<8 x float> %a, <8 x float> %b) {
 ; CHECK-LABEL: test_mask_xor_ps_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x57,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx512.mask.xor.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> zeroinitializer, i8 -1)
@@ -1052,7 +1052,7 @@ define <8 x float> @test_mask_xor_ps_rr_256(<8 x float> %a, <8 x float> %b) {
 define <8 x float> @test_mask_xor_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x57,0xd1]
 ; CHECK-NEXT: vmovaps %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
@@ -1063,7 +1063,7 @@ define <8 x float> @test_mask_xor_ps_rrk_256(<8 x float> %a, <8 x float> %b, <8
 define <8 x float> @test_mask_xor_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vxorps %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x57,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1073,7 +1073,7 @@ define <8 x float> @test_mask_xor_ps_rrkz_256(<8 x float> %a, <8 x float> %b, i8
 define <8 x float> @test_mask_xor_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b) {
 ; CHECK-LABEL: test_mask_xor_ps_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vxorps (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x57,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <8 x float>, <8 x float>* %ptr_b
@@ -1083,7 +1083,7 @@ define <8 x float> @test_mask_xor_ps_rm_256(<8 x float> %a, <8 x float>* %ptr_b)
 define <8 x float> @test_mask_xor_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b, <8 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vxorps (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x57,0x0f]
 ; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -1095,7 +1095,7 @@ define <8 x float> @test_mask_xor_ps_rmk_256(<8 x float> %a, <8 x float>* %ptr_b
 define <8 x float> @test_mask_xor_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vxorps (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x57,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1106,7 +1106,7 @@ define <8 x float> @test_mask_xor_ps_rmkz_256(<8 x float> %a, <8 x float>* %ptr_
 define <8 x float> @test_mask_xor_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
 ; CHECK-LABEL: test_mask_xor_ps_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vxorps (%rdi){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x57,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load float, float* %ptr_b
@@ -1118,7 +1118,7 @@ define <8 x float> @test_mask_xor_ps_rmb_256(<8 x float> %a, float* %ptr_b) {
 define <8 x float> @test_mask_xor_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8 x float> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vxorps (%rdi){1to8}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x39,0x57,0x0f]
 ; CHECK-NEXT: vmovaps %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc1]
@@ -1132,7 +1132,7 @@ define <8 x float> @test_mask_xor_ps_rmbk_256(<8 x float> %a, float* %ptr_b, <8
 define <8 x float> @test_mask_xor_ps_rmbkz_256(<8 x float> %a, float* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vxorps (%rdi){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xb9,0x57,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1147,7 +1147,7 @@ declare <8 x float> @llvm.x86.avx512.mask.xor.ps.256(<8 x float>, <8 x float>, <
 define <16 x float> @test_mask_xor_ps_rr_512(<16 x float> %a, <16 x float> %b) {
 ; CHECK-LABEL: test_mask_xor_ps_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vxorps %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x57,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <16 x float> @llvm.x86.avx512.mask.xor.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> zeroinitializer, i16 -1)
@@ -1156,7 +1156,7 @@ define <16 x float> @test_mask_xor_ps_rr_512(<16 x float> %a, <16 x float> %b) {
 define <16 x float> @test_mask_xor_ps_rrk_512(<16 x float> %a, <16 x float> %b, <16 x float> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vxorps %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x57,0xd1]
 ; CHECK-NEXT: vmovaps %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
@@ -1167,7 +1167,7 @@ define <16 x float> @test_mask_xor_ps_rrk_512(<16 x float> %a, <16 x float> %b,
 define <16 x float> @test_mask_xor_ps_rrkz_512(<16 x float> %a, <16 x float> %b, i16 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vxorps %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x57,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1177,7 +1177,7 @@ define <16 x float> @test_mask_xor_ps_rrkz_512(<16 x float> %a, <16 x float> %b,
 define <16 x float> @test_mask_xor_ps_rm_512(<16 x float> %a, <16 x float>* %ptr_b) {
 ; CHECK-LABEL: test_mask_xor_ps_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vxorps (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x57,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <16 x float>, <16 x float>* %ptr_b
@@ -1187,7 +1187,7 @@ define <16 x float> @test_mask_xor_ps_rm_512(<16 x float> %a, <16 x float>* %ptr
 define <16 x float> @test_mask_xor_ps_rmk_512(<16 x float> %a, <16 x float>* %ptr_b, <16 x float> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vxorps (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x57,0x0f]
 ; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -1199,7 +1199,7 @@ define <16 x float> @test_mask_xor_ps_rmk_512(<16 x float> %a, <16 x float>* %pt
 define <16 x float> @test_mask_xor_ps_rmkz_512(<16 x float> %a, <16 x float>* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vxorps (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xc9,0x57,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1210,7 +1210,7 @@ define <16 x float> @test_mask_xor_ps_rmkz_512(<16 x float> %a, <16 x float>* %p
 define <16 x float> @test_mask_xor_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
 ; CHECK-LABEL: test_mask_xor_ps_rmb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vxorps (%rdi){1to16}, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x58,0x57,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load float, float* %ptr_b
@@ -1222,7 +1222,7 @@ define <16 x float> @test_mask_xor_ps_rmb_512(<16 x float> %a, float* %ptr_b) {
 define <16 x float> @test_mask_xor_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <16 x float> %passThru, i16 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rmbk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vxorps (%rdi){1to16}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x7c,0x59,0x57,0x0f]
 ; CHECK-NEXT: vmovaps %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc1]
@@ -1236,7 +1236,7 @@ define <16 x float> @test_mask_xor_ps_rmbk_512(<16 x float> %a, float* %ptr_b, <
 define <16 x float> @test_mask_xor_ps_rmbkz_512(<16 x float> %a, float* %ptr_b, i16 %mask) {
 ; CHECK-LABEL: test_mask_xor_ps_rmbkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vxorps (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xd9,0x57,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1251,7 +1251,7 @@ declare <16 x float> @llvm.x86.avx512.mask.xor.ps.512(<16 x float>, <16 x float>
 define <8 x i64> @test_mask_mullo_epi64_rr_512(<8 x i64> %a, <8 x i64> %b) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rr_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmullq %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf2,0xfd,0x48,0x40,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <8 x i64> @llvm.x86.avx512.mask.pmull.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
@@ -1260,7 +1260,7 @@ define <8 x i64> @test_mask_mullo_epi64_rr_512(<8 x i64> %a, <8 x i64> %b) {
 define <8 x i64> @test_mask_mullo_epi64_rrk_512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rrk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vpmullq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x40,0xd1]
 ; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
@@ -1271,7 +1271,7 @@ define <8 x i64> @test_mask_mullo_epi64_rrk_512(<8 x i64> %a, <8 x i64> %b, <8 x
 define <8 x i64> @test_mask_mullo_epi64_rrkz_512(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rrkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vpmullq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x40,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1281,7 +1281,7 @@ define <8 x i64> @test_mask_mullo_epi64_rrkz_512(<8 x i64> %a, <8 x i64> %b, i8
 define <8 x i64> @test_mask_mullo_epi64_rm_512(<8 x i64> %a, <8 x i64>* %ptr_b) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rm_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmullq (%rdi), %zmm0, %zmm0 ## encoding: [0x62,0xf2,0xfd,0x48,0x40,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <8 x i64>, <8 x i64>* %ptr_b
@@ -1291,7 +1291,7 @@ define <8 x i64> @test_mask_mullo_epi64_rm_512(<8 x i64> %a, <8 x i64>* %ptr_b)
 define <8 x i64> @test_mask_mullo_epi64_rmk_512(<8 x i64> %a, <8 x i64>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rmk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vpmullq (%rdi), %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x40,0x0f]
 ; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
@@ -1303,7 +1303,7 @@ define <8 x i64> @test_mask_mullo_epi64_rmk_512(<8 x i64> %a, <8 x i64>* %ptr_b,
 define <8 x i64> @test_mask_mullo_epi64_rmkz_512(<8 x i64> %a, <8 x i64>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rmkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vpmullq (%rdi), %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x40,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1314,7 +1314,7 @@ define <8 x i64> @test_mask_mullo_epi64_rmkz_512(<8 x i64> %a, <8 x i64>* %ptr_b
 define <8 x i64> @test_mask_mullo_epi64_rmb_512(<8 x i64> %a, i64* %ptr_b) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rmb_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmullq (%rdi){1to8}, %zmm0, %zmm0 ## encoding: [0x62,0xf2,0xfd,0x58,0x40,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load i64, i64* %ptr_b
@@ -1326,7 +1326,7 @@ define <8 x i64> @test_mask_mullo_epi64_rmb_512(<8 x i64> %a, i64* %ptr_b) {
 define <8 x i64> @test_mask_mullo_epi64_rmbk_512(<8 x i64> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rmbk_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vpmullq (%rdi){1to8}, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x59,0x40,0x0f]
 ; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc1]
@@ -1340,7 +1340,7 @@ define <8 x i64> @test_mask_mullo_epi64_rmbk_512(<8 x i64> %a, i64* %ptr_b, <8 x
 define <8 x i64> @test_mask_mullo_epi64_rmbkz_512(<8 x i64> %a, i64* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rmbkz_512:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vpmullq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xd9,0x40,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1354,7 +1354,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.pmull.q.512(<8 x i64>, <8 x i64>, <8 x i
 define <4 x i64> @test_mask_mullo_epi64_rr_256(<4 x i64> %a, <4 x i64> %b) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rr_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmullq %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x28,0x40,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <4 x i64> @llvm.x86.avx512.mask.pmull.q.256(<4 x i64> %a, <4 x i64> %b, <4 x i64> zeroinitializer, i8 -1)
@@ -1363,7 +1363,7 @@ define <4 x i64> @test_mask_mullo_epi64_rr_256(<4 x i64> %a, <4 x i64> %b) {
 define <4 x i64> @test_mask_mullo_epi64_rrk_256(<4 x i64> %a, <4 x i64> %b, <4 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rrk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vpmullq %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x40,0xd1]
 ; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
@@ -1374,7 +1374,7 @@ define <4 x i64> @test_mask_mullo_epi64_rrk_256(<4 x i64> %a, <4 x i64> %b, <4 x
 define <4 x i64> @test_mask_mullo_epi64_rrkz_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rrkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vpmullq %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x40,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1384,7 +1384,7 @@ define <4 x i64> @test_mask_mullo_epi64_rrkz_256(<4 x i64> %a, <4 x i64> %b, i8
 define <4 x i64> @test_mask_mullo_epi64_rm_256(<4 x i64> %a, <4 x i64>* %ptr_b) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rm_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmullq (%rdi), %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x28,0x40,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %b = load <4 x i64>, <4 x i64>* %ptr_b
@@ -1394,7 +1394,7 @@ define <4 x i64> @test_mask_mullo_epi64_rm_256(<4 x i64> %a, <4 x i64>* %ptr_b)
 define <4 x i64> @test_mask_mullo_epi64_rmk_256(<4 x i64> %a, <4 x i64>* %ptr_b, <4 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rmk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vpmullq (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x40,0x0f]
 ; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1406,7 +1406,7 @@ define <4 x i64> @test_mask_mullo_epi64_rmk_256(<4 x i64> %a, <4 x i64>* %ptr_b,
 define <4 x i64> @test_mask_mullo_epi64_rmkz_256(<4 x i64> %a, <4 x i64>* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rmkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vpmullq (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x40,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1417,7 +1417,7 @@ define <4 x i64> @test_mask_mullo_epi64_rmkz_256(<4 x i64> %a, <4 x i64>* %ptr_b
 define <4 x i64> @test_mask_mullo_epi64_rmb_256(<4 x i64> %a, i64* %ptr_b) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rmb_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmullq (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf2,0xfd,0x38,0x40,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %q = load i64, i64* %ptr_b
@@ -1429,7 +1429,7 @@ define <4 x i64> @test_mask_mullo_epi64_rmb_256(<4 x i64> %a, i64* %ptr_b) {
 define <4 x i64> @test_mask_mullo_epi64_rmbk_256(<4 x i64> %a, i64* %ptr_b, <4 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rmbk_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vpmullq (%rdi){1to4}, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x39,0x40,0x0f]
 ; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
@@ -1443,7 +1443,7 @@ define <4 x i64> @test_mask_mullo_epi64_rmbk_256(<4 x i64> %a, i64* %ptr_b, <4 x
 define <4 x i64> @test_mask_mullo_epi64_rmbkz_256(<4 x i64> %a, i64* %ptr_b, i8 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rmbkz_256:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
 ; CHECK-NEXT: vpmullq (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xb9,0x40,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1458,7 +1458,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.pmull.q.256(<4 x i64>, <4 x i64>, <4 x i
 define <2 x i64> @test_mask_mullo_epi64_rr_128(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rr_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpmullq %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x08,0x40,0xc1]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %res = call <2 x i64> @llvm.x86.avx512.mask.pmull.q.128(<2 x i64> %a, <2 x i64> %b, <2 x i64> zeroinitializer, i8 -1)
@@ -1467,7 +1467,7 @@ define <2 x i64> @test_mask_mullo_epi64_rr_128(<2 x i64> %a, <2 x i64> %b) {
 define <2 x i64> @test_mask_mullo_epi64_rrk_128(<2 x i64> %a, <2 x i64> %b, <2 x i64> %passThru, i8 %mask) {
 ; CHECK-LABEL: test_mask_mullo_epi64_rrk_128:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: vpmullq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x40,0xd1]
 ; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
@@ -1478,7 +1478,7 @@ define <2 x i64> @test_mask_mullo_epi64_rrk_128(<2 x i64> %a, <2 x i64> %b, <2 x
 define <2 x i64>
@test_mask_mullo_epi64_rrkz_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) { ; CHECK-LABEL: test_mask_mullo_epi64_rrkz_128: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; CHECK-NEXT: vpmullq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x40,0xc1] ; CHECK-NEXT: retq ## encoding: [0xc3] @@ -1488,7 +1488,7 @@ define <2 x i64> @test_mask_mullo_epi64_rrkz_128(<2 x i64> %a, <2 x i64> %b, i8 define <2 x i64> @test_mask_mullo_epi64_rm_128(<2 x i64> %a, <2 x i64>* %ptr_b) { ; CHECK-LABEL: test_mask_mullo_epi64_rm_128: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: vpmullq (%rdi), %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x08,0x40,0x07] ; CHECK-NEXT: retq ## encoding: [0xc3] %b = load <2 x i64>, <2 x i64>* %ptr_b @@ -1498,7 +1498,7 @@ define <2 x i64> @test_mask_mullo_epi64_rm_128(<2 x i64> %a, <2 x i64>* %ptr_b) define <2 x i64> @test_mask_mullo_epi64_rmk_128(<2 x i64> %a, <2 x i64>* %ptr_b, <2 x i64> %passThru, i8 %mask) { ; CHECK-LABEL: test_mask_mullo_epi64_rmk_128: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce] ; CHECK-NEXT: vpmullq (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x40,0x0f] ; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1] @@ -1510,7 +1510,7 @@ define <2 x i64> @test_mask_mullo_epi64_rmk_128(<2 x i64> %a, <2 x i64>* %ptr_b, define <2 x i64> @test_mask_mullo_epi64_rmkz_128(<2 x i64> %a, <2 x i64>* %ptr_b, i8 %mask) { ; CHECK-LABEL: test_mask_mullo_epi64_rmkz_128: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce] ; CHECK-NEXT: vpmullq (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x40,0x07] ; CHECK-NEXT: retq ## encoding: [0xc3] @@ -1521,7 +1521,7 @@ define <2 x i64> @test_mask_mullo_epi64_rmkz_128(<2 x i64> %a, <2 x i64>* %ptr_b define <2 x i64> @test_mask_mullo_epi64_rmb_128(<2 x i64> %a, i64* %ptr_b) { ; CHECK-LABEL: test_mask_mullo_epi64_rmb_128: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: vpmullq (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf2,0xfd,0x18,0x40,0x07] ; CHECK-NEXT: retq ## encoding: [0xc3] %q = load i64, i64* %ptr_b @@ -1533,7 +1533,7 @@ define <2 x i64> @test_mask_mullo_epi64_rmb_128(<2 x i64> %a, i64* %ptr_b) { define <2 x i64> @test_mask_mullo_epi64_rmbk_128(<2 x i64> %a, i64* %ptr_b, <2 x i64> %passThru, i8 %mask) { ; CHECK-LABEL: test_mask_mullo_epi64_rmbk_128: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce] ; CHECK-NEXT: vpmullq (%rdi){1to2}, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x19,0x40,0x0f] ; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1] @@ -1547,7 +1547,7 @@ define <2 x i64> @test_mask_mullo_epi64_rmbk_128(<2 x i64> %a, i64* %ptr_b, <2 x define <2 x i64> @test_mask_mullo_epi64_rmbkz_128(<2 x i64> %a, i64* %ptr_b, i8 %mask) { ; CHECK-LABEL: test_mask_mullo_epi64_rmbkz_128: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce] ; CHECK-NEXT: vpmullq (%rdi){1to2}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x99,0x40,0x07] ; CHECK-NEXT: retq ## encoding: [0xc3] @@ -1564,7 +1564,7 @@ declare <2 x double> @llvm.x86.avx512.mask.vextractf64x2.256(<4 x double>, i32, define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_256(<4 x double> %x0, <2 x double> %x2, i8 %x3) { ; CHECK-LABEL: 
test_int_x86_avx512_mask_vextractf64x2_256: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x19,0xc2,0x01] ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x19,0xc1,0x01] @@ -1584,7 +1584,7 @@ declare <4 x double> @llvm.x86.avx512.mask.insertf64x2.256(<4 x double>, <2 x do define <4 x double>@test_int_x86_avx512_mask_insertf64x2_256(<4 x double> %x0, <2 x double> %x1, <4 x double> %x3, i8 %x4) { ; CHECK-LABEL: test_int_x86_avx512_mask_insertf64x2_256: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd9,0x01] ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x18,0xd1,0x01] @@ -1604,7 +1604,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.inserti64x2.256(<4 x i64>, <2 x i64>, i3 define <4 x i64>@test_int_x86_avx512_mask_inserti64x2_256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x3, i8 %x4) { ; CHECK-LABEL: test_int_x86_avx512_mask_inserti64x2_256: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd9,0x01] ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; CHECK-NEXT: vinserti64x2 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x38,0xd1,0x01] @@ -1624,7 +1624,7 @@ declare <4 x i32> @llvm.x86.avx512.cvtmask2d.128(i8) define <4 x i32>@test_int_x86_avx512_cvtmask2d_128(i8 %x0) { ; CHECK-LABEL: test_int_x86_avx512_cvtmask2d_128: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7] ; CHECK-NEXT: vpmovm2d %k0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x08,0x38,0xc0] ; CHECK-NEXT: retq ## encoding: [0xc3] @@ -1636,7 +1636,7 @@ declare <8 x i32> @llvm.x86.avx512.cvtmask2d.256(i8) define <8 x i32>@test_int_x86_avx512_cvtmask2d_256(i8 %x0) { ; CHECK-LABEL: test_int_x86_avx512_cvtmask2d_256: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7] ; CHECK-NEXT: vpmovm2d %k0, %ymm0 ## encoding: [0x62,0xf2,0x7e,0x28,0x38,0xc0] ; CHECK-NEXT: retq ## encoding: [0xc3] @@ -1648,7 +1648,7 @@ declare <2 x i64> @llvm.x86.avx512.cvtmask2q.128(i8) define <2 x i64>@test_int_x86_avx512_cvtmask2q_128(i8 %x0) { ; CHECK-LABEL: test_int_x86_avx512_cvtmask2q_128: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7] ; CHECK-NEXT: vpmovm2q %k0, %xmm0 ## encoding: [0x62,0xf2,0xfe,0x08,0x38,0xc0] ; CHECK-NEXT: retq ## encoding: [0xc3] @@ -1660,7 +1660,7 @@ declare <4 x i64> @llvm.x86.avx512.cvtmask2q.256(i8) define <4 x i64>@test_int_x86_avx512_cvtmask2q_256(i8 %x0) { ; CHECK-LABEL: test_int_x86_avx512_cvtmask2q_256: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: kmovw %edi, %k0 ## encoding: [0xc5,0xf8,0x92,0xc7] ; CHECK-NEXT: vpmovm2q %k0, %ymm0 ## encoding: [0x62,0xf2,0xfe,0x28,0x38,0xc0] ; CHECK-NEXT: retq ## encoding: [0xc3] @@ -1672,7 +1672,7 @@ declare <4 x double> @llvm.x86.avx512.mask.broadcastf64x2.256(<2 x double>, <4 x define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256(<2 x double> %x0, <4 x double> %x2, i8 %mask) { ; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_256: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: ## kill: 
%xmm0<def> %xmm0<kill> %ymm0<def> ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01] ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] @@ -1692,7 +1692,7 @@ define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256(<2 x double> %x0 define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256_load(<2 x double>* %x0ptr, <4 x double> %x2, i8 %mask) { ; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_256_load: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce] ; CHECK-NEXT: vbroadcastf64x2 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x1a,0x07] ; CHECK-NEXT: ## ymm0 {%k1} = mem[0,1,0,1] @@ -1707,7 +1707,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.broadcasti64x2.256(<2 x i64>, <4 x i64>, define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256(<2 x i64> %x0, <4 x i64> %x2, i8 %mask) { ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_256: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def> ; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd0,0x01] ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] @@ -1727,7 +1727,7 @@ define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256(<2 x i64> %x0, <4 x define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256_load(<2 x i64>* %x0ptr, <4 x i64> %x2, i8 %mask) { ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_256_load: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce] ; CHECK-NEXT: vbroadcasti64x2 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x5a,0x07] ; CHECK-NEXT: ## ymm0 {%k1} = mem[0,1,0,1] @@ -1742,7 +1742,7 @@ declare <8 x float> @llvm.x86.avx512.mask.broadcastf32x2.256(<4 x float>, <8 x f define <8 x float>@test_int_x86_avx512_mask_broadcastf32x2_256(<4 x float> %x0, <8 x float> %x2, i8 %x3) { ; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_256: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def> ; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01] ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] @@ -1763,7 +1763,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.broadcasti32x2.256(<4 x i32>, <8 x i32>, define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x2_256(<4 x i32> %x0, <8 x i32> %x2, i8 %x3, i64 * %y_ptr) { ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_256: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def> ; CHECK-NEXT: vmovq (%rsi), %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0x16] ; CHECK-NEXT: ## xmm2 = mem[0],zero @@ -1789,7 +1789,7 @@ declare <4 x i32> @llvm.x86.avx512.mask.broadcasti32x2.128(<4 x i32>, <4 x i32>, define <4 x i32>@test_int_x86_avx512_mask_broadcasti32x2_128(<4 x i32> %x0, <4 x i32> %x2, i8 %x3) { ; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_128: -; CHECK: ## BB#0: +; CHECK: ## %bb.0: ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf] ; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6f,0xc8] ; CHECK-NEXT: vmovdqa32 %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x6f,0xd0] |