Diffstat (limited to 'llvm/test/CodeGen/X86/avx512vl-mov.ll')
-rw-r--r--   llvm/test/CodeGen/X86/avx512vl-mov.ll   128
1 file changed, 64 insertions, 64 deletions
diff --git a/llvm/test/CodeGen/X86/avx512vl-mov.ll b/llvm/test/CodeGen/X86/avx512vl-mov.ll
index 870f9e0748f..f0ce312305f 100644
--- a/llvm/test/CodeGen/X86/avx512vl-mov.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-mov.ll
@@ -3,7 +3,7 @@
 
 define <8 x i32> @test_256_1(i8 * %addr) {
 ; CHECK-LABEL: test_256_1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x10,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <8 x i32>*
@@ -13,7 +13,7 @@ define <8 x i32> @test_256_1(i8 * %addr) {
 
 define <8 x i32> @test_256_2(i8 * %addr) {
 ; CHECK-LABEL: test_256_2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <8 x i32>*
@@ -23,7 +23,7 @@ define <8 x i32> @test_256_2(i8 * %addr) {
 
 define void @test_256_3(i8 * %addr, <4 x i64> %data) {
 ; CHECK-LABEL: test_256_3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x29,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x i64>*
@@ -33,7 +33,7 @@ define void @test_256_3(i8 * %addr, <4 x i64> %data) {
 
 define void @test_256_4(i8 * %addr, <8 x i32> %data) {
 ; CHECK-LABEL: test_256_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <8 x i32>*
@@ -43,7 +43,7 @@ define void @test_256_4(i8 * %addr, <8 x i32> %data) {
 
 define void @test_256_5(i8 * %addr, <8 x i32> %data) {
 ; CHECK-LABEL: test_256_5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x29,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <8 x i32>*
@@ -53,7 +53,7 @@ define void @test_256_5(i8 * %addr, <8 x i32> %data) {
 
 define <4 x i64> @test_256_6(i8 * %addr) {
 ; CHECK-LABEL: test_256_6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x i64>*
@@ -63,7 +63,7 @@ define <4 x i64> @test_256_6(i8 * %addr) {
 
 define void @test_256_7(i8 * %addr, <4 x i64> %data) {
 ; CHECK-LABEL: test_256_7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x i64>*
@@ -73,7 +73,7 @@ define void @test_256_7(i8 * %addr, <4 x i64> %data) {
 
 define <4 x i64> @test_256_8(i8 * %addr) {
 ; CHECK-LABEL: test_256_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x10,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x i64>*
@@ -83,7 +83,7 @@ define <4 x i64> @test_256_8(i8 * %addr) {
 
 define void @test_256_9(i8 * %addr, <4 x double> %data) {
 ; CHECK-LABEL: test_256_9:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x29,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x double>*
@@ -93,7 +93,7 @@ define void @test_256_9(i8 * %addr, <4 x double> %data) {
 
 define <4 x double> @test_256_10(i8 * %addr) {
 ; CHECK-LABEL: test_256_10:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x double>*
@@ -103,7 +103,7 @@ define <4 x double> @test_256_10(i8 * %addr) {
 
 define void @test_256_11(i8 * %addr, <8 x float> %data) {
 ; CHECK-LABEL: test_256_11:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x29,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <8 x float>*
@@ -113,7 +113,7 @@ define void @test_256_11(i8 * %addr, <8 x float> %data) {
 
 define <8 x float> @test_256_12(i8 * %addr) {
 ; CHECK-LABEL: test_256_12:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <8 x float>*
@@ -123,7 +123,7 @@ define <8 x float> @test_256_12(i8 * %addr) {
 
 define void @test_256_13(i8 * %addr, <4 x double> %data) {
 ; CHECK-LABEL: test_256_13:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x double>*
@@ -133,7 +133,7 @@ define void @test_256_13(i8 * %addr, <4 x double> %data) {
 
 define <4 x double> @test_256_14(i8 * %addr) {
 ; CHECK-LABEL: test_256_14:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x10,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x double>*
@@ -143,7 +143,7 @@ define <4 x double> @test_256_14(i8 * %addr) {
 
 define void @test_256_15(i8 * %addr, <8 x float> %data) {
 ; CHECK-LABEL: test_256_15:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups %ymm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <8 x float>*
@@ -153,7 +153,7 @@ define void @test_256_15(i8 * %addr, <8 x float> %data) {
 
 define <8 x float> @test_256_16(i8 * %addr) {
 ; CHECK-LABEL: test_256_16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups (%rdi), %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x10,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <8 x float>*
@@ -163,7 +163,7 @@ define <8 x float> @test_256_16(i8 * %addr) {
 
 define <8 x i32> @test_256_17(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
 ; CHECK-LABEL: test_256_17:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovdqa32 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x6f,0x07]
@@ -177,7 +177,7 @@ define <8 x i32> @test_256_17(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
 
 define <8 x i32> @test_256_18(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
 ; CHECK-LABEL: test_256_18:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x6f,0x07]
@@ -191,7 +191,7 @@ define <8 x i32> @test_256_18(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
 
 define <8 x i32> @test_256_19(i8 * %addr, <8 x i32> %mask1) {
 ; CHECK-LABEL: test_256_19:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqd %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovdqa32 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x6f,0x07]
@@ -205,7 +205,7 @@ define <8 x i32> @test_256_19(i8 * %addr, <8 x i32> %mask1) {
 
 define <8 x i32> @test_256_20(i8 * %addr, <8 x i32> %mask1) {
 ; CHECK-LABEL: test_256_20:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqd %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x28,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0xa9,0x6f,0x07]
@@ -219,7 +219,7 @@ define <8 x i32> @test_256_20(i8 * %addr, <8 x i32> %mask1) {
 
 define <4 x i64> @test_256_21(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_21:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovdqa64 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x6f,0x07]
@@ -233,7 +233,7 @@ define <4 x i64> @test_256_21(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
 
 define <4 x i64> @test_256_22(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_22:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfe,0x29,0x6f,0x07]
@@ -247,7 +247,7 @@ define <4 x i64> @test_256_22(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
 
 define <4 x i64> @test_256_23(i8 * %addr, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_23:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovdqa64 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x6f,0x07]
@@ -261,7 +261,7 @@ define <4 x i64> @test_256_23(i8 * %addr, <4 x i64> %mask1) {
 
 define <4 x i64> @test_256_24(i8 * %addr, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_24:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfe,0xa9,0x6f,0x07]
@@ -275,7 +275,7 @@ define <4 x i64> @test_256_24(i8 * %addr, <4 x i64> %mask1) {
 
 define <8 x float> @test_256_25(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
 ; CHECK-LABEL: test_256_25:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vcmpneq_oqps %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf1,0x74,0x28,0xc2,0xca,0x0c]
 ; CHECK-NEXT: vmovaps (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x28,0x07]
@@ -289,7 +289,7 @@ define <8 x float> @test_256_25(i8 * %addr, <8 x float> %old, <8 x float> %mask1
 
 define <8 x float> @test_256_26(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
 ; CHECK-LABEL: test_256_26:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vcmpneq_oqps %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf1,0x74,0x28,0xc2,0xca,0x0c]
 ; CHECK-NEXT: vmovups (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x10,0x07]
@@ -303,7 +303,7 @@ define <8 x float> @test_256_26(i8 * %addr, <8 x float> %old, <8 x float> %mask1
 
 define <8 x float> @test_256_27(i8 * %addr, <8 x float> %mask1) {
 ; CHECK-LABEL: test_256_27:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vcmpneq_oqps %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf1,0x7c,0x28,0xc2,0xc9,0x0c]
 ; CHECK-NEXT: vmovaps (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x28,0x07]
@@ -317,7 +317,7 @@ define <8 x float> @test_256_27(i8 * %addr, <8 x float> %mask1) {
 
 define <8 x float> @test_256_28(i8 * %addr, <8 x float> %mask1) {
 ; CHECK-LABEL: test_256_28:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vcmpneq_oqps %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf1,0x7c,0x28,0xc2,0xc9,0x0c]
 ; CHECK-NEXT: vmovups (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0xa9,0x10,0x07]
@@ -331,7 +331,7 @@ define <8 x float> @test_256_28(i8 * %addr, <8 x float> %mask1) {
 
 define <4 x double> @test_256_29(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_29:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovapd (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x28,0x07]
@@ -345,7 +345,7 @@ define <4 x double> @test_256_29(i8 * %addr, <4 x double> %old, <4 x i64> %mask1
 
 define <4 x double> @test_256_30(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_30:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovupd (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x10,0x07]
@@ -359,7 +359,7 @@ define <4 x double> @test_256_30(i8 * %addr, <4 x double> %old, <4 x i64> %mask1
 
 define <4 x double> @test_256_31(i8 * %addr, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_31:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovapd (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x28,0x07]
@@ -373,7 +373,7 @@ define <4 x double> @test_256_31(i8 * %addr, <4 x i64> %mask1) {
 
 define <4 x double> @test_256_32(i8 * %addr, <4 x i64> %mask1) {
 ; CHECK-LABEL: test_256_32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovupd (%rdi), %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x10,0x07]
@@ -387,7 +387,7 @@ define <4 x double> @test_256_32(i8 * %addr, <4 x i64> %mask1) {
 
 define <4 x i32> @test_128_1(i8 * %addr) {
 ; CHECK-LABEL: test_128_1:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x i32>*
@@ -397,7 +397,7 @@ define <4 x i32> @test_128_1(i8 * %addr) {
 
 define <4 x i32> @test_128_2(i8 * %addr) {
 ; CHECK-LABEL: test_128_2:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x i32>*
@@ -407,7 +407,7 @@ define <4 x i32> @test_128_2(i8 * %addr) {
 
 define void @test_128_3(i8 * %addr, <2 x i64> %data) {
 ; CHECK-LABEL: test_128_3:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <2 x i64>*
@@ -417,7 +417,7 @@ define void @test_128_3(i8 * %addr, <2 x i64> %data) {
 
 define void @test_128_4(i8 * %addr, <4 x i32> %data) {
 ; CHECK-LABEL: test_128_4:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x i32>*
@@ -427,7 +427,7 @@ define void @test_128_4(i8 * %addr, <4 x i32> %data) {
 
 define void @test_128_5(i8 * %addr, <4 x i32> %data) {
 ; CHECK-LABEL: test_128_5:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x i32>*
@@ -437,7 +437,7 @@ define void @test_128_5(i8 * %addr, <4 x i32> %data) {
 
 define <2 x i64> @test_128_6(i8 * %addr) {
 ; CHECK-LABEL: test_128_6:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <2 x i64>*
@@ -447,7 +447,7 @@ define <2 x i64> @test_128_6(i8 * %addr) {
 
 define void @test_128_7(i8 * %addr, <2 x i64> %data) {
 ; CHECK-LABEL: test_128_7:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <2 x i64>*
@@ -457,7 +457,7 @@ define void @test_128_7(i8 * %addr, <2 x i64> %data) {
 
 define <2 x i64> @test_128_8(i8 * %addr) {
 ; CHECK-LABEL: test_128_8:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <2 x i64>*
@@ -467,7 +467,7 @@ define <2 x i64> @test_128_8(i8 * %addr) {
 
 define void @test_128_9(i8 * %addr, <2 x double> %data) {
 ; CHECK-LABEL: test_128_9:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <2 x double>*
@@ -477,7 +477,7 @@ define void @test_128_9(i8 * %addr, <2 x double> %data) {
 
 define <2 x double> @test_128_10(i8 * %addr) {
 ; CHECK-LABEL: test_128_10:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <2 x double>*
@@ -487,7 +487,7 @@ define <2 x double> @test_128_10(i8 * %addr) {
 
 define void @test_128_11(i8 * %addr, <4 x float> %data) {
 ; CHECK-LABEL: test_128_11:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x29,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x float>*
@@ -497,7 +497,7 @@ define void @test_128_11(i8 * %addr, <4 x float> %data) {
 
 define <4 x float> @test_128_12(i8 * %addr) {
 ; CHECK-LABEL: test_128_12:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovaps (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x float>*
@@ -507,7 +507,7 @@ define <4 x float> @test_128_12(i8 * %addr) {
 
 define void @test_128_13(i8 * %addr, <2 x double> %data) {
 ; CHECK-LABEL: test_128_13:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <2 x double>*
@@ -517,7 +517,7 @@ define void @test_128_13(i8 * %addr, <2 x double> %data) {
 
 define <2 x double> @test_128_14(i8 * %addr) {
 ; CHECK-LABEL: test_128_14:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <2 x double>*
@@ -527,7 +527,7 @@ define <2 x double> @test_128_14(i8 * %addr) {
 
 define void @test_128_15(i8 * %addr, <4 x float> %data) {
 ; CHECK-LABEL: test_128_15:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x float>*
@@ -537,7 +537,7 @@ define void @test_128_15(i8 * %addr, <4 x float> %data) {
 
 define <4 x float> @test_128_16(i8 * %addr) {
 ; CHECK-LABEL: test_128_16:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vmovups (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x10,0x07]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %vaddr = bitcast i8* %addr to <4 x float>*
@@ -547,7 +547,7 @@ define <4 x float> @test_128_16(i8 * %addr) {
 
 define <4 x i32> @test_128_17(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
 ; CHECK-LABEL: test_128_17:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovdqa32 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6f,0x07]
@@ -561,7 +561,7 @@ define <4 x i32> @test_128_17(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
 
 define <4 x i32> @test_128_18(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
 ; CHECK-LABEL: test_128_18:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x6f,0x07]
@@ -575,7 +575,7 @@ define <4 x i32> @test_128_18(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
 
 define <4 x i32> @test_128_19(i8 * %addr, <4 x i32> %mask1) {
 ; CHECK-LABEL: test_128_19:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovdqa32 (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x6f,0x07]
@@ -589,7 +589,7 @@ define <4 x i32> @test_128_19(i8 * %addr, <4 x i32> %mask1) {
 
 define <4 x i32> @test_128_20(i8 * %addr, <4 x i32> %mask1) {
 ; CHECK-LABEL: test_128_20:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x6f,0x07]
@@ -603,7 +603,7 @@ define <4 x i32> @test_128_20(i8 * %addr, <4 x i32> %mask1) {
 
 define <2 x i64> @test_128_21(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
 ; CHECK-LABEL: test_128_21:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovdqa64 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x6f,0x07]
@@ -617,7 +617,7 @@ define <2 x i64> @test_128_21(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
 
 define <2 x i64> @test_128_22(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
 ; CHECK-LABEL: test_128_22:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovdqu64 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfe,0x09,0x6f,0x07]
@@ -631,7 +631,7 @@ define <2 x i64> @test_128_22(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
 
 define <2 x i64> @test_128_23(i8 * %addr, <2 x i64> %mask1) {
 ; CHECK-LABEL: test_128_23:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovdqa64 (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x6f,0x07]
@@ -645,7 +645,7 @@ define <2 x i64> @test_128_23(i8 * %addr, <2 x i64> %mask1) {
 
 define <2 x i64> @test_128_24(i8 * %addr, <2 x i64> %mask1) {
 ; CHECK-LABEL: test_128_24:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovdqu64 (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfe,0x89,0x6f,0x07]
@@ -659,7 +659,7 @@ define <2 x i64> @test_128_24(i8 * %addr, <2 x i64> %mask1) {
 
 define <4 x float> @test_128_25(i8 * %addr, <4 x float> %old, <4 x i32> %mask1) {
 ; CHECK-LABEL: test_128_25:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovaps (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x28,0x07]
@@ -673,7 +673,7 @@ define <4 x float> @test_128_25(i8 * %addr, <4 x float> %old, <4 x i32> %mask1)
 
 define <4 x float> @test_128_26(i8 * %addr, <4 x float> %old, <4 x i32> %mask1) {
 ; CHECK-LABEL: test_128_26:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovups (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x10,0x07]
@@ -687,7 +687,7 @@ define <4 x float> @test_128_26(i8 * %addr, <4 x float> %old, <4 x i32> %mask1)
 
 define <4 x float> @test_128_27(i8 * %addr, <4 x i32> %mask1) {
 ; CHECK-LABEL: test_128_27:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovaps (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x28,0x07]
@@ -701,7 +701,7 @@ define <4 x float> @test_128_27(i8 * %addr, <4 x i32> %mask1) {
 
 define <4 x float> @test_128_28(i8 * %addr, <4 x i32> %mask1) {
 ; CHECK-LABEL: test_128_28:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovups (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7c,0x89,0x10,0x07]
@@ -715,7 +715,7 @@ define <4 x float> @test_128_28(i8 * %addr, <4 x i32> %mask1) {
 
 define <2 x double> @test_128_29(i8 * %addr, <2 x double> %old, <2 x i64> %mask1) {
 ; CHECK-LABEL: test_128_29:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovapd (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x28,0x07]
@@ -729,7 +729,7 @@ define <2 x double> @test_128_29(i8 * %addr, <2 x double> %old, <2 x i64> %mask1
 
 define <2 x double> @test_128_30(i8 * %addr, <2 x double> %old, <2 x i64> %mask1) {
 ; CHECK-LABEL: test_128_30:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
 ; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
 ; CHECK-NEXT: vmovupd (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x10,0x07]
@@ -743,7 +743,7 @@ define <2 x double> @test_128_30(i8 * %addr, <2 x double> %old, <2 x i64> %mask1
 
 define <2 x double> @test_128_31(i8 * %addr, <2 x i64> %mask1) {
 ; CHECK-LABEL: test_128_31:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovapd (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x28,0x07]
@@ -757,7 +757,7 @@ define <2 x double> @test_128_31(i8 * %addr, <2 x i64> %mask1) {
 
 define <2 x double> @test_128_32(i8 * %addr, <2 x i64> %mask1) {
 ; CHECK-LABEL: test_128_32:
-; CHECK: ## BB#0:
+; CHECK: ## %bb.0:
 ; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xef,0xc9]
 ; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xc9,0x04]
 ; CHECK-NEXT: vmovupd (%rdi), %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x10,0x07]