-rw-r--r-- | llvm/test/CodeGen/X86/avx2-schedule.ll | 3106
1 file changed, 3106 insertions, 0 deletions
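This commit adds scheduling-annotation coverage for the AVX2 integer instructions (mpsadbw, pack*, padds*/paddus*, palignr, pavg*, pblend*, pcmp*, phadd*/phsub*, pmadd*, pmax*/pmin*, pmovsx*, ...) across the GENERIC, HASWELL, SKYLAKE, and ZNVER1 scheduler models. Each "sched: [N:M.MM]" comment pairs the modeled latency in cycles with the reciprocal throughput, so "[8:0.50]" means 8-cycle latency with one such instruction issued every half cycle; "[100:?]" is the model's default entry for instructions it does not yet describe (the ZNVER1 mpsadbw and horizontal add/sub cases below). The excerpt omits the file header; as a sketch, assuming the usual utils/update_llc_test_checks.py setup for these autogenerated schedule tests (the actual RUN lines are not shown here), the four check prefixes would be driven like so:

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=haswell -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skylake -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=SKYLAKE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=znver1 -print-schedule | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1

Each added test follows the same shape: exercise the instruction register-to-register, then with a folded memory operand loaded through the pointer argument, so both encodings receive sched annotations.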
diff --git a/llvm/test/CodeGen/X86/avx2-schedule.ll b/llvm/test/CodeGen/X86/avx2-schedule.ll index 69a9f80aeb8..266a17b59cf 100644 --- a/llvm/test/CodeGen/X86/avx2-schedule.ll +++ b/llvm/test/CodeGen/X86/avx2-schedule.ll @@ -202,6 +202,38 @@ define <8 x i32> @test_inserti128(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) ret <8 x i32> %6 } +define <16 x i16> @test_mpsadbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; GENERIC-LABEL: test_mpsadbw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [11:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_mpsadbw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [7:2.00] +; HASWELL-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [7:2.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_mpsadbw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [7:2.00] +; SKYLAKE-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [7:2.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_mpsadbw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vmpsadbw $7, %ymm1, %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vmpsadbw $7, (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %a0, <32 x i8> %a1, i8 7) + %2 = bitcast <16 x i16> %1 to <32 x i8> + %3 = load <32 x i8>, <32 x i8> *%a2, align 32 + %4 = call <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8> %2, <32 x i8> %3, i8 7) + ret <16 x i16> %4 +} +declare <16 x i16> @llvm.x86.avx2.mpsadbw(<32 x i8>, <32 x i8>, i8) nounwind readnone + define <32 x i8> @test_pabsb(<32 x i8> %a0, <32 x i8> *%a1) { ; GENERIC-LABEL: test_pabsb: ; GENERIC: # BB#0: @@ -310,6 +342,134 @@ define <16 x i16> @test_pabsw(<16 x i16> %a0, <16 x i16> *%a1) { } declare <16 x i16> @llvm.x86.avx2.pabs.w(<16 x i16>) nounwind readnone +define <16 x i16> @test_packssdw(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_packssdw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_packssdw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_packssdw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_packssdw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpackssdw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpackssdw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1) + %2 = bitcast <16 x i16> %1 to <8 x i32> + %3 = load <8 x i32>, <8 x i32> *%a2, align 32 + %4 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %2, <8 x i32> %3) + ret <16 x i16> %4 +} +declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>) nounwind readnone + +define <32 x i8> @test_packsswb(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_packsswb: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpacksswb (%rdi), %ymm0, 
%ymm0 # sched: [5:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_packsswb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_packsswb: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_packsswb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpacksswb (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1) + %2 = bitcast <32 x i8> %1 to <16 x i16> + %3 = load <16 x i16>, <16 x i16> *%a2, align 32 + %4 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %2, <16 x i16> %3) + ret <32 x i8> %4 +} +declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>) nounwind readnone + +define <16 x i16> @test_packusdw(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_packusdw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_packusdw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_packusdw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_packusdw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpackusdw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpackusdw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1) + %2 = bitcast <16 x i16> %1 to <8 x i32> + %3 = load <8 x i32>, <8 x i32> *%a2, align 32 + %4 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %2, <8 x i32> %3) + ret <16 x i16> %4 +} +declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>) nounwind readnone + +define <32 x i8> @test_packuswb(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_packuswb: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_packuswb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_packuswb: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_packuswb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpackuswb (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %a0, <16 x i16> %a1) + %2 = bitcast <32 x i8> %1 to <16 x i16> + %3 = load <16 x 
i16>, <16 x i16> *%a2, align 32 + %4 = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> %2, <16 x i16> %3) + ret <32 x i8> %4 +} +declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>) nounwind readnone + define <32 x i8> @test_paddb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { ; GENERIC-LABEL: test_paddb: ; GENERIC: # BB#0: @@ -400,6 +560,130 @@ define <4 x i64> @test_paddq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { ret <4 x i64> %3 } +define <32 x i8> @test_paddsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; GENERIC-LABEL: test_paddsb: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_paddsb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_paddsb: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_paddsb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a0, <32 x i8> %a1) + %2 = load <32 x i8>, <32 x i8> *%a2, align 32 + %3 = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %1, <32 x i8> %2) + ret <32 x i8> %3 +} +declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone + +define <16 x i16> @test_paddsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_paddsw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_paddsw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_paddsw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_paddsw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readnone + +define <32 x i8> @test_paddusb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; GENERIC-LABEL: test_paddusb: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_paddusb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_paddusb: +; 
SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_paddusb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpaddusb %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpaddusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8> %a0, <32 x i8> %a1) + %2 = load <32 x i8>, <32 x i8> *%a2, align 32 + %3 = call <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8> %1, <32 x i8> %2) + ret <32 x i8> %3 +} +declare <32 x i8> @llvm.x86.avx2.paddus.b(<32 x i8>, <32 x i8>) nounwind readnone + +define <16 x i16> @test_paddusw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_paddusw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_paddusw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_paddusw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_paddusw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpaddusw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpaddusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.paddus.w(<16 x i16>, <16 x i16>) nounwind readnone + define <16 x i16> @test_paddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { ; GENERIC-LABEL: test_paddw: ; GENERIC: # BB#0: @@ -430,6 +714,36 @@ define <16 x i16> @test_paddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { ret <16 x i16> %3 } +define <32 x i8> @test_palignr(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; GENERIC-LABEL: test_palignr: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00] +; GENERIC-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [5:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_palignr: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00] +; HASWELL-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_palignr: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00] +; SKYLAKE-NEXT: vpalignr {{.*#+}} ymm0 = 
mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_palignr: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],ymm1[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [1:0.25] +; ZNVER1-NEXT: vpalignr {{.*#+}} ymm0 = mem[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],ymm0[0],mem[17,18,19,20,21,22,23,24,25,26,27,28,29,30,31],ymm0[16] sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = shufflevector <32 x i8> %a1, <32 x i8> %a0, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48> + %2 = load <32 x i8>, <32 x i8> *%a2, align 32 + %3 = shufflevector <32 x i8> %2, <32 x i8> %1, <32 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48> + ret <32 x i8> %3 +} + define <4 x i64> @test_pand(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { ; GENERIC-LABEL: test_pand: ; GENERIC: # BB#0: @@ -502,6 +816,217 @@ define <4 x i64> @test_pandn(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { ret <4 x i64> %6 } +define <32 x i8> @test_pavgb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; GENERIC-LABEL: test_pavgb: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pavgb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pavgb: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pavgb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpavgb %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpavgb (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = zext <32 x i8> %a0 to <32 x i16> + %2 = zext <32 x i8> %a1 to <32 x i16> + %3 = add <32 x i16> %1, %2 + %4 = add <32 x i16> %3, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + %5 = lshr <32 x i16> %4, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + %6 = trunc <32 x i16> %5 to <32 x i8> + %7 = load <32 x i8>, <32 x i8> *%a2, align 32 + %8 = zext <32 x i8> %6 to <32 x i16> + %9 = zext <32 x i8> %7 to <32 x i16> + %10 = add <32 x i16> %8, %9 + %11 = add <32 x i16> %10, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + %12 = lshr <32 x i16> %11, 
<i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1> + %13 = trunc <32 x i16> %12 to <32 x i8> + ret <32 x i8> %13 +} + +define <16 x i16> @test_pavgw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_pavgw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pavgw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pavgw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pavgw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpavgw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpavgw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = zext <16 x i16> %a0 to <16 x i32> + %2 = zext <16 x i16> %a1 to <16 x i32> + %3 = add <16 x i32> %1, %2 + %4 = add <16 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> + %5 = lshr <16 x i32> %4, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> + %6 = trunc <16 x i32> %5 to <16 x i16> + %7 = load <16 x i16>, <16 x i16> *%a2, align 32 + %8 = zext <16 x i16> %6 to <16 x i32> + %9 = zext <16 x i16> %7 to <16 x i32> + %10 = add <16 x i32> %8, %9 + %11 = add <16 x i32> %10, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> + %12 = lshr <16 x i32> %11, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1> + %13 = trunc <16 x i32> %12 to <16 x i16> + ret <16 x i16> %13 +} + +define <4 x i32> @test_pblendd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_pblendd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.50] +; GENERIC-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [5:0.50] +; GENERIC-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pblendd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.33] +; HASWELL-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [1:0.50] +; HASWELL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pblendd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.33] +; SKYLAKE-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [1:0.50] +; SKYLAKE-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pblendd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3] sched: [1:0.50] +; ZNVER1-NEXT: vpblendd {{.*#+}} xmm1 = mem[0],xmm1[1],mem[2],xmm1[3] sched: [8:1.00] +; ZNVER1-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = 
shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 3> + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 4, i32 1, i32 6, i32 3> + %4 = add <4 x i32> %a0, %3 + ret <4 x i32> %4 +} + +define <8 x i32> @test_pblendd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_pblendd_ymm: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.50] +; GENERIC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [5:0.50] +; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pblendd_ymm: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.33] +; HASWELL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [1:0.50] +; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pblendd_ymm: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.33] +; SKYLAKE-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [1:0.50] +; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pblendd_ymm: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm0[3,4,5,6],ymm1[7] sched: [1:0.50] +; ZNVER1-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2],ymm1[3,4,5,6,7] sched: [9:1.50] +; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 8, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 15> + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = shufflevector <8 x i32> %1, <8 x i32> %2, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 7> + %4 = add <8 x i32> %a0, %3 + ret <8 x i32> %4 +} + +define <32 x i8> @test_pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2, <32 x i8> *%a3, <32 x i8> %a4) { +; GENERIC-LABEL: test_pblendvb: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:1.00] +; GENERIC-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [6:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pblendvb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; HASWELL-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [2:2.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pblendvb: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [2:2.00] +; SKYLAKE-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [2:2.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pblendvb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; ZNVER1-NEXT: vpblendvb %ymm3, (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %a2) + %2 = load <32 x i8>, <32 x i8> *%a3, align 32 + %3 = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %1, <32 x i8> %2, <32 x i8> %a4) + ret <32 x i8> %3 +} +declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>) nounwind readnone + +define <16 x i16> @test_pblendw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> 
*%a2) { +; GENERIC-LABEL: test_pblendw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:0.50] +; GENERIC-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [5:0.50] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pblendw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:1.00] +; HASWELL-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [4:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pblendw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [1:1.00] +; SKYLAKE-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [4:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pblendw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4],ymm0[5,6,7,8,9],ymm1[10,11,12],ymm0[13,14,15] sched: [2:0.33] +; ZNVER1-NEXT: vpblendw {{.*#+}} ymm0 = mem[0],ymm0[1],mem[2],ymm0[3],mem[4],ymm0[5],mem[6],ymm0[7],mem[8],ymm0[9],mem[10],ymm0[11],mem[12],ymm0[13],mem[14],ymm0[15] sched: [9:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 0, i32 1, i32 18, i32 19, i32 20, i32 5, i32 6, i32 7, i32 8, i32 9, i32 26, i32 27, i32 28, i32 13, i32 14, i32 15> + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = shufflevector <16 x i16> %1, <16 x i16> %2, <16 x i32> <i32 16, i32 1, i32 18, i32 3, i32 20, i32 5, i32 22, i32 7, i32 24, i32 9, i32 26, i32 11, i32 28, i32 13, i32 30, i32 15> + ret <16 x i16> %3 +} + define <16 x i8> @test_pbroadcastb(<16 x i8> %a0, <16 x i8> *%a1) { ; GENERIC-LABEL: test_pbroadcastb: ; GENERIC: # BB#0: @@ -782,6 +1307,262 @@ define <16 x i16> @test_pbroadcastw_ymm(<16 x i16> %a0, <16 x i16> *%a1) { ret <16 x i16> %4 } +define <32 x i8> @test_pcmpeqb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; GENERIC-LABEL: test_pcmpeqb: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pcmpeqb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pcmpeqb: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pcmpeqb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpcmpeqb (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = icmp eq <32 x i8> %a0, %a1 + %2 = sext <32 x i1> %1 to <32 x i8> + %3 = load <32 x i8>, <32 x i8> *%a2, align 32 + %4 = icmp eq <32 x i8> %2, %3 + %5 = sext <32 x i1> %4 to <32 x i8> + ret <32 x i8> %5 +} + +define <8 x i32> @test_pcmpeqd(<8 x i32> %a0, <8 x i32> %a1, <8 
x i32> *%a2) { +; GENERIC-LABEL: test_pcmpeqd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pcmpeqd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pcmpeqd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pcmpeqd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpcmpeqd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = icmp eq <8 x i32> %a0, %a1 + %2 = sext <8 x i1> %1 to <8 x i32> + %3 = load <8 x i32>, <8 x i32> *%a2, align 32 + %4 = icmp eq <8 x i32> %2, %3 + %5 = sext <8 x i1> %4 to <8 x i32> + ret <8 x i32> %5 +} + +define <4 x i64> @test_pcmpeqq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; GENERIC-LABEL: test_pcmpeqq: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pcmpeqq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pcmpeqq: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pcmpeqq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpcmpeqq (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = icmp eq <4 x i64> %a0, %a1 + %2 = sext <4 x i1> %1 to <4 x i64> + %3 = load <4 x i64>, <4 x i64> *%a2, align 32 + %4 = icmp eq <4 x i64> %2, %3 + %5 = sext <4 x i1> %4 to <4 x i64> + ret <4 x i64> %5 +} + +define <16 x i16> @test_pcmpeqw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_pcmpeqw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pcmpeqw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pcmpeqw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pcmpeqw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpcmpeqw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = icmp eq <16 x i16> %a0, %a1 + %2 = sext <16 x i1> %1 to <16 x i16> + %3 = load <16 x i16>, <16 x i16> *%a2, align 32 + %4 = icmp eq <16 x i16> %2, %3 + %5 = sext <16 x i1> %4 to <16 x i16> + ret <16 x i16> %5 +} + +define <32 x i8> @test_pcmpgtb(<32 x i8> %a0, <32 x i8> %a1, <32 
x i8> *%a2) { +; GENERIC-LABEL: test_pcmpgtb: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pcmpgtb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pcmpgtb: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pcmpgtb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpcmpgtb (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = icmp sgt <32 x i8> %a0, %a1 + %2 = sext <32 x i1> %1 to <32 x i8> + %3 = load <32 x i8>, <32 x i8> *%a2, align 32 + %4 = icmp sgt <32 x i8> %2, %3 + %5 = sext <32 x i1> %4 to <32 x i8> + ret <32 x i8> %5 +} + +define <8 x i32> @test_pcmpgtd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_pcmpgtd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pcmpgtd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pcmpgtd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pcmpgtd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpcmpgtd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = icmp sgt <8 x i32> %a0, %a1 + %2 = sext <8 x i1> %1 to <8 x i32> + %3 = load <8 x i32>, <8 x i32> *%a2, align 32 + %4 = icmp sgt <8 x i32> %2, %3 + %5 = sext <8 x i1> %4 to <8 x i32> + ret <8 x i32> %5 +} + +define <4 x i64> @test_pcmpgtq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; GENERIC-LABEL: test_pcmpgtq: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pcmpgtq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pcmpgtq: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pcmpgtq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpcmpgtq (%rdi), %ymm0, %ymm0 # sched: [8:1.00] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = icmp sgt <4 x i64> %a0, %a1 + %2 = sext <4 x i1> %1 to <4 x i64> + %3 = load <4 x i64>, <4 x i64> *%a2, align 32 + %4 = icmp sgt <4 x i64> %2, %3 + %5 = sext <4 x i1> %4 to <4 x i64> + ret <4 x i64> %5 +} + +define <16 x i16> @test_pcmpgtw(<16 x i16> %a0, <16 x i16> %a1, <16 x 
i16> *%a2) { +; GENERIC-LABEL: test_pcmpgtw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pcmpgtw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pcmpgtw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pcmpgtw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpcmpgtw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = icmp sgt <16 x i16> %a0, %a1 + %2 = sext <16 x i1> %1 to <16 x i16> + %3 = load <16 x i16>, <16 x i16> *%a2, align 32 + %4 = icmp sgt <16 x i16> %2, %3 + %5 = sext <16 x i1> %4 to <16 x i16> + ret <16 x i16> %5 +} + define <4 x i64> @test_perm2i128(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { ; GENERIC-LABEL: test_perm2i128: ; GENERIC: # BB#0: @@ -959,6 +1740,1185 @@ define <4 x i64> @test_permq(<4 x i64> %a0, <4 x i64> *%a1) { ret <4 x i64> %4 } +define <8 x i32> @test_phaddd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_phaddd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; GENERIC-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_phaddd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_phaddd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_phaddd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vphaddd %ymm1, %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vphaddd (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1) + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %1, <8 x i32> %2) + ret <8 x i32> %3 +} +declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>) nounwind readnone + +define <16 x i16> @test_phaddsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_phaddsw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_phaddsw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_phaddsw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_phaddsw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vphaddsw %ymm1, %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vphaddsw (%rdi), %ymm0, %ymm0 # sched: [100:?] 
+; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.phadd.sw(<16 x i16>, <16 x i16>) nounwind readnone + +define <16 x i16> @test_phaddw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_phaddw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; GENERIC-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_phaddw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_phaddw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_phaddw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vphaddw %ymm1, %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vphaddw (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>) nounwind readnone + +define <8 x i32> @test_phsubd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_phsubd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; GENERIC-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_phsubd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_phsubd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_phsubd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vphsubd %ymm1, %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vphsubd (%rdi), %ymm0, %ymm0 # sched: [100:?] 
+; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1) + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %1, <8 x i32> %2) + ret <8 x i32> %3 +} +declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>) nounwind readnone + +define <16 x i16> @test_phsubsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_phsubsw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_phsubsw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_phsubsw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_phsubsw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vphsubsw %ymm1, %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vphsubsw (%rdi), %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.phsub.sw(<16 x i16>, <16 x i16>) nounwind readnone + +define <16 x i16> @test_phsubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_phsubw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; GENERIC-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [5:0.50] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_phsubw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_phsubw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_phsubw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vphsubw %ymm1, %ymm0, %ymm0 # sched: [100:?] +; ZNVER1-NEXT: vphsubw (%rdi), %ymm0, %ymm0 # sched: [100:?] 
+; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>) nounwind readnone + +define <16 x i16> @test_pmaddubsw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; GENERIC-LABEL: test_pmaddubsw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmaddubsw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmaddubsw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmaddubsw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmaddubsw %ymm1, %ymm0, %ymm0 # sched: [4:1.00] +; ZNVER1-NEXT: vpmaddubsw (%rdi), %ymm0, %ymm0 # sched: [11:1.00] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %a0, <32 x i8> %a1) + %2 = bitcast <16 x i16> %1 to <32 x i8> + %3 = load <32 x i8>, <32 x i8> *%a2, align 32 + %4 = call <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8> %2, <32 x i8> %3) + ret <16 x i16> %4 +} +declare <16 x i16> @llvm.x86.avx2.pmadd.ub.sw(<32 x i8>, <32 x i8>) nounwind readnone + +define <8 x i32> @test_pmaddwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_pmaddwd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmaddwd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmaddwd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmaddwd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0 # sched: [4:1.00] +; ZNVER1-NEXT: vpmaddwd (%rdi), %ymm0, %ymm0 # sched: [11:1.00] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a0, <16 x i16> %a1) + %2 = bitcast <8 x i32> %1 to <16 x i16> + %3 = load <16 x i16>, <16 x i16> *%a2, align 32 + %4 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %2, <16 x i16> %3) + ret <8 x i32> %4 +} +declare <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16>, <16 x i16>) nounwind readnone + +define <32 x i8> @test_pmaxsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; GENERIC-LABEL: test_pmaxsb: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmaxsb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] 
+; +; SKYLAKE-LABEL: test_pmaxsb: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmaxsb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpmaxsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %a0, <32 x i8> %a1) + %2 = load <32 x i8>, <32 x i8> *%a2, align 32 + %3 = call <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8> %1, <32 x i8> %2) + ret <32 x i8> %3 +} +declare <32 x i8> @llvm.x86.avx2.pmaxs.b(<32 x i8>, <32 x i8>) nounwind readnone + +define <8 x i32> @test_pmaxsd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_pmaxsd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmaxsd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmaxsd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmaxsd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmaxsd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpmaxsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %a0, <8 x i32> %a1) + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = call <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32> %1, <8 x i32> %2) + ret <8 x i32> %3 +} +declare <8 x i32> @llvm.x86.avx2.pmaxs.d(<8 x i32>, <8 x i32>) nounwind readnone + +define <16 x i16> @test_pmaxsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_pmaxsw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmaxsw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmaxsw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmaxsw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmaxsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpmaxsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.pmaxs.w(<16 x i16>, <16 x i16>) nounwind readnone + +define <32 x i8> @test_pmaxub(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; GENERIC-LABEL: test_pmaxub: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # 
sched: [1:1.00] +; +; HASWELL-LABEL: test_pmaxub: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmaxub: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmaxub: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmaxub %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpmaxub (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %a0, <32 x i8> %a1) + %2 = load <32 x i8>, <32 x i8> *%a2, align 32 + %3 = call <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8> %1, <32 x i8> %2) + ret <32 x i8> %3 +} +declare <32 x i8> @llvm.x86.avx2.pmaxu.b(<32 x i8>, <32 x i8>) nounwind readnone + +define <8 x i32> @test_pmaxud(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_pmaxud: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmaxud: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmaxud: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmaxud: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmaxud %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpmaxud (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %a0, <8 x i32> %a1) + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = call <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32> %1, <8 x i32> %2) + ret <8 x i32> %3 +} +declare <8 x i32> @llvm.x86.avx2.pmaxu.d(<8 x i32>, <8 x i32>) nounwind readnone + +define <16 x i16> @test_pmaxuw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_pmaxuw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmaxuw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmaxuw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmaxuw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmaxuw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpmaxuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.pmaxu.w(<16 x i16>, <16 x i16>) nounwind readnone + +define <32 x i8> @test_pminsb(<32 x i8> %a0, 
<32 x i8> %a1, <32 x i8> *%a2) { +; GENERIC-LABEL: test_pminsb: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pminsb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pminsb: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pminsb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpminsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpminsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %a0, <32 x i8> %a1) + %2 = load <32 x i8>, <32 x i8> *%a2, align 32 + %3 = call <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8> %1, <32 x i8> %2) + ret <32 x i8> %3 +} +declare <32 x i8> @llvm.x86.avx2.pmins.b(<32 x i8>, <32 x i8>) nounwind readnone + +define <8 x i32> @test_pminsd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_pminsd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pminsd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pminsd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pminsd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpminsd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpminsd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %a0, <8 x i32> %a1) + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = call <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32> %1, <8 x i32> %2) + ret <8 x i32> %3 +} +declare <8 x i32> @llvm.x86.avx2.pmins.d(<8 x i32>, <8 x i32>) nounwind readnone + +define <16 x i16> @test_pminsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_pminsw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pminsw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pminsw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pminsw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpminsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpminsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 
+ %3 = call <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.pmins.w(<16 x i16>, <16 x i16>) nounwind readnone + +define <32 x i8> @test_pminub(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; GENERIC-LABEL: test_pminub: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pminub: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pminub: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pminub: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpminub %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpminub (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %a0, <32 x i8> %a1) + %2 = load <32 x i8>, <32 x i8> *%a2, align 32 + %3 = call <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8> %1, <32 x i8> %2) + ret <32 x i8> %3 +} +declare <32 x i8> @llvm.x86.avx2.pminu.b(<32 x i8>, <32 x i8>) nounwind readnone + +define <8 x i32> @test_pminud(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_pminud: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pminud: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pminud: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pminud: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpminud %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpminud (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %a0, <8 x i32> %a1) + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = call <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32> %1, <8 x i32> %2) + ret <8 x i32> %3 +} +declare <8 x i32> @llvm.x86.avx2.pminu.d(<8 x i32>, <8 x i32>) nounwind readnone + +define <16 x i16> @test_pminuw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_pminuw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pminuw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pminuw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pminuw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpminuw %ymm1, %ymm0, %ymm0 # sched: 
[1:0.25] +; ZNVER1-NEXT: vpminuw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.pminu.w(<16 x i16>, <16 x i16>) nounwind readnone + +define <8 x i32> @test_pmovsxbd(<16 x i8> %a0, <16 x i8> *%a1) { +; GENERIC-LABEL: test_pmovsxbd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [5:1.00] +; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmovsxbd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [3:1.00] +; HASWELL-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [3:1.00] +; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmovsxbd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [3:1.00] +; SKYLAKE-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [3:1.00] +; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmovsxbd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmovsxbd (%rdi), %ymm1 # sched: [8:0.50] +; ZNVER1-NEXT: vpmovsxbd %xmm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %2 = sext <8 x i8> %1 to <8 x i32> + %3 = load <16 x i8>, <16 x i8> *%a1, align 16 + %4 = shufflevector <16 x i8> %3, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %5 = sext <8 x i8> %4 to <8 x i32> + %6 = add <8 x i32> %2, %5 + ret <8 x i32> %6 +} + +define <4 x i64> @test_pmovsxbq(<16 x i8> %a0, <16 x i8> *%a1) { +; GENERIC-LABEL: test_pmovsxbq: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [5:1.00] +; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmovsxbq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [3:1.00] +; HASWELL-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [3:1.00] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmovsxbq: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [3:1.00] +; SKYLAKE-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [3:1.00] +; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmovsxbq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmovsxbq (%rdi), %ymm1 # sched: [8:0.50] +; ZNVER1-NEXT: vpmovsxbq %xmm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %2 = sext <4 x i8> %1 to <4 x i64> + %3 = load <16 x i8>, <16 x i8> *%a1, align 16 + %4 = shufflevector <16 x i8> %3, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %5 = sext <4 x i8> %4 to <4 x i64> + %6 = add <4 x i64> %2, %5 + ret <4 x i64> %6 +} + +define <16 x i16> @test_pmovsxbw(<16 x i8> 
%a0, <16 x i8> *%a1) { +; GENERIC-LABEL: test_pmovsxbw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [5:1.00] +; GENERIC-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmovsxbw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00] +; HASWELL-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [3:1.00] +; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmovsxbw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [3:1.00] +; SKYLAKE-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [3:1.00] +; SKYLAKE-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmovsxbw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmovsxbw (%rdi), %ymm1 # sched: [8:0.50] +; ZNVER1-NEXT: vpmovsxbw %xmm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = sext <16 x i8> %a0 to <16 x i16> + %2 = load <16 x i8>, <16 x i8> *%a1, align 16 + %3 = sext <16 x i8> %2 to <16 x i16> + %4 = add <16 x i16> %1, %3 + ret <16 x i16> %4 +} + +define <4 x i64> @test_pmovsxdq(<4 x i32> %a0, <4 x i32> *%a1) { +; GENERIC-LABEL: test_pmovsxdq: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [5:1.00] +; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmovsxdq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00] +; HASWELL-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [3:1.00] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmovsxdq: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [3:1.00] +; SKYLAKE-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [3:1.00] +; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmovsxdq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmovsxdq (%rdi), %ymm1 # sched: [8:0.50] +; ZNVER1-NEXT: vpmovsxdq %xmm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = sext <4 x i32> %a0 to <4 x i64> + %2 = load <4 x i32>, <4 x i32> *%a1, align 16 + %3 = sext <4 x i32> %2 to <4 x i64> + %4 = add <4 x i64> %1, %3 + ret <4 x i64> %4 +} + +define <8 x i32> @test_pmovsxwd(<8 x i16> %a0, <8 x i16> *%a1) { +; GENERIC-LABEL: test_pmovsxwd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [5:1.00] +; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmovsxwd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00] +; HASWELL-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [3:1.00] +; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmovsxwd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [3:1.00] +; SKYLAKE-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [3:1.00] +; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmovsxwd: +; 
ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmovsxwd (%rdi), %ymm1 # sched: [8:0.50] +; ZNVER1-NEXT: vpmovsxwd %xmm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = sext <8 x i16> %a0 to <8 x i32> + %2 = load <8 x i16>, <8 x i16> *%a1, align 16 + %3 = sext <8 x i16> %2 to <8 x i32> + %4 = add <8 x i32> %1, %3 + ret <8 x i32> %4 +} + +define <4 x i64> @test_pmovsxwq(<8 x i16> %a0, <8 x i16> *%a1) { +; GENERIC-LABEL: test_pmovsxwq: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [5:1.00] +; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmovsxwq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [3:1.00] +; HASWELL-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [3:1.00] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmovsxwq: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [3:1.00] +; SKYLAKE-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [3:1.00] +; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmovsxwq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmovsxwq (%rdi), %ymm1 # sched: [8:0.50] +; ZNVER1-NEXT: vpmovsxwq %xmm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %2 = sext <4 x i16> %1 to <4 x i64> + %3 = load <8 x i16>, <8 x i16> *%a1, align 16 + %4 = shufflevector <8 x i16> %3, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %5 = sext <4 x i16> %4 to <4 x i64> + %6 = add <4 x i64> %2, %5 + ret <4 x i64> %6 +} + +define <8 x i32> @test_pmovzxbd(<16 x i8> %a0, <16 x i8> *%a1) { +; GENERIC-LABEL: test_pmovzxbd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [1:1.00] +; GENERIC-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [5:1.00] +; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmovzxbd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [3:1.00] +; HASWELL-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [3:1.00] +; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmovzxbd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: 
[3:1.00] +; SKYLAKE-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [3:1.00] +; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmovzxbd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero sched: [8:0.50] +; ZNVER1-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero sched: [1:0.25] +; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %2 = zext <8 x i8> %1 to <8 x i32> + %3 = load <16 x i8>, <16 x i8> *%a1, align 16 + %4 = shufflevector <16 x i8> %3, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + %5 = zext <8 x i8> %4 to <8 x i32> + %6 = add <8 x i32> %2, %5 + ret <8 x i32> %6 +} + +define <4 x i64> @test_pmovzxbq(<16 x i8> %a0, <16 x i8> *%a1) { +; GENERIC-LABEL: test_pmovzxbq: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [1:1.00] +; GENERIC-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [5:1.00] +; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmovzxbq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00] +; HASWELL-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmovzxbq: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00] +; SKYLAKE-NEXT: vpmovzxbq {{.*#+}} ymm1 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [3:1.00] +; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmovzxbq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmovzxbq {{.*#+}} ymm1 = 
mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero sched: [8:0.50] +; ZNVER1-NEXT: vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero sched: [1:0.50] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %2 = zext <4 x i8> %1 to <4 x i64> + %3 = load <16 x i8>, <16 x i8> *%a1, align 16 + %4 = shufflevector <16 x i8> %3, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %5 = zext <4 x i8> %4 to <4 x i64> + %6 = add <4 x i64> %2, %5 + ret <4 x i64> %6 +} + +define <16 x i16> @test_pmovzxbw(<16 x i8> %a0, <16 x i8> *%a1) { +; GENERIC-LABEL: test_pmovzxbw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:1.00] +; GENERIC-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [5:1.00] +; GENERIC-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmovzxbw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00] +; HASWELL-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [3:1.00] +; HASWELL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmovzxbw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [3:1.00] +; SKYLAKE-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [3:1.00] +; SKYLAKE-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmovzxbw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero sched: [8:0.50] +; ZNVER1-NEXT: vpmovzxbw {{.*#+}} ymm0 = 
xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero sched: [1:0.50] +; ZNVER1-NEXT: vpaddw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = zext <16 x i8> %a0 to <16 x i16> + %2 = load <16 x i8>, <16 x i8> *%a1, align 16 + %3 = zext <16 x i8> %2 to <16 x i16> + %4 = add <16 x i16> %1, %3 + ret <16 x i16> %4 +} + +define <4 x i64> @test_pmovzxdq(<4 x i32> %a0, <4 x i32> *%a1) { +; GENERIC-LABEL: test_pmovzxdq: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:1.00] +; GENERIC-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [5:1.00] +; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmovzxdq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00] +; HASWELL-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [3:1.00] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmovzxdq: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [3:1.00] +; SKYLAKE-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [3:1.00] +; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmovzxdq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmovzxdq {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero sched: [8:0.50] +; ZNVER1-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero sched: [1:0.50] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = zext <4 x i32> %a0 to <4 x i64> + %2 = load <4 x i32>, <4 x i32> *%a1, align 16 + %3 = zext <4 x i32> %2 to <4 x i64> + %4 = add <4 x i64> %1, %3 + ret <4 x i64> %4 +} + +define <8 x i32> @test_pmovzxwd(<8 x i16> %a0, <8 x i16> *%a1) { +; GENERIC-LABEL: test_pmovzxwd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:1.00] +; GENERIC-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [5:1.00] +; GENERIC-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmovzxwd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00] +; HASWELL-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [3:1.00] +; HASWELL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmovzxwd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [3:1.00] +; SKYLAKE-NEXT: vpmovzxwd {{.*#+}} ymm1 = 
mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [3:1.00] +; SKYLAKE-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmovzxwd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero sched: [8:0.50] +; ZNVER1-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero sched: [1:0.25] +; ZNVER1-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = zext <8 x i16> %a0 to <8 x i32> + %2 = load <8 x i16>, <8 x i16> *%a1, align 16 + %3 = zext <8 x i16> %2 to <8 x i32> + %4 = add <8 x i32> %1, %3 + ret <8 x i32> %4 +} + +define <4 x i64> @test_pmovzxwq(<8 x i16> %a0, <8 x i16> *%a1) { +; GENERIC-LABEL: test_pmovzxwq: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:1.00] +; GENERIC-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [5:1.00] +; GENERIC-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmovzxwq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00] +; HASWELL-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [3:1.00] +; HASWELL-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmovzxwq: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [3:1.00] +; SKYLAKE-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [3:1.00] +; SKYLAKE-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmovzxwq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmovzxwq {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero sched: [8:0.50] +; ZNVER1-NEXT: vpmovzxwq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero sched: [1:0.25] +; ZNVER1-NEXT: vpaddq %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %2 = zext <4 x i16> %1 to <4 x i64> + %3 = load <8 x i16>, <8 x i16> *%a1, align 16 + %4 = shufflevector <8 x i16> %3, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> + %5 = zext <4 x i16> %4 to <4 x i64> + %6 = add <4 x i64> %2, %5 + ret <4 x i64> %6 +} + +define <4 x i64> @test_pmuldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_pmuldq: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmuldq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmuldq (%rdi), %ymm0, 
%ymm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmuldq: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmuldq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmuldq %ymm1, %ymm0, %ymm0 # sched: [4:1.00] +; ZNVER1-NEXT: vpmuldq (%rdi), %ymm0, %ymm0 # sched: [11:1.00] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32> %a0, <8 x i32> %a1) + %2 = bitcast <4 x i64> %1 to <8 x i32> + %3 = load <8 x i32>, <8 x i32> *%a2, align 32 + %4 = call <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32> %2, <8 x i32> %3) + ret <4 x i64> %4 +} +declare <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32>, <8 x i32>) nounwind readnone + +define <16 x i16> @test_pmulhrsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_pmulhrsw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmulhrsw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmulhrsw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmulhrsw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmulhrsw %ymm1, %ymm0, %ymm0 # sched: [4:1.00] +; ZNVER1-NEXT: vpmulhrsw (%rdi), %ymm0, %ymm0 # sched: [11:1.00] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.pmul.hr.sw(<16 x i16>, <16 x i16>) nounwind readnone + +define <16 x i16> @test_pmulhuw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: test_pmulhuw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmulhuw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmulhuw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmulhuw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmulhuw %ymm1, %ymm0, %ymm0 # sched: [4:1.00] +; ZNVER1-NEXT: vpmulhuw (%rdi), %ymm0, %ymm0 # sched: [11:1.00] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.pmulhu.w(<16 x i16>, <16 x i16>) nounwind readnone + +define <16 x i16> @test_pmulhw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) { +; GENERIC-LABEL: 
test_pmulhw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmulhw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmulhw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmulhw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmulhw %ymm1, %ymm0, %ymm0 # sched: [4:1.00] +; ZNVER1-NEXT: vpmulhw (%rdi), %ymm0, %ymm0 # sched: [11:1.00] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.pmulh.w(<16 x i16>, <16 x i16>) nounwind readnone + define <8 x i32> @test_pmulld(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { ; GENERIC-LABEL: test_pmulld: ; GENERIC: # BB#0: @@ -1019,6 +2979,38 @@ define <16 x i16> @test_pmullw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) ret <16 x i16> %3 } +define <4 x i64> @test_pmuludq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_pmuludq: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pmuludq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pmuludq: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pmuludq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpmuludq %ymm1, %ymm0, %ymm0 # sched: [4:1.00] +; ZNVER1-NEXT: vpmuludq (%rdi), %ymm0, %ymm0 # sched: [11:1.00] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %a0, <8 x i32> %a1) + %2 = bitcast <4 x i64> %1 to <8 x i32> + %3 = load <8 x i32>, <8 x i32> *%a2, align 32 + %4 = call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %2, <8 x i32> %3) + ret <4 x i64> %4 +} +declare <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32>, <8 x i32>) nounwind readnone + define <4 x i64> @test_por(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { ; GENERIC-LABEL: test_por: ; GENERIC: # BB#0: @@ -1054,6 +3046,38 @@ define <4 x i64> @test_por(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { ret <4 x i64> %4 } +define <4 x i64> @test_psadbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; GENERIC-LABEL: test_psadbw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psadbw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psadbw: +; SKYLAKE: # BB#0: +; 
SKYLAKE-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psadbw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsadbw %ymm1, %ymm0, %ymm0 # sched: [4:1.00] +; ZNVER1-NEXT: vpsadbw (%rdi), %ymm0, %ymm0 # sched: [11:1.00] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %a0, <32 x i8> %a1) + %2 = bitcast <4 x i64> %1 to <32 x i8> + %3 = load <32 x i8>, <32 x i8> *%a2, align 32 + %4 = call <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8> %2, <32 x i8> %3) + ret <4 x i64> %4 +} +declare <4 x i64> @llvm.x86.avx2.psad.bw(<32 x i8>, <32 x i8>) nounwind readnone + define <32 x i8> @test_pshufb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { ; GENERIC-LABEL: test_pshufb: ; GENERIC: # BB#0: @@ -1190,6 +3214,697 @@ define <16 x i16> @test_pshuflw(<16 x i16> %a0, <16 x i16> *%a1) { ret <16 x i16> %4 } +define <32 x i8> @test_psignb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) { +; GENERIC-LABEL: test_psignb: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psignb: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psignb: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psignb: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsignb %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpsignb (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> %a0, <32 x i8> %a1) + %2 = load <32 x i8>, <32 x i8> *%a2, align 32 + %3 = call <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8> %1, <32 x i8> %2) + ret <32 x i8> %3 +} +declare <32 x i8> @llvm.x86.avx2.psign.b(<32 x i8>, <32 x i8>) nounwind readnone + +define <8 x i32> @test_psignd(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_psignd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psignd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psignd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psignd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsignd %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpsignd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> %a0, <8 x i32> %a1) + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = call <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32> %1, <8 x i32> %2) + ret <8 x i32> %3 +} +declare <8 x i32> @llvm.x86.avx2.psign.d(<8 x i32>, <8 x i32>) nounwind readnone + +define <16 x i16> @test_psignw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> 
*%a2) { +; GENERIC-LABEL: test_psignw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [3:1.00] +; GENERIC-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [7:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psignw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psignw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [1:0.50] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psignw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsignw %ymm1, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: vpsignw (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> %a0, <16 x i16> %a1) + %2 = load <16 x i16>, <16 x i16> *%a2, align 32 + %3 = call <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16> %1, <16 x i16> %2) + ret <16 x i16> %3 +} +declare <16 x i16> @llvm.x86.avx2.psign.w(<16 x i16>, <16 x i16>) nounwind readnone + +define <8 x i32> @test_pslld(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_pslld: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_pslld: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; HASWELL-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_pslld: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; SKYLAKE-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_pslld: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpslld %xmm1, %ymm0, %ymm0 # sched: [2:1.00] +; ZNVER1-NEXT: vpslld (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; ZNVER1-NEXT: vpslld $2, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %a0, <4 x i32> %a1) + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = call <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32> %1, <4 x i32> %2) + %4 = shl <8 x i32> %3, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2> + ret <8 x i32> %4 +} +declare <8 x i32> @llvm.x86.avx2.psll.d(<8 x i32>, <4 x i32>) nounwind readnone + +define <4 x i64> @test_psllq(<4 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) { +; GENERIC-LABEL: test_psllq: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psllq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; HASWELL-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psllq: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; SKYLAKE-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: 
[1:1.00] +; SKYLAKE-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psllq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsllq %xmm1, %ymm0, %ymm0 # sched: [2:1.00] +; ZNVER1-NEXT: vpsllq (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; ZNVER1-NEXT: vpsllq $2, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %a0, <2 x i64> %a1) + %2 = load <2 x i64>, <2 x i64> *%a2, align 16 + %3 = call <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64> %1, <2 x i64> %2) + %4 = shl <4 x i64> %3, <i64 2, i64 2, i64 2, i64 2> + ret <4 x i64> %4 +} +declare <4 x i64> @llvm.x86.avx2.psll.q(<4 x i64>, <2 x i64>) nounwind readnone + +define <4 x i32> @test_psllvd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_psllvd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psllvd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00] +; HASWELL-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [3:2.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psllvd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00] +; SKYLAKE-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [3:2.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psllvd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsllvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsllvd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %a0, <4 x i32> %a1) + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %1, <4 x i32> %2) + ret <4 x i32> %3 +} +declare <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32>, <4 x i32>) nounwind readnone + +define <8 x i32> @test_psllvd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_psllvd_ymm: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psllvd_ymm: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psllvd_ymm: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psllvd_ymm: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsllvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsllvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %a0, <8 x i32> %a1) + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> %1, <8 x i32> %2) + ret <8 x i32> %3 +} +declare <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32>, <8 x i32>) nounwind readnone + +define <2 x i64> @test_psllvq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) { +; GENERIC-LABEL: test_psllvq: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; GENERIC-NEXT: 
retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psllvq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psllvq: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; SKYLAKE-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psllvq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsllvq (%rdi), %xmm0, %xmm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %a0, <2 x i64> %a1) + %2 = load <2 x i64>, <2 x i64> *%a2, align 16 + %3 = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> %1, <2 x i64> %2) + ret <2 x i64> %3 +} +declare <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64>, <2 x i64>) nounwind readnone + +define <4 x i64> @test_psllvq_ymm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) { +; GENERIC-LABEL: test_psllvq_ymm: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psllvq_ymm: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psllvq_ymm: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psllvq_ymm: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsllvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsllvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %a0, <4 x i64> %a1) + %2 = load <4 x i64>, <4 x i64> *%a2, align 32 + %3 = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> %1, <4 x i64> %2) + ret <4 x i64> %3 +} +declare <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64>, <4 x i64>) nounwind readnone + +define <16 x i16> @test_psllw(<16 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_psllw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psllw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; HASWELL-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psllw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; SKYLAKE-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psllw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsllw %xmm1, %ymm0, %ymm0 # sched: [2:1.00] +; ZNVER1-NEXT: vpsllw (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; ZNVER1-NEXT: vpsllw $2, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %a0, <8 x i16> %a1) + %2 = load <8 x i16>, <8 x 
i16> *%a2, align 16 + %3 = call <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16> %1, <8 x i16> %2) + %4 = shl <16 x i16> %3, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2> + ret <16 x i16> %4 +} +declare <16 x i16> @llvm.x86.avx2.psll.w(<16 x i16>, <8 x i16>) nounwind readnone + +define <8 x i32> @test_psrad(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_psrad: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psrad: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; HASWELL-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psrad: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; SKYLAKE-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psrad: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsrad %xmm1, %ymm0, %ymm0 # sched: [2:1.00] +; ZNVER1-NEXT: vpsrad (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; ZNVER1-NEXT: vpsrad $2, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %a0, <4 x i32> %a1) + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = call <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32> %1, <4 x i32> %2) + %4 = ashr <8 x i32> %3, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2> + ret <8 x i32> %4 +} +declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>) nounwind readnone + +define <4 x i32> @test_psravd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_psravd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psravd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [3:2.00] +; HASWELL-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [3:2.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psravd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [3:2.00] +; SKYLAKE-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [3:2.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psravd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsravd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsravd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %a0, <4 x i32> %a1) + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> %1, <4 x i32> %2) + ret <4 x i32> %3 +} +declare <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32>, <4 x i32>) nounwind readnone + +define <8 x i32> @test_psravd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_psravd_ymm: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psravd_ymm: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsravd %ymm1, 
%ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psravd_ymm: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [3:2.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psravd_ymm: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsravd %ymm1, %ymm0, %ymm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsravd (%rdi), %ymm0, %ymm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %a0, <8 x i32> %a1) + %2 = load <8 x i32>, <8 x i32> *%a2, align 32 + %3 = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> %1, <8 x i32> %2) + ret <8 x i32> %3 +} +declare <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32>, <8 x i32>) nounwind readnone + +define <16 x i16> @test_psraw(<16 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) { +; GENERIC-LABEL: test_psraw: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psraw: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; HASWELL-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psraw: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; SKYLAKE-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psraw: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsraw %xmm1, %ymm0, %ymm0 # sched: [2:1.00] +; ZNVER1-NEXT: vpsraw (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; ZNVER1-NEXT: vpsraw $2, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %a0, <8 x i16> %a1) + %2 = load <8 x i16>, <8 x i16> *%a2, align 16 + %3 = call <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16> %1, <8 x i16> %2) + %4 = ashr <16 x i16> %3, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2> + ret <16 x i16> %4 +} +declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>) nounwind readnone + +define <8 x i32> @test_psrld(<8 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_psrld: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psrld: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; HASWELL-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psrld: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; SKYLAKE-NEXT: vpsrld (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psrld: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsrld %xmm1, %ymm0, %ymm0 # sched: [2:1.00] +; ZNVER1-NEXT: 
vpsrld (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; ZNVER1-NEXT: vpsrld $2, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %a0, <4 x i32> %a1) + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %1, <4 x i32> %2) + %4 = lshr <8 x i32> %3, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2> + ret <8 x i32> %4 +} +declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) nounwind readnone + +define <4 x i64> @test_psrlq(<4 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) { +; GENERIC-LABEL: test_psrlq: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [5:1.00] +; GENERIC-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psrlq: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; HASWELL-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:1.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psrlq: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [4:1.00] +; SKYLAKE-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:1.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psrlq: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 # sched: [2:1.00] +; ZNVER1-NEXT: vpsrlq (%rdi), %ymm0, %ymm0 # sched: [9:1.00] +; ZNVER1-NEXT: vpsrlq $2, %ymm0, %ymm0 # sched: [1:0.25] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %a0, <2 x i64> %a1) + %2 = load <2 x i64>, <2 x i64> *%a2, align 16 + %3 = call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %1, <2 x i64> %2) + %4 = lshr <4 x i64> %3, <i64 2, i64 2, i64 2, i64 2> + ret <4 x i64> %4 +} +declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) nounwind readnone + +define <4 x i32> @test_psrlvd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> *%a2) { +; GENERIC-LABEL: test_psrlvd: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [5:1.00] +; GENERIC-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: test_psrlvd: +; HASWELL: # BB#0: +; HASWELL-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00] +; HASWELL-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [3:2.00] +; HASWELL-NEXT: retq # sched: [2:1.00] +; +; SKYLAKE-LABEL: test_psrlvd: +; SKYLAKE: # BB#0: +; SKYLAKE-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [3:2.00] +; SKYLAKE-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [3:2.00] +; SKYLAKE-NEXT: retq # sched: [2:1.00] +; +; ZNVER1-LABEL: test_psrlvd: +; ZNVER1: # BB#0: +; ZNVER1-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 # sched: [1:0.50] +; ZNVER1-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0 # sched: [8:0.50] +; ZNVER1-NEXT: retq # sched: [1:0.50] + %1 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %a0, <4 x i32> %a1) + %2 = load <4 x i32>, <4 x i32> *%a2, align 16 + %3 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> %1, <4 x i32> %2) + ret <4 x i32> %3 +} +declare <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32>, <4 x i32>) nounwind readnone + +define <8 x i32> @test_psrlvd_ymm(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) { +; GENERIC-LABEL: test_psrlvd_ymm: +; GENERIC: # BB#0: +; GENERIC-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [1:1.00] +; GENERIC-NEXT: vpsrlvd (%rdi), %ymm0, 
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_psrlvd_ymm:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; HASWELL-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_psrlvd_ymm:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [3:2.00]
+; SKYLAKE-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [3:2.00]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_psrlvd_ymm:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %a0, <8 x i32> %a1)
+ %2 = load <8 x i32>, <8 x i32> *%a2, align 32
+ %3 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> %1, <8 x i32> %2)
+ ret <8 x i32> %3
+}
+declare <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32>, <8 x i32>) nounwind readnone
+
+define <2 x i64> @test_psrlvq(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> *%a2) {
+; GENERIC-LABEL: test_psrlvq:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; GENERIC-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [5:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_psrlvq:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; HASWELL-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_psrlvq:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:1.00]
+; SKYLAKE-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [1:1.00]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_psrlvq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %a0, <2 x i64> %a1)
+ %2 = load <2 x i64>, <2 x i64> *%a2, align 16
+ %3 = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> %1, <2 x i64> %2)
+ ret <2 x i64> %3
+}
+declare <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <4 x i64> @test_psrlvq_ymm(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
+; GENERIC-LABEL: test_psrlvq_ymm:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
+; GENERIC-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_psrlvq_ymm:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
+; HASWELL-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_psrlvq_ymm:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:1.00]
+; SKYLAKE-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_psrlvq_ymm:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; ZNVER1-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %a0, <4 x i64> %a1)
+ %2 = load <4 x i64>, <4 x i64> *%a2, align 32
+ %3 = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> %1, <4 x i64> %2)
+ ret <4 x i64> %3
+}
+declare <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64>, <4 x i64>) nounwind readnone
+
+define <16 x i16> @test_psrlw(<16 x i16> %a0, <8 x i16> %a1, <8 x i16> *%a2) {
+; GENERIC-LABEL: test_psrlw:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [1:1.00]
+; GENERIC-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [5:1.00]
+; GENERIC-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_psrlw:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
+; HASWELL-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
+; HASWELL-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:1.00]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_psrlw:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [4:1.00]
+; SKYLAKE-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [1:1.00]
+; SKYLAKE-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:1.00]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_psrlw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 # sched: [2:1.00]
+; ZNVER1-NEXT: vpsrlw (%rdi), %ymm0, %ymm0 # sched: [9:1.00]
+; ZNVER1-NEXT: vpsrlw $2, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %a0, <8 x i16> %a1)
+ %2 = load <8 x i16>, <8 x i16> *%a2, align 16
+ %3 = call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> %1, <8 x i16> %2)
+ %4 = lshr <16 x i16> %3, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
+ ret <16 x i16> %4
+}
+declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) nounwind readnone
+
 define <32 x i8> @test_psubb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
 ; GENERIC-LABEL: test_psubb:
 ; GENERIC: # BB#0:
@@ -1280,6 +3995,130 @@ define <4 x i64> @test_psubq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
 ret <4 x i64> %3
 }

+define <32 x i8> @test_psubsb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
+; GENERIC-LABEL: test_psubsb:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; GENERIC-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_psubsb:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_psubsb:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; SKYLAKE-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_psubsb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %a0, <32 x i8> %a1)
+ %2 = load <32 x i8>, <32 x i8> *%a2, align 32
+ %3 = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %1, <32 x i8> %2)
+ ret <32 x i8> %3
+}
+declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <16 x i16> @test_psubsw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
+; GENERIC-LABEL: test_psubsw:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; GENERIC-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_psubsw:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_psubsw:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; SKYLAKE-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_psubsw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %a0, <16 x i16> %a1)
+ %2 = load <16 x i16>, <16 x i16> *%a2, align 32
+ %3 = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %1, <16 x i16> %2)
+ ret <16 x i16> %3
+}
+declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readnone
+
+define <32 x i8> @test_psubusb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
+; GENERIC-LABEL: test_psubusb:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; GENERIC-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_psubusb:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_psubusb:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; SKYLAKE-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_psubusb:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubusb %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubusb (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %a0, <32 x i8> %a1)
+ %2 = load <32 x i8>, <32 x i8> *%a2, align 32
+ %3 = call <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8> %1, <32 x i8> %2)
+ ret <32 x i8> %3
+}
+declare <32 x i8> @llvm.x86.avx2.psubus.b(<32 x i8>, <32 x i8>) nounwind readnone
+
+define <16 x i16> @test_psubusw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
+; GENERIC-LABEL: test_psubusw:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; GENERIC-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [7:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_psubusw:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_psubusw:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; SKYLAKE-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [1:0.50]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_psubusw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpsubusw %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubusw (%rdi), %ymm0, %ymm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %a0, <16 x i16> %a1)
+ %2 = load <16 x i16>, <16 x i16> *%a2, align 32
+ %3 = call <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16> %1, <16 x i16> %2)
+ ret <16 x i16> %3
+}
+declare <16 x i16> @llvm.x86.avx2.psubus.w(<16 x i16>, <16 x i16>) nounwind readnone
+
 define <16 x i16> @test_psubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
 ; GENERIC-LABEL: test_psubw:
 ; GENERIC: # BB#0:
@@ -1310,6 +4149,273 @@ define <16 x i16> @test_psubw(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
 ret <16 x i16> %3
 }

+define <32 x i8> @test_punpckhbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
+; GENERIC-LABEL: test_punpckhbw:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
+; GENERIC-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [5:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_punpckhbw:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
+; HASWELL-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [1:1.00]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_punpckhbw:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:1.00]
+; SKYLAKE-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [1:1.00]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_punpckhbw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpckhbw {{.*#+}} ymm0 = ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15],ymm0[24],mem[24],ymm0[25],mem[25],ymm0[26],mem[26],ymm0[27],mem[27],ymm0[28],mem[28],ymm0[29],mem[29],ymm0[30],mem[30],ymm0[31],mem[31] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
+ %2 = load <32 x i8>, <32 x i8> *%a2, align 32
+ %3 = shufflevector <32 x i8> %1, <32 x i8> %2, <32 x i32> <i32 8, i32 40, i32 9, i32 41, i32 10, i32 42, i32 11, i32 43, i32 12, i32 44, i32 13, i32 45, i32 14, i32 46, i32 15, i32 47, i32 24, i32 56, i32 25, i32 57, i32 26, i32 58, i32 27, i32 59, i32 28, i32 60, i32 29, i32 61, i32 30, i32 62, i32 31, i32 63>
+ ret <32 x i8> %3
+}
+define <8 x i32> @test_punpckhdq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
+; GENERIC-LABEL: test_punpckhdq:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
+; GENERIC-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [5:1.00]
+; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [3:1.00]
+; GENERIC-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_punpckhdq:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
+; HASWELL-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [1:1.00]
+; HASWELL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
+; HASWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_punpckhdq:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:1.00]
+; SKYLAKE-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [1:1.00]
+; SKYLAKE-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
+; SKYLAKE-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_punpckhdq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpckhdq {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] sched: [8:0.50]
+; ZNVER1-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
+ %2 = load <8 x i32>, <8 x i32> *%a2, align 32
+ %3 = shufflevector <8 x i32> %1, <8 x i32> %2, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
+ %4 = add <8 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <8 x i32> %4
+}
+
+define <4 x i64> @test_punpckhqdq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
+; GENERIC-LABEL: test_punpckhqdq:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
+; GENERIC-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [5:1.00]
+; GENERIC-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_punpckhqdq:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
+; HASWELL-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [1:1.00]
+; HASWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_punpckhqdq:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:1.00]
+; SKYLAKE-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [1:1.00]
+; SKYLAKE-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_punpckhqdq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3] sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %2 = load <4 x i64>, <4 x i64> *%a2, align 32
+ %3 = shufflevector <4 x i64> %a0, <4 x i64> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %4 = add <4 x i64> %1, %3
+ ret <4 x i64> %4
+}
+
+define <16 x i16> @test_punpckhwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
+; GENERIC-LABEL: test_punpckhwd:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
+; GENERIC-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [5:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_punpckhwd:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
+; HASWELL-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [1:1.00]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_punpckhwd:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:1.00]
+; SKYLAKE-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [1:1.00]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_punpckhwd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpckhwd {{.*#+}} ymm0 = ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[12],mem[12],ymm0[13],mem[13],ymm0[14],mem[14],ymm0[15],mem[15] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ %2 = load <16 x i16>, <16 x i16> *%a2, align 32
+ %3 = shufflevector <16 x i16> %1, <16 x i16> %2, <16 x i32> <i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i16> %3
+}
+
+define <32 x i8> @test_punpcklbw(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> *%a2) {
+; GENERIC-LABEL: test_punpcklbw:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
+; GENERIC-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [5:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_punpcklbw:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
+; HASWELL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [1:1.00]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_punpcklbw:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:1.00]
+; SKYLAKE-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [1:1.00]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_punpcklbw:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[4],mem[4],ymm0[5],mem[5],ymm0[6],mem[6],ymm0[7],mem[7],ymm0[16],mem[16],ymm0[17],mem[17],ymm0[18],mem[18],ymm0[19],mem[19],ymm0[20],mem[20],ymm0[21],mem[21],ymm0[22],mem[22],ymm0[23],mem[23] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = shufflevector <32 x i8> %a0, <32 x i8> %a1, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
+ %2 = load <32 x i8>, <32 x i8> *%a2, align 32
+ %3 = shufflevector <32 x i8> %1, <32 x i8> %2, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
+ ret <32 x i8> %3
+}
+
+define <8 x i32> @test_punpckldq(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> *%a2) {
+; GENERIC-LABEL: test_punpckldq:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
+; GENERIC-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [5:1.00]
+; GENERIC-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [3:1.00]
+; GENERIC-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [3:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_punpckldq:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
+; HASWELL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [1:1.00]
+; HASWELL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
+; HASWELL-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_punpckldq:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:1.00]
+; SKYLAKE-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [1:1.00]
+; SKYLAKE-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.50]
+; SKYLAKE-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.50]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_punpckldq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] sched: [8:0.50]
+; ZNVER1-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 # sched: [1:0.25]
+; ZNVER1-NEXT: vpsubd %ymm1, %ymm0, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = shufflevector <8 x i32> %a0, <8 x i32> %a1, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
+ %2 = load <8 x i32>, <8 x i32> *%a2, align 32
+ %3 = shufflevector <8 x i32> %1, <8 x i32> %2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
+ %4 = add <8 x i32> %3, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ ret <8 x i32> %4
+}
+
+define <4 x i64> @test_punpcklqdq(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
+; GENERIC-LABEL: test_punpcklqdq:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
+; GENERIC-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [5:1.00]
+; GENERIC-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [3:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_punpcklqdq:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
+; HASWELL-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [1:1.00]
+; HASWELL-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_punpcklqdq:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:1.00]
+; SKYLAKE-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [1:1.00]
+; SKYLAKE-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.50]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_punpcklqdq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] sched: [8:0.50]
+; ZNVER1-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = shufflevector <4 x i64> %a0, <4 x i64> %a1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %2 = load <4 x i64>, <4 x i64> *%a2, align 32
+ %3 = shufflevector <4 x i64> %a0, <4 x i64> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %4 = add <4 x i64> %1, %3
+ ret <4 x i64> %4
+}
+
+define <16 x i16> @test_punpcklwd(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> *%a2) {
+; GENERIC-LABEL: test_punpcklwd:
+; GENERIC: # BB#0:
+; GENERIC-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
+; GENERIC-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [5:1.00]
+; GENERIC-NEXT: retq # sched: [1:1.00]
+;
+; HASWELL-LABEL: test_punpcklwd:
+; HASWELL: # BB#0:
+; HASWELL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
+; HASWELL-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [1:1.00]
+; HASWELL-NEXT: retq # sched: [2:1.00]
+;
+; SKYLAKE-LABEL: test_punpcklwd:
+; SKYLAKE: # BB#0:
+; SKYLAKE-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:1.00]
+; SKYLAKE-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [1:1.00]
+; SKYLAKE-NEXT: retq # sched: [2:1.00]
+;
+; ZNVER1-LABEL: test_punpcklwd:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11] sched: [1:0.25]
+; ZNVER1-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [1:0.50]
+ %1 = shufflevector <16 x i16> %a0, <16 x i16> %a1, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
+ %2 = load <16 x i16>, <16 x i16> *%a2, align 32
+ %3 = shufflevector <16 x i16> %1, <16 x i16> %2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27>
+ ret <16 x i16> %3
+}
+
 define <4 x i64> @test_pxor(<4 x i64> %a0, <4 x i64> %a1, <4 x i64> *%a2) {
 ; GENERIC-LABEL: test_pxor:
 ; GENERIC: # BB#0: