Diffstat (limited to 'llvm/test/CodeGen/X86/sse4a.ll')
-rw-r--r--  llvm/test/CodeGen/X86/sse4a.ll  236
1 file changed, 108 insertions, 128 deletions
diff --git a/llvm/test/CodeGen/X86/sse4a.ll b/llvm/test/CodeGen/X86/sse4a.ll
index 612e3b7de9c..2b9b60b798b 100644
--- a/llvm/test/CodeGen/X86/sse4a.ll
+++ b/llvm/test/CodeGen/X86/sse4a.ll
@@ -1,49 +1,44 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X32 --check-prefix=X32-SSE
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX

define <2 x i64> @test_extrqi(<2 x i64> %x) nounwind uwtable ssp {
-; X32-LABEL: test_extrqi:
-; X32: # %bb.0:
-; X32-NEXT: extrq $2, $3, %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: test_extrqi:
-; X64: # %bb.0:
-; X64-NEXT: extrq $2, $3, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_extrqi:
+; CHECK: # %bb.0:
+; CHECK-NEXT: extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%1 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 3, i8 2)
ret <2 x i64> %1
}

define <2 x i64> @test_extrqi_domain(<2 x i64> *%p) nounwind uwtable ssp {
-; X32-SSE-LABEL: test_extrqi_domain:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: movdqa (%eax), %xmm0
-; X32-SSE-NEXT: extrq $2, $3, %xmm0
-; X32-SSE-NEXT: retl
-;
-; X32-AVX-LABEL: test_extrqi_domain:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovdqa (%eax), %xmm0
-; X32-AVX-NEXT: extrq $2, $3, %xmm0
-; X32-AVX-NEXT: retl
+; X86-SSE-LABEL: test_extrqi_domain:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT: movdqa (%eax), %xmm0 # encoding: [0x66,0x0f,0x6f,0x00]
+; X86-SSE-NEXT: extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; X86-SSE-NEXT: retl # encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_extrqi_domain:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vmovdqa (%eax), %xmm0 # encoding: [0xc5,0xf9,0x6f,0x00]
+; X86-AVX-NEXT: extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_extrqi_domain:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movdqa (%rdi), %xmm0
-; X64-SSE-NEXT: extrq $2, $3, %xmm0
-; X64-SSE-NEXT: retq
+; X64-SSE-NEXT: movdqa (%rdi), %xmm0 # encoding: [0x66,0x0f,0x6f,0x07]
+; X64-SSE-NEXT: extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX-LABEL: test_extrqi_domain:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa (%rdi), %xmm0
-; X64-AVX-NEXT: extrq $2, $3, %xmm0
-; X64-AVX-NEXT: retq
+; X64-AVX-NEXT: vmovdqa (%rdi), %xmm0 # encoding: [0xc5,0xf9,0x6f,0x07]
+; X64-AVX-NEXT: extrq $2, $3, %xmm0 # encoding: [0x66,0x0f,0x78,0xc0,0x03,0x02]
+; X64-AVX-NEXT: retq # encoding: [0xc3]
%1 = load <2 x i64>, <2 x i64> *%p
%2 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %1, i8 3, i8 2)
ret <2 x i64> %2
@@ -52,50 +47,45 @@ define <2 x i64> @test_extrqi_domain(<2 x i64> *%p) nounwind uwtable ssp {
declare <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64>, i8, i8) nounwind

define <2 x i64> @test_extrq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
-; X32-LABEL: test_extrq:
-; X32: # %bb.0:
-; X32-NEXT: extrq %xmm1, %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: test_extrq:
-; X64: # %bb.0:
-; X64-NEXT: extrq %xmm1, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_extrq:
+; CHECK: # %bb.0:
+; CHECK-NEXT: extrq %xmm1, %xmm0 # encoding: [0x66,0x0f,0x79,0xc1]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%1 = bitcast <2 x i64> %y to <16 x i8>
%2 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %x, <16 x i8> %1) nounwind
ret <2 x i64> %2
}

define <2 x i64> @test_extrq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
-; X32-SSE-LABEL: test_extrq_domain:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: movdqa (%eax), %xmm1
-; X32-SSE-NEXT: extrq %xmm0, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: retl
-;
-; X32-AVX-LABEL: test_extrq_domain:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovdqa (%eax), %xmm1
-; X32-AVX-NEXT: extrq %xmm0, %xmm1
-; X32-AVX-NEXT: vmovdqa %xmm1, %xmm0
-; X32-AVX-NEXT: retl
+; X86-SSE-LABEL: test_extrq_domain:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT: movdqa (%eax), %xmm1 # encoding: [0x66,0x0f,0x6f,0x08]
+; X86-SSE-NEXT: extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
+; X86-SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X86-SSE-NEXT: retl # encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_extrq_domain:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vmovdqa (%eax), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x08]
+; X86-AVX-NEXT: extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
+; X86-AVX-NEXT: vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_extrq_domain:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movdqa (%rdi), %xmm1
-; X64-SSE-NEXT: extrq %xmm0, %xmm1
-; X64-SSE-NEXT: movdqa %xmm1, %xmm0
-; X64-SSE-NEXT: retq
+; X64-SSE-NEXT: movdqa (%rdi), %xmm1 # encoding: [0x66,0x0f,0x6f,0x0f]
+; X64-SSE-NEXT: extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
+; X64-SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX-LABEL: test_extrq_domain:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1
-; X64-AVX-NEXT: extrq %xmm0, %xmm1
-; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0
-; X64-AVX-NEXT: retq
+; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x0f]
+; X64-AVX-NEXT: extrq %xmm0, %xmm1 # encoding: [0x66,0x0f,0x79,0xc8]
+; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-AVX-NEXT: retq # encoding: [0xc3]
%1 = load <2 x i64>, <2 x i64> *%p
%2 = bitcast <2 x i64> %y to <16 x i8>
%3 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %1, <16 x i8> %2) nounwind
@@ -105,49 +95,44 @@ define <2 x i64> @test_extrq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtabl
declare <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64>, <16 x i8>) nounwind

define <2 x i64> @test_insertqi(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
-; X32-LABEL: test_insertqi:
-; X32: # %bb.0:
-; X32-NEXT: insertq $6, $5, %xmm1, %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: test_insertqi:
-; X64: # %bb.0:
-; X64-NEXT: insertq $6, $5, %xmm1, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_insertqi:
+; CHECK: # %bb.0:
+; CHECK-NEXT: insertq $6, $5, %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x78,0xc1,0x05,0x06]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y, i8 5, i8 6)
ret <2 x i64> %1
}

define <2 x i64> @test_insertqi_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
-; X32-SSE-LABEL: test_insertqi_domain:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: movdqa (%eax), %xmm1
-; X32-SSE-NEXT: insertq $6, $5, %xmm0, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: retl
-;
-; X32-AVX-LABEL: test_insertqi_domain:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovdqa (%eax), %xmm1
-; X32-AVX-NEXT: insertq $6, $5, %xmm0, %xmm1
-; X32-AVX-NEXT: vmovdqa %xmm1, %xmm0
-; X32-AVX-NEXT: retl
+; X86-SSE-LABEL: test_insertqi_domain:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT: movdqa (%eax), %xmm1 # encoding: [0x66,0x0f,0x6f,0x08]
+; X86-SSE-NEXT: insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
+; X86-SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X86-SSE-NEXT: retl # encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_insertqi_domain:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vmovdqa (%eax), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x08]
+; X86-AVX-NEXT: insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
+; X86-AVX-NEXT: vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_insertqi_domain:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movdqa (%rdi), %xmm1
-; X64-SSE-NEXT: insertq $6, $5, %xmm0, %xmm1
-; X64-SSE-NEXT: movdqa %xmm1, %xmm0
-; X64-SSE-NEXT: retq
+; X64-SSE-NEXT: movdqa (%rdi), %xmm1 # encoding: [0x66,0x0f,0x6f,0x0f]
+; X64-SSE-NEXT: insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
+; X64-SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX-LABEL: test_insertqi_domain:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1
-; X64-AVX-NEXT: insertq $6, $5, %xmm0, %xmm1
-; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0
-; X64-AVX-NEXT: retq
+; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x0f]
+; X64-AVX-NEXT: insertq $6, $5, %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x78,0xc8,0x05,0x06]
+; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-AVX-NEXT: retq # encoding: [0xc3]
%1 = load <2 x i64>, <2 x i64> *%p
%2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %y, i8 5, i8 6)
ret <2 x i64> %2
@@ -156,49 +141,44 @@ define <2 x i64> @test_insertqi_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwt
declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8) nounwind

define <2 x i64> @test_insertq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
-; X32-LABEL: test_insertq:
-; X32: # %bb.0:
-; X32-NEXT: insertq %xmm1, %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: test_insertq:
-; X64: # %bb.0:
-; X64-NEXT: insertq %xmm1, %xmm0
-; X64-NEXT: retq
+; CHECK-LABEL: test_insertq:
+; CHECK: # %bb.0:
+; CHECK-NEXT: insertq %xmm1, %xmm0 # encoding: [0xf2,0x0f,0x79,0xc1]
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%1 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %x, <2 x i64> %y) nounwind
ret <2 x i64> %1
}

define <2 x i64> @test_insertq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
-; X32-SSE-LABEL: test_insertq_domain:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT: movdqa (%eax), %xmm1
-; X32-SSE-NEXT: insertq %xmm0, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: retl
-;
-; X32-AVX-LABEL: test_insertq_domain:
-; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vmovdqa (%eax), %xmm1
-; X32-AVX-NEXT: insertq %xmm0, %xmm1
-; X32-AVX-NEXT: vmovdqa %xmm1, %xmm0
-; X32-AVX-NEXT: retl
+; X86-SSE-LABEL: test_insertq_domain:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-SSE-NEXT: movdqa (%eax), %xmm1 # encoding: [0x66,0x0f,0x6f,0x08]
+; X86-SSE-NEXT: insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
+; X86-SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X86-SSE-NEXT: retl # encoding: [0xc3]
+;
+; X86-AVX-LABEL: test_insertq_domain:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
+; X86-AVX-NEXT: vmovdqa (%eax), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x08]
+; X86-AVX-NEXT: insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
+; X86-AVX-NEXT: vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_insertq_domain:
; X64-SSE: # %bb.0:
-; X64-SSE-NEXT: movdqa (%rdi), %xmm1
-; X64-SSE-NEXT: insertq %xmm0, %xmm1
-; X64-SSE-NEXT: movdqa %xmm1, %xmm0
-; X64-SSE-NEXT: retq
+; X64-SSE-NEXT: movdqa (%rdi), %xmm1 # encoding: [0x66,0x0f,0x6f,0x0f]
+; X64-SSE-NEXT: insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
+; X64-SSE-NEXT: movdqa %xmm1, %xmm0 # encoding: [0x66,0x0f,0x6f,0xc1]
+; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX-LABEL: test_insertq_domain:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1
-; X64-AVX-NEXT: insertq %xmm0, %xmm1
-; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0
-; X64-AVX-NEXT: retq
+; X64-AVX-NEXT: vmovdqa (%rdi), %xmm1 # encoding: [0xc5,0xf9,0x6f,0x0f]
+; X64-AVX-NEXT: insertq %xmm0, %xmm1 # encoding: [0xf2,0x0f,0x79,0xc8]
+; X64-AVX-NEXT: vmovdqa %xmm1, %xmm0 # encoding: [0xc5,0xf9,0x6f,0xc1]
+; X64-AVX-NEXT: retq # encoding: [0xc3]
%1 = load <2 x i64>, <2 x i64> *%p
%2 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %1, <2 x i64> %y) nounwind
ret <2 x i64> %2