-rw-r--r--  llvm/test/CodeGen/X86/avx1-logical-load-folding.ll  |  83
1 file changed, 71 insertions(+), 12 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx1-logical-load-folding.ll b/llvm/test/CodeGen/X86/avx1-logical-load-folding.ll
index 90e00c96539..7073eb22476 100644
--- a/llvm/test/CodeGen/X86/avx1-logical-load-folding.ll
+++ b/llvm/test/CodeGen/X86/avx1-logical-load-folding.ll
@@ -1,10 +1,26 @@
-; RUN: llc -O3 -disable-peephole -mcpu=corei7-avx -mattr=+avx < %s | FileCheck %s
-
-target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-apple-macosx10.9.0"
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -O3 -disable-peephole -mtriple=i686-apple-macosx10.9.0 -mcpu=corei7-avx -mattr=+avx | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -O3 -disable-peephole -mtriple=x86_64-apple-macosx10.9.0 -mcpu=corei7-avx -mattr=+avx | FileCheck %s --check-prefix=X64
; Function Attrs: nounwind ssp uwtable
define void @test1(float* %A, float* %C) #0 {
+; X86-LABEL: test1:
+; X86: ## BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: vmovaps (%ecx), %ymm0
+; X86-NEXT: vandps LCPI0_0, %ymm0, %ymm0
+; X86-NEXT: vmovss %xmm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test1:
+; X64: ## BB#0:
+; X64-NEXT: vmovaps (%rdi), %ymm0
+; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vmovss %xmm0, (%rsi)
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
%tmp1 = bitcast float* %A to <8 x float>*
%tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
@@ -13,12 +29,27 @@ define void @test1(float* %A, float* %C) #0 {
%tmp6 = extractelement <8 x float> %tmp5, i32 0
store float %tmp6, float* %C
ret void
-
- ; CHECK: vandps LCPI0_0(%rip), %ymm0, %ymm0
}
; Function Attrs: nounwind ssp uwtable
define void @test2(float* %A, float* %C) #0 {
+; X86-LABEL: test2:
+; X86: ## BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: vmovaps (%ecx), %ymm0
+; X86-NEXT: vorps LCPI1_0, %ymm0, %ymm0
+; X86-NEXT: vmovss %xmm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test2:
+; X64: ## BB#0:
+; X64-NEXT: vmovaps (%rdi), %ymm0
+; X64-NEXT: vorps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vmovss %xmm0, (%rsi)
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
%tmp1 = bitcast float* %A to <8 x float>*
%tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
@@ -27,12 +58,27 @@ define void @test2(float* %A, float* %C) #0 {
%tmp6 = extractelement <8 x float> %tmp5, i32 0
store float %tmp6, float* %C
ret void
-
- ; CHECK: vorps LCPI1_0(%rip), %ymm0, %ymm0
}
; Function Attrs: nounwind ssp uwtable
define void @test3(float* %A, float* %C) #0 {
+; X86-LABEL: test3:
+; X86: ## BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: vmovaps (%ecx), %ymm0
+; X86-NEXT: vxorps LCPI2_0, %ymm0, %ymm0
+; X86-NEXT: vmovss %xmm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test3:
+; X64: ## BB#0:
+; X64-NEXT: vmovaps (%rdi), %ymm0
+; X64-NEXT: vxorps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vmovss %xmm0, (%rsi)
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
%tmp1 = bitcast float* %A to <8 x float>*
%tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
@@ -41,11 +87,26 @@ define void @test3(float* %A, float* %C) #0 {
%tmp6 = extractelement <8 x float> %tmp5, i32 0
store float %tmp6, float* %C
ret void
-
- ; CHECK: vxorps LCPI2_0(%rip), %ymm0, %ymm0
}
define void @test4(float* %A, float* %C) #0 {
+; X86-LABEL: test4:
+; X86: ## BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: vmovaps (%ecx), %ymm0
+; X86-NEXT: vandnps LCPI3_0, %ymm0, %ymm0
+; X86-NEXT: vmovss %xmm0, (%eax)
+; X86-NEXT: vzeroupper
+; X86-NEXT: retl
+;
+; X64-LABEL: test4:
+; X64: ## BB#0:
+; X64-NEXT: vmovaps (%rdi), %ymm0
+; X64-NEXT: vandnps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vmovss %xmm0, (%rsi)
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
%tmp1 = bitcast float* %A to <8 x float>*
%tmp2 = load <8 x float>, <8 x float>* %tmp1, align 32
%tmp3 = bitcast <8 x float> %tmp2 to <8 x i32>
@@ -55,6 +116,4 @@ define void @test4(float* %A, float* %C) #0 {
%tmp7 = extractelement <8 x float> %tmp6, i32 0
store float %tmp7, float * %C
ret void
-
- ;CHECK: vandnps LCPI3_0(%rip), %ymm0, %ymm0
}
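As the NOTE line in the diff states, the X86/X64 CHECK lines above were autogenerated by utils/update_llc_test_checks.py rather than written by hand. A minimal sketch of regenerating them after a codegen change, assuming the command is run from an LLVM source checkout and a built llc is on PATH (the script also accepts an --llc-binary option to point at a specific build):

$ llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/avx1-logical-load-folding.ll

The script runs each RUN line's llc invocation and rewrites the per-prefix assertions (X86, X64) in place, which is why the hand-written ; CHECK: lines are deleted in this diff.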