Diffstat (limited to 'llvm/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll')
-rw-r--r--  llvm/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll | 29
1 file changed, 16 insertions, 13 deletions
diff --git a/llvm/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll b/llvm/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll
index d885f1cd364..d3a12862a9e 100644
--- a/llvm/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll
+++ b/llvm/test/CodeGen/X86/x86-upgrade-avx-vbroadcast.ll
@@ -1,41 +1,44 @@
-; RUN: llc -mattr=+avx < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9.0 -mattr=+avx | FileCheck %s
 
 ; Check that we properly upgrade the AVX vbroadcast intrinsics to IR. The
 ; expectation is that we should still get the original instruction back that
 ; maps to the intrinsic.
 
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-apple-macosx10.9.0"
 
-; CHECK-LABEL: test_mm_broadcast_ss:
 define <4 x float> @test_mm_broadcast_ss(float* readonly %__a){
+; CHECK-LABEL: test_mm_broadcast_ss:
+; CHECK:       ## BB#0: ## %entry
+; CHECK-NEXT:    vbroadcastss (%rdi), %xmm0
+; CHECK-NEXT:    retq
 entry:
   %0 = bitcast float* %__a to i8*
-; CHECK: vbroadcastss (%{{.*}}), %xmm
   %1 = tail call <4 x float> @llvm.x86.avx.vbroadcast.ss(i8* %0)
   ret <4 x float> %1
 }
+declare <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8*)
 
-; CHECK-LABEL: test_mm256_broadcast_sd:
 define <4 x double> @test_mm256_broadcast_sd(double* readonly %__a) {
+; CHECK-LABEL: test_mm256_broadcast_sd:
+; CHECK:       ## BB#0: ## %entry
+; CHECK-NEXT:    vbroadcastsd (%rdi), %ymm0
+; CHECK-NEXT:    retq
 entry:
   %0 = bitcast double* %__a to i8*
-; CHECK: vbroadcastsd (%{{.*}}), %ymm
   %1 = tail call <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8* %0)
   ret <4 x double> %1
 }
+declare <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8*)
 
-; CHECK-LABEL: test_mm256_broadcast_ss:
 define <8 x float> @test_mm256_broadcast_ss(float* readonly %__a) {
+; CHECK-LABEL: test_mm256_broadcast_ss:
+; CHECK:       ## BB#0: ## %entry
+; CHECK-NEXT:    vbroadcastss (%rdi), %ymm0
+; CHECK-NEXT:    retq
 entry:
   %0 = bitcast float* %__a to i8*
-; CHECK: vbroadcastss (%{{.*}}), %ymm
   %1 = tail call <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8* %0)
   ret <8 x float> %1
 }
-
-declare <8 x float> @llvm.x86.avx.vbroadcast.ss.256(i8*)
-
-declare <4 x double> @llvm.x86.avx.vbroadcast.sd.256(i8*)
-
 declare <4 x float> @llvm.x86.avx.vbroadcast.ss(i8*)
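
For reference, below is a sketch of what the auto-upgraded IR for the 128-bit case is expected to look like, assuming the upgrade expands the retired intrinsic into a scalar load plus a splat (insertelement + shufflevector) that the X86 backend then selects back to vbroadcastss. The function and value names are illustrative only and do not appear in the test above.

; Hypothetical auto-upgraded form of @test_mm_broadcast_ss (illustrative names).
define <4 x float> @test_mm_broadcast_ss_upgraded(float* readonly %__a) {
entry:
  ; Load the scalar element from memory.
  %ld = load float, float* %__a
  ; Splat it across all four lanes: insert into lane 0, then shuffle with a
  ; zero mask so every lane reads lane 0.
  %ins = insertelement <4 x float> undef, float %ld, i32 0
  %splat = shufflevector <4 x float> %ins, <4 x float> undef, <4 x i32> zeroinitializer
  ; The backend is expected to match this load+splat pattern to
  ; vbroadcastss (%rdi), %xmm0, which is what the CHECK lines verify.
  ret <4 x float> %splat
}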