diff options
author | Andrea Di Biagio <Andrea_DiBiagio@sn.scee.net> | 2015-10-02 12:45:37 +0000 |
---|---|---|
committer | Andrea Di Biagio <Andrea_DiBiagio@sn.scee.net> | 2015-10-02 12:45:37 +0000 |
commit | cb334561223f931c0ecd580a50ec80ee24266842 (patch) | |
tree | 9aa1019197681c701c05d5bc65f4071e911104a5 /llvm/test/CodeGen/X86/fast-isel-bitcasts-avx.ll | |
parent | b285e9e0d2faa84e6404ddc6aa9b54e0c2981c39 (diff) | |
download | bcm5719-llvm-cb334561223f931c0ecd580a50ec80ee24266842.tar.gz bcm5719-llvm-cb334561223f931c0ecd580a50ec80ee24266842.zip |
[FastISel][x86] Teach how to select SSE2/AVX bitcasts between 128/256-bit vector types.
This patch teaches FastISel the following two things:
1) On SSE2, no instructions are needed for bitcasts between 128-bit vector types;
2) On AVX, no instructions are needed for bitcasts between 256-bit vector types.
Example:
%1 = bitcast <4 x i32> %V to <2 x i64>
Before (-fast-isel -fast-isel-abort=1):
FastIsel miss: %1 = bitcast <4 x i32> %V to <2 x i64>
Now we don't fall back to SelectionDAG and we correctly fold that computation
propagating the register associated to %V.
Differential Revision: http://reviews.llvm.org/D13347
llvm-svn: 249121
Diffstat (limited to 'llvm/test/CodeGen/X86/fast-isel-bitcasts-avx.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/fast-isel-bitcasts-avx.ll | 244 |
1 file changed, 244 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/fast-isel-bitcasts-avx.ll b/llvm/test/CodeGen/X86/fast-isel-bitcasts-avx.ll new file mode 100644 index 00000000000..03cefbc8682 --- /dev/null +++ b/llvm/test/CodeGen/X86/fast-isel-bitcasts-avx.ll @@ -0,0 +1,244 @@ +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -fast-isel -fast-isel-abort=1 -asm-verbose=0 | FileCheck %s +; +; Bitcasts between 256-bit vector types are no-ops since no instruction is +; needed for the conversion. + +define <4 x i64> @v8i32_to_v4i64(<8 x i32> %a) { +;CHECK-LABEL: v8i32_to_v4i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i32> %a to <4 x i64> + ret <4 x i64> %1 +} + +define <4 x i64> @v16i16_to_v4i64(<16 x i16> %a) { +;CHECK-LABEL: v16i16_to_v4i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i16> %a to <4 x i64> + ret <4 x i64> %1 +} + +define <4 x i64> @v32i8_to_v4i64(<32 x i8> %a) { +;CHECK-LABEL: v32i8_to_v4i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <32 x i8> %a to <4 x i64> + ret <4 x i64> %1 +} + +define <4 x i64> @v4f64_to_v4i64(<4 x double> %a) { +;CHECK-LABEL: v4f64_to_v4i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x double> %a to <4 x i64> + ret <4 x i64> %1 +} + +define <4 x i64> @v8f32_to_v4i64(<8 x float> %a) { +;CHECK-LABEL: v8f32_to_v4i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x float> %a to <4 x i64> + ret <4 x i64> %1 +} + +define <8 x i32> @v4i64_to_v8i32(<4 x i64> %a) { +;CHECK-LABEL: v4i64_to_v8i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i64> %a to <8 x i32> + ret <8 x i32> %1 +} + +define <8 x i32> @v16i16_to_v8i32(<16 x i16> %a) { +;CHECK-LABEL: v16i16_to_v8i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i16> %a to <8 x i32> + ret <8 x i32> %1 +} + +define <8 x i32> @v32i8_to_v8i32(<32 x i8> %a) { +;CHECK-LABEL: v32i8_to_v8i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <32 x 
i8> %a to <8 x i32> + ret <8 x i32> %1 +} + +define <8 x i32> @v4f64_to_v8i32(<4 x double> %a) { +;CHECK-LABEL: v4f64_to_v8i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x double> %a to <8 x i32> + ret <8 x i32> %1 +} + +define <8 x i32> @v8f32_to_v8i32(<8 x float> %a) { +;CHECK-LABEL: v8f32_to_v8i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x float> %a to <8 x i32> + ret <8 x i32> %1 +} + +define <16 x i16> @v4i64_to_v16i16(<4 x i64> %a) { +;CHECK-LABEL: v4i64_to_v16i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i64> %a to <16 x i16> + ret <16 x i16> %1 +} + +define <16 x i16> @v8i32_to_v16i16(<8 x i32> %a) { +;CHECK-LABEL: v8i32_to_v16i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i32> %a to <16 x i16> + ret <16 x i16> %1 +} + +define <16 x i16> @v32i8_to_v16i16(<32 x i8> %a) { +;CHECK-LABEL: v32i8_to_v16i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <32 x i8> %a to <16 x i16> + ret <16 x i16> %1 +} + +define <16 x i16> @v4f64_to_v16i16(<4 x double> %a) { +;CHECK-LABEL: v4f64_to_v16i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x double> %a to <16 x i16> + ret <16 x i16> %1 +} + +define <16 x i16> @v8f32_to_v16i16(<8 x float> %a) { +;CHECK-LABEL: v8f32_to_v16i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x float> %a to <16 x i16> + ret <16 x i16> %1 +} + +define <32 x i8> @v16i16_to_v32i8(<16 x i16> %a) { +;CHECK-LABEL: v16i16_to_v32i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i16> %a to <32 x i8> + ret <32 x i8> %1 +} + +define <32 x i8> @v4i64_to_v32i8(<4 x i64> %a) { +;CHECK-LABEL: v4i64_to_v32i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i64> %a to <32 x i8> + ret <32 x i8> %1 +} + +define <32 x i8> @v8i32_to_v32i8(<8 x i32> %a) { +;CHECK-LABEL: v8i32_to_v32i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i32> %a to <32 x i8> 
+ ret <32 x i8> %1 +} + +define <32 x i8> @v4f64_to_v32i8(<4 x double> %a) { +;CHECK-LABEL: v4f64_to_v32i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x double> %a to <32 x i8> + ret <32 x i8> %1 +} + +define <32 x i8> @v8f32_to_v32i8(<8 x float> %a) { +;CHECK-LABEL: v8f32_to_v32i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x float> %a to <32 x i8> + ret <32 x i8> %1 +} + +define <8 x float> @v32i8_to_v8f32(<32 x i8> %a) { +;CHECK-LABEL: v32i8_to_v8f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <32 x i8> %a to <8 x float> + ret <8 x float> %1 +} + +define <8 x float> @v16i16_to_v8f32(<16 x i16> %a) { +;CHECK-LABEL: v16i16_to_v8f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i16> %a to <8 x float> + ret <8 x float> %1 +} + +define <8 x float> @v4i64_to_v8f32(<4 x i64> %a) { +;CHECK-LABEL: v4i64_to_v8f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i64> %a to <8 x float> + ret <8 x float> %1 +} + +define <8 x float> @v8i32_to_v8f32(<8 x i32> %a) { +;CHECK-LABEL: v8i32_to_v8f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i32> %a to <8 x float> + ret <8 x float> %1 +} + +define <8 x float> @v4f64_to_v8f32(<4 x double> %a) { +;CHECK-LABEL: v4f64_to_v8f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x double> %a to <8 x float> + ret <8 x float> %1 +} + +define <4 x double> @v8f32_to_v4f64(<8 x float> %a) { +;CHECK-LABEL: v8f32_to_v4f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x float> %a to <4 x double> + ret <4 x double> %1 +} + +define <4 x double> @v32i8_to_v4f64(<32 x i8> %a) { +;CHECK-LABEL: v32i8_to_v4f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <32 x i8> %a to <4 x double> + ret <4 x double> %1 +} + +define <4 x double> @v16i16_to_v4f64(<16 x i16> %a) { +;CHECK-LABEL: v16i16_to_v4f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i16> %a to 
<4 x double> + ret <4 x double> %1 +} + +define <4 x double> @v4i64_to_v4f64(<4 x i64> %a) { +;CHECK-LABEL: v4i64_to_v4f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i64> %a to <4 x double> + ret <4 x double> %1 +} + +define <4 x double> @v8i32_to_v4f64(<8 x i32> %a) { +;CHECK-LABEL: v8i32_to_v4f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i32> %a to <4 x double> + ret <4 x double> %1 +} |