Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll  38
-rw-r--r--  llvm/test/CodeGen/X86/avx512-mask-op.ll                   6
-rw-r--r--  llvm/test/CodeGen/X86/masked_gather_scatter.ll            2
-rw-r--r--  llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll      2
4 files changed, 43 insertions, 5 deletions
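
These test updates are consistent with a codegen change that narrows under-aligned wide loads: the new AMDGPU test checks that an align-4 i64 load whose upper half is not needed is reduced to a single 32-bit load (buffer_load_dword), and the X86 checks pick up the corresponding instruction-selection changes. As a rough illustration only (hypothetical function names; the combine itself runs on the SelectionDAG, not at the IR level), the AMDGPU case is equivalent to rewriting:

; before: a 64-bit load with only 4-byte alignment, low half used
define i32 @narrow_example(i64 addrspace(1)* %in) {
  %a = load i64, i64 addrspace(1)* %in, align 4
  %lo = trunc i64 %a to i32
  ret i32 %lo
}

; after: the same value from one naturally aligned 32-bit load
define i32 @narrow_example_reduced(i64 addrspace(1)* %in) {
  %p = bitcast i64 addrspace(1)* %in to i32 addrspace(1)*
  %lo = load i32, i32 addrspace(1)* %p, align 4
  ret i32 %lo
}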
diff --git a/llvm/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll b/llvm/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll
new file mode 100644
index 00000000000..c255b05324b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/reduce-load-width-alignment.ll
@@ -0,0 +1,38 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}reduce_i64_load_align_4_width_to_i32:
+; GCN: buffer_load_dword [[VAL:v[0-9]+]]
+; GCN: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, [[VAL]]
+; GCN: buffer_store_dwordx2
+define void @reduce_i64_load_align_4_width_to_i32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+ %a = load i64, i64 addrspace(1)* %in, align 4
+ %and = and i64 %a, 1234567
+ store i64 %and, i64 addrspace(1)* %out, align 8
+ ret void
+}
+
+; GCN-LABEL: {{^}}reduce_i64_align_4_bitcast_v2i32_elt0:
+; GCN: buffer_load_dword [[VAL:v[0-9]+]]
+; GCN: buffer_store_dword [[VAL]]
+define void @reduce_i64_align_4_bitcast_v2i32_elt0(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+ %a = load i64, i64 addrspace(1)* %in, align 4
+ %vec = bitcast i64 %a to <2 x i32>
+ %elt0 = extractelement <2 x i32> %vec, i32 0
+ store i32 %elt0, i32 addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}reduce_i64_align_4_bitcast_v2i32_elt1:
+; GCN: buffer_load_dword [[VAL:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:4
+; GCN: buffer_store_dword [[VAL]]
+define void @reduce_i64_align_4_bitcast_v2i32_elt1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+ %a = load i64, i64 addrspace(1)* %in, align 4
+ %vec = bitcast i64 %a to <2 x i32>
+ %elt1 = extractelement <2 x i32> %vec, i32 1
+ store i32 %elt1, i32 addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll
index ea6a7af34ff..de0c97cf0b5 100644
--- a/llvm/test/CodeGen/X86/avx512-mask-op.ll
+++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll
@@ -53,7 +53,7 @@ define void @mask16_mem(i16* %ptr) {
define void @mask8_mem(i8* %ptr) {
; KNL-LABEL: mask8_mem:
; KNL: ## BB#0:
-; KNL-NEXT: movb (%rdi), %al
+; KNL-NEXT: movzbw (%rdi), %ax
; KNL-NEXT: kmovw %eax, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -951,7 +951,7 @@ define <16 x i32> @load_16i1(<16 x i1>* %a) {
define <2 x i16> @load_2i1(<2 x i1>* %a) {
; KNL-LABEL: load_2i1:
; KNL: ## BB#0:
-; KNL-NEXT: movb (%rdi), %al
+; KNL-NEXT: movzbw (%rdi), %ax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; KNL-NEXT: retq
@@ -969,7 +969,7 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) {
define <4 x i16> @load_4i1(<4 x i1>* %a) {
; KNL-LABEL: load_4i1:
; KNL: ## BB#0:
-; KNL-NEXT: movb (%rdi), %al
+; KNL-NEXT: movzbw (%rdi), %ax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqd %zmm0, %ymm0
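
A note on the movb -> movzbw updates here and in masked_gather_scatter.ll below: the 8-bit mask load is now selected as a zero-extending load into a full 16-bit register rather than a byte write to %al/%cl, so the subsequent kmovw reads a fully defined value. In IR terms this corresponds to a widened zero-extending i8 load, sketched here with a hypothetical function name:

define i16 @mask_zext_example(i8* %ptr) {
  ; an i8 load whose user wants 16 bits is selected as a single movzbw
  %m = load i8, i8* %ptr, align 1
  %z = zext i8 %m to i16
  ret i16 %z
}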
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index ed88be0fd14..f153338aa41 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -291,7 +291,7 @@ define <8 x i32> @test7(i32* %base, <8 x i32> %ind, i8 %mask) {
; KNL_32-LABEL: test7:
; KNL_32: # BB#0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; KNL_32-NEXT: movb {{[0-9]+}}(%esp), %cl
+; KNL_32-NEXT: movzbw {{[0-9]+}}(%esp), %cx
; KNL_32-NEXT: kmovw %ecx, %k1
; KNL_32-NEXT: vpmovsxdq %ymm0, %zmm0
; KNL_32-NEXT: kmovw %k1, %k2
diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll
index 3b6582fa04f..fafd796d299 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll
@@ -234,7 +234,7 @@ define <8 x i64> @merge_8i64_i64_1u3u5zu8(i64* %ptr) nounwind uwtable noinline s
; X32-AVX512F-LABEL: merge_8i64_i64_1u3u5zu8:
; X32-AVX512F: # BB#0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT: vmovdqu32 8(%eax), %zmm0
+; X32-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0
; X32-AVX512F-NEXT: vpxord %zmm1, %zmm1, %zmm1
; X32-AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <0,0,u,u,2,0,u,u,4,0,13,0,u,u,7,0>
; X32-AVX512F-NEXT: vpermt2q %zmm1, %zmm2, %zmm0