diff options
 llvm/lib/Target/X86/X86ISelLowering.cpp              |  5 ++++-
 llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll | 29 +++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 294ec69e867..6fe906de8cf 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -7572,7 +7572,10 @@ static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
 // Recurse to find a LoadSDNode source and the accumulated ByteOffest.
 static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
   if (ISD::isNON_EXTLoad(Elt.getNode())) {
-    Ld = cast<LoadSDNode>(Elt);
+    auto *BaseLd = cast<LoadSDNode>(Elt);
+    if (BaseLd->getMemOperand()->getFlags() & MachineMemOperand::MOVolatile)
+      return false;
+    Ld = BaseLd;
     ByteOffset = 0;
     return true;
   }
diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll
index c03e2267b0d..e635f6e00cd 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll
@@ -666,3 +666,32 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind
   %resF = insertelement <16 x i16> %resE, i16 %valF, i16 15
   ret <16 x i16> %resF
 }
+
+;
+; Volatile tests.
+;
+
+@l = external global <32 x i8>, align 32
+
+define <2 x i8> @PR42846(<2 x i8>* %j, <2 x i8> %k) {
+; AVX-LABEL: PR42846:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*}}(%rip), %ymm1
+; AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:    vpextrw $0, %xmm1, (%rdi)
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+;
+; X32-AVX-LABEL: PR42846:
+; X32-AVX:       # %bb.0:
+; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-AVX-NEXT:    vmovdqa l, %ymm1
+; X32-AVX-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
+; X32-AVX-NEXT:    vpextrw $0, %xmm1, (%eax)
+; X32-AVX-NEXT:    vzeroupper
+; X32-AVX-NEXT:    retl
+  %t0 = load volatile <32 x i8>, <32 x i8>* @l, align 32
+  %shuffle = shufflevector <32 x i8> %t0, <32 x i8> undef, <2 x i32> <i32 0, i32 1>
+  store <2 x i8> %shuffle, <2 x i8>* %j, align 2
+  ret <2 x i8> %shuffle
+}