diff options
| -rw-r--r-- | llvm/test/CodeGen/X86/logical-load-fold.ll | 4 | 
1 files changed, 2 insertions, 2 deletions
diff --git a/llvm/test/CodeGen/X86/logical-load-fold.ll b/llvm/test/CodeGen/X86/logical-load-fold.ll
index fa6b9e5000c..5aac2d7910c 100644
--- a/llvm/test/CodeGen/X86/logical-load-fold.ll
+++ b/llvm/test/CodeGen/X86/logical-load-fold.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mcpu=x86-64 -mattr=sse2,sse-unaligned-mem | FileCheck %s --check-prefix=SSE2
-; RUN: llc < %s -mcpu=x86-64 -mattr=avx                    | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2,sse-unaligned-mem | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx                    | FileCheck %s --check-prefix=AVX
 ; Although we have the ability to fold an unaligned load with AVX
 ; and under special conditions with some SSE implementations, we

