| author | Craig Topper <craig.topper@intel.com> | 2018-07-17 23:26:20 +0000 | 
|---|---|---|
| committer | Craig Topper <craig.topper@intel.com> | 2018-07-17 23:26:20 +0000 | 
| commit | a29f58dc3168927bde806d107e8fbf2b01ce17f8 (patch) | |
| tree | 0e8bea31f237d6300a4d36fadcbb995aa26fbe0d /llvm | |
| parent | 27242c0402f0cf5280ea5aba05d8f3a71e2e57fd (diff) | |
[X86] Remove the vector alignment requirement from the patterns added in r337320.
The resulting instruction will only load 64 bits so alignment isn't required.
llvm-svn: 337334
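
As an illustration, here is a minimal LLVM IR sketch of the case this change enables, adapted from the `shuffle_mem_v4f32_0145` test updated in the diff below (the function name here is invented). A `<4 x float>` load with only 1-byte alignment that feeds a two-lane shuffle can now fold into `movhps` on SSE1-only targets, since the instruction reads just 64 bits from memory:

```llvm
; Hypothetical example modeled on the updated test below: the align-1 load
; feeds a shuffle that uses only two of %b's four lanes, so the new pattern
; lets isel fold it into movhps even though the vector load is under-aligned.
define <4 x float> @fold_unaligned_halfload(<4 x float> %a, <4 x float>* %pb) {
  %b = load <4 x float>, <4 x float>* %pb, align 1
  %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x float> %shuffle
}
```

Before this change the patterns used `memopv4f32`, which only matches loads that satisfy the 16-byte vector alignment requirement; switching to `loadv4f32` accepts any alignment, which is safe because `movlps`/`movhps` load only 64 bits.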
Diffstat (limited to 'llvm')
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrSSE.td | 6 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-sse1.ll | 4 |
2 files changed, 6 insertions, 4 deletions
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 3797d91fb31..c8ad7d9eabb 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -725,7 +725,8 @@ let Predicates = [UseSSE1] in {
   // This pattern helps select MOVLPS on SSE1 only targets. With SSE2 we'll
   // end up with a movsd or bleand instead of shufp.
-  def : Pat<(X86Shufp (memopv4f32 addr:$src2), VR128:$src1, (i8 -28)),
+  // No need for aligned load, we're only loading 64-bits.
+  def : Pat<(X86Shufp (loadv4f32 addr:$src2), VR128:$src1, (i8 -28)),
             (MOVLPSrm VR128:$src1, addr:$src2)>;
 }
@@ -801,7 +802,8 @@ let Predicates = [UseSSE1] in {
   // This pattern helps select MOVHPS on SSE1 only targets. With SSE2 we'll
   // end up with a movsd or bleand instead of shufp.
-  def : Pat<(X86Movlhps VR128:$src1, (memopv4f32 addr:$src2)),
+  // No need for aligned load, we're only loading 64-bits.
+  def : Pat<(X86Movlhps VR128:$src1, (loadv4f32 addr:$src2)),
             (MOVHPSrm VR128:$src1, addr:$src2)>;
 }
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-sse1.ll b/llvm/test/CodeGen/X86/vector-shuffle-sse1.ll
index dda46e062d5..eb0f0b043e2 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-sse1.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-sse1.ll
@@ -280,7 +280,7 @@ define <4 x float> @shuffle_mem_v4f32_0145(<4 x float> %a, <4 x float>* %pb) {
 ; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
 ; SSE1-NEXT:    retq
-  %b = load <4 x float>, <4 x float>* %pb, align 16
+  %b = load <4 x float>, <4 x float>* %pb, align 1
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
   ret <4 x float> %shuffle
 }
@@ -300,7 +300,7 @@ define <4 x float> @shuffle_mem_v4f32_4523(<4 x float> %a, <4 x float>* %pb) {
 ; SSE1:       # %bb.0:
 ; SSE1-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
 ; SSE1-NEXT:    retq
-  %b = load <4 x float>, <4 x float>* %pb, align 16
+  %b = load <4 x float>, <4 x float>* %pb, align 1
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
   ret <4 x float> %shuffle
 }

