| field | value | date |
|---|---|---|
| author | Evan Cheng <evan.cheng@apple.com> | 2008-05-10 00:59:18 +0000 |
| committer | Evan Cheng <evan.cheng@apple.com> | 2008-05-10 00:59:18 +0000 |
| commit | da2587cedca070c2eb233cd74a6e67c517877aae (patch) | |
| tree | ff36e8ce558ec3154fb99a45072a14f6b51273d9 /llvm/lib/Target | |
| parent | bb48d55a88da0eed534043fa2196f838e0f2b480 (diff) | |
Some clean up.
llvm-svn: 50929
Diffstat (limited to 'llvm/lib/Target')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrSSE.td | 34 |
1 file changed, 18 insertions, 16 deletions
```diff
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index adb6399e2c7..0d0c1a528fa 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -675,20 +675,21 @@ let Constraints = "$src1 = $dst" in {
     def MOVLPSrm : PSI<0x12, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                        "movlps\t{$src2, $dst|$dst, $src2}",
-                       [(set VR128:$dst,
-                         (v4f32 (vector_shuffle VR128:$src1,
-                         (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
-                                 MOVLP_shuffle_mask)))]>;
+       [(set VR128:$dst,
+             (v4f32 (vector_shuffle VR128:$src1,
+                     (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
+                     MOVLP_shuffle_mask)))]>;
     def MOVHPSrm : PSI<0x16, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                        "movhps\t{$src2, $dst|$dst, $src2}",
-                       [(set VR128:$dst,
-                         (v4f32 (vector_shuffle VR128:$src1,
-                         (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
-                                 MOVHP_shuffle_mask)))]>;
+       [(set VR128:$dst,
+             (v4f32 (vector_shuffle VR128:$src1,
+                     (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
+                     MOVHP_shuffle_mask)))]>;
   } // AddedComplexity
 } // Constraints = "$src1 = $dst"
 
+
 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                    "movlps\t{$src, $dst|$dst, $src}",
                    [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
@@ -2265,16 +2266,17 @@ def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
 
 // Move to lower bits of a VR128 and zeroing upper bits.
 // Loading from memory automatically zeroing upper bits.
-let AddedComplexity = 20 in
-  def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
-                        "movsd\t{$src, $dst|$dst, $src}",
-                        [(set VR128:$dst,
-                          (v2f64 (X86vzmovl (v2f64 (scalar_to_vector
-                                                   (loadf64 addr:$src))))))]>;
+let AddedComplexity = 20 in {
+def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
+                      "movsd\t{$src, $dst|$dst, $src}",
+                      [(set VR128:$dst,
+                        (v2f64 (X86vzmovl (v2f64 (scalar_to_vector
+                                                 (loadf64 addr:$src))))))]>;
 
 def : Pat<(v2f64 (X86vzmovl (memopv2f64 addr:$src))),
-          (MOVZSD2PDrm addr:$src)>;
+            (MOVZSD2PDrm addr:$src)>;
 def : Pat<(v2f64 (X86vzload addr:$src)), (MOVZSD2PDrm addr:$src)>;
+}
 
 // movd / movq to XMM register zero-extends
 let AddedComplexity = 15 in {
@@ -2301,9 +2303,9 @@ def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                        (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                 (loadi64 addr:$src))))))]>, XS,
                    Requires<[HasSSE2]>;
-}
 
 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
+}
 
 // Moving from XMM to XMM and clear upper 64 bits. Note, there is a bug in
 // IA32 document. movq xmm1, xmm2 does clear the high bits.
```
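For context on the behavior these patterns select for: the comments in the hunks above note that `movsd`/`movq` loads from memory fill the low element of an XMM register and zero the upper bits. The following is a minimal, illustrative C sketch (not part of the patch); it assumes an x86-64 target with SSE2, and uses the intrinsics `_mm_load_sd` and `_mm_loadl_epi64`, which compile to exactly those zero-extending loads.

```c
/* Illustrative only: show that movsd/movq memory loads zero the upper lanes.
 * Build with something like: cc -O2 -msse2 demo.c   (file name is arbitrary) */
#include <stdio.h>
#include <emmintrin.h>          /* SSE2 intrinsics */

int main(void) {
    double d = 3.5;
    unsigned long long q = 0x1122334455667788ULL;

    /* _mm_load_sd -> movsd xmm, m64: low lane = d, high lane zeroed. */
    __m128d vd = _mm_load_sd(&d);
    double fout[2];
    _mm_storeu_pd(fout, vd);
    printf("movsd load: low = %f, high = %f\n", fout[0], fout[1]);

    /* _mm_loadl_epi64 -> movq xmm, m64: low 64 bits = q, high 64 bits zeroed. */
    __m128i vq = _mm_loadl_epi64((const __m128i *)&q);
    unsigned long long iout[2];
    _mm_storeu_si128((__m128i *)iout, vq);
    printf("movq load:  low = %llx, high = %llx\n", iout[0], iout[1]);

    return 0;
}
```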

