author     Craig Topper <craig.topper@gmail.com>  2013-10-08 05:53:50 +0000
committer  Craig Topper <craig.topper@gmail.com>  2013-10-08 05:53:50 +0000
commit     72c8cd7bc3f4a0ac1ad6f9a0200ec032da5d7de1 (patch)
tree       66bfe9fe5e1eee4f6706c7130e40508e950c9ef9
parent     9a13ac9a408a6e198444c5ccbba0fae3b6e9bdb0 (diff)
download   bcm5719-llvm-72c8cd7bc3f4a0ac1ad6f9a0200ec032da5d7de1.tar.gz
           bcm5719-llvm-72c8cd7bc3f4a0ac1ad6f9a0200ec032da5d7de1.zip
Remove some instructions that existed only to provide aliases to the
assembler; this can be done with InstAlias instead. Unfortunately, the old
scheme caused the printer to emit 'vmovq' or 'vmovd' depending on which
mnemonic had been parsed. To clean up the inconsistency, convert all
'vmovd' forms with 64-bit registers to 'vmovq', but provide an alias so
that 'vmovd' still parses.
llvm-svn: 192171
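
For context, the InstAlias mechanism the message refers to works like the
sketch below (a minimal sketch: the instruction and register-class names are
taken from the patch that follows; the trailing 0 is the flag telling the
asm printer not to prefer this alias for output, so printing stays 'vmovq'):

    // Match "vmovd" with a 64-bit GPR at parse time, but lower it to
    // VMOV64toPQIrr, whose own asm string is "vmovq". The trailing 0
    // keeps the printer from choosing this alias when emitting assembly.
    def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
                    (VMOV64toPQIrr VR128:$dst, GR64:$src), 0>;

With an alias like this in place, the standalone VMOVQs64rr/VMOVQd64rr
definitions become redundant: the assembler rewrites the alias to the real
instruction, so both spellings encode identically, while disassembly and
codegen consistently print 'vmovq', as the test updates below check.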
-rw-r--r--  llvm/lib/Target/X86/X86InstrSSE.td            | 37
-rw-r--r--  llvm/test/CodeGen/X86/avx-basic.ll            |  4
-rw-r--r--  llvm/test/CodeGen/X86/avx-bitcast.ll          |  2
-rw-r--r--  llvm/test/CodeGen/X86/avx-splat.ll            |  2
-rw-r--r--  llvm/test/CodeGen/X86/mcinst-avx-lowering.ll  |  4
-rw-r--r--  llvm/test/MC/Disassembler/X86/x86-64.txt      |  6
-rw-r--r--  llvm/test/MC/X86/x86-64.s                     | 13
-rw-r--r--  llvm/test/MC/X86/x86_64-avx-encoding.s        |  6
-rw-r--r--  llvm/utils/TableGen/X86RecognizableInstr.cpp  |  4
9 files changed, 39 insertions(+), 39 deletions(-)
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 3c660d17a92..0c05bd24755 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -4381,12 +4381,12 @@ def VMOVDI2PDIrm : VS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
IIC_SSE_MOVDQ>,
VEX, Sched<[WriteLoad]>;
def VMOV64toPQIrr : VRS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
+ "movq\t{$src, $dst|$dst, $src}",
[(set VR128:$dst,
(v2i64 (scalar_to_vector GR64:$src)))],
IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
+ "movq\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert GR64:$src))],
IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
@@ -4475,7 +4475,7 @@ def : Pat<(v4i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
//
let SchedRW = [WriteMove] in {
def VMOVPQIto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
+ "movq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
(iPTR 0)))],
IIC_SSE_MOVD_ToGP>,
@@ -4497,7 +4497,7 @@ def VMOV64toSDrm : VS2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
[(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
VEX, Sched<[WriteLoad]>;
def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}",
+ "movq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bitconvert FR64:$src))],
IIC_SSE_MOVDQ>, VEX, Sched<[WriteMove]>;
def VMOVSDto64mr : VRS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
@@ -4549,7 +4549,7 @@ def VMOVZDI2PDIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
(v4i32 (scalar_to_vector GR32:$src)))))],
IIC_SSE_MOVDQ>, VEX;
def VMOVZQI2PQIrr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
- "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
+ "movq\t{$src, $dst|$dst, $src}", // X86-64 only
[(set VR128:$dst, (v2i64 (X86vzmovl
(v2i64 (scalar_to_vector GR64:$src)))))],
IIC_SSE_MOVDQ>,
@@ -4614,15 +4614,12 @@ let Predicates = [UseSSE2], AddedComplexity = 20 in {
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
(MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
- (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
(MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
- (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
- (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
-def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
- (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
+// Allow "vmovd" but print "vmovq" since we don't need compatibility for AVX.
+def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
+ (VMOV64toPQIrr VR128:$dst, GR64:$src), 0>;
+def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",
+ (VMOVPQIto64rr GR64:$dst, VR128:$src), 0>;
//===---------------------------------------------------------------------===//
// SSE2 - Move Quadword
@@ -4760,20 +4757,6 @@ let AddedComplexity = 20 in {
}
}
-// Instructions to match in the assembler
-let SchedRW = [WriteMove] in {
-def VMOVQs64rr : VS2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
- "movq\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVDQ>, VEX, VEX_W;
-def VMOVQd64rr : VS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
- "movq\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVDQ>, VEX, VEX_W;
-// Recognize "movd" with GR64 destination, but encode as a "movq"
-def VMOVQd64rr_alt : VS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
- "movq\t{$src, $dst|$dst, $src}", [],
- IIC_SSE_MOVDQ>, VEX, VEX_W;
-} // SchedRW
-
//===---------------------------------------------------------------------===//
// SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
//===---------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/X86/avx-basic.ll b/llvm/test/CodeGen/X86/avx-basic.ll
index 64c4627c47c..1fd9085838d 100644
--- a/llvm/test/CodeGen/X86/avx-basic.ll
+++ b/llvm/test/CodeGen/X86/avx-basic.ll
@@ -122,10 +122,10 @@ define <16 x i16> @build_vec_16x16(i16 %a) nounwind readonly {
ret <16 x i16> %res
}
-;;; Check that VMOVPQIto64rr generates the assembly string "vmovd". Previously
+;;; Check that VMOVPQIto64rr generates the assembly string "vmovq". Previously
;;; an incorrect mnemonic of "movd" was printed for this instruction.
; CHECK: VMOVPQIto64rr
-; CHECK: vmovd
+; CHECK: vmovq
define i64 @VMOVPQIto64rr(<2 x i64> %a) {
entry:
%vecext.i = extractelement <2 x i64> %a, i32 0
diff --git a/llvm/test/CodeGen/X86/avx-bitcast.ll b/llvm/test/CodeGen/X86/avx-bitcast.ll
index ecc71be7c0d..c9d828c1f6e 100644
--- a/llvm/test/CodeGen/X86/avx-bitcast.ll
+++ b/llvm/test/CodeGen/X86/avx-bitcast.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -O0 -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
; CHECK: vmovsd (%
-; CHECK-NEXT: vmovd %xmm
+; CHECK-NEXT: vmovq %xmm
define i64 @bitcasti64tof64() {
%a = load double* undef
%b = bitcast double %a to i64
diff --git a/llvm/test/CodeGen/X86/avx-splat.ll b/llvm/test/CodeGen/X86/avx-splat.ll
index 5c01c2cc5b5..5d0781531f4 100644
--- a/llvm/test/CodeGen/X86/avx-splat.ll
+++ b/llvm/test/CodeGen/X86/avx-splat.ll
@@ -20,7 +20,7 @@ entry:
ret <16 x i16> %shuffle
}
-; CHECK: vmovd
+; CHECK: vmovq
; CHECK-NEXT: vmovlhps %xmm
; CHECK-NEXT: vinsertf128 $1
define <4 x i64> @funcC(i64 %q) nounwind uwtable readnone ssp {
diff --git a/llvm/test/CodeGen/X86/mcinst-avx-lowering.ll b/llvm/test/CodeGen/X86/mcinst-avx-lowering.ll
index 41f96e8856c..db72e0871c8 100644
--- a/llvm/test/CodeGen/X86/mcinst-avx-lowering.ll
+++ b/llvm/test/CodeGen/X86/mcinst-avx-lowering.ll
@@ -4,7 +4,7 @@ define i64 @t1(double %d_ivar) nounwind uwtable ssp {
entry:
; CHECK: t1
%0 = bitcast double %d_ivar to i64
-; CHECK: vmovd
+; CHECK: vmovq
; CHECK: encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
ret i64 %0
}
@@ -13,7 +13,7 @@ define double @t2(i64 %d_ivar) nounwind uwtable ssp {
entry:
; CHECK: t2
%0 = bitcast i64 %d_ivar to double
-; CHECK: vmovd
+; CHECK: vmovq
; CHECK: encoding: [0xc4,0xe1,0xf9,0x6e,0xc7]
ret double %0
}
diff --git a/llvm/test/MC/Disassembler/X86/x86-64.txt b/llvm/test/MC/Disassembler/X86/x86-64.txt
index f7e71fd15d6..b9478e81428 100644
--- a/llvm/test/MC/Disassembler/X86/x86-64.txt
+++ b/llvm/test/MC/Disassembler/X86/x86-64.txt
@@ -229,3 +229,9 @@
# CHECK: vmovq %xmm0, %xmm0
0xc5 0xfa 0x7e 0xc0
+
+# CHECK: vmovq %xmm0, %rax
+0xc4 0xe1 0xf9 0x7e 0xc0
+
+# CHECK: movd %xmm0, %rax
+0x66 0x48 0x0f 0x7e 0xc0
diff --git a/llvm/test/MC/X86/x86-64.s b/llvm/test/MC/X86/x86-64.s
index c0eac5eb42f..04a6a57d28a 100644
--- a/llvm/test/MC/X86/x86-64.s
+++ b/llvm/test/MC/X86/x86-64.s
@@ -1375,3 +1375,16 @@ fsub %st(1)
fsubr %st(1)
fdiv %st(1)
fdivr %st(1)
+
+// CHECK: movd %xmm0, %eax
+// CHECK: movd %xmm0, %rax
+// CHECK: movd %xmm0, %rax
+// CHECK: vmovd %xmm0, %eax
+// CHECK: vmovq %xmm0, %rax
+// CHECK: vmovq %xmm0, %rax
+movd %xmm0, %eax
+movd %xmm0, %rax
+movq %xmm0, %rax
+vmovd %xmm0, %eax
+vmovd %xmm0, %rax
+vmovq %xmm0, %rax
diff --git a/llvm/test/MC/X86/x86_64-avx-encoding.s b/llvm/test/MC/X86/x86_64-avx-encoding.s
index 1a4e7844511..e02cd28480b 100644
--- a/llvm/test/MC/X86/x86_64-avx-encoding.s
+++ b/llvm/test/MC/X86/x86_64-avx-encoding.s
@@ -2212,11 +2212,11 @@ vdivpd -4(%rcx,%rbx,8), %xmm10, %xmm11
// CHECK: encoding: [0xc5,0x79,0x7e,0x30]
vmovd %xmm14, (%rax)
-// CHECK: vmovd %rax, %xmm14
+// CHECK: vmovq %rax, %xmm14
// CHECK: encoding: [0xc4,0x61,0xf9,0x6e,0xf0]
vmovd %rax, %xmm14
-// CHECK: vmovd %xmm0, %rax
+// CHECK: vmovq %xmm0, %rax
// CHECK: encoding: [0xc4,0xe1,0xf9,0x7e,0xc0]
vmovd %xmm0, %rax
@@ -4048,7 +4048,7 @@ vdivpd -4(%rcx,%rbx,8), %xmm10, %xmm11
// CHECK: encoding: [0xc4,0xe3,0x79,0x17,0xe1,0x07]
vextractps $7, %xmm4, %rcx
-// CHECK: vmovd %xmm4, %rcx
+// CHECK: vmovq %xmm4, %rcx
// CHECK: encoding: [0xc4,0xe1,0xf9,0x7e,0xe1]
vmovd %xmm4, %rcx
diff --git a/llvm/utils/TableGen/X86RecognizableInstr.cpp b/llvm/utils/TableGen/X86RecognizableInstr.cpp
index d3427207b3c..8a32ab18b8a 100644
--- a/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -549,9 +549,7 @@ RecognizableInstr::filter_ret RecognizableInstr::filter() const {
Name == "MMX_MOVD64rrv164" ||
Name == "MOV64ri64i32" ||
Name == "VMASKMOVDQU64" ||
- Name == "VEXTRACTPSrr64" ||
- Name == "VMOVQd64rr" ||
- Name == "VMOVQs64rr")
+ Name == "VEXTRACTPSrr64")
return FILTER_WEAK;
// XACQUIRE and XRELEASE reuse REPNE and REP respectively.