author    Dale Johannesen <dalej@apple.com>    2007-09-19 23:55:34 +0000
committer Dale Johannesen <dalej@apple.com>    2007-09-19 23:55:34 +0000
commit    7d67e547b5e9342309885582e2e25e393fb80d8c (patch)
tree      b7d7ac595164fd8aa6cc2401c99de4930cbc2147 /llvm/lib/Target
parent    d0e360e16e4651eea3247d57e41aab0d5c93b0b6 (diff)
More long double fixes. x86_64 should build now.
llvm-svn: 42155
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/X86/X86CallingConv.td   |  4
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 16
2 files changed, 14 insertions, 6 deletions
diff --git a/llvm/lib/Target/X86/X86CallingConv.td b/llvm/lib/Target/X86/X86CallingConv.td
index 7c555dd46f1..9c2d95a1991 100644
--- a/llvm/lib/Target/X86/X86CallingConv.td
+++ b/llvm/lib/Target/X86/X86CallingConv.td
@@ -118,8 +118,10 @@ def CC_X86_64_C : CallingConv<[
// 8-byte aligned if there are no more registers to hold them.
CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,
+ // Long doubles get 16-byte stack slots that are 16-byte aligned.
// Vectors get 16-byte stack slots that are 16-byte aligned.
- CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
+ CCIfType<[f80, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
+ CCAssignToStack<16, 16>>,
// __m64 vectors get 8-byte stack slots that are 8-byte aligned.
CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 8>>
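For context, the CC_X86_64_C change above gives an f80 (x86 long double) argument a 16-byte stack slot with 16-byte alignment, the same treatment the table already applied to the SSE vector types. A minimal C++ sketch that would exercise this argument-passing path on x86-64 is shown below; the function names are hypothetical and chosen only for illustration.

// pass_f80.cpp - hypothetical example: passes long double (f80) arguments
// on x86-64, where the patched CC_X86_64_C rule assigns each one a 16-byte,
// 16-byte-aligned stack slot.
#include <cstdio>

// The long double parameters are passed on the stack; with this patch each
// slot is 16 bytes and 16-byte aligned.
long double scale(long double x, long double factor) {
    return x * factor;
}

int main() {
    long double r = scale(2.5L, 4.0L);
    std::printf("%Lf\n", r);   // prints 10.000000
    return 0;
}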
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 3b4b4f91750..bdd424fbfe1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -108,11 +108,10 @@ X86TargetLowering::X86TargetLowering(TargetMachine &TM)
setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
}
- if (!Subtarget->is64Bit()) {
- // Custom lower SINT_TO_FP and FP_TO_SINT from/to i64 in 32-bit mode.
- setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
- setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
- }
+ // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
+ // are Legal, f80 is custom lowered.
+ setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
+ setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
// Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
// this operation.
@@ -3343,6 +3342,9 @@ SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
// These are really Legal; caller falls through into that case.
if (SrcVT==MVT::i32 && Op.getValueType() != MVT::f80 && X86ScalarSSE)
return Result;
+ if (SrcVT==MVT::i64 && Op.getValueType() != MVT::f80 &&
+ Subtarget->is64Bit())
+ return Result;
// Build the FILD
SDVTList Tys;
@@ -3397,6 +3399,10 @@ SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
if (Op.getValueType() == MVT::i32 && X86ScalarSSE &&
Op.getOperand(0).getValueType() != MVT::f80)
return Result;
+ if (Subtarget->is64Bit() &&
+ Op.getValueType() == MVT::i64 &&
+ Op.getOperand(0).getValueType() != MVT::f80)
+ return Result;
unsigned Opc;
switch (Op.getValueType()) {
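Taken together, the X86ISelLowering.cpp hunks mark SINT_TO_FP and FP_TO_SINT on i64 as Custom on both 32-bit and 64-bit targets, then add early returns so that on a 64-bit target the i64 <-> f32/f64 cases fall through to the Legal handling; only conversions involving f80 continue down the custom path (the FILD sequence built in LowerSINT_TO_FP and the corresponding code in LowerFP_TO_SINT). A hypothetical C++ snippet that would exercise both kinds of conversion when compiled for x86-64 is sketched below; it is not taken from the LLVM test suite.

// i64_f80_convert.cpp - hypothetical sketch of the conversions touched by
// this patch on an x86-64 target.
#include <cstdio>

int main() {
    long long big = 123456789012345LL;

    // i64 -> f64: with this patch the custom lowering returns early and the
    // operation is handled as Legal on a 64-bit target.
    double d = static_cast<double>(big);

    // i64 -> f80 (long double): excluded from the early return, so it still
    // takes the custom path in LowerSINT_TO_FP.
    long double ld = static_cast<long double>(big);

    // f80 -> i64: likewise still custom lowered in LowerFP_TO_SINT.
    long long back = static_cast<long long>(ld);

    std::printf("%f %Lf %lld\n", d, ld, back);
    return 0;
}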