Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64CallLowering.cpp                   |  2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FrameLowering.cpp                  |  2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp                   |  6
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.h                     |  2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp                    |  4
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp                    |  6
-rw-r--r--  llvm/lib/Target/ARM/ARMCallLowering.cpp                           |  2
-rw-r--r--  llvm/lib/Target/AVR/AVRISelLowering.cpp                           |  2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonBitTracker.cpp                     |  2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp                  |  6
-rw-r--r--  llvm/lib/Target/Mips/Mips16HardFloat.cpp                          | 16
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp                         |  2
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp                       |  2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp               |  4
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp           |  3
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h             |  2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp  | 19
-rw-r--r--  llvm/lib/Target/X86/X86CallLowering.cpp                           |  2
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp                           |  8
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.h                             |  2
-rw-r--r--  llvm/lib/Target/XCore/XCoreFrameLowering.cpp                      |  2
21 files changed, 49 insertions, 47 deletions
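
The hunks below are a mechanical rename of the llvm::AttributeSet class to llvm::AttributeList at every call site in lib/Target; the pseudo-indices (ReturnIndex, FunctionIndex) and the accessor calls keep the same meaning, and the remaining churn is line re-wrapping. As a rough sketch of the before/after pattern at a typical call site — the helper function below is illustrative only, not part of the patch:

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Function.h"

    // Query whether a function is attributed for minimum code size,
    // written against the renamed API used throughout this diff.
    static bool isMinSizeFunction(const llvm::Function &F) {
      // Previously spelled: AttributeSet Attrs = F.getAttributes();
      const llvm::AttributeList &Attrs = F.getAttributes();
      // The special index keeps its name; only the owning class changes:
      //   AttributeSet::FunctionIndex -> AttributeList::FunctionIndex
      return Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
                                llvm::Attribute::MinSize);
    }
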
diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
index f781ecb8997..b2f55a7e1e0 100644
--- a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -219,7 +219,7 @@ bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
auto &DL = F.getParent()->getDataLayout();
ArgInfo OrigArg{VReg, Val->getType()};
- setArgFlags(OrigArg, AttributeSet::ReturnIndex, DL, F);
+ setArgFlags(OrigArg, AttributeList::ReturnIndex, DL, F);
SmallVector<ArgInfo, 8> SplitArgs;
splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 5cce8f92db3..8c0cdc12c8f 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -883,7 +883,7 @@ static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
static bool produceCompactUnwindFrame(MachineFunction &MF) {
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
- AttributeSet Attrs = MF.getFunction()->getAttributes();
+ AttributeList Attrs = MF.getFunction()->getAttributes();
return Subtarget.isTargetMachO() &&
!(Subtarget.getTargetLowering()->supportSwiftError() &&
Attrs.hasAttrSomewhere(Attribute::SwiftError));
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4ad23caa2c9..b16e8d33dc3 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7762,7 +7762,7 @@ SDValue
AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
std::vector<SDNode *> *Created) const {
- AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes();
+ AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
if (isIntDivCheap(N->getValueType(0), Attr))
return SDValue(N,0); // Lower SDIV as SDIV
@@ -10794,7 +10794,7 @@ void AArch64TargetLowering::insertCopiesSplitCSR(
}
}
-bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
+bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
// Integer division on AArch64 is expensive. However, when aggressively
// optimizing for code size, we prefer to use a div instruction, as it is
// usually smaller than the alternative sequence.
@@ -10803,7 +10803,7 @@ bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
// size, because it will have to be scalarized, while the alternative code
// sequence can be performed in vector form.
bool OptSize =
- Attr.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
+ Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
return OptSize && !VT.isVector();
}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 2cdc754205b..b8bef0d30d7 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -402,7 +402,7 @@ public:
return AArch64::X1;
}
- bool isIntDivCheap(EVT VT, AttributeSet Attr) const override;
+ bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
bool isCheapToSpeculateCttz() const override {
return true;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp
index 1317c08662f..86dc9bd9ea7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp
@@ -90,8 +90,8 @@ Function *AMDGPUIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID,
Function *F
= cast<Function>(M->getOrInsertFunction(getName(IntrID, Tys), FTy));
- AttributeSet AS = getAttributes(M->getContext(),
- static_cast<AMDGPUIntrinsic::ID>(IntrID));
+ AttributeList AS =
+ getAttributes(M->getContext(), static_cast<AMDGPUIntrinsic::ID>(IntrID));
F->setAttributes(AS);
return F;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index ca25634afdb..c89c676857f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -309,11 +309,11 @@ AMDGPUPromoteAlloca::getLocalSizeYZ(IRBuilder<> &Builder) {
= Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);
CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
- DispatchPtr->addAttribute(AttributeSet::ReturnIndex, Attribute::NoAlias);
- DispatchPtr->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
+ DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
+ DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
// Size of the dispatch packet struct.
- DispatchPtr->addDereferenceableAttr(AttributeSet::ReturnIndex, 64);
+ DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);
Type *I32Ty = Type::getInt32Ty(Mod->getContext());
Value *CastDispatchPtr = Builder.CreateBitCast(
diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp
index 7485aaff49c..10e144f8900 100644
--- a/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -185,7 +185,7 @@ bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
SmallVector<ArgInfo, 4> SplitVTs;
ArgInfo RetInfo(VReg, Val->getType());
- setArgFlags(RetInfo, AttributeSet::ReturnIndex, DL, F);
+ setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
splitToValueTypes(RetInfo, SplitVTs, DL, MF.getRegInfo());
CCAssignFn *AssignFn =
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index 03c5826e8e2..172b9fba7f9 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -1381,7 +1381,7 @@ AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
// Don't emit the ret/reti instruction when the naked attribute is present in
// the function being compiled.
if (MF.getFunction()->getAttributes().hasAttribute(
- AttributeSet::FunctionIndex, Attribute::Naked)) {
+ AttributeList::FunctionIndex, Attribute::Naked)) {
return Chain;
}
diff --git a/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp b/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp
index adcea1ea1a0..90ccecb6629 100644
--- a/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBitTracker.cpp
@@ -74,7 +74,7 @@ HexagonEvaluator::HexagonEvaluator(const HexagonRegisterInfo &tri,
// Module::AnyPointerSize.
if (Width == 0 || Width > 64)
break;
- AttributeSet Attrs = F.getAttributes();
+ AttributeList Attrs = F.getAttributes();
if (Attrs.hasAttribute(AttrIdx, Attribute::ByVal))
continue;
InPhysReg = getNextPhysReg(InPhysReg, Width);
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
index 8b29461554b..06fc9195fa6 100644
--- a/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -176,11 +176,11 @@ HexagonTargetMachine::HexagonTargetMachine(const Target &T, const Triple &TT,
const HexagonSubtarget *
HexagonTargetMachine::getSubtargetImpl(const Function &F) const {
- AttributeSet FnAttrs = F.getAttributes();
+ AttributeList FnAttrs = F.getAttributes();
Attribute CPUAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
+ FnAttrs.getAttribute(AttributeList::FunctionIndex, "target-cpu");
Attribute FSAttr =
- FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");
+ FnAttrs.getAttribute(AttributeList::FunctionIndex, "target-features");
std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
? CPUAttr.getValueAsString().str()
diff --git a/llvm/lib/Target/Mips/Mips16HardFloat.cpp b/llvm/lib/Target/Mips/Mips16HardFloat.cpp
index 191006d6463..53a5d2f0ac1 100644
--- a/llvm/lib/Target/Mips/Mips16HardFloat.cpp
+++ b/llvm/lib/Target/Mips/Mips16HardFloat.cpp
@@ -405,7 +405,7 @@ static bool fixupFPReturnAndCall(Function &F, Module *M,
"__mips16_ret_dc"
};
const char *Name = Helper[RV];
- AttributeSet A;
+ AttributeList A;
Value *Params[] = {RVal};
Modified = true;
//
@@ -414,11 +414,11 @@ static bool fixupFPReturnAndCall(Function &F, Module *M,
// during call setup, the proper call lowering to the helper
// functions will take place.
//
- A = A.addAttribute(C, AttributeSet::FunctionIndex,
+ A = A.addAttribute(C, AttributeList::FunctionIndex,
"__Mips16RetHelper");
- A = A.addAttribute(C, AttributeSet::FunctionIndex,
+ A = A.addAttribute(C, AttributeList::FunctionIndex,
Attribute::ReadNone);
- A = A.addAttribute(C, AttributeSet::FunctionIndex,
+ A = A.addAttribute(C, AttributeList::FunctionIndex,
Attribute::NoInline);
Value *F = (M->getOrInsertFunction(Name, A, MyVoid, T, nullptr));
CallInst::Create(F, Params, "", &I);
@@ -490,15 +490,15 @@ static void createFPFnStub(Function *F, Module *M, FPParamVariant PV,
// remove the use-soft-float attribute
//
static void removeUseSoftFloat(Function &F) {
- AttributeSet A;
+ AttributeList A;
DEBUG(errs() << "removing -use-soft-float\n");
- A = A.addAttribute(F.getContext(), AttributeSet::FunctionIndex,
+ A = A.addAttribute(F.getContext(), AttributeList::FunctionIndex,
"use-soft-float", "false");
- F.removeAttributes(AttributeSet::FunctionIndex, A);
+ F.removeAttributes(AttributeList::FunctionIndex, A);
if (F.hasFnAttribute("use-soft-float")) {
DEBUG(errs() << "still has -use-soft-float\n");
}
- F.addAttributes(AttributeSet::FunctionIndex, A);
+ F.addAttributes(AttributeList::FunctionIndex, A);
}
diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index e5942997c20..307ca6b99ff 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -1493,7 +1493,7 @@ void NVPTXAsmPrinter::printParamName(Function::const_arg_iterator I,
void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
const DataLayout &DL = getDataLayout();
- const AttributeSet &PAL = F->getAttributes();
+ const AttributeList &PAL = F->getAttributes();
const TargetLowering *TLI = nvptxSubtarget->getTargetLowering();
Function::const_arg_iterator I, E;
unsigned paramIndex = 0;
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 8b3e49abc82..69a210f1a91 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -2315,7 +2315,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
auto PtrVT = getPointerTy(DAG.getDataLayout());
const Function *F = MF.getFunction();
- const AttributeSet &PAL = F->getAttributes();
+ const AttributeList &PAL = F->getAttributes();
const TargetLowering *TLI = STI.getTargetLowering();
SDValue Root = DAG.getRoot();
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
index f48b7f3af83..e7fd4ef33e1 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -596,7 +596,7 @@ bool WebAssemblyFastISel::fastLowerArguments() {
unsigned i = 0;
for (auto const &Arg : F->args()) {
- const AttributeSet &Attrs = F->getAttributes();
+ const AttributeList &Attrs = F->getAttributes();
if (Attrs.hasAttribute(i+1, Attribute::ByVal) ||
Attrs.hasAttribute(i+1, Attribute::SwiftSelf) ||
Attrs.hasAttribute(i+1, Attribute::SwiftError) ||
@@ -746,7 +746,7 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) {
if (ArgTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
return false;
- const AttributeSet &Attrs = Call->getAttributes();
+ const AttributeList &Attrs = Call->getAttributes();
if (Attrs.hasAttribute(i+1, Attribute::ByVal) ||
Attrs.hasAttribute(i+1, Attribute::SwiftSelf) ||
Attrs.hasAttribute(i+1, Attribute::SwiftError) ||
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 9c8de79866f..31a5ca1f4cc 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -258,7 +258,8 @@ bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
return true;
}
-bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
+bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
+ AttributeList Attr) const {
// The current thinking is that wasm engines will perform this optimization,
// so we can save on code size.
return true;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
index 5bc723028e6..99d3d0d558f 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
@@ -58,7 +58,7 @@ class WebAssemblyTargetLowering final : public TargetLowering {
unsigned AS) const override;
bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, unsigned Align,
bool *Fast) const override;
- bool isIntDivCheap(EVT VT, AttributeSet Attr) const override;
+ bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
index 279b4bdfabe..16e7ffa58c0 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
@@ -412,7 +412,7 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) {
if (CI->doesNotReturn()) {
if (auto *F = dyn_cast<Function>(CI->getCalledValue()))
F->removeFnAttr(Attribute::NoReturn);
- CI->removeAttribute(AttributeSet::FunctionIndex, Attribute::NoReturn);
+ CI->removeAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
}
IRBuilder<> IRB(C);
@@ -435,24 +435,25 @@ Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) {
// Because we added the pointer to the callee as first argument, all
// argument attribute indices have to be incremented by one.
- SmallVector<AttributeSet, 8> AttributesVec;
- const AttributeSet &InvokePAL = CI->getAttributes();
+ SmallVector<AttributeList, 8> AttributesVec;
+ const AttributeList &InvokePAL = CI->getAttributes();
CallSite::arg_iterator AI = CI->arg_begin();
unsigned i = 1; // Argument attribute index starts from 1
for (unsigned e = CI->getNumArgOperands(); i <= e; ++AI, ++i) {
if (InvokePAL.hasAttributes(i)) {
AttrBuilder B(InvokePAL, i);
- AttributesVec.push_back(AttributeSet::get(C, i + 1, B));
+ AttributesVec.push_back(AttributeList::get(C, i + 1, B));
}
}
// Add any return attributes.
- if (InvokePAL.hasAttributes(AttributeSet::ReturnIndex))
- AttributesVec.push_back(AttributeSet::get(C, InvokePAL.getRetAttributes()));
+ if (InvokePAL.hasAttributes(AttributeList::ReturnIndex))
+ AttributesVec.push_back(
+ AttributeList::get(C, InvokePAL.getRetAttributes()));
// Add any function attributes.
- if (InvokePAL.hasAttributes(AttributeSet::FunctionIndex))
- AttributesVec.push_back(AttributeSet::get(C, InvokePAL.getFnAttributes()));
+ if (InvokePAL.hasAttributes(AttributeList::FunctionIndex))
+ AttributesVec.push_back(AttributeList::get(C, InvokePAL.getFnAttributes()));
// Reconstruct the AttributesList based on the vector we constructed.
- AttributeSet NewCallPAL = AttributeSet::get(C, AttributesVec);
+ AttributeList NewCallPAL = AttributeList::get(C, AttributesVec);
NewCall->setAttributes(NewCallPAL);
CI->replaceAllUsesWith(NewCall);
diff --git a/llvm/lib/Target/X86/X86CallLowering.cpp b/llvm/lib/Target/X86/X86CallLowering.cpp
index 85ebf25c3a8..ce905c9d804 100644
--- a/llvm/lib/Target/X86/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/X86CallLowering.cpp
@@ -107,7 +107,7 @@ bool X86CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Function &F = *MF.getFunction();
ArgInfo OrigArg{VReg, Val->getType()};
- setArgFlags(OrigArg, AttributeSet::ReturnIndex, DL, F);
+ setArgFlags(OrigArg, AttributeList::ReturnIndex, DL, F);
SmallVector<ArgInfo, 8> SplitArgs;
splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6a7c6d59db9..88b90884715 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -20781,7 +20781,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
// Check that ECX wasn't needed by an 'inreg' parameter.
FunctionType *FTy = Func->getFunctionType();
- const AttributeSet &Attrs = Func->getAttributes();
+ const AttributeList &Attrs = Func->getAttributes();
if (!Attrs.isEmpty() && !Func->isVarArg()) {
unsigned InRegCount = 0;
@@ -35801,7 +35801,7 @@ int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
return -1;
}
-bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
+bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
// Integer division on x86 is expensive. However, when aggressively optimizing
// for code size, we prefer to use a div instruction, as it is usually smaller
// than the alternative sequence.
@@ -35809,8 +35809,8 @@ bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
// integer division, leaving the division as-is is a loss even in terms of
// size, because it will have to be scalarized, while the alternative code
// sequence can be performed in vector form.
- bool OptSize = Attr.hasAttribute(AttributeSet::FunctionIndex,
- Attribute::MinSize);
+ bool OptSize =
+ Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
return OptSize && !VT.isVector();
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 60dfe6bcdec..ff6c1275c0e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1046,7 +1046,7 @@ namespace llvm {
/// \brief Customize the preferred legalization strategy for certain types.
LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;
- bool isIntDivCheap(EVT VT, AttributeSet Attr) const override;
+ bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
bool supportSwiftError() const override;
diff --git a/llvm/lib/Target/XCore/XCoreFrameLowering.cpp b/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
index e0e2e031996..a752357400b 100644
--- a/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -238,7 +238,7 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF,
report_fatal_error("emitPrologue unsupported alignment: "
+ Twine(MFI.getMaxAlignment()));
- const AttributeSet &PAL = MF.getFunction()->getAttributes();
+ const AttributeList &PAL = MF.getFunction()->getAttributes();
if (PAL.hasAttrSomewhere(Attribute::Nest))
BuildMI(MBB, MBBI, dl, TII.get(XCore::LDWSP_ru6), XCore::R11).addImm(0);
// FIX: Needs addMemOperand() but can't use getFixedStack() or getStack().