Diffstat (limited to 'llvm/lib')
 llvm/lib/CodeGen/GlobalISel/CallLowering.cpp    | 167
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp  |  10
 llvm/lib/CodeGen/GlobalISel/Utils.cpp           |  17
 llvm/lib/Target/AArch64/AArch64CallLowering.cpp |  15
 llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp   |   3
 llvm/lib/Target/ARM/ARMCallLowering.cpp         |  11
 llvm/lib/Target/Mips/MipsCallLowering.cpp       |   4
 llvm/lib/Target/X86/X86CallLowering.cpp         |   7
 8 files changed, 179 insertions(+), 55 deletions(-)
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 5e08361fc05..d433155160b 100644
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -11,8 +11,9 @@
///
//===----------------------------------------------------------------------===//
-#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
@@ -71,29 +72,30 @@ template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
const DataLayout &DL,
const FuncInfoTy &FuncInfo) const {
+ auto &Flags = Arg.Flags[0];
const AttributeList &Attrs = FuncInfo.getAttributes();
if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
- Arg.Flags.setZExt();
+ Flags.setZExt();
if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
- Arg.Flags.setSExt();
+ Flags.setSExt();
if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
- Arg.Flags.setInReg();
+ Flags.setInReg();
if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
- Arg.Flags.setSRet();
+ Flags.setSRet();
if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
- Arg.Flags.setSwiftSelf();
+ Flags.setSwiftSelf();
if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
- Arg.Flags.setSwiftError();
+ Flags.setSwiftError();
if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
- Arg.Flags.setByVal();
+ Flags.setByVal();
if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
- Arg.Flags.setInAlloca();
+ Flags.setInAlloca();
- if (Arg.Flags.isByVal() || Arg.Flags.isInAlloca()) {
+ if (Flags.isByVal() || Flags.isInAlloca()) {
Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();
auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
- Arg.Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));
+ Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
@@ -102,11 +104,11 @@ void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
FrameAlign = FuncInfo.getParamAlignment(OpIdx - 2);
else
FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
- Arg.Flags.setByValAlign(FrameAlign);
+ Flags.setByValAlign(FrameAlign);
}
if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
- Arg.Flags.setNest();
- Arg.Flags.setOrigAlign(DL.getABITypeAlignment(Arg.Ty));
+ Flags.setNest();
+ Flags.setOrigAlign(DL.getABITypeAlignment(Arg.Ty));
}
template void
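
The Arg.Flags -> Flags rewrites above lean on a header change that a diffstat limited to llvm/lib cannot show: ArgInfo::Flags is now a vector carrying one ISD::ArgFlagsTy per split part, with Flags[0] keeping the old single-flags meaning. A minimal sketch of the assumed shape, simplified from CallLowering.h:

    // Hedged sketch, not the verbatim header: Flags widens from a single
    // ISD::ArgFlagsTy to one entry per part; OrigRegs records the wide
    // vregs that a split argument originally occupied.
    struct ArgInfo {
      SmallVector<Register, 4> Regs;
      SmallVector<Register, 2> OrigRegs;
      SmallVector<ISD::ArgFlagsTy, 4> Flags;
      Type *Ty;
      bool IsFixed;
    };

setArgFlags then binds auto &Flags = Arg.Flags[0] once and writes every attribute-derived bit through that reference.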
@@ -161,7 +163,7 @@ void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
}
bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
- ArrayRef<ArgInfo> Args,
+ SmallVectorImpl<ArgInfo> &Args,
ValueHandler &Handler) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = MF.getFunction();
@@ -173,7 +175,7 @@ bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
bool CallLowering::handleAssignments(CCState &CCInfo,
SmallVectorImpl<CCValAssign> &ArgLocs,
MachineIRBuilder &MIRBuilder,
- ArrayRef<ArgInfo> Args,
+ SmallVectorImpl<ArgInfo> &Args,
ValueHandler &Handler) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = MF.getFunction();
@@ -182,14 +184,101 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
unsigned NumArgs = Args.size();
for (unsigned i = 0; i != NumArgs; ++i) {
MVT CurVT = MVT::getVT(Args[i].Ty);
- if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo)) {
- // Try to use the register type if we couldn't assign the VT.
- if (!Handler.isIncomingArgumentHandler() || !CurVT.isValid())
+ if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
+ Args[i].Flags[0], CCInfo)) {
+ if (!CurVT.isValid())
return false;
- CurVT = TLI->getRegisterTypeForCallingConv(
+ MVT NewVT = TLI->getRegisterTypeForCallingConv(
F.getContext(), F.getCallingConv(), EVT(CurVT));
- if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo))
- return false;
+
+ // If we need to split the type over multiple regs, check it's a scenario
+ // we currently support.
+ unsigned NumParts = TLI->getNumRegistersForCallingConv(
+ F.getContext(), F.getCallingConv(), CurVT);
+ if (NumParts > 1) {
+ if (CurVT.isVector())
+ return false;
+ // For now only handle exact splits.
+ if (NewVT.getSizeInBits() * NumParts != CurVT.getSizeInBits())
+ return false;
+ }
+
+ // For incoming arguments (return values), we could have values in
+ // physregs (or memlocs) which we want to extract and copy to vregs.
+ // During this, we might have to deal with the LLT being split across
+ // multiple regs, so we have to record this information for later.
+ //
+ // If we have outgoing args, then we have the opposite case. We have a
+ // vreg with an LLT which we want to assign to a physical location, and
+ // we might have to record that the value has to be split later.
+ if (Handler.isIncomingArgumentHandler()) {
+ if (NumParts == 1) {
+ // Try to use the register type if we couldn't assign the VT.
+ if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
+ Args[i].Flags[0], CCInfo))
+ return false;
+ } else {
+ // We're handling an incoming arg which is split over multiple regs.
+ // E.g. returning an s128 on AArch64.
+ ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
+ Args[i].OrigRegs.push_back(Args[i].Regs[0]);
+ Args[i].Regs.clear();
+ Args[i].Flags.clear();
+ LLT NewLLT = getLLTForMVT(NewVT);
+ // For each split register, create and assign a vreg that will store
+ // the incoming component of the larger value. These will later be
+ // merged to form the final vreg.
+ for (unsigned Part = 0; Part < NumParts; ++Part) {
+ Register Reg =
+ MIRBuilder.getMRI()->createGenericVirtualRegister(NewLLT);
+ ISD::ArgFlagsTy Flags = OrigFlags;
+ if (Part == 0) {
+ Flags.setSplit();
+ } else {
+ Flags.setOrigAlign(1);
+ if (Part == NumParts - 1)
+ Flags.setSplitEnd();
+ }
+ Args[i].Regs.push_back(Reg);
+ Args[i].Flags.push_back(Flags);
+ if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
+ Args[i].Flags[Part], CCInfo)) {
+ // Still couldn't assign this smaller part type for some reason.
+ return false;
+ }
+ }
+ }
+ } else {
+ // Handling an outgoing arg that might need to be split.
+ if (NumParts < 2)
+ return false; // Don't know how to deal with this type combination.
+
+ // This type is passed via multiple registers in the calling convention.
+ // We need to extract the individual parts.
+ Register LargeReg = Args[i].Regs[0];
+ LLT SmallTy = LLT::scalar(NewVT.getSizeInBits());
+ auto Unmerge = MIRBuilder.buildUnmerge(SmallTy, LargeReg);
+ assert(Unmerge->getNumOperands() == NumParts + 1);
+ ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
+ // We're going to replace the regs and flags with the split ones.
+ Args[i].Regs.clear();
+ Args[i].Flags.clear();
+ for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) {
+ ISD::ArgFlagsTy Flags = OrigFlags;
+ if (PartIdx == 0) {
+ Flags.setSplit();
+ } else {
+ Flags.setOrigAlign(1);
+ if (PartIdx == NumParts - 1)
+ Flags.setSplitEnd();
+ }
+ Args[i].Regs.push_back(Unmerge.getReg(PartIdx));
+ Args[i].Flags.push_back(Flags);
+ if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
+ Args[i].Flags[PartIdx], CCInfo))
+ return false;
+ }
+ }
}
}
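
Both new branches above stamp identical split markers onto each part. A compact sketch of that shared pattern, factored into a hypothetical helper (flagsForPart is illustrative and not part of the patch):

    static ISD::ArgFlagsTy flagsForPart(ISD::ArgFlagsTy OrigFlags,
                                        unsigned Part, unsigned NumParts) {
      ISD::ArgFlagsTy Flags = OrigFlags;
      if (Part == 0) {
        Flags.setSplit();          // first part opens the split sequence
      } else {
        Flags.setOrigAlign(1);     // later parts are packed without padding
        if (Part == NumParts - 1)
          Flags.setSplitEnd();     // last part closes the split sequence
      }
      return Flags;
    }

Calling-convention code can then recognize a multi-register value by the Split/SplitEnd bracket, much as SelectionDAG's argument lowering marks split arguments.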
@@ -204,9 +293,6 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
continue;
}
- assert(Args[i].Regs.size() == 1 &&
- "Can't handle multiple virtual regs yet");
-
// FIXME: Pack registers if we have more than one.
Register ArgReg = Args[i].Regs[0];
@@ -214,8 +300,25 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
MVT OrigVT = MVT::getVT(Args[i].Ty);
MVT VAVT = VA.getValVT();
if (Handler.isIncomingArgumentHandler() && VAVT != OrigVT) {
- if (VAVT.getSizeInBits() < OrigVT.getSizeInBits())
- return false; // Can't handle this type of arg yet.
+ if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) {
+ // Expected to be multiple regs for a single incoming arg.
+ unsigned NumArgRegs = Args[i].Regs.size();
+ if (NumArgRegs < 2)
+ return false;
+
+ assert((j + (NumArgRegs - 1)) < ArgLocs.size() &&
+ "Too many regs for number of args");
+ for (unsigned Part = 0; Part < NumArgRegs; ++Part) {
+ // There should be Regs.size() ArgLocs per argument.
+ VA = ArgLocs[j + Part];
+ Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
+ }
+ j += NumArgRegs - 1;
+ // Merge the split registers into the expected larger result vreg
+ // of the original call.
+ MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs);
+ continue;
+ }
const LLT VATy(VAVT);
Register NewReg =
MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
@@ -236,6 +339,16 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
} else {
MIRBuilder.buildTrunc(ArgReg, {NewReg}).getReg(0);
}
+ } else if (!Handler.isIncomingArgumentHandler()) {
+ assert((j + (Args[i].Regs.size() - 1)) < ArgLocs.size() &&
+ "Too many regs for number of args");
+ // This is an outgoing argument that might have been split.
+ for (unsigned Part = 0; Part < Args[i].Regs.size(); ++Part) {
+ // There should be Regs.size() ArgLocs per argument.
+ VA = ArgLocs[j + Part];
+ Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
+ }
+ j += Args[i].Regs.size() - 1;
} else {
Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
}
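
Taken together, the two paths let a value that the calling convention spreads across several registers round-trip through GlobalISel. For an s128 call result on AArch64 the lowering should now produce roughly the following MIR (register choice x0/x1 per AAPCS; vreg names illustrative):

    ; incoming: copy each assigned part, then rebuild the wide value
    %lo:_(s64) = COPY $x0
    %hi:_(s64) = COPY $x1
    %res:_(s128) = G_MERGE_VALUES %lo(s64), %hi(s64)

    ; outgoing: the mirror image via G_UNMERGE_VALUES
    %lo:_(s64), %hi:_(s64) = G_UNMERGE_VALUES %val(s128)
    $x0 = COPY %lo(s64)
    $x1 = COPY %hi(s64)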
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index b1bf8258119..da8898af8ef 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -419,16 +419,6 @@ static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
return MF.getFunction().hasOptSize();
}
-// Get a rough equivalent of an MVT for a given LLT.
-static MVT getMVTForLLT(LLT Ty) {
- if (!Ty.isVector())
- return MVT::getIntegerVT(Ty.getSizeInBits());
-
- return MVT::getVectorVT(
- MVT::getIntegerVT(Ty.getElementType().getSizeInBits()),
- Ty.getNumElements());
-}
-
// Returns a list of types to use for memory op lowering in MemOps. A partial
// port of findOptimalMemOpLowering in TargetLowering.
static bool findGISelOptimalMemOpLowering(
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index be09db16d83..a93e5153089 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -412,3 +412,20 @@ Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const unsigned Op1,
void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
AU.addPreserved<StackProtector>();
}
+
+MVT llvm::getMVTForLLT(LLT Ty) {
+ if (!Ty.isVector())
+ return MVT::getIntegerVT(Ty.getSizeInBits());
+
+ return MVT::getVectorVT(
+ MVT::getIntegerVT(Ty.getElementType().getSizeInBits()),
+ Ty.getNumElements());
+}
+
+LLT llvm::getLLTForMVT(MVT Ty) {
+ if (!Ty.isVector())
+ return LLT::scalar(Ty.getSizeInBits());
+
+ return LLT::vector(Ty.getVectorNumElements(),
+ Ty.getVectorElementType().getSizeInBits());
+}
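
A small usage sketch of the two helpers now exported from Utils (assuming the declarations land in llvm/CodeGen/GlobalISel/Utils.h, which this diff of llvm/lib cannot show):

    // Round-trips are exact for the integer cases these helpers cover.
    LLT S64 = LLT::scalar(64);
    MVT M = getMVTForLLT(S64);        // MVT::i64
    assert(getLLTForMVT(M) == S64);

    LLT V4S32 = LLT::vector(4, 32);   // 4 x s32 with this revision's LLT API
    assert(getMVTForLLT(V4S32) == MVT::v4i32);

Note the deliberate asymmetry: both helpers map everything onto integer types, so a floating-point MVT such as f64 comes back as s64 and does not survive the reverse trip as f64.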
diff --git a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
index e389aaaeb71..9ace33b3985 100644
--- a/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -174,12 +174,13 @@ struct OutgoingArgHandler : public CallLowering::ValueHandler {
bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
const CallLowering::ArgInfo &Info,
+ ISD::ArgFlagsTy Flags,
CCState &State) override {
bool Res;
if (Info.IsFixed)
- Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+ Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
else
- Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+ Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
StackSize = State.getNextStackOffset();
return Res;
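
The widened assignArg signature is what allows one ArgInfo to present different flags per part: handleAssignments now passes Args[i].Flags[Part] explicitly instead of every handler re-reading Info.Flags. A sketch of the per-part dispatch as the core loop above performs it:

    // The handler still receives the whole ArgInfo (e.g. for Info.IsFixed),
    // but calling-convention analysis sees only the current part's flags.
    for (unsigned Part = 0; Part < NumParts; ++Part)
      if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                            Args[i].Flags[Part], CCInfo))
        return false; // the CC rejected this part's assignment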
@@ -208,7 +209,7 @@ void AArch64CallLowering::splitToValueTypes(
// No splitting to do, but we want to replace the original type (e.g. [1 x
// double] -> double).
SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
- OrigArg.Flags, OrigArg.IsFixed);
+ OrigArg.Flags[0], OrigArg.IsFixed);
return;
}
@@ -219,13 +220,13 @@ void AArch64CallLowering::splitToValueTypes(
OrigArg.Ty, CallConv, false);
for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
- SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags,
+ SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
OrigArg.IsFixed);
if (NeedsRegBlock)
- SplitArgs.back().Flags.setInConsecutiveRegs();
+ SplitArgs.back().Flags[0].setInConsecutiveRegs();
}
- SplitArgs.back().Flags.setInConsecutiveRegsLast();
+ SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
}
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
@@ -419,7 +420,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
splitToValueTypes(OrigArg, SplitArgs, DL, MRI, Info.CallConv);
// AAPCS requires that we zero-extend i1 to 8 bits by the caller.
if (OrigArg.Ty->isIntegerTy(1))
- SplitArgs.back().Flags.setZExt();
+ SplitArgs.back().Flags[0].setZExt();
}
// Find out which ABI gets to decide where things go.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index 6b194b1c61b..df235cb0f8f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -64,8 +64,9 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
const CallLowering::ArgInfo &Info,
+ ISD::ArgFlagsTy Flags,
CCState &State) override {
- return AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+ return AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
}
};
diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp
index 3f1c7d41342..52dba006be0 100644
--- a/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -169,8 +169,9 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- const CallLowering::ArgInfo &Info, CCState &State) override {
- if (AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State))
+ const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
+ CCState &State) override {
+ if (AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State))
return true;
StackSize =
@@ -199,7 +200,7 @@ void ARMCallLowering::splitToValueTypes(const ArgInfo &OrigArg,
if (SplitVTs.size() == 1) {
// Even if there is no splitting to do, we still want to replace the
// original type (e.g. pointer type -> integer).
- auto Flags = OrigArg.Flags;
+ auto Flags = OrigArg.Flags[0];
unsigned OriginalAlignment = DL.getABITypeAlignment(OrigArg.Ty);
Flags.setOrigAlign(OriginalAlignment);
SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
@@ -211,7 +212,7 @@ void ARMCallLowering::splitToValueTypes(const ArgInfo &OrigArg,
for (unsigned i = 0, e = SplitVTs.size(); i != e; ++i) {
EVT SplitVT = SplitVTs[i];
Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
- auto Flags = OrigArg.Flags;
+ auto Flags = OrigArg.Flags[0];
unsigned OriginalAlignment = DL.getABITypeAlignment(SplitTy);
Flags.setOrigAlign(OriginalAlignment);
@@ -547,7 +548,7 @@ bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, CallLoweringInfo &
if (!Arg.IsFixed)
IsVarArg = true;
- if (Arg.Flags.isByVal())
+ if (Arg.Flags[0].isByVal())
return false;
splitToValueTypes(Arg, ArgInfos, MF);
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
index 86913653bee..0d0e446fee9 100644
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -508,7 +508,7 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
for (auto &Arg : Info.OrigArgs) {
if (!isSupportedType(Arg.Ty))
return false;
- if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
+ if (Arg.Flags[0].isByVal() || Arg.Flags[0].isSRet())
return false;
}
@@ -641,7 +641,7 @@ void MipsCallLowering::subTargetRegTypeForCallingConv(
F.getContext(), F.getCallingConv(), VT);
for (unsigned i = 0; i < NumRegs; ++i) {
- ISD::ArgFlagsTy Flags = Arg.Flags;
+ ISD::ArgFlagsTy Flags = Arg.Flags[0];
if (i == 0)
Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
diff --git a/llvm/lib/Target/X86/X86CallLowering.cpp b/llvm/lib/Target/X86/X86CallLowering.cpp
index d559b2e2016..1dddf7878bc 100644
--- a/llvm/lib/Target/X86/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/X86CallLowering.cpp
@@ -155,8 +155,9 @@ struct OutgoingValueHandler : public CallLowering::ValueHandler {
bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
- const CallLowering::ArgInfo &Info, CCState &State) override {
- bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
+ const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
+ CCState &State) override {
+ bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
StackSize = State.getNextStackOffset();
static const MCPhysReg XMMArgRegs[] = {X86::XMM0, X86::XMM1, X86::XMM2,
@@ -405,7 +406,7 @@ bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
for (const auto &OrigArg : Info.OrigArgs) {
// TODO: handle not simple cases.
- if (OrigArg.Flags.isByVal())
+ if (OrigArg.Flags[0].isByVal())
return false;
if (OrigArg.Regs.size() > 1)