author     Manman Ren <mren@apple.com>  2012-10-16 19:01:37 +0000
committer  Manman Ren <mren@apple.com>  2012-10-16 19:01:37 +0000
commit     cca54d04607b5ad356d9c8babcfb4898ac75d72f (patch)
tree       a55fd06c0bf27c98be36ae476d251776d189d088 /clang/lib/CodeGen
parent     282ad876bd89a7d7ce698ab4128dc8f0a18105cd (diff)
ARM ABI: fix ABI alignment issues when passing legal vector types as varargs.
We create an aligned temporary space and copy the content over from ap.cur to the temporary space. This is necessary if the natural alignment of the type is greater than the ABI alignment.

rdar://12439123
llvm-svn: 166040
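For readers skimming the diff below, here is a minimal standalone sketch of the scheme the commit message describes: ap.cur is rounded up to the ABI alignment, and when the type's natural alignment exceeds what the ABI can guarantee, the bytes are copied into a sufficiently aligned temporary before being read as the vector type. This is not the clang codegen itself; names such as align_up and read_vector_vararg are hypothetical, for illustration only.

    #include <cstdint>
    #include <cstring>

    // Round addr up to align (a power of two); mirrors the
    // CreatePtrToInt / CreateAdd / CreateAnd sequence in the patch.
    std::uintptr_t align_up(std::uintptr_t addr, std::uintptr_t align) {
        return (addr + align - 1) & ~(align - 1);
    }

    // Hypothetical helper: fetch a value of `size` bytes from the va_list cursor
    // `ap_cur`, where the ABI only guarantees `abi_align` bytes of alignment but
    // the type naturally wants `natural_align` bytes (e.g. 16 for a 128-bit vector).
    const void *read_vector_vararg(const unsigned char *ap_cur, std::size_t size,
                                   std::size_t abi_align, std::size_t natural_align,
                                   unsigned char *aligned_temp) {
        const unsigned char *addr = reinterpret_cast<const unsigned char *>(
            align_up(reinterpret_cast<std::uintptr_t>(ap_cur), abi_align));
        if (abi_align < natural_align) {
            // ap.cur may be under-aligned for the vector type, so copy the bytes
            // into a sufficiently aligned temporary (the patch does this with
            // CreateTempAlloca + CreateMemCpy) and hand that back instead.
            std::memcpy(aligned_temp, addr, size);
            return aligned_temp;
        }
        return addr;
    }

    int main() {
        unsigned char save_area[32] = {};    // stand-in for the varargs save area
        alignas(16) unsigned char temp[16];  // the "var.align" temporary from the patch
        const void *p = read_vector_vararg(save_area, 16, /*abi_align=*/8,
                                           /*natural_align=*/16, temp);
        return p == temp ? 0 : 1;            // copy path taken, as expected
    }

The caller would then read the returned address as the vector type, which is what the final bitcast to PTy does in the patched EmitVAArg below.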
Diffstat (limited to 'clang/lib/CodeGen')
-rw-r--r--  clang/lib/CodeGen/TargetInfo.cpp  44
1 file changed, 38 insertions, 6 deletions
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 75cc5a37a57..b43d33198fe 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -3169,27 +3169,59 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- // Handle address alignment for type alignment > 32 bits
+
+ uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
+
+ // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
+ // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
+ if (Ty->getAs<VectorType>()) {
+ if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
+ getABIKind() == ARMABIInfo::AAPCS)
+ TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
+ else
+ TyAlign = 4;
+ }
+
+ // Handle address alignment for ABI alignment > 4 bytes.
if (TyAlign > 4) {
assert((TyAlign & (TyAlign - 1)) == 0 &&
"Alignment is not power of 2!");
llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
- Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+ Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
}
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
uint64_t Offset =
- llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::RoundUpToAlignment(Size, 4);
llvm::Value *NextAddr =
Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+ if (Ty->getAs<VectorType>() &&
+ (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8)) {
+ // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
+ // may not be correctly aligned for the vector type. We create an aligned
+ // temporary space and copy the content over from ap.cur to the temporary
+ // space. This is necessary if the natural alignment of the type is greater
+ // than the ABI alignment.
+ llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
+ CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
+ llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
+ "var.align");
+ llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
+ llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
+ Builder.CreateMemCpy(Dst, Src,
+ llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
+ TyAlign, false);
+ Addr = AlignedTemp; // The content is now in the aligned temporary.
+ }
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
return AddrTyped;
}
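To make the new control flow concrete: for a 128-bit NEON-style vector under AAPCS, the patch clamps the alignment applied to ap.cur to 8 bytes, which is less than the vector's natural 16-byte alignment, so the aligned-temporary copy is taken while ap.next still advances by the size rounded up to 4 bytes. A small worked example of that arithmetic (illustrative only, not clang code; the variable names just echo the ones in the diff):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    int main() {
        std::uint64_t Size = 16;     // a 128-bit vector occupies 16 bytes
        std::uint64_t TyAlign = 16;  // its natural alignment

        // The AAPCS clamp added by the patch: at least 4 bytes, at most 8 bytes.
        std::uint64_t AbiAlign =
            std::min(std::max(TyAlign, std::uint64_t(4)), std::uint64_t(8));

        // ap.cur still advances by the size rounded up to a 4-byte multiple.
        std::uint64_t Offset = (Size + 3) & ~std::uint64_t(3);

        std::cout << "ABI alignment applied to ap.cur: " << AbiAlign << '\n'      // 8
                  << "aligned temporary needed: " << (AbiAlign < TyAlign) << '\n' // 1
                  << "ap.next advances by: " << Offset << " bytes\n";             // 16
        return 0;
    }

Under APCS the clamp instead forces 4-byte alignment, but the same copy-to-temporary condition applies whenever the clamped value is below the type's natural alignment.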