path: root/clang/lib
author      Yaxun Liu <Yaxun.Liu@amd.com>    2018-04-25 01:10:37 +0000
committer   Yaxun Liu <Yaxun.Liu@amd.com>    2018-04-25 01:10:37 +0000
commit      887c569bcb83115fce7ee768d92c93010fe49b47 (patch)
tree        b625a958bf24d8b4c60b58208fadace4316d4998 /clang/lib
parent      7282d320b7c9729cd91c5aa23d39629577e92c65 (diff)
download    bcm5719-llvm-887c569bcb83115fce7ee768d92c93010fe49b47.tar.gz
            bcm5719-llvm-887c569bcb83115fce7ee768d92c93010fe49b47.zip
[HIP] Add hip input kind and codegen for kernel launching
HIP is a language similar to CUDA (https://github.com/ROCm-Developer-Tools/HIP/blob/master/docs/markdown/hip_kernel_language.md). The kernel language syntax is close enough that Clang can compile a HIP program as a CUDA program. The main difference is the host API: HIP defines a set of vendor-neutral host APIs that can be implemented on different platforms, and there is currently an open-source implementation of the HIP runtime for the amdgpu target (https://github.com/ROCm-Developer-Tools/HIP).

This patch adds support for the HIP input kind and the hip language standard. When a HIP file is compiled, both LangOpts.CUDA and LangOpts.HIP are turned on. This allows a HIP program to be compiled as CUDA in most cases; special handling of HIP is needed only where LangOpts.HIP is checked.

The patch also adds codegen for launching kernels in HIP programs through the HIP host API. When -x hip is not specified, there is no behavior change for CUDA.

Patch by Greg Rodgers. Revised and lit test added by Yaxun Liu.

Differential Revision: https://reviews.llvm.org/D44984

llvm-svn: 330790
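For context, here is a minimal sketch of a HIP program that exercises the launch path this patch adds. It is illustrative only: the kernel, sizes, and launch geometry are made up, and the host calls are assumed to come from the ROCm HIP runtime headers. Built with -x hip, the triple-chevron launch is lowered through the hip-prefixed entry points introduced below; built as CUDA, the same source would use the cuda-prefixed ones.

#include <hip/hip_runtime.h>

__global__ void vec_add(const float *a, const float *b, float *c, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    c[i] = a[i] + b[i];
}

int main() {
  const int n = 1024;
  float *a, *b, *c;
  // Vendor-neutral host API; data transfer and error checking omitted.
  hipMalloc((void **)&a, n * sizeof(float));
  hipMalloc((void **)&b, n * sizeof(float));
  hipMalloc((void **)&c, n * sizeof(float));
  // Lowered by CGNVCUDARuntime to hipSetupArgument + hipLaunchByPtr calls.
  vec_add<<<n / 256, 256>>>(a, b, c, n);
  hipDeviceSynchronize();
  hipFree(a);
  hipFree(b);
  hipFree(c);
  return 0;
}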
Diffstat (limited to 'clang/lib')
-rw-r--r--   clang/lib/CodeGen/CGCUDANV.cpp              58
-rw-r--r--   clang/lib/Frontend/CompilerInvocation.cpp   13
-rw-r--r--   clang/lib/Frontend/FrontendActions.cpp       1
-rw-r--r--   clang/lib/Frontend/InitPreprocessor.cpp      4
-rw-r--r--   clang/lib/Sema/SemaCUDA.cpp                  5
-rw-r--r--   clang/lib/Sema/SemaDecl.cpp                  6
6 files changed, 65 insertions, 22 deletions
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index 641405aa5b4..e9e5483e5ac 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -55,6 +55,8 @@ private:
llvm::FunctionType *getRegisterGlobalsFnTy() const;
llvm::FunctionType *getCallbackFnTy() const;
llvm::FunctionType *getRegisterLinkedBinaryFnTy() const;
+ std::string addPrefixToName(StringRef FuncName) const;
+ std::string addUnderscoredPrefixToName(StringRef FuncName) const;
/// Creates a function to register all kernel stubs generated in this module.
llvm::Function *makeRegisterGlobalsFn();
@@ -114,6 +116,18 @@ public:
}
+std::string CGNVCUDARuntime::addPrefixToName(StringRef FuncName) const {
+ if (CGM.getLangOpts().HIP)
+ return ((Twine("hip") + Twine(FuncName)).str());
+ return ((Twine("cuda") + Twine(FuncName)).str());
+}
+std::string
+CGNVCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const {
+ if (CGM.getLangOpts().HIP)
+ return ((Twine("__hip") + Twine(FuncName)).str());
+ return ((Twine("__cuda") + Twine(FuncName)).str());
+}
+
CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
: CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
TheModule(CGM.getModule()),
@@ -133,15 +147,21 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
llvm::Constant *CGNVCUDARuntime::getSetupArgumentFn() const {
// cudaError_t cudaSetupArgument(void *, size_t, size_t)
llvm::Type *Params[] = {VoidPtrTy, SizeTy, SizeTy};
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy,
- Params, false),
- "cudaSetupArgument");
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, Params, false),
+ addPrefixToName("SetupArgument"));
}
llvm::Constant *CGNVCUDARuntime::getLaunchFn() const {
- // cudaError_t cudaLaunch(char *)
- return CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
+ if (CGM.getLangOpts().HIP) {
+ // hipError_t hipLaunchByPtr(char *);
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, CharPtrTy, false), "hipLaunchByPtr");
+ } else {
+ // cudaError_t cudaLaunch(char *);
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
+ }
}
llvm::FunctionType *CGNVCUDARuntime::getRegisterGlobalsFnTy() const {
@@ -222,7 +242,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
llvm::Function *RegisterKernelsFunc = llvm::Function::Create(
getRegisterGlobalsFnTy(), llvm::GlobalValue::InternalLinkage,
- "__cuda_register_globals", &TheModule);
+ addUnderscoredPrefixToName("_register_globals"), &TheModule);
llvm::BasicBlock *EntryBB =
llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc);
CGBuilderTy Builder(CGM, Context);
@@ -235,7 +255,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
VoidPtrTy, VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()};
llvm::Constant *RegisterFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, RegisterFuncParams, false),
- "__cudaRegisterFunction");
+ addUnderscoredPrefixToName("RegisterFunction"));
// Extract GpuBinaryHandle passed as the first argument passed to
// __cuda_register_globals() and generate __cudaRegisterFunction() call for
@@ -259,7 +279,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
IntTy, IntTy};
llvm::Constant *RegisterVar = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, RegisterVarParams, false),
- "__cudaRegisterVar");
+ addUnderscoredPrefixToName("RegisterVar"));
for (auto &Pair : DeviceVars) {
llvm::GlobalVariable *Var = Pair.first;
unsigned Flags = Pair.second;
@@ -305,7 +325,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// void ** __cudaRegisterFatBinary(void *);
llvm::Constant *RegisterFatbinFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false),
- "__cudaRegisterFatBinary");
+ addUnderscoredPrefixToName("RegisterFatBinary"));
// struct { int magic, int version, void * gpu_binary, void * dont_care };
llvm::StructType *FatbinWrapperTy =
llvm::StructType::get(IntTy, IntTy, VoidPtrTy, VoidPtrTy);
@@ -324,7 +344,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
llvm::Function *ModuleCtorFunc = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
- llvm::GlobalValue::InternalLinkage, "__cuda_module_ctor", &TheModule);
+ llvm::GlobalValue::InternalLinkage,
+ addUnderscoredPrefixToName("_module_ctor"), &TheModule);
llvm::BasicBlock *CtorEntryBB =
llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc);
CGBuilderTy CtorBuilder(CGM, Context);
@@ -357,7 +378,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// Unused in fatbin v1.
Values.add(llvm::ConstantPointerNull::get(VoidPtrTy));
llvm::GlobalVariable *FatbinWrapper = Values.finishAndCreateGlobal(
- "__cuda_fatbin_wrapper", CGM.getPointerAlign(),
+ addUnderscoredPrefixToName("_fatbin_wrapper"), CGM.getPointerAlign(),
/*constant*/ true);
FatbinWrapper->setSection(FatbinSectionName);
@@ -370,7 +391,9 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
GpuBinaryHandle = new llvm::GlobalVariable(
TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage,
- llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle");
+ llvm::ConstantPointerNull::get(VoidPtrPtrTy),
+ addUnderscoredPrefixToName("_gpubin_handle"));
+
CtorBuilder.CreateAlignedStore(RegisterFatbinCall, GpuBinaryHandle,
CGM.getPointerAlign());
@@ -392,7 +415,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// void __cudaRegisterLinkedBinary%NVModuleID%(void (*)(void *), void *,
// void *, void (*)(void **))
- SmallString<128> RegisterLinkedBinaryName("__cudaRegisterLinkedBinary");
+ SmallString<128> RegisterLinkedBinaryName(
+ addUnderscoredPrefixToName("RegisterLinkedBinary"));
RegisterLinkedBinaryName += NVModuleID;
llvm::Constant *RegisterLinkedBinaryFunc = CGM.CreateRuntimeFunction(
getRegisterLinkedBinaryFnTy(), RegisterLinkedBinaryName);
@@ -424,11 +448,13 @@ llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
// void __cudaUnregisterFatBinary(void ** handle);
llvm::Constant *UnregisterFatbinFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
- "__cudaUnregisterFatBinary");
+ addUnderscoredPrefixToName("UnregisterFatBinary"));
llvm::Function *ModuleDtorFunc = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
- llvm::GlobalValue::InternalLinkage, "__cuda_module_dtor", &TheModule);
+ llvm::GlobalValue::InternalLinkage,
+ addUnderscoredPrefixToName("_module_dtor"), &TheModule);
+
llvm::BasicBlock *DtorEntryBB =
llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc);
CGBuilderTy DtorBuilder(CGM, Context);
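To summarize the codegen above: for HIP, the emitted host-side kernel stub now stages each argument with hipSetupArgument and launches through hipLaunchByPtr, keyed off the stub's own address. A hand-written C++ sketch of what the generated stub does (the kernel and offsets are hypothetical, offsets assume 64-bit pointers, and the real stub is emitted directly as LLVM IR under the kernel's name):

#include <cstddef>

extern "C" int hipSetupArgument(void *arg, std::size_t size, std::size_t offset);
extern "C" int hipLaunchByPtr(char *func);

// Hypothetical stub for: __global__ void vec_add(float *a, float *b, int n)
void vec_add_stub(float *a, float *b, int n) {
  // Each kernel argument is staged at its offset in the launch buffer...
  hipSetupArgument(&a, sizeof(a), 0);
  hipSetupArgument(&b, sizeof(b), 8);
  hipSetupArgument(&n, sizeof(n), 16);
  // ...then the launch is dispatched via the stub's address, which the
  // __hip_register_globals constructor associated with the device binary.
  hipLaunchByPtr((char *)vec_add_stub);
}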
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index c516738fb7a..f2f22ee2c39 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -1608,6 +1608,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
.Case("c", InputKind::C)
.Case("cl", InputKind::OpenCL)
.Case("cuda", InputKind::CUDA)
+ .Case("hip", InputKind::HIP)
.Case("c++", InputKind::CXX)
.Case("objective-c", InputKind::ObjC)
.Case("objective-c++", InputKind::ObjCXX)
@@ -1887,6 +1888,9 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
case InputKind::RenderScript:
LangStd = LangStandard::lang_c99;
break;
+ case InputKind::HIP:
+ LangStd = LangStandard::lang_hip;
+ break;
}
}
@@ -1934,7 +1938,8 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
}
}
- Opts.CUDA = IK.getLanguage() == InputKind::CUDA;
+ Opts.HIP = IK.getLanguage() == InputKind::HIP;
+ Opts.CUDA = IK.getLanguage() == InputKind::CUDA || Opts.HIP;
if (Opts.CUDA)
// Set default FP_CONTRACT to FAST.
Opts.setDefaultFPContractMode(LangOptions::FPC_Fast);
@@ -2005,6 +2010,10 @@ static bool IsInputCompatibleWithStandard(InputKind IK,
return S.getLanguage() == InputKind::CUDA ||
S.getLanguage() == InputKind::CXX;
+ case InputKind::HIP:
+ return S.getLanguage() == InputKind::CXX ||
+ S.getLanguage() == InputKind::HIP;
+
case InputKind::Asm:
// Accept (and ignore) all -std= values.
// FIXME: The -std= value is not ignored; it affects the tokenization
@@ -2032,6 +2041,8 @@ static const StringRef GetInputKindName(InputKind IK) {
return "CUDA";
case InputKind::RenderScript:
return "RenderScript";
+ case InputKind::HIP:
+ return "HIP";
case InputKind::Asm:
return "Asm";
diff --git a/clang/lib/Frontend/FrontendActions.cpp b/clang/lib/Frontend/FrontendActions.cpp
index 8cb6a04224d..3fcb72c025f 100644
--- a/clang/lib/Frontend/FrontendActions.cpp
+++ b/clang/lib/Frontend/FrontendActions.cpp
@@ -733,6 +733,7 @@ void PrintPreambleAction::ExecuteAction() {
case InputKind::ObjCXX:
case InputKind::OpenCL:
case InputKind::CUDA:
+ case InputKind::HIP:
break;
case InputKind::Unknown:
diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp
index 17eff05a715..ebb3f2652d7 100644
--- a/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/clang/lib/Frontend/InitPreprocessor.cpp
@@ -471,8 +471,10 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
// Not "standard" per se, but available even with the -undef flag.
if (LangOpts.AsmPreprocessor)
Builder.defineMacro("__ASSEMBLER__");
- if (LangOpts.CUDA)
+ if (LangOpts.CUDA && !LangOpts.HIP)
Builder.defineMacro("__CUDA__");
+ if (LangOpts.HIP)
+ Builder.defineMacro("__HIP__");
}
/// Initialize the predefined C++ language feature test macros defined in
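Because __CUDA__ is now suppressed for HIP and __HIP__ is defined instead, source code and headers can tell the two modes apart even though LangOpts.CUDA is on for both. A small illustrative guard (the macro name is made up):

#if defined(__HIP__)
  // -x hip: __CUDA__ is deliberately not defined here.
  #define LAUNCH_PREFIX "hip"
#elif defined(__CUDA__)
  // -x cuda: behavior unchanged by this patch.
  #define LAUNCH_PREFIX "cuda"
#endif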
diff --git a/clang/lib/Sema/SemaCUDA.cpp b/clang/lib/Sema/SemaCUDA.cpp
index 8224bd85f18..99265efdaf0 100644
--- a/clang/lib/Sema/SemaCUDA.cpp
+++ b/clang/lib/Sema/SemaCUDA.cpp
@@ -42,8 +42,9 @@ ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
SourceLocation GGGLoc) {
FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
if (!ConfigDecl)
- return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
- << "cudaConfigureCall");
+ return ExprError(
+ Diag(LLLLoc, diag::err_undeclared_var_use)
+ << (getLangOpts().HIP ? "hipConfigureCall" : "cudaConfigureCall"));
QualType ConfigQTy = ConfigDecl->getType();
DeclRefExpr *ConfigDR = new (Context)
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 84f9f7709f5..1bcc9329e4c 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -9056,11 +9056,13 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (getLangOpts().CUDA) {
IdentifierInfo *II = NewFD->getIdentifier();
- if (II && II->isStr("cudaConfigureCall") && !NewFD->isInvalidDecl() &&
+ if (II &&
+ II->isStr(getLangOpts().HIP ? "hipConfigureCall"
+ : "cudaConfigureCall") &&
+ !NewFD->isInvalidDecl() &&
NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
if (!R->getAs<FunctionType>()->getReturnType()->isScalarType())
Diag(NewFD->getLocation(), diag::err_config_scalar_return);
-
Context.setcudaConfigureCallDecl(NewFD);
}
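With these Sema changes, a HIP compilation looks for hipConfigureCall where a CUDA compilation looks for cudaConfigureCall; Sema records it via setcudaConfigureCallDecl and still enforces a scalar return type (diag::err_config_scalar_return), which an enum like hipError_t satisfies. A sketch of such a declaration, assuming the HIP runtime's dim3, hipError_t, and hipStream_t types; the exact signature belongs to the HIP runtime headers, not to this patch:

extern "C" hipError_t hipConfigureCall(dim3 gridDim, dim3 blockDim,
                                       size_t sharedMem = 0,
                                       hipStream_t stream = 0);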