From 8031238017a5aa0294168ecec4ef90fc1b3b76b4 Mon Sep 17 00:00:00 2001
From: Johannes Doerfert
Date: Mon, 25 Jul 2016 12:48:45 +0000
Subject: [GSoC] Add PolyhedralInfo pass - new interface to polly analysis

Adding a new pass, PolyhedralInfo. This pass will be the interface to Polly.

Initially, we provide the following interface:
  - isParallel(Loop *L): returns true if the loop is parallel for the given
    program order, and false otherwise.

Patch by Utpal Bora

Differential Revision: https://reviews.llvm.org/D21486

llvm-svn: 276637
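
A client transformation pass is expected to consume this analysis roughly as
in the following sketch (illustration only, not part of this patch; the
ParallelLoopReporter pass is hypothetical and its registration is omitted):

  // Hypothetical legacy-PM client of PolyhedralInfo.
  #include "polly/PolyhedralInfo.h"
  #include "llvm/ADT/DepthFirstIterator.h"
  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/Pass.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  struct ParallelLoopReporter : public FunctionPass {
    static char ID;
    ParallelLoopReporter() : FunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<polly::PolyhedralInfo>(); // provides isParallel(Loop *)
      AU.addRequired<LoopInfoWrapperPass>();
      AU.setPreservesAll();
    }

    bool runOnFunction(Function &F) override {
      auto &PI = getAnalysis<polly::PolyhedralInfo>();
      auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
      for (Loop *TopLevel : LI)
        for (Loop *L : depth_first(TopLevel))
          if (PI.isParallel(L)) // safe to parallelize/vectorize this loop
            errs() << L->getHeader()->getName() << " is parallel\n";
      return false; // analysis client only, the IR is not modified
    }
  };
  char ParallelLoopReporter::ID = 0;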
---
 polly/include/polly/LinkAllPasses.h                |   2 +
 polly/include/polly/PolyhedralInfo.h               | 101 +++++++++++++
 polly/lib/Analysis/PolyhedralInfo.cpp              | 162 +++++++++++++++++++++
 polly/lib/CMakeLists.txt                           |   1 +
 polly/lib/Support/RegisterPasses.cpp               |   9 ++
 .../Ast/OpenMP/multiple_loops_outer_parallel.ll    |   3 +
 .../Isl/Ast/OpenMP/nested_loop_both_parallel.ll    |   4 +
 .../OpenMP/nested_loop_both_parallel_parametric.ll |   4 +
 .../Isl/Ast/OpenMP/nested_loop_inner_parallel.ll   |   4 +
 .../Isl/Ast/OpenMP/nested_loop_outer_parallel.ll   |   4 +
 .../Ast/OpenMP/single_loop_param_non_parallel.ll   |   2 +
 .../Isl/Ast/OpenMP/single_loop_param_parallel.ll   |   2 +
 .../single_loop_param_parallel_computeout.ll       |   2 +
 polly/test/Isl/Ast/dependence_distance_constant.ll |   9 +-
 .../Ast/dependence_distance_multiple_constant.ll   |   2 +
 .../test/Isl/Ast/dependence_distance_parametric.ll |   9 +-
 .../Isl/Ast/dependence_distance_parametric_expr.ll |   9 +-
 polly/test/Isl/Ast/dependence_distance_varying.ll  |   2 +
 .../dependence_distance_varying_in_outer_loop.ll   |   9 +-
 .../Ast/dependence_distance_varying_multiple.ll    |   2 +
 .../reduction_clauses_multidimensional_access.ll   |  12 +-
 polly/test/Isl/Ast/reduction_in_one_dimension.ll   |   3 +
 polly/test/Isl/Ast/reduction_loop_reversal.ll      |   4 +
 polly/test/Isl/Ast/reduction_modulo_schedule.ll    |   4 +
 24 files changed, 349 insertions(+), 16 deletions(-)
 create mode 100644 polly/include/polly/PolyhedralInfo.h
 create mode 100644 polly/lib/Analysis/PolyhedralInfo.cpp

diff --git a/polly/include/polly/LinkAllPasses.h b/polly/include/polly/LinkAllPasses.h
index 77cb7183ca7..cf0817e266a 100644
--- a/polly/include/polly/LinkAllPasses.h
+++ b/polly/include/polly/LinkAllPasses.h
@@ -37,6 +37,7 @@ llvm::Pass *createDOTViewerPass();
 llvm::Pass *createJSONExporterPass();
 llvm::Pass *createJSONImporterPass();
 llvm::Pass *createPollyCanonicalizePass();
+llvm::Pass *createPolyhedralInfoPass();
 llvm::Pass *createScopDetectionPass();
 llvm::Pass *createScopInfoRegionPassPass();
 llvm::Pass *createScopInfoWrapperPassPass();
@@ -72,6 +73,7 @@ struct PollyForcePassLinking {
     polly::createScopDetectionPass();
     polly::createScopInfoRegionPassPass();
     polly::createPollyCanonicalizePass();
+    polly::createPolyhedralInfoPass();
     polly::createIslAstInfoPass();
     polly::createCodeGenerationPass();
 #ifdef GPU_CODEGEN
diff --git a/polly/include/polly/PolyhedralInfo.h b/polly/include/polly/PolyhedralInfo.h
new file mode 100644
index 00000000000..3f2d6a39a66
--- /dev/null
+++ b/polly/include/polly/PolyhedralInfo.h
@@ -0,0 +1,101 @@
+//===- polly/PolyhedralInfo.h - PolyhedralInfo class definition -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file contains the declaration of the PolyhedralInfo class, which
+/// provides an interface to expose polyhedral analysis information of Polly.
+///
+/// This is work in progress. We will add more APIs as and when deemed required.
+//===----------------------------------------------------------------------===//
+
+#ifndef POLLY_POLYHEDRAL_INFO_H
+#define POLLY_POLYHEDRAL_INFO_H
+
+#include "llvm/Pass.h"
+#include "isl/ctx.h"
+#include "isl/union_map.h"
+
+namespace llvm {
+class Loop;
+}
+
+namespace polly {
+
+class Scop;
+class ScopInfoWrapperPass;
+class DependenceInfoWrapperPass;
+
+class PolyhedralInfo : public llvm::FunctionPass {
+public:
+  static char ID; // Pass identification, replacement for typeid
+
+  /// @brief Construct a new PolyhedralInfo pass.
+  PolyhedralInfo() : FunctionPass(ID) {}
+  ~PolyhedralInfo() {}
+
+  /// @brief Check if a given loop is parallel.
+  ///
+  /// @param L The loop.
+  ///
+  /// @return Returns true if the loop is parallel, false otherwise.
+  bool isParallel(llvm::Loop *L) const;
+
+  /// @brief Return the SCoP containing the @p L loop.
+  ///
+  /// @param L The loop.
+  ///
+  /// @return Returns the SCoP containing the given loop.
+  ///         Returns null if the loop is not contained in any SCoP.
+  const Scop *getScopContainingLoop(llvm::Loop *L) const;
+
+  /// @brief Compute the partial schedule for the given @p L loop.
+  ///
+  /// @param S The SCoP containing the given loop.
+  /// @param L The loop.
+  ///
+  /// @return Returns the partial schedule for the given loop.
+  __isl_give isl_union_map *getScheduleForLoop(const Scop *S,
+                                               llvm::Loop *L) const;
+
+  /// @brief Get the SCoP and dependence analysis information for @p F.
+  bool runOnFunction(llvm::Function &F) override;
+
+  /// @brief Release the internal memory.
+  void releaseMemory() override {}
+
+  /// @brief Print to @p OS whether each dimension of a loop nest is parallel.
+  void print(llvm::raw_ostream &OS,
+             const llvm::Module *M = nullptr) const override;
+
+  /// @brief Register all analyses and transformations required.
+  void getAnalysisUsage(llvm::AnalysisUsage &AU) const override;
+
+private:
+  /// @brief Check if a given loop is parallel or vectorizable.
+  ///
+  /// @param L             The loop.
+  /// @param MinDepDistPtr If not nullptr, the minimal dependence distance will
+  ///                      be returned at the address of that pointer.
+  ///
+  /// @return Returns true if the loop is parallel or vectorizable, false
+  ///         otherwise.
+  bool checkParallel(llvm::Loop *L,
+                     __isl_give isl_pw_aff **MinDepDistPtr = nullptr) const;
+
+  ScopInfoWrapperPass *SI;
+  DependenceInfoWrapperPass *DI;
+};
+
+} // end namespace polly
+
+namespace llvm {
+class PassRegistry;
+void initializePolyhedralInfoPass(llvm::PassRegistry &);
+}
+
+#endif
+//===----------------------------------------------------------------------===// + +#include "polly/PolyhedralInfo.h" +#include "polly/DependenceInfo.h" +#include "polly/LinkAllPasses.h" +#include "polly/Options.h" +#include "polly/ScopInfo.h" +#include "polly/Support/GICHelper.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/Support/Debug.h" +#include +#include + +using namespace llvm; +using namespace polly; + +#define DEBUG_TYPE "polyhedral-info" + +static cl::opt CheckParallel("polly-check-parallel", + cl::desc("Check for parallel loops"), + cl::Hidden, cl::init(false), cl::ZeroOrMore, + cl::cat(PollyCategory)); + +static cl::opt CheckVectorizable("polly-check-vectorizable", + cl::desc("Check for vectorizable loops"), + cl::Hidden, cl::init(false), + cl::ZeroOrMore, cl::cat(PollyCategory)); + +void PolyhedralInfo::getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequiredTransitive(); + AU.addRequired(); + AU.addRequiredTransitive(); + AU.setPreservesAll(); +} + +bool PolyhedralInfo::runOnFunction(Function &F) { + DI = &getAnalysis(); + SI = &getAnalysis(); + return false; +} + +void PolyhedralInfo::print(raw_ostream &OS, const Module *) const { + auto &LI = getAnalysis().getLoopInfo(); + for (auto *TopLevelLoop : LI) { + for (auto *L : depth_first(TopLevelLoop)) { + OS.indent(2) << L->getHeader()->getName() << ":\t"; + if (CheckParallel && isParallel(L)) + OS << "Loop is parallel.\n"; + else if (CheckParallel) + OS << "Loop is not parallel.\n"; + } + } +} + +bool PolyhedralInfo::checkParallel(Loop *L, isl_pw_aff **MinDepDistPtr) const { + bool IsParallel; + const Scop *S = getScopContainingLoop(L); + if (!S) + return false; + const Dependences &D = + DI->getDependences(const_cast(S), Dependences::AL_Access); + if (!D.hasValidDependences()) + return false; + DEBUG(dbgs() << "Loop :\t" << L->getHeader()->getName() << ":\n"); + + isl_union_map *Deps = + D.getDependences(Dependences::TYPE_RAW | Dependences::TYPE_WAW | + Dependences::TYPE_WAR | Dependences::TYPE_RED); + DEBUG(dbgs() << "Dependences :\t" << stringFromIslObj(Deps) << "\n"); + + isl_union_map *Schedule = getScheduleForLoop(S, L); + DEBUG(dbgs() << "Schedule: \t" << stringFromIslObj(Schedule) << "\n"); + + IsParallel = D.isParallel(Schedule, Deps, MinDepDistPtr); + isl_union_map_free(Schedule); + return IsParallel; +} + +bool PolyhedralInfo::isParallel(Loop *L) const { return checkParallel(L); } + +const Scop *PolyhedralInfo::getScopContainingLoop(Loop *L) const { + assert((SI) && "ScopInfoWrapperPass is required by PolyhedralInfo pass!\n"); + for (auto &It : *SI) { + Region *R = It.first; + if (R->contains(L)) + return It.second.get(); + } + return nullptr; +} + +// Given a Loop and the containing SCoP, we compute the partial schedule +// by taking union of individual schedules of each ScopStmt within the loop +// and projecting out the inner dimensions from the range of the schedule. 
+//   for (i = 0; i < n; i++)
+//      for (j = 0; j < n; j++)
+//        A[j] = 1;  // Stmt
+//
+//  The original schedule will be
+//   Stmt[i0, i1] -> [i0, i1]
+//  The schedule for the outer loop will be
+//   Stmt[i0, i1] -> [i0]
+//  The schedule for the inner loop will be
+//   Stmt[i0, i1] -> [i0, i1]
+__isl_give isl_union_map *PolyhedralInfo::getScheduleForLoop(const Scop *S,
+                                                             Loop *L) const {
+  isl_union_map *Schedule = isl_union_map_empty(S->getParamSpace());
+  int CurrDim = S->getRelativeLoopDepth(L);
+  DEBUG(dbgs() << "Relative loop depth:\t" << CurrDim << "\n");
+  assert(CurrDim >= 0 && "Loop in region should have at least depth one");
+
+  for (auto *BB : L->blocks()) {
+    auto *SS = S->getStmtFor(BB);
+    if (!SS)
+      continue;
+
+    unsigned int MaxDim = SS->getNumIterators();
+    DEBUG(dbgs() << "Maximum depth of Stmt:\t" << MaxDim << "\n");
+    auto *ScheduleMap = SS->getSchedule();
+
+    ScheduleMap = isl_map_project_out(ScheduleMap, isl_dim_out, CurrDim + 1,
+                                      MaxDim - CurrDim - 1);
+    ScheduleMap =
+        isl_map_set_tuple_id(ScheduleMap, isl_dim_in, SS->getDomainId());
+    Schedule =
+        isl_union_map_union(Schedule, isl_union_map_from_map(ScheduleMap));
+  }
+
+  Schedule = isl_union_map_coalesce(Schedule);
+  return Schedule;
+}
+
+char PolyhedralInfo::ID = 0;
+
+Pass *polly::createPolyhedralInfoPass() { return new PolyhedralInfo(); }
+
+INITIALIZE_PASS_BEGIN(PolyhedralInfo, "polyhedral-info",
+                      "Polly - Interface to polyhedral analysis engine", false,
+                      false);
+INITIALIZE_PASS_DEPENDENCY(DependenceInfoWrapperPass);
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
+INITIALIZE_PASS_DEPENDENCY(ScopInfoWrapperPass);
+INITIALIZE_PASS_END(PolyhedralInfo, "polyhedral-info",
+                    "Polly - Interface to polyhedral analysis engine", false,
+                    false)
diff --git a/polly/lib/CMakeLists.txt b/polly/lib/CMakeLists.txt
index 26f0d8db371..7a775171af4 100644
--- a/polly/lib/CMakeLists.txt
+++ b/polly/lib/CMakeLists.txt
@@ -28,6 +28,7 @@ endif ()
 
 add_polly_library(Polly
   Analysis/DependenceInfo.cpp
+  Analysis/PolyhedralInfo.cpp
   Analysis/ScopDetection.cpp
   Analysis/ScopDetectionDiagnostic.cpp
   Analysis/ScopInfo.cpp
diff --git a/polly/lib/Support/RegisterPasses.cpp b/polly/lib/Support/RegisterPasses.cpp
index 631240ae39d..0b8c7a51720 100644
--- a/polly/lib/Support/RegisterPasses.cpp
+++ b/polly/lib/Support/RegisterPasses.cpp
@@ -26,6 +26,7 @@
 #include "polly/DependenceInfo.h"
 #include "polly/LinkAllPasses.h"
 #include "polly/Options.h"
+#include "polly/PolyhedralInfo.h"
 #include "polly/ScopDetection.h"
 #include "polly/ScopInfo.h"
 #include "llvm/Analysis/CFGPrinter.h"
@@ -152,6 +153,11 @@ static cl::opt<bool>
     cl::desc("Show the Polly CFG right after code generation"), cl::Hidden,
     cl::init(false), cl::cat(PollyCategory));
 
+static cl::opt<bool>
+    EnablePolyhedralInfo("polly-enable-polyhedralinfo",
+                         cl::desc("Enable polyhedral interface of Polly"),
+                         cl::Hidden, cl::init(false), cl::cat(PollyCategory));
+
 namespace polly {
 void initializePollyPasses(PassRegistry &Registry) {
   initializeCodeGenerationPass(Registry);
@@ -168,6 +174,7 @@ void initializePollyPasses(PassRegistry &Registry) {
   initializeIslAstInfoPass(Registry);
   initializeIslScheduleOptimizerPass(Registry);
   initializePollyCanonicalizePass(Registry);
+  initializePolyhedralInfoPass(Registry);
   initializeScopDetectionPass(Registry);
   initializeScopInfoRegionPassPass(Registry);
   initializeScopInfoWrapperPassPass(Registry);
@@ -216,6 +223,8 @@ void registerPollyPasses(llvm::legacy::PassManagerBase &PM) {
     PM.add(polly::createDOTOnlyPrinterPass());
 
   PM.add(polly::createScopInfoRegionPassPass());
+  if (EnablePolyhedralInfo)
+    PM.add(polly::createPolyhedralInfoPass());
   if (ImportJScop)
     PM.add(polly::createJSONImporterPass());
 
diff --git a/polly/test/Isl/Ast/OpenMP/multiple_loops_outer_parallel.ll b/polly/test/Isl/Ast/OpenMP/multiple_loops_outer_parallel.ll
index ee1ded451a8..9c8b2190d0f 100644
--- a/polly/test/Isl/Ast/OpenMP/multiple_loops_outer_parallel.ll
+++ b/polly/test/Isl/Ast/OpenMP/multiple_loops_outer_parallel.ll
@@ -1,10 +1,13 @@
 ; RUN: opt %loadPolly -polly-ast -polly-parallel -polly-parallel-force -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 ;
 ;       void jd(int *A) {
 ; CHECK:  #pragma omp parallel for
+; PINFO:  for.cond2: Loop is parallel.
 ;           for (int i = 0; i < 1024; i++)
 ;             A[i] = 1;
 ; CHECK:  #pragma omp parallel for
+; PINFO:  for.cond: Loop is parallel.
 ;           for (int i = 0; i < 1024; i++)
 ;             A[i] = A[i] * 2;
 ;       }
diff --git a/polly/test/Isl/Ast/OpenMP/nested_loop_both_parallel.ll b/polly/test/Isl/Ast/OpenMP/nested_loop_both_parallel.ll
index facff314f89..be5df96593b 100644
--- a/polly/test/Isl/Ast/OpenMP/nested_loop_both_parallel.ll
+++ b/polly/test/Isl/Ast/OpenMP/nested_loop_both_parallel.ll
@@ -1,4 +1,5 @@
 ; RUN: opt %loadPolly -polly-ast -polly-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 
 ; for (i = 0; i < 1024; i++)
@@ -49,3 +50,6 @@ ret:
 ; CHECK-NOT: #pragma omp parallel for
 ; CHECK:     for (int c1 = 0; c1 <= 1023; c1 += 1)
 ; CHECK:       Stmt_loop_body(c0, c1);
+;
+; PINFO:      loop.i: Loop is parallel.
+; PINFO-NEXT: loop.j: Loop is parallel.
diff --git a/polly/test/Isl/Ast/OpenMP/nested_loop_both_parallel_parametric.ll b/polly/test/Isl/Ast/OpenMP/nested_loop_both_parallel_parametric.ll
index 9148c901f9e..5d2a2fa4a2d 100644
--- a/polly/test/Isl/Ast/OpenMP/nested_loop_both_parallel_parametric.ll
+++ b/polly/test/Isl/Ast/OpenMP/nested_loop_both_parallel_parametric.ll
@@ -1,4 +1,5 @@
 ; RUN: opt %loadPolly -polly-ast -polly-parallel -polly-parallel-force -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 ; int A[1024][1024];
 ; void bar(int n) {
@@ -46,3 +47,6 @@ ret:
 ; CHECK:    #pragma simd
 ; CHECK:    for (int c1 = 0; c1 < n; c1 += 1)
 ; CHECK:      Stmt_loop_body(c0, c1);
+
+; PINFO:      loop.i: Loop is parallel.
+; PINFO-NEXT: loop.j: Loop is parallel.
diff --git a/polly/test/Isl/Ast/OpenMP/nested_loop_inner_parallel.ll b/polly/test/Isl/Ast/OpenMP/nested_loop_inner_parallel.ll
index 22a24c13f4a..615c101ed44 100644
--- a/polly/test/Isl/Ast/OpenMP/nested_loop_inner_parallel.ll
+++ b/polly/test/Isl/Ast/OpenMP/nested_loop_inner_parallel.ll
@@ -1,4 +1,5 @@
 ; RUN: opt %loadPolly -polly-ast -polly-parallel -polly-parallel-force -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 
 ; for (i = 0; i < n; i++)
@@ -44,3 +45,6 @@ ret:
 ; CHECK: #pragma omp parallel for
 ; CHECK:   for (int c1 = 0; c1 < n; c1 += 1)
 ; CHECK:     Stmt_loop_body(c0, c1);
+
+; PINFO:      loop.i: Loop is not parallel.
+; PINFO-NEXT: loop.j: Loop is parallel.
diff --git a/polly/test/Isl/Ast/OpenMP/nested_loop_outer_parallel.ll b/polly/test/Isl/Ast/OpenMP/nested_loop_outer_parallel.ll
index b6aa08d103d..148933fd67e 100644
--- a/polly/test/Isl/Ast/OpenMP/nested_loop_outer_parallel.ll
+++ b/polly/test/Isl/Ast/OpenMP/nested_loop_outer_parallel.ll
@@ -1,4 +1,5 @@
 ; RUN: opt %loadPolly -polly-ast -polly-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 
 ; for (i = 0; i < n; i++)
@@ -43,3 +44,6 @@ ret:
 ; CHECK: for (int c0 = 0; c0 < n; c0 += 1)
 ; CHECK:   for (int c1 = 0; c1 < n; c1 += 1)
 ; CHECK:     Stmt_loop_body(c0, c1);
+
+; PINFO:      loop.i: Loop is parallel.
+; PINFO-NEXT: loop.j: Loop is not parallel.
diff --git a/polly/test/Isl/Ast/OpenMP/single_loop_param_non_parallel.ll b/polly/test/Isl/Ast/OpenMP/single_loop_param_non_parallel.ll
index 607f7d2d444..a69c891282e 100644
--- a/polly/test/Isl/Ast/OpenMP/single_loop_param_non_parallel.ll
+++ b/polly/test/Isl/Ast/OpenMP/single_loop_param_non_parallel.ll
@@ -1,4 +1,5 @@
 ; RUN: opt %loadPolly -polly-ast -polly-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 
 ; for (i = 0; i < n; i++)
@@ -31,3 +32,4 @@ ret:
 ; CHECK: for (int c0 = 0; c0 < n; c0 += 1)
 ; CHECK:   Stmt_loop_body(c0)
 
+; PINFO: loop.header: Loop is not parallel.
diff --git a/polly/test/Isl/Ast/OpenMP/single_loop_param_parallel.ll b/polly/test/Isl/Ast/OpenMP/single_loop_param_parallel.ll
index 8e04d1d282f..382a2873efd 100644
--- a/polly/test/Isl/Ast/OpenMP/single_loop_param_parallel.ll
+++ b/polly/test/Isl/Ast/OpenMP/single_loop_param_parallel.ll
@@ -1,4 +1,5 @@
 ; RUN: opt %loadPolly -polly-ast -polly-parallel -polly-parallel-force -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 
 ; for (i = 0; i < n; i++)
@@ -33,3 +34,4 @@ ret:
 ; CHECK: #pragma omp parallel for
 ; CHECK: for (int c0 = 0; c0 < n; c0 += 1)
 ; CHECK:   Stmt_loop_body(c0)
+; PINFO: loop.header: Loop is parallel.
diff --git a/polly/test/Isl/Ast/OpenMP/single_loop_param_parallel_computeout.ll b/polly/test/Isl/Ast/OpenMP/single_loop_param_parallel_computeout.ll
index f25db09c363..8439500fef8 100644
--- a/polly/test/Isl/Ast/OpenMP/single_loop_param_parallel_computeout.ll
+++ b/polly/test/Isl/Ast/OpenMP/single_loop_param_parallel_computeout.ll
@@ -1,4 +1,5 @@
 ; RUN: opt %loadPolly -polly-ast -polly-parallel -polly-dependences-computeout=1 -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 
 ; for (i = 0; i < n; i++)
@@ -33,3 +34,4 @@ ret:
 ; CHECK-NOT: #pragma omp parallel for
 ; CHECK: for (int c0 = 0; c0 < n; c0 += 1)
 ; CHECK:   Stmt_loop_body(c0)
+; PINFO: loop.header: Loop is parallel.
diff --git a/polly/test/Isl/Ast/dependence_distance_constant.ll b/polly/test/Isl/Ast/dependence_distance_constant.ll
index 3f37292f829..ffa8cd196ff 100644
--- a/polly/test/Isl/Ast/dependence_distance_constant.ll
+++ b/polly/test/Isl/Ast/dependence_distance_constant.ll
@@ -1,11 +1,14 @@
 ; RUN: opt %loadPolly -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 ;
 ;        void f(int *A, int N) {
 ; CHECK:   #pragma minimal dependence distance: 1
+; PINFO:      for.cond: Loop is not parallel.
 ;          for (int j = 0; j < N; j++)
-; CHECK:     #pragma minimal dependence distance: 8
-;            for (int i = 0; i < N; i++)
-;              A[i + 8] = A[i] + 1;
+; CHECK:      #pragma minimal dependence distance: 8
+; PINFO-NEXT:   for.cond1: Loop is not parallel.
+;              for (int i = 0; i < N; i++)
+;                A[i + 8] = A[i] + 1;
 ;        }
 ;
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
diff --git a/polly/test/Isl/Ast/dependence_distance_multiple_constant.ll b/polly/test/Isl/Ast/dependence_distance_multiple_constant.ll
index 5148785e6e4..fdb1902995c 100644
--- a/polly/test/Isl/Ast/dependence_distance_multiple_constant.ll
+++ b/polly/test/Isl/Ast/dependence_distance_multiple_constant.ll
@@ -1,7 +1,9 @@
 ; RUN: opt %loadPolly -basicaa -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 ;
 ;        void f(int *restrict A, int *restrict B, int N) {
 ; CHECK:   #pragma minimal dependence distance: 5
+; PINFO:   for.cond: Loop is not parallel.
 ;          for (int i = 0; i < N; i++) {
 ;            A[i + 7] = A[i] + 1;
 ;            B[i + 5] = B[i] + 1;
diff --git a/polly/test/Isl/Ast/dependence_distance_parametric.ll b/polly/test/Isl/Ast/dependence_distance_parametric.ll
index 84ce2cce463..e979a9f9e7b 100644
--- a/polly/test/Isl/Ast/dependence_distance_parametric.ll
+++ b/polly/test/Isl/Ast/dependence_distance_parametric.ll
@@ -1,11 +1,14 @@
 ; RUN: opt %loadPolly -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 ;
 ;        void f(int *A, int N, int c) {
 ; CHECK:   #pragma minimal dependence distance: 1
+; PINFO:      for.cond: Loop is not parallel.
 ;          for (int j = 0; j < N; j++)
-; CHECK:     #pragma minimal dependence distance: max(-c, c)
-;            for (int i = 0; i < N; i++)
-;              A[i + c] = A[i] + 1;
+; CHECK:      #pragma minimal dependence distance: max(-c, c)
+; PINFO-NEXT:   for.cond1: Loop is not parallel.
+;              for (int i = 0; i < N; i++)
+;                A[i + c] = A[i] + 1;
 ;        }
 ;
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
diff --git a/polly/test/Isl/Ast/dependence_distance_parametric_expr.ll b/polly/test/Isl/Ast/dependence_distance_parametric_expr.ll
index c9508309f3f..1b5aad31d4d 100644
--- a/polly/test/Isl/Ast/dependence_distance_parametric_expr.ll
+++ b/polly/test/Isl/Ast/dependence_distance_parametric_expr.ll
@@ -1,11 +1,14 @@
 ; RUN: opt %loadPolly -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 ;
 ;        void f(int *A, int N, int c, int v) {
 ; CHECK:   #pragma minimal dependence distance: 1
+; PINFO:      for.cond: Loop is not parallel.
 ;          for (int j = 0; j < N; j++)
-; CHECK:     #pragma minimal dependence distance: max(-c - v, c + v)
-;            for (int i = 0; i < N; i++)
-;              A[i + c + v] = A[i] + 1;
+; CHECK:      #pragma minimal dependence distance: max(-c - v, c + v)
+; PINFO-NEXT:   for.cond1: Loop is not parallel.
+;              for (int i = 0; i < N; i++)
+;                A[i + c + v] = A[i] + 1;
 ;        }
 ;
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
diff --git a/polly/test/Isl/Ast/dependence_distance_varying.ll b/polly/test/Isl/Ast/dependence_distance_varying.ll
index d334985a812..ff524d9756a 100644
--- a/polly/test/Isl/Ast/dependence_distance_varying.ll
+++ b/polly/test/Isl/Ast/dependence_distance_varying.ll
@@ -1,7 +1,9 @@
 ; RUN: opt %loadPolly -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 ;
 ;        void f(int *A, int N) {
 ; CHECK:   #pragma minimal dependence distance: ((N - 1) % 2) + 1
+; PINFO:   for.cond: Loop is not parallel.
 ;          for (int i = 0; i < N; i++)
 ;            A[i] = A[N - i] + 1;
 ;        }
diff --git a/polly/test/Isl/Ast/dependence_distance_varying_in_outer_loop.ll b/polly/test/Isl/Ast/dependence_distance_varying_in_outer_loop.ll
index 87a6205115e..3b07f64e71b 100644
--- a/polly/test/Isl/Ast/dependence_distance_varying_in_outer_loop.ll
+++ b/polly/test/Isl/Ast/dependence_distance_varying_in_outer_loop.ll
@@ -1,11 +1,14 @@
 ; RUN: opt %loadPolly -polly-canonicalize -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 ;
 ;        void f(int *restrict A, int *restrict sum) {
 ; CHECK:   #pragma minimal dependence distance: 1
+; PINFO:      for.cond: Loop is not parallel.
 ;          for (int j = 0; j < 1024; j++)
-; CHECK:     #pragma minimal dependence distance: 1
-;            for (int i = j; i < 1024; i++)
-;              A[i - 3] = A[j] * 2 + A[j] + 2;
+; CHECK:      #pragma minimal dependence distance: 1
+; PINFO-NEXT:   for.cond1: Loop is not parallel.
+;              for (int i = j; i < 1024; i++)
+;                A[i - 3] = A[j] * 2 + A[j] + 2;
 ;        }
 ;
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
diff --git a/polly/test/Isl/Ast/dependence_distance_varying_multiple.ll b/polly/test/Isl/Ast/dependence_distance_varying_multiple.ll
index 34d723304dc..3bad27c89dd 100644
--- a/polly/test/Isl/Ast/dependence_distance_varying_multiple.ll
+++ b/polly/test/Isl/Ast/dependence_distance_varying_multiple.ll
@@ -1,8 +1,10 @@
 ; RUN: opt %loadPolly -basicaa -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 ;
 ;        void f(int *restrict A, int *restrict B, int *restrict C, int *restrict D,
 ;               int *restrict E, int N) {
 ; CHECK: #pragma minimal dependence distance: N >= 35 ? 1 : N >= 17 && N <= 34 ? 2 : 5
+; PINFO: for.cond: Loop is not parallel.
 ;          for (int i = 0; i < N; i++) {
 ;            A[i] = A[100 - 2 * i] + 1;
 ;            B[i] = B[100 - 3 * i] + 1;
diff --git a/polly/test/Isl/Ast/reduction_clauses_multidimensional_access.ll b/polly/test/Isl/Ast/reduction_clauses_multidimensional_access.ll
index 3055e4cf221..e14e491e05c 100644
--- a/polly/test/Isl/Ast/reduction_clauses_multidimensional_access.ll
+++ b/polly/test/Isl/Ast/reduction_clauses_multidimensional_access.ll
@@ -1,12 +1,16 @@
 ; RUN: opt %loadPolly -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 ;
 ; CHECK: #pragma known-parallel reduction (^ : sum)
 ;        void f(int N, int M, int P, int sum[P][M]) {
+; PINFO:      for.cond: Loop is not parallel.
 ;          for (int i = 0; i < N; i++)
-;            for (int j = 0; j < P; j++)
-; CHECK:        #pragma simd
-;              for (int k = 0; k < M; k++)
-;                sum[j][k] ^= j;
+; PINFO-NEXT:   for.cond1: Loop is parallel.
+;              for (int j = 0; j < P; j++)
+; CHECK:          #pragma simd
+; PINFO-NEXT:     for.cond4: Loop is parallel.
+;                for (int k = 0; k < M; k++)
+;                  sum[j][k] ^= j;
 ;        }
 ;
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
diff --git a/polly/test/Isl/Ast/reduction_in_one_dimension.ll b/polly/test/Isl/Ast/reduction_in_one_dimension.ll
index 94240d3885e..70f3c7a304a 100644
--- a/polly/test/Isl/Ast/reduction_in_one_dimension.ll
+++ b/polly/test/Isl/Ast/reduction_in_one_dimension.ll
@@ -1,10 +1,13 @@
 ; RUN: opt %loadPolly -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 ;
 ;    Verify that we won't privatize anything in the outer dimension
 ;
 ; CHECK:      #pragma known-parallel
+; PINFO:      for.cond: Loop is parallel.
 ; CHECK:      for (int c0 = 0; c0 < 2 * n; c0 += 1)
 ; CHECK:        #pragma simd reduction
+; PINFO-NEXT: for.cond1: Loop is not parallel.
 ; CHECK:        for (int c1 = 0; c1 <= 1023; c1 += 1)
 ; CHECK:          Stmt_for_body3(c0, c1);
 ;
diff --git a/polly/test/Isl/Ast/reduction_loop_reversal.ll b/polly/test/Isl/Ast/reduction_loop_reversal.ll
index 91fae5e58ea..a7de2a764ec 100644
--- a/polly/test/Isl/Ast/reduction_loop_reversal.ll
+++ b/polly/test/Isl/Ast/reduction_loop_reversal.ll
@@ -1,4 +1,5 @@
 ; RUN: opt %loadPolly -polly-import-jscop-dir=%S -polly-import-jscop -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 ;
 ; CHECK-NOT: #pragma simd{{\s*$}}
 ; CHECK: #pragma simd reduction
@@ -6,6 +7,9 @@
 ; CHECK: #pragma simd{{\s*$}}
 ; CHECK: Stmt_S1(n - c1)
 ;
+; PINFO:      for.cond2: Loop is parallel.
+; PINFO-NEXT: for.cond: Loop is not parallel.
+;
 ; void rlr(int *A, long n) {
 ;   for (long i = 0; i < 2 * n; i++)
 ; S0:  A[0] += i;
diff --git a/polly/test/Isl/Ast/reduction_modulo_schedule.ll b/polly/test/Isl/Ast/reduction_modulo_schedule.ll
index b85977e4dfe..986ae4f9c1d 100644
--- a/polly/test/Isl/Ast/reduction_modulo_schedule.ll
+++ b/polly/test/Isl/Ast/reduction_modulo_schedule.ll
@@ -1,4 +1,5 @@
 ; RUN: opt %loadPolly -polly-import-jscop-dir=%S -polly-import-jscop -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
+; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
 ;
 ; CHECK:       #pragma known-parallel reduction (+ : A)
 ; CHECK-NEXT:    for (int c0 = 0; c0 <= 2; c0 += 1) {
@@ -12,6 +13,9 @@
 ; CHECK-NEXT:        Stmt_S0(c1);
 ; CHECK-NEXT:    }
 ;
+; PINFO:      for.cond2: Loop is parallel.
+; PINFO-NEXT: for.cond: Loop is not parallel.
+;
 ; void rms(int *A, long n) {
 ;   for (long i = 0; i < 2 * n; i++)
 ; S0:  A[0] += i;
-- 
cgit v1.2.1
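
The projection that getScheduleForLoop applies to a statement schedule can be
reproduced in isolation with the isl C API. The following standalone program
is an illustrative sketch only (not part of the patch above); it hard-codes
the schedule from the in-code comment and assumes libisl is available for
compiling and linking.

  #include <isl/ctx.h>
  #include <isl/map.h>

  int main(void) {
    isl_ctx *Ctx = isl_ctx_alloc();
    // Full schedule of the innermost statement: Stmt[i0, i1] -> [i0, i1].
    isl_map *Schedule =
        isl_map_read_from_str(Ctx, "{ Stmt[i0, i1] -> [i0, i1] }");
    // For the outer loop, CurrDim = 0 and MaxDim = 2, so project out
    // MaxDim - CurrDim - 1 = 1 output dimension starting at CurrDim + 1 = 1.
    isl_map *Outer = isl_map_project_out(Schedule, isl_dim_out, 1, 1);
    isl_map_dump(Outer); // prints the projected map: { Stmt[i0, i1] -> [i0] }
    isl_map_free(Outer);
    isl_ctx_free(Ctx);
    return 0;
  }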