author     George Karpenkov <ekarpenkov@apple.com>    2017-10-02 17:59:12 +0000
committer  George Karpenkov <ekarpenkov@apple.com>    2017-10-02 17:59:12 +0000
commit     a807660918ec33d05c39fb20fffeab84775cfcbc (patch)
tree       59bdb9b6cca79fc479d5524ed03245827b73e78c /clang/utils/analyzer/SATestBuild.py
parent     f21a60233c8089ec47c0685db657df9f75188701 (diff)
download   bcm5719-llvm-a807660918ec33d05c39fb20fffeab84775cfcbc.tar.gz
           bcm5719-llvm-a807660918ec33d05c39fb20fffeab84775cfcbc.zip
[Analyzer] Make testing scripts flake8 compliant
Differential Revision: https://reviews.llvm.org/D38213
llvm-svn: 314692
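The change is mechanical restyling; no behavior change is intended. As a rough illustration of the kind of cleanups the patch applies, the sketch below shows a hypothetical helper (`which_old`/`which_new` are not code from SATestBuild.py) written first in the style flake8 warns about and then in the compliant form:

```python
import os


# Before: the kind of style flake8 flags -- E251 (spaces around a keyword
# default), E712 (comparison to True), E203 (space before ':'),
# E703 (trailing semicolon). Hypothetical example, not from the patch.
def which_old(command, paths = None):
    if os.path.exists(command) == True :
        return command;
    return None


# After: the flake8-clean equivalent, matching the style adopted in the patch.
def which_new(command, paths=None):
    if os.path.exists(command):
        return command
    return None
```

The bulk of the diff below is of exactly this character: leading comments turned into docstrings, keyword arguments written without spaces around `=`, long lines wrapped, stray semicolons removed, and two blank lines placed between top-level definitions.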
Diffstat (limited to 'clang/utils/analyzer/SATestBuild.py')
-rwxr-xr-x | clang/utils/analyzer/SATestBuild.py | 392 |
1 file changed, 230 insertions(+), 162 deletions(-)
```diff
diff --git a/clang/utils/analyzer/SATestBuild.py b/clang/utils/analyzer/SATestBuild.py
index c735996c3b2..26c519543e8 100755
--- a/clang/utils/analyzer/SATestBuild.py
+++ b/clang/utils/analyzer/SATestBuild.py
@@ -3,8 +3,8 @@
 """
 Static Analyzer qualification infrastructure.
 
-The goal is to test the analyzer against different projects, check for failures,
-compare results, and measure performance.
+The goal is to test the analyzer against different projects,
+check for failures, compare results, and measure performance.
 
 Repository Directory will contain sources of the projects as well as the
 information on how to build them and the expected output.
@@ -20,7 +20,8 @@ Note that the build tree must be inside the project dir.
 
 To test the build of the analyzer one would:
   - Copy over a copy of the Repository Directory. (TODO: Prefer to ensure that
-    the build directory does not pollute the repository to min network traffic).
+    the build directory does not pollute the repository to min network
+    traffic).
   - Build all projects, until error. Produce logs to report errors.
   - Compare results.
 
@@ -60,69 +61,78 @@ import multiprocessing
 
 #------------------------------------------------------------------------------
 
-def which(command, paths = None):
-    """which(command, [paths]) - Look up the given command in the paths string
-    (or the PATH environment variable, if unspecified)."""
+def which(command, paths=None):
+    """which(command, [paths]) - Look up the given command in the paths string
+    (or the PATH environment variable, if unspecified)."""
 
-    if paths is None:
-        paths = os.environ.get('PATH','')
+    if paths is None:
+        paths = os.environ.get('PATH', '')
 
-    # Check for absolute match first.
-    if os.path.exists(command):
-        return command
+    # Check for absolute match first.
+    if os.path.exists(command):
+        return command
 
-    # Would be nice if Python had a lib function for this.
-    if not paths:
-        paths = os.defpath
+    # Would be nice if Python had a lib function for this.
+    if not paths:
+        paths = os.defpath
 
-    # Get suffixes to search.
-    # On Cygwin, 'PATHEXT' may exist but it should not be used.
-    if os.pathsep == ';':
-        pathext = os.environ.get('PATHEXT', '').split(';')
-    else:
-        pathext = ['']
+    # Get suffixes to search.
+    # On Cygwin, 'PATHEXT' may exist but it should not be used.
+    if os.pathsep == ';':
+        pathext = os.environ.get('PATHEXT', '').split(';')
+    else:
+        pathext = ['']
+
+    # Search the paths...
+    for path in paths.split(os.pathsep):
+        for ext in pathext:
+            p = os.path.join(path, command + ext)
+            if os.path.exists(p):
+                return p
 
-    # Search the paths...
-    for path in paths.split(os.pathsep):
-        for ext in pathext:
-            p = os.path.join(path, command + ext)
-            if os.path.exists(p):
-                return p
+    return None
 
-    return None
 
-# Make sure we flush the output after every print statement.
 class flushfile(object):
+    """
+    Wrapper to flush the output after every print statement.
+    """
     def __init__(self, f):
         self.f = f
+
     def write(self, x):
         self.f.write(x)
         self.f.flush()
+
 sys.stdout = flushfile(sys.stdout)
+
 def getProjectMapPath():
     ProjectMapPath = os.path.join(os.path.abspath(os.curdir), ProjectMapFile)
     if not os.path.exists(ProjectMapPath):
         print "Error: Cannot find the Project Map file " + ProjectMapPath +\
-              "\nRunning script for the wrong directory?"
+            "\nRunning script for the wrong directory?"
         sys.exit(-1)
     return ProjectMapPath
 
+
 def getProjectDir(ID):
     return os.path.join(os.path.abspath(os.curdir), ID)
 
-def getSBOutputDirName(IsReferenceBuild) :
-    if IsReferenceBuild == True :
+
+def getSBOutputDirName(IsReferenceBuild):
+    if IsReferenceBuild:
         return SBOutputDirReferencePrefix + SBOutputDirName
-    else :
+    else:
        return SBOutputDirName
 
 #------------------------------------------------------------------------------
 # Configuration setup.
 #------------------------------------------------------------------------------
+
 # Find Clang for static analysis.
 if 'CC' in os.environ:
     Clang = os.environ['CC']
@@ -160,9 +170,10 @@ DiffsSummaryFileName = "diffs.txt"
 SBOutputDirName = "ScanBuildResults"
 SBOutputDirReferencePrefix = "Ref"
 
-# The name of the directory storing the cached project source. If this directory
-# does not exist, the download script will be executed. That script should
-# create the "CachedSource" directory and download the project source into it.
+# The name of the directory storing the cached project source. If this
+# directory does not exist, the download script will be executed.
+# That script should create the "CachedSource" directory and download the
+# project source into it.
 CachedSourceDirName = "CachedSource"
 
 # The name of the directory containing the source code that will be analyzed.
@@ -178,7 +189,7 @@ PatchfileName = "changes_for_analyzer.patch"
 # The list of checkers used during analyzes.
 # Currently, consists of all the non-experimental checkers, plus a few alpha
 # checkers we don't want to regress on.
-Checkers=",".join([
+Checkers = ",".join([
     "alpha.unix.SimpleStream",
     "alpha.security.taint",
     "cplusplus.NewDeleteLeaks",
@@ -197,46 +208,60 @@ Verbose = 1
 # Test harness logic.
 #------------------------------------------------------------------------------
 
-# Run pre-processing script if any.
+
 def runCleanupScript(Dir, PBuildLogFile):
+    """
+    Run pre-processing script if any.
+    """
     Cwd = os.path.join(Dir, PatchedSourceDirName)
     ScriptPath = os.path.join(Dir, CleanupScript)
     runScript(ScriptPath, PBuildLogFile, Cwd)
 
-# Run the script to download the project, if it exists.
+
 def runDownloadScript(Dir, PBuildLogFile):
+    """
+    Run the script to download the project, if it exists.
+    """
     ScriptPath = os.path.join(Dir, DownloadScript)
     runScript(ScriptPath, PBuildLogFile, Dir)
 
-# Run the provided script if it exists.
+
 def runScript(ScriptPath, PBuildLogFile, Cwd):
+    """
+    Run the provided script if it exists.
+    """
     if os.path.exists(ScriptPath):
         try:
             if Verbose == 1:
                 print " Executing: %s" % (ScriptPath,)
-            check_call("chmod +x '%s'" % ScriptPath, cwd = Cwd,
-                       stderr=PBuildLogFile,
-                       stdout=PBuildLogFile,
-                       shell=True)
-            check_call("'%s'" % ScriptPath, cwd = Cwd, stderr=PBuildLogFile,
-                       stdout=PBuildLogFile,
-                       shell=True)
+            check_call("chmod +x '%s'" % ScriptPath, cwd=Cwd,
+                       stderr=PBuildLogFile,
+                       stdout=PBuildLogFile,
+                       shell=True)
+            check_call("'%s'" % ScriptPath, cwd=Cwd,
+                       stderr=PBuildLogFile,
+                       stdout=PBuildLogFile,
+                       shell=True)
         except:
-            print "Error: Running %s failed. See %s for details." % (ScriptPath,
-                  PBuildLogFile.name)
+            print "Error: Running %s failed. See %s for details." % (
+                ScriptPath, PBuildLogFile.name)
             sys.exit(-1)
 
-# Download the project and apply the local patchfile if it exists.
+
 def downloadAndPatch(Dir, PBuildLogFile):
+    """
+    Download the project and apply the local patchfile if it exists.
+    """
     CachedSourceDirPath = os.path.join(Dir, CachedSourceDirName)
 
     # If the we don't already have the cached source, run the project's
     # download script to download it.
     if not os.path.exists(CachedSourceDirPath):
-      runDownloadScript(Dir, PBuildLogFile)
-      if not os.path.exists(CachedSourceDirPath):
-        print "Error: '%s' not found after download." % (CachedSourceDirPath)
-        exit(-1)
+        runDownloadScript(Dir, PBuildLogFile)
+        if not os.path.exists(CachedSourceDirPath):
+            print "Error: '%s' not found after download." % (
+                CachedSourceDirPath)
+            exit(-1)
 
     PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName)
@@ -248,6 +273,7 @@ def downloadAndPatch(Dir, PBuildLogFile):
     shutil.copytree(CachedSourceDirPath, PatchedSourceDirPath, symlinks=True)
     applyPatch(Dir, PBuildLogFile)
 
+
 def applyPatch(Dir, PBuildLogFile):
     PatchfilePath = os.path.join(Dir, PatchfileName)
     PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName)
@@ -258,30 +284,33 @@ def applyPatch(Dir, PBuildLogFile):
     print " Applying patch."
     try:
         check_call("patch -p1 < '%s'" % (PatchfilePath),
-                   cwd = PatchedSourceDirPath,
-                   stderr=PBuildLogFile,
-                   stdout=PBuildLogFile,
-                   shell=True)
+                   cwd=PatchedSourceDirPath,
+                   stderr=PBuildLogFile,
+                   stdout=PBuildLogFile,
+                   shell=True)
     except:
         print "Error: Patch failed. See %s for details." % (PBuildLogFile.name)
         sys.exit(-1)
 
-# Build the project with scan-build by reading in the commands and
-# prefixing them with the scan-build options.
+
 def runScanBuild(Dir, SBOutputDir, PBuildLogFile):
+    """
+    Build the project with scan-build by reading in the commands and
+    prefixing them with the scan-build options.
+    """
     BuildScriptPath = os.path.join(Dir, BuildScript)
     if not os.path.exists(BuildScriptPath):
         print "Error: build script is not defined: %s" % BuildScriptPath
         sys.exit(-1)
 
     AllCheckers = Checkers
-    if os.environ.has_key('SA_ADDITIONAL_CHECKERS'):
+    if 'SA_ADDITIONAL_CHECKERS' in os.environ:
         AllCheckers = AllCheckers + ',' + os.environ['SA_ADDITIONAL_CHECKERS']
 
     # Run scan-build from within the patched source directory.
     SBCwd = os.path.join(Dir, PatchedSourceDirName)
 
-    SBOptions = "--use-analyzer '%s' " % Clang
+    SBOptions = "--use-analyzer '%s' " % Clang
     SBOptions += "-plist-html -o '%s' " % SBOutputDir
     SBOptions += "-enable-checker " + AllCheckers + " "
     SBOptions += "--keep-empty "
@@ -294,7 +323,7 @@ def runScanBuild(Dir, SBOutputDir, PBuildLogFile):
             for Command in SBCommandFile:
                 Command = Command.strip()
                 if len(Command) == 0:
-                    continue;
+                    continue
                 # If using 'make', auto imply a -jX argument
                 # to speed up analysis. xcodebuild will
                 # automatically use the maximum number of cores.
@@ -304,42 +333,45 @@ def runScanBuild(Dir, SBOutputDir, PBuildLogFile):
                 SBCommand = SBPrefix + Command
                 if Verbose == 1:
                     print " Executing: %s" % (SBCommand,)
-                check_call(SBCommand, cwd = SBCwd, stderr=PBuildLogFile,
-                           stdout=PBuildLogFile,
-                           shell=True)
+                check_call(SBCommand, cwd=SBCwd,
+                           stderr=PBuildLogFile,
+                           stdout=PBuildLogFile,
+                           shell=True)
     except:
-        print "Error: scan-build failed. See ",PBuildLogFile.name,\
+        print "Error: scan-build failed. See ", PBuildLogFile.name,\
             " for details."
         raise
 
+
 def hasNoExtension(FileName):
     (Root, Ext) = os.path.splitext(FileName)
-    if ((Ext == "")) :
-        return True
-    return False
+    return (Ext == "")
+
 
 def isValidSingleInputFile(FileName):
     (Root, Ext) = os.path.splitext(FileName)
-    if ((Ext == ".i") | (Ext == ".ii") |
-        (Ext == ".c") | (Ext == ".cpp") |
-        (Ext == ".m") | (Ext == "")) :
-        return True
-    return False
-
-# Get the path to the SDK for the given SDK name. Returns None if
-# the path cannot be determined.
+    return Ext in (".i", ".ii", ".c", ".cpp", ".m", "")
+
+
 def getSDKPath(SDKName):
+    """
+    Get the path to the SDK for the given SDK name. Returns None if
+    the path cannot be determined.
+    """
     if which("xcrun") is None:
         return None
 
     Cmd = "xcrun --sdk " + SDKName + " --show-sdk-path"
     return check_output(Cmd, shell=True).rstrip()
 
-# Run analysis on a set of preprocessed files.
+
 def runAnalyzePreprocessed(Dir, SBOutputDir, Mode):
+    """
+    Run analysis on a set of preprocessed files.
+    """
     if os.path.exists(os.path.join(Dir, BuildScript)):
         print "Error: The preprocessed files project should not contain %s" % \
-               BuildScript
+            BuildScript
         raise Exception()
 
     CmdPrefix = Clang + " -cc1 "
@@ -348,17 +380,18 @@ def runAnalyzePreprocessed(Dir, SBOutputDir, Mode):
     # with the OS X SDK.
     SDKPath = getSDKPath("macosx")
     if SDKPath is not None:
-        CmdPrefix += "-isysroot " + SDKPath + " "
+        CmdPrefix += "-isysroot " + SDKPath + " "
 
     CmdPrefix += "-analyze -analyzer-output=plist -w "
-    CmdPrefix += "-analyzer-checker=" + Checkers +" -fcxx-exceptions -fblocks "
+    CmdPrefix += "-analyzer-checker=" + Checkers
+    CmdPrefix += " -fcxx-exceptions -fblocks "
 
-    if (Mode == 2) :
+    if (Mode == 2):
         CmdPrefix += "-std=c++11 "
 
     PlistPath = os.path.join(Dir, SBOutputDir, "date")
-    FailPath = os.path.join(PlistPath, "failures");
-    os.makedirs(FailPath);
+    FailPath = os.path.join(PlistPath, "failures")
+    os.makedirs(FailPath)
 
     for FullFileName in glob.glob(Dir + "/*"):
         FileName = os.path.basename(FullFileName)
@@ -367,7 +400,7 @@ def runAnalyzePreprocessed(Dir, SBOutputDir, Mode):
         # Only run the analyzes on supported files.
         if (hasNoExtension(FileName)):
             continue
-        if (isValidSingleInputFile(FileName) == False):
+        if (not isValidSingleInputFile(FileName)):
             print "Error: Invalid single input file %s." % (FullFileName,)
             raise Exception()
 
@@ -378,44 +411,47 @@
         try:
             if Verbose == 1:
                 print " Executing: %s" % (Command,)
-            check_call(Command, cwd = Dir, stderr=LogFile,
-                       stdout=LogFile,
-                       shell=True)
+            check_call(Command, cwd=Dir, stderr=LogFile,
+                       stdout=LogFile,
+                       shell=True)
         except CalledProcessError, e:
             print "Error: Analyzes of %s failed. See %s for details." \
-                  "Error code %d." % \
-                  (FullFileName, LogFile.name, e.returncode)
+                  "Error code %d." % (
+                      FullFileName, LogFile.name, e.returncode)
             Failed = True
         finally:
             LogFile.close()
 
         # If command did not fail, erase the log file.
-        if Failed == False:
-            os.remove(LogFile.name);
+        if not Failed:
+            os.remove(LogFile.name)
+
 
 def getBuildLogPath(SBOutputDir):
-    return os.path.join(SBOutputDir, LogFolderName, BuildLogName)
+    return os.path.join(SBOutputDir, LogFolderName, BuildLogName)
+
 
 def removeLogFile(SBOutputDir):
-    BuildLogPath = getBuildLogPath(SBOutputDir)
-    # Clean up the log file.
-    if (os.path.exists(BuildLogPath)) :
-        RmCommand = "rm '%s'" % BuildLogPath
-        if Verbose == 1:
-            print " Executing: %s" % (RmCommand,)
-        check_call(RmCommand, shell=True)
+    BuildLogPath = getBuildLogPath(SBOutputDir)
+    # Clean up the log file.
+    if (os.path.exists(BuildLogPath)):
+        RmCommand = "rm '%s'" % BuildLogPath
+        if Verbose == 1:
+            print " Executing: %s" % (RmCommand,)
+        check_call(RmCommand, shell=True)
+
 
 def buildProject(Dir, SBOutputDir, ProjectBuildMode, IsReferenceBuild):
     TBegin = time.time()
 
     BuildLogPath = getBuildLogPath(SBOutputDir)
     print "Log file: %s" % (BuildLogPath,)
-    print "Output directory: %s" %(SBOutputDir, )
+    print "Output directory: %s" % (SBOutputDir, )
 
     removeLogFile(SBOutputDir)
 
     # Clean up scan build results.
-    if (os.path.exists(SBOutputDir)) :
+    if (os.path.exists(SBOutputDir)):
         RmCommand = "rm -r '%s'" % SBOutputDir
         if Verbose == 1:
             print " Executing: %s" % (RmCommand,)
@@ -437,7 +473,8 @@ def buildProject(Dir, SBOutputDir, ProjectBuildMode, IsReferenceBuild):
         normalizeReferenceResults(Dir, SBOutputDir, ProjectBuildMode)
 
     print "Build complete (time: %.2f). See the log for more details: %s" % \
-        ((time.time()-TBegin), BuildLogPath)
+        ((time.time() - TBegin), BuildLogPath)
+
 
 def normalizeReferenceResults(Dir, SBOutputDir, ProjectBuildMode):
     """
@@ -452,15 +489,19 @@ def normalizeReferenceResults(Dir, SBOutputDir, ProjectBuildMode):
         PathPrefix = Dir
         if (ProjectBuildMode == 1):
             PathPrefix = os.path.join(Dir, PatchedSourceDirName)
-        Paths = [SourceFile[len(PathPrefix)+1:]\
-                 if SourceFile.startswith(PathPrefix)\
-                 else SourceFile for SourceFile in Data['files']]
+        Paths = [SourceFile[len(PathPrefix) + 1:]
+                 if SourceFile.startswith(PathPrefix)
+                 else SourceFile for SourceFile in Data['files']]
         Data['files'] = Paths
         plistlib.writePlist(Data, Plist)
 
-# A plist file is created for each call to the analyzer(each source file).
-# We are only interested on the once that have bug reports, so delete the rest.
+
 def CleanUpEmptyPlists(SBOutputDir):
+    """
+    A plist file is created for each call to the analyzer(each source file).
+    We are only interested on the once that have bug reports,
+    so delete the rest.
+    """
     for F in glob.glob(SBOutputDir + "/*/*.plist"):
         P = os.path.join(SBOutputDir, F)
@@ -470,55 +511,66 @@ def CleanUpEmptyPlists(SBOutputDir):
             os.remove(P)
             continue
 
-# Given the scan-build output directory, checks if the build failed
-# (by searching for the failures directories). If there are failures, it
-# creates a summary file in the output directory.
+
 def checkBuild(SBOutputDir):
+    """
+    Given the scan-build output directory, checks if the build failed
+    (by searching for the failures directories). If there are failures, it
+    creates a summary file in the output directory.
+
+    """
     # Check if there are failures.
     Failures = glob.glob(SBOutputDir + "/*/failures/*.stderr.txt")
-    TotalFailed = len(Failures);
+    TotalFailed = len(Failures)
     if TotalFailed == 0:
         CleanUpEmptyPlists(SBOutputDir)
         Plists = glob.glob(SBOutputDir + "/*/*.plist")
         print "Number of bug reports (non-empty plist files) produced: %d" %\
-              len(Plists)
-        return;
+            len(Plists)
+        return
 
     # Create summary file to display when the build fails.
-    SummaryPath = os.path.join(SBOutputDir, LogFolderName, FailuresSummaryFileName)
+    SummaryPath = os.path.join(
+        SBOutputDir, LogFolderName, FailuresSummaryFileName)
    if (Verbose > 0):
        print " Creating the failures summary file %s" % (SummaryPath,)
 
    with open(SummaryPath, "w+") as SummaryLog:
        SummaryLog.write("Total of %d failures discovered.\n" % (TotalFailed,))
        if TotalFailed > NumOfFailuresInSummary:
-            SummaryLog.write("See the first %d below.\n"
-                             % (NumOfFailuresInSummary,))
+            SummaryLog.write("See the first %d below.\n" % (
+                NumOfFailuresInSummary,))
        # TODO: Add a line "See the results folder for more."
 
        Idx = 0
        for FailLogPathI in Failures:
            if Idx >= NumOfFailuresInSummary:
-                break;
+                break
            Idx += 1
-            SummaryLog.write("\n-- Error #%d -----------\n" % (Idx,));
+            SummaryLog.write("\n-- Error #%d -----------\n" % (Idx,))
            with open(FailLogPathI, "r") as FailLogI:
-                shutil.copyfileobj(FailLogI, SummaryLog);
+                shutil.copyfileobj(FailLogI, SummaryLog)
 
    print "Error: analysis failed. See ", SummaryPath
    sys.exit(-1)
 
-# Auxiliary object to discard stdout.
+
 class Discarder(object):
+    """
+    Auxiliary object to discard stdout.
+    """
    def write(self, text):
-        pass # do nothing
-
-# Compare the warnings produced by scan-build.
-# Strictness defines the success criteria for the test:
-#   0 - success if there are no crashes or analyzer failure.
-#   1 - success if there are no difference in the number of reported bugs.
-#   2 - success if all the bug reports are identical.
-def runCmpResults(Dir, Strictness = 0):
+        pass  # do nothing
+
+
+def runCmpResults(Dir, Strictness=0):
+    """
+    Compare the warnings produced by scan-build.
+    Strictness defines the success criteria for the test:
+      0 - success if there are no crashes or analyzer failure.
+      1 - success if there are no difference in the number of reported bugs.
+      2 - success if all the bug reports are identical.
+    """
    TBegin = time.time()
 
    RefDir = os.path.join(Dir, SBOutputDirReferencePrefix + SBOutputDirName)
@@ -559,29 +611,35 @@
        DiffsPath = os.path.join(NewDir, DiffsSummaryFileName)
        PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName)
        Opts = CmpRuns.CmpOptions(DiffsPath, "", PatchedSourceDirPath)
-        # Discard everything coming out of stdout (CmpRun produces a lot of them).
+        # Discard everything coming out of stdout
+        # (CmpRun produces a lot of them).
        OLD_STDOUT = sys.stdout
        sys.stdout = Discarder()
        # Scan the results, delete empty plist files.
        NumDiffs, ReportsInRef, ReportsInNew = \
            CmpRuns.dumpScanBuildResultsDiff(RefDir, NewDir, Opts, False)
        sys.stdout = OLD_STDOUT
-        if (NumDiffs > 0) :
+        if (NumDiffs > 0):
            print "Warning: %r differences in diagnostics. See %s" % \
                (NumDiffs, DiffsPath,)
        if Strictness >= 2 and NumDiffs > 0:
            print "Error: Diffs found in strict mode (2)."
            sys.exit(-1)
        elif Strictness >= 1 and ReportsInRef != ReportsInNew:
-            print "Error: The number of results are different in strict mode (1)."
+            print "Error: The number of results are different in "\
+                  "strict mode (1)."
            sys.exit(-1)
 
-    print "Diagnostic comparison complete (time: %.2f)." % (time.time()-TBegin)
+    print "Diagnostic comparison complete (time: %.2f)." % (
+        time.time() - TBegin)
    return (NumDiffs > 0)
 
+
 def cleanupReferenceResults(SBOutputDir):
-    # Delete html, css, and js files from reference results. These can
-    # include multiple copies of the benchmark source and so get very large.
+    """
+    Delete html, css, and js files from reference results. These can
+    include multiple copies of the benchmark source and so get very large.
+    """
    Extensions = ["html", "css", "js"]
    for E in Extensions:
        for F in glob.glob("%s/*/*.%s" % (SBOutputDir, E)):
@@ -592,6 +650,7 @@ def cleanupReferenceResults(SBOutputDir):
    # Remove the log file. It leaks absolute path names.
    removeLogFile(SBOutputDir)
 
+
 def updateSVN(Mode, PMapFile):
    """
    svn delete or svn add (depending on `Mode`) all folders defined in the file
@@ -625,7 +684,8 @@ def updateSVN(Mode, PMapFile):
        print "Error: SVN update failed."
        sys.exit(-1)
 
-def testProject(ID, ProjectBuildMode, IsReferenceBuild=False, Strictness = 0):
+
+def testProject(ID, ProjectBuildMode, IsReferenceBuild=False, Strictness=0):
    print " \n\n--- Building project %s" % (ID,)
 
    TBegin = time.time()
@@ -648,72 +708,80 @@ def testProject(ID, ProjectBuildMode, IsReferenceBuild=False, Strictness=0):
        runCmpResults(Dir, Strictness)
 
    print "Completed tests for project %s (time: %.2f)." % \
-          (ID, (time.time()-TBegin))
+        (ID, (time.time() - TBegin))
 
+
 def isCommentCSVLine(Entries):
-    # Treat CSV lines starting with a '#' as a comment.
+    """
+    Treat CSV lines starting with a '#' as a comment.
+    """
    return len(Entries) > 0 and Entries[0].startswith("#")
 
+
 def projectFileHandler():
    return open(getProjectMapPath(), "rb")
 
+
 def iterateOverProjects(PMapFile):
    """
    Iterate over all projects defined in the project file handler `PMapFile`
    from the start.
    """
    PMapFile.seek(0)
-    try:
-        for I in csv.reader(PMapFile):
-            if (isCommentCSVLine(I)):
-                continue
-            yield I
-    except:
-        print "Error occurred. Premature termination."
-        raise
+    for I in csv.reader(PMapFile):
+        if (isCommentCSVLine(I)):
+            continue
+        yield I
 
+
 def validateProjectFile(PMapFile):
    """
    Validate project file.
    """
    for I in iterateOverProjects(PMapFile):
-        if (len(I) != 2) :
+        if (len(I) != 2):
            print "Error: Rows in the ProjectMapFile should have 2 entries."
            raise Exception()
        if (not ((I[1] == "0") | (I[1] == "1") | (I[1] == "2"))):
            print "Error: Second entry in the ProjectMapFile should be 0" \
-                  " (single file), 1 (project), or 2(single file c++11)."
+                " (single file), 1 (project), or 2(single file c++11)."
            raise Exception()
 
-def testAll(IsReferenceBuild = False, UpdateSVN = False, Strictness = 0):
+
+def testAll(IsReferenceBuild=False, UpdateSVN=False, Strictness=0):
    with projectFileHandler() as PMapFile:
        validateProjectFile(PMapFile)
 
        # When we are regenerating the reference results, we might need to
        # update svn. Remove reference results from SVN.
-        if UpdateSVN == True:
-            assert(IsReferenceBuild == True);
-            updateSVN("delete", PMapFile);
+        if UpdateSVN:
+            assert(IsReferenceBuild)
+            updateSVN("delete", PMapFile)
 
        # Test the projects.
        for (ProjName, ProjBuildMode) in iterateOverProjects(PMapFile):
-            testProject(ProjName, int(ProjBuildMode), IsReferenceBuild, Strictness)
+            testProject(
+                ProjName, int(ProjBuildMode), IsReferenceBuild, Strictness)
 
        # Re-add reference results to SVN.
-        if UpdateSVN == True:
-            updateSVN("add", PMapFile);
+        if UpdateSVN:
+            updateSVN("add", PMapFile)
 
+
 if __name__ == '__main__':
    # Parse command line arguments.
-    Parser = argparse.ArgumentParser(description='Test the Clang Static Analyzer.')
+    Parser = argparse.ArgumentParser(
+        description='Test the Clang Static Analyzer.')
    Parser.add_argument('--strictness', dest='strictness', type=int, default=0,
-                        help='0 to fail on runtime errors, 1 to fail when the number\
-                             of found bugs are different from the reference, 2 to \
-                             fail on any difference from the reference. Default is 0.')
-    Parser.add_argument('-r', dest='regenerate', action='store_true', default=False,
-                        help='Regenerate reference output.')
+                        help='0 to fail on runtime errors, 1 to fail when the \
+                             number of found bugs are different from the \
+                             reference, 2 to fail on any difference from the \
+                             reference. Default is 0.')
+    Parser.add_argument('-r', dest='regenerate', action='store_true',
+                        default=False, help='Regenerate reference output.')
    Parser.add_argument('-rs', dest='update_reference', action='store_true',
-                        default=False, help='Regenerate reference output and update svn.')
+                        default=False,
+                        help='Regenerate reference output and update svn.')
    Args = Parser.parse_args()
 
    IsReference = False
```
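For reviewers skimming the diff: the comparison contract documented in `runCmpResults` is unchanged by this patch. It boils down to the check sketched below; `comparison_passes` is an illustrative paraphrase, not a function from the script:

```python
def comparison_passes(num_diffs, reports_in_ref, reports_in_new, strictness=0):
    """Paraphrase of the runCmpResults success criteria (illustrative only)."""
    if strictness >= 2 and num_diffs > 0:
        return False   # strictness 2: any difference between bug reports fails
    if strictness >= 1 and reports_in_ref != reports_in_new:
        return False   # strictness 1: a different number of reports fails
    return True        # strictness 0: only crashes or analyzer failures fail
```

The strictness level is selected with the `--strictness` option defined in the argparse block above; `-r` regenerates the reference results and `-rs` regenerates them and updates svn.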