Diffstat:

 llvm/utils/lit/tests/lit.cfg               | 11 +++++++++--
 llvm/utils/lit/tests/shtest-run-at-line.py |  2 +-

 2 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/llvm/utils/lit/tests/lit.cfg b/llvm/utils/lit/tests/lit.cfg
index ebdcb5000b9..2830956f80d 100644
--- a/llvm/utils/lit/tests/lit.cfg
+++ b/llvm/utils/lit/tests/lit.cfg
@@ -44,10 +44,17 @@ for attribute in ('llvm_tools_dir', 'lit_tools_dir'):
     if directory:
         llvm_config.with_environment('PATH', directory, append_path=True)
 
+# This test suite calls %{lit} to test lit's behavior for the sample test
+# suites in %{inputs}.  This test suite's results are then determined in part
+# by %{lit}'s textual output, which includes the output of FileCheck calls
+# within %{inputs}'s test suites.  Thus, %{lit} clears environment variables
+# that can affect FileCheck's output.
 config.substitutions.append(('%{inputs}', os.path.join(
     config.test_source_root, 'Inputs')))
-config.substitutions.append(('%{lit}', "%%{python} %s" % (
-    os.path.join(lit_path, 'lit.py'),)))
+config.substitutions.append(('%{lit}',
+    "{env} %{{python}} {lit}".format(
+        env="env -u FILECHECK_OPTS -u FILECHECK_DUMP_INPUT_ON_FAILURE",
+        lit=os.path.join(lit_path, 'lit.py'))))
 config.substitutions.append(('%{python}', '"%s"' % (sys.executable)))
 
 # Enable coverage.py reporting, assuming the coverage module has been installed
diff --git a/llvm/utils/lit/tests/shtest-run-at-line.py b/llvm/utils/lit/tests/shtest-run-at-line.py
index 7e5d53b8e26..cd0e08137ee 100644
--- a/llvm/utils/lit/tests/shtest-run-at-line.py
+++ b/llvm/utils/lit/tests/shtest-run-at-line.py
@@ -1,7 +1,7 @@
 # Check that -vv makes the line number of the failing RUN command clear.
 # (-v is actually sufficient in the case of the internal shell.)
 #
-# RUN: env -u FILECHECK_OPTS not %{lit} -j 1 -vv %{inputs}/shtest-run-at-line > %t.out
+# RUN: not %{lit} -j 1 -vv %{inputs}/shtest-run-at-line > %t.out
 # RUN: FileCheck --input-file %t.out %s
 #
 # END.

