litsupport: Rework test module support
- Moved modules to litsupport.modules
- Import all modules on startup
- Only apply modules in config.test_modules to a benchmark. This allows
  the module list to be modified per directory in `lit.local.cfg`.

llvm-svn: 314239
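As a sketch of the per-directory mechanism mentioned in the last bullet (hypothetical contents; `lit.local.cfg` files are plain Python that lit evaluates with the suite `config` in scope, and `compiletime`/`perf` are existing module names used purely for illustration):

```python
# Hypothetical lit.local.cfg for one benchmark directory (sketch).
# Drop a module for the benchmarks below this directory ...
config.test_modules = [m for m in config.test_modules if m != 'compiletime']
# ... or opt into an extra module just for this subtree.
config.test_modules = config.test_modules + ['perf']
```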
MatzeB committed Sep 26, 2017
1 parent 94704ab commit fab1135
Showing 15 changed files with 57 additions and 64 deletions.
29 changes: 15 additions & 14 deletions lit.cfg
@@ -2,11 +2,24 @@ import logging
import os
import site
import sys

# Setup logging.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
file_log = logging.FileHandler("%s/test.log" % config.test_exec_root,
                               mode="w")
file_log.setLevel(logging.DEBUG)
logger.addHandler(file_log)
console_log = logging.StreamHandler()
console_log.setLevel(logging.WARNING)
logger.addHandler(console_log)

# Load test-suite litsupport code
site.addsitedir(os.path.dirname(__file__))
from litsupport import test
import litsupport.test

config.name = 'test-suite'
config.test_format = test.TestSuiteTest()
config.test_format = litsupport.test.TestSuiteTest()
config.suffixes = ['.test']
config.excludes = ['ABI-Testsuite']
config.remote_flags = ""
@@ -23,17 +36,6 @@ if previous_results_file:
else:
    config.previous_results = None

# Setup logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
file_log = logging.FileHandler("%s/test.log" % config.test_exec_root,
                               mode="w")
file_log.setLevel(logging.DEBUG)
logger.addHandler(file_log)
console_log = logging.StreamHandler()
console_log.setLevel(logging.WARNING)
logger.addHandler(console_log)

# Pass on some options to context object:
config.perf_profile_events = "cycles,cache-misses,branch-misses,instructions"
if lit_config.params.get('perf_profile_events'):
@@ -42,4 +44,3 @@ if lit_config.params.get('perf_profile_events'):
# Find and initialize lit modules.
if lit_config.params.get('profile') == 'perf':
    config.test_modules += ['perf']
test.load_modules(config.test_modules)
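For context, the `profile` parameter read above is an ordinary lit user parameter, so a perf-profiled run would typically be launched with something like `llvm-lit --param profile=perf ...` (illustrative invocation; the exact driver and options depend on the local build). With this commit, lit.cfg only names the modules in `config.test_modules`; the actual lookup and application happen later in `litsupport/test.py`.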
1 change: 0 additions & 1 deletion litsupport-tests/run/check/test.log

This file was deleted.

2 changes: 2 additions & 0 deletions litsupport/README.md
@@ -72,6 +72,8 @@ code; typical examples are:
`-fprofile-instr-generate` and enables the `profilegen` module that runs
`llvm-profdata` after running the benchmarks.

Available modules are found in the `litsupport/modules` directory.

Developing New Modules
----------------------

16 changes: 16 additions & 0 deletions litsupport/modules/__init__.py
@@ -0,0 +1,16 @@
import importlib
import logging
import pkgutil

# Load all modules
modules = dict()
for importer, modname, ispkg in pkgutil.walk_packages(path=__path__,
                                                      prefix=__name__+'.'):
    module = importlib.import_module(modname)
    if not hasattr(module, 'mutatePlan'):
        logging.error('Skipping %s: No mutatePlan function' % modname)
        continue
    assert modname.startswith('litsupport.modules.')
    shortname = modname[len('litsupport.modules.'):]
    modules[shortname] = module
    logging.info("Loaded test module %s" % module.__file__)
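For illustration, a minimal module this loader would accept might look like the following sketch (hypothetical file `litsupport/modules/example.py`; the loader above only requires a `mutatePlan` function, and the `metric_collectors` line assumes the `TestPlan` interface that the built-in metric-collecting modules use):

```python
"""Hypothetical example module: attaches a fixed metric to every benchmark."""
import lit.Test


def mutatePlan(context, plan):
    # Called from litsupport.test for every module named in
    # config.test_modules. Assumption: plan.metric_collectors is the list of
    # callables the plan executor invokes to gather extra metrics.
    plan.metric_collectors.append(
        lambda ctx: {'example_metric': lit.Test.toMetricValue(1)})
```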
File renamed without changes.
@@ -1,6 +1,6 @@
"""Test module to collect compile time metrics. This just finds and summarizes
the *.time files generated by the build."""
from litsupport import timeit
from litsupport.modules import timeit
import os


File renamed without changes.
2 changes: 1 addition & 1 deletion litsupport/perf.py → litsupport/modules/perf.py
@@ -2,7 +2,7 @@
perf tool."""
from litsupport import shellcommand
from litsupport import testplan
from litsupport import run_under
from litsupport.modules import run_under


def _mutateCommandLine(context, commandline):
File renamed without changes.
File renamed without changes.
File renamed without changes.
10 changes: 5 additions & 5 deletions litsupport/run_under.py → litsupport/modules/run_under.py
@@ -8,11 +8,11 @@ def _mutateCommandLine(context, commandline):
    cmd = shellcommand.parse(commandline)
    run_under_cmd = shellcommand.parse(context.config.run_under)

    if (run_under_cmd.stdin is not None or
        run_under_cmd.stdout is not None or
        run_under_cmd.stderr is not None or
        run_under_cmd.workdir is not None or
        run_under_cmd.envvars):
    if run_under_cmd.stdin is not None or \
       run_under_cmd.stdout is not None or \
       run_under_cmd.stderr is not None or \
       run_under_cmd.workdir is not None or \
       run_under_cmd.envvars:
        raise Exception("invalid run_under argument!")

    cmd.wrap(run_under_cmd.executable, run_under_cmd.arguments)
Expand Down
File renamed without changes.
File renamed without changes.
59 changes: 17 additions & 42 deletions litsupport/test.py
@@ -2,32 +2,20 @@
Main integration for llvm-lit: This defines a lit test format.
Also contains logic to load benchmark modules.
"""
import importlib
import lit
import lit.TestRunner
import lit.util
import lit.formats
import litsupport.modules
import litsupport.modules.hash
import litsupport.testfile
import litsupport.testplan
import logging
import os
from lit.formats import ShTest
from lit.TestRunner import getTempPaths
from lit import Test
from lit.util import to_bytes, to_string

from litsupport import codesize
from litsupport import compiletime
from litsupport import hash
from litsupport import perf
from litsupport import profilegen
from litsupport import remote
from litsupport import run
from litsupport import run_under
from litsupport import testfile
from litsupport import testplan
from litsupport import timeit


SKIPPED = lit.Test.ResultCode('SKIPPED', False)
NOEXE = lit.Test.ResultCode('NOEXE', True)
modules = []


class TestContext:
@@ -43,23 +31,7 @@ def __init__(self, test, litConfig, tmpDir, tmpBase):
        self.tmpBase = tmpBase


def load_modules(test_modules):
    for name in test_modules:
        modulename = 'litsupport.%s' % name
        try:
            module = importlib.import_module(modulename)
        except ImportError as e:
            logging.error("Could not import module '%s'" % modulename)
            sys.exit(1)
        if not hasattr(module, 'mutatePlan'):
            logging.error("Invalid test module '%s': No mutatePlan() function."
                          % modulename)
            sys.exit(1)
        logging.info("Loaded test module %s" % module.__file__)
        modules.append(module)


class TestSuiteTest(ShTest):
class TestSuiteTest(lit.formats.ShTest):
    def __init__(self):
        super(TestSuiteTest, self).__init__()

@@ -71,11 +43,11 @@ def execute(self, test, litConfig):
            return lit.Test.Result(Test.PASS)

        # Parse .test file and initialize context
        tmpDir, tmpBase = getTempPaths(test)
        tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
        lit.util.mkdir_p(os.path.dirname(tmpBase))
        context = TestContext(test, litConfig, tmpDir, tmpBase)
        testfile.parse(context, test.getSourcePath())
        plan = testplan.TestPlan()
        litsupport.testfile.parse(context, test.getSourcePath())
        plan = litsupport.testplan.TestPlan()

        # Report missing test executables.
        if not os.path.exists(context.executable):
@@ -84,19 +56,22 @@ def execute(self, test, litConfig):

        # Skip unchanged tests
        if config.previous_results:
            hash.compute(context)
            if hash.same_as_previous(context):
            litsupport.modules.hash.compute(context)
            if litsupport.modules.hash.same_as_previous(context):
                result = lit.Test.Result(
                    SKIPPED, 'Executable identical to previous run')
                val = lit.Test.toMetricValue(context.executable_hash)
                result.addMetric('hash', val)
                return result

        # Let test modules modify the test plan.
        for module in modules:
        for modulename in config.test_modules:
            module = litsupport.modules.modules.get(modulename)
            if module is None:
                raise Exception("Unknown testmodule '%s'" % modulename)
            module.mutatePlan(context, plan)

        # Execute Test plan
        result = testplan.executePlanTestResult(context, plan)
        result = litsupport.testplan.executePlanTestResult(context, plan)

        return result
