1 parent 69f3b19 commit 05c9ae8
pyperformance/tests/__init__.py
@@ -1,8 +1,27 @@
import contextlib
import errno
import os
+import subprocess
+import sys
import tempfile

+DATA_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data'))
+
+
+def run_cmd(cmd):
+    print("Execute: %s" % ' '.join(cmd))
+    proc = subprocess.Popen(cmd)
+    try:
+        proc.wait()
+    except:  # noqa
+        proc.kill()
+        raise
+
+    exitcode = proc.returncode
+    if exitcode:
+        sys.exit(exitcode)
+
+
@contextlib.contextmanager
def temporary_file(**kwargs):
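The run_cmd() helper above echoes the command, waits for the child, kills it if the wait is interrupted, and terminates the calling process with the child's exit code on failure. For comparison only (not part of this commit), roughly the same behaviour can be sketched with subprocess.run(); the name run_cmd_alt is made up for illustration:

import subprocess
import sys

def run_cmd_alt(cmd):
    # Echo the command line, run it, and propagate a non-zero exit code,
    # mirroring what run_cmd() in pyperformance/tests/__init__.py does.
    print("Execute: %s" % ' '.join(cmd))
    proc = subprocess.run(cmd)
    if proc.returncode:
        sys.exit(proc.returncode)

subprocess.run() already kills the child on an exception while waiting, which is why the Popen/wait/kill dance is not needed in that variant.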
pyperformance/tests/data/user_defined_bm/MANIFEST
@@ -0,0 +1,4 @@
+[benchmarks]
+
+name metafile
+1 <local>
pyperformance/tests/data/user_defined_bm/base.toml
@@ -0,0 +1,3 @@
+[project]
+dynamic = ["name"]
+version = "1.0.0"
pyperformance/tests/data/user_defined_bm/bm_1/pyproject.toml
@@ -0,0 +1,11 @@
+[project]
+name = "test_bm_1"
+requires-python = ">=3.8"
+dependencies = ["pyperf"]
+urls = { repository = "https://github.com/python/pyperformance" }
+dynamic = ["version"]
+
+[tool.pyperformance]
+name = "1"
+tags = "test"
+inherits = ".."
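Taken together, base.toml supplies the shared [project] fields, bm_1/pyproject.toml points back at it with inherits = ".." under [tool.pyperformance], and the MANIFEST's "1 <local>" row registers a benchmark named "1" that lives in the bm_1 directory next to the manifest. A quick way to eyeball the per-benchmark metadata (a sketch only, assuming Python 3.11+ for tomllib and a working directory at the repo root; this is not how pyperformance itself loads manifests):

import tomllib

path = 'pyperformance/tests/data/user_defined_bm/bm_1/pyproject.toml'
with open(path, 'rb') as f:
    meta = tomllib.load(f)

# The pyperformance-specific settings live under [tool.pyperformance].
print(meta['tool']['pyperformance'])  # {'name': '1', 'tags': 'test', 'inherits': '..'}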
pyperformance/tests/test_compare.py
@@ -7,24 +7,7 @@
import unittest
from pyperformance import tests
-
-DATA_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data'))
-
-
-def run_cmd(cmd):
-    print("Execute: %s" % ' '.join(cmd))
-    proc = subprocess.Popen(cmd)
-    try:
-        proc.wait()
-    except:  # noqa
-        proc.kill()
-        raise
-
-    exitcode = proc.returncode
-    if exitcode:
-        sys.exit(exitcode)
+from pyperformance.tests import run_cmd, DATA_DIR


class CompareTests(unittest.TestCase):
pyperformance/tests/test_user_defined_bm.py
@@ -0,0 +1,18 @@
+import os.path
+import sys
+import unittest
+
+from pyperformance.tests import DATA_DIR, run_cmd
+
+USER_DEFINED_MANIFEST = os.path.join(DATA_DIR, 'user_defined_bm', 'MANIFEST')
+
+
+class TestBM(unittest.TestCase):
+    def test_user_defined_bm(self):
+        cmd = [sys.executable, '-m', 'pyperformance', 'run',
+               f'--manifest={USER_DEFINED_MANIFEST}']
+        run_cmd(cmd)
+
+
+if __name__ == "__main__":
+    unittest.main()
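The test shells out to the real CLI (python -m pyperformance run --manifest=...), so it exercises the whole `run` pipeline rather than internal APIs; on failure, run_cmd() exits the test process with the child's exit code. If assertions inside the test were preferred instead, a sketch along these lines would work (run_and_check is a hypothetical name, not part of the commit):

import subprocess

def run_and_check(cmd):
    # Capture output and fail the calling test with the child's stderr
    # instead of exiting the whole test process via sys.exit().
    result = subprocess.run(cmd, capture_output=True, text=True)
    assert result.returncode == 0, result.stderr
    return result.stdout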
runtests.py
@@ -52,8 +52,8 @@ def run_bench(*cmd):
    run_bench(python, '-u', script, 'venv', 'create')

    for filename in (
-        os.path.join('pyperformance', 'tests', 'data', 'py36.json'),
-        os.path.join('pyperformance', 'tests', 'data', 'mem1.json'),
+        os.path.join('pyperformance', 'tests', 'data', 'py36.json'),
+        os.path.join('pyperformance', 'tests', 'data', 'mem1.json'),
    ):
        run_cmd((python, script, 'show', filename))
@@ -78,7 +78,8 @@ def run_bench(*cmd):
def main():
    # Unit tests
    cmd = [sys.executable, '-u',
-          os.path.join('pyperformance', 'tests', 'test_compare.py')]
+          os.path.join('pyperformance', 'tests', 'test_compare.py'),
+          os.path.join('pyperformance', 'tests', 'test_user_defined_bm.py')]
    run_cmd(cmd)

    # Functional tests
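runtests.py now lists the two test files explicitly on one interpreter command line. An alternative that avoids maintaining the list by hand (a sketch only; this is not what the commit does) would be unittest discovery over the whole tests package:

import os
import sys

# Discover every test_*.py module under pyperformance/tests, with the repo
# root as the top-level directory so package imports resolve.
cmd = [sys.executable, '-u', '-m', 'unittest', 'discover',
       '-s', os.path.join('pyperformance', 'tests'),
       '-t', '.']

The resulting cmd list would then be passed to run_cmd(cmd) exactly as above.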