Skip to content

Commit 316258f

Browse files
committed
Add Evergreen automation for perf tests
1 parent f7e7345 commit 316258f

File tree

6 files changed

+188
-3
lines changed

6 files changed

+188
-3
lines changed

.evergreen/config.yml

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,62 @@ functions:
4848
args:
4949
- ./.evergreen/run-tests.sh
5050

51+
"run performance tests":
52+
- command: subprocess.exec
53+
type: test
54+
params:
55+
binary: bash
56+
working_dir: "src"
57+
include_expansions_in_env: [ "DRIVERS_TOOLS", "MONGODB_URI" ]
58+
args:
59+
- ./.evergreen/run-perf-tests.sh
60+
61+
attach benchmark test results:
62+
- command: attach.results
63+
params:
64+
file_location: src/report.json
65+
66+
send dashboard data:
67+
- command: subprocess.exec
68+
params:
69+
binary: bash
70+
args:
71+
- .evergreen/perf-submission-setup.sh
72+
working_dir: src
73+
include_expansions_in_env:
74+
- requester
75+
- revision_order_id
76+
- project_id
77+
- version_id
78+
- build_variant
79+
- parsed_order_id
80+
- task_name
81+
- task_id
82+
- execution
83+
- is_mainline
84+
type: test
85+
- command: expansions.update
86+
params:
87+
file: src/expansion.yml
88+
- command: subprocess.exec
89+
params:
90+
binary: bash
91+
args:
92+
- .evergreen/perf-submission.sh
93+
working_dir: src
94+
include_expansions_in_env:
95+
- requester
96+
- revision_order_id
97+
- project_id
98+
- version_id
99+
- build_variant
100+
- parsed_order_id
101+
- task_name
102+
- task_id
103+
- execution
104+
- is_mainline
105+
type: test
106+
51107
"teardown":
52108
- command: subprocess.exec
53109
params:
@@ -67,6 +123,12 @@ tasks:
67123
commands:
68124
- func: "run unit tests"
69125

126+
- name: perf-tests
127+
commands:
128+
- func: "run performance tests"
129+
- func: "attach benchmark test results"
130+
- func: "send dashboard data"
131+
70132
buildvariants:
71133
- name: tests-6-noauth-nossl
72134
display_name: Run Tests 6.0 NoAuth NoSSL
@@ -111,3 +173,11 @@ buildvariants:
111173
SSL: "ssl"
112174
tasks:
113175
- name: run-tests
176+
177+
- name: performance-benchmarks
178+
display_name: Performance Benchmarks
179+
run_on:
180+
- rhel90-dbx-perf-large
181+
batchtime: 10080
182+
tasks:
183+
- name: perf-tests

.evergreen/perf-submission-setup.sh

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
#!/bin/bash
# Prepare expansions consumed by the perf-submission step.
# The Evergreen "requester" expansion tells us whether this run came from a
# mainline commit (as opposed to a patch build or manual trigger).

set -eu

# shellcheck disable=SC2154
case "${requester}" in
    commit) is_mainline=true ;;
    *)      is_mainline=false ;;
esac
echo "is_mainline: ${is_mainline}" >> expansion.yml

# Patch builds prepend the username to revision_order_id and SPS does not need
# that information, so keep only the part after the last underscore.
# shellcheck disable=SC2154
echo "parsed_order_id: ${revision_order_id##*_}" >> expansion.yml

.evergreen/perf-submission.sh

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
#!/bin/bash
# Submit the benchmark results to the SPS (performance monitoring) endpoint
# and fail the task if the upload is rejected.

set -eu

# The query string is assembled from Evergreen expansions exported by the
# "send dashboard data" function in .evergreen/config.yml.
# shellcheck disable=SC2154
endpoint="https://performance-monitoring-api.corp.mongodb.com/raw_perf_results/cedar_report?project=${project_id}&version=${version_id}&variant=${build_variant}&order=${parsed_order_id}&task_name=${task_name}&task_id=${task_id}&execution=${execution}&mainline=${is_mainline}"

# -w appends a final "HTTP_STATUS:<code>" line so both the body and the
# status code can be recovered from a single curl invocation.
raw=$(curl -s -w "\nHTTP_STATUS:%{http_code}" -X 'POST' \
    "$endpoint" \
    -H 'accept: application/json' \
    -H 'Content-Type: application/json' \
    -d @results.json)

status=$(echo "$raw" | grep "HTTP_STATUS" | awk -F':' '{print $2}')
body=$(echo "$raw" | sed '/HTTP_STATUS/d')

# Anything other than 200 means the data was not successfully submitted.
if [ "$status" -ne 200 ]; then
    echo "Error: Received HTTP status $status"
    echo "Response Body: $body"
    exit 1
fi

echo "Response Body: $body"
echo "HTTP Status: $status"

.evergreen/run-perf-tests.sh

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
#!/bin/bash
# Run the django-mongodb-backend performance benchmarks on an Evergreen host.
# NOTE: shebang is /bin/bash (not /usr/bin/bash) for consistency with the
# other .evergreen scripts and portability across target distros.

set -eux

# The benchmark suite writes its raw results here; run_perf_test.py and
# tests/performance read the same variable.
export OUTPUT_FILE="results.json"

# Install django-mongodb-backend into a fresh virtualenv.
/opt/python/3.10/bin/python3 -m venv venv
. venv/bin/activate
python -m pip install -U pip
pip install -e .

# Install the Django fork and its test dependencies.
git clone --branch mongodb-5.2.x https://github.com/mongodb-forks/django django_repo
pushd django_repo/tests/
pip install -e ..
pip install -r requirements/py3.txt
popd

python run_perf_test.py

.evergreen/run_perf_test.py

Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
import json
2+
import logging
3+
import os
4+
import shlex
5+
import subprocess
6+
import sys
7+
from datetime import datetime
8+
9+
LOGGER = logging.getLogger("test")
10+
logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s")
11+
OUTPUT_FILE = os.environ.get("OUTPUT_FILE")
12+
13+
14+
def handle_perf(start_time: datetime):
    """Log the raw benchmark output and write an Evergreen report.json.

    Reads the benchmark results from ``OUTPUT_FILE`` (echoed into the task
    log for visibility) and writes a ``report.json`` summarising the run as
    a single passing result, in the shape expected by Evergreen's
    ``attach.results`` command.
    """
    finished_at = datetime.now()

    # Echo the raw benchmark numbers into the task log.
    with open(OUTPUT_FILE) as handle:  # noqa: PTH123
        raw_results = json.load(handle)
    LOGGER.info("results.json:\n%s", json.dumps(raw_results, indent=2))

    # Summarise the whole run as one PASS entry with wall-clock timing.
    summary = {
        "status": "PASS",
        "exit_code": 0,
        "test_file": "BenchMarkTests",
        "start": int(start_time.timestamp()),
        "end": int(finished_at.timestamp()),
        "elapsed": (finished_at - start_time).total_seconds(),
    }
    report = {"failures": 0, "results": [summary]}
    LOGGER.info("report.json\n%s", json.dumps(report, indent=2))

    with open("report.json", "w", newline="\n") as handle:  # noqa: PTH123
        json.dump(report, handle)
35+
36+
37+
def run_command(cmd: str | list[str], **kwargs) -> None:
    """Run *cmd* in a subprocess, exiting the interpreter on failure.

    A list is joined into one string and re-tokenised with ``shlex.split``,
    so callers may pass either a command string or a list of fragments
    (individual arguments must therefore not contain spaces — the caller in
    this file relies on ``["python manage.py test"]`` being re-split).
    Extra keyword arguments are forwarded to ``subprocess.run``;
    ``check=True`` is applied unless the caller overrides it.
    """
    if isinstance(cmd, list):
        cmd = " ".join(cmd)
    LOGGER.info("Running command '%s'...", cmd)
    kwargs.setdefault("check", True)
    try:
        subprocess.run(shlex.split(cmd), **kwargs)  # noqa: PLW1510, S603
    except subprocess.CalledProcessError as e:
        # e.output is only populated when the caller captured output;
        # without this guard a literal "None" was logged on every failure.
        if e.output:
            LOGGER.error(e.output)
        LOGGER.error(str(e))
        sys.exit(e.returncode)
    LOGGER.info("Running command '%s'... done.", cmd)
49+
50+
51+
# Drive the benchmark run: execute the Django test suite from the
# performance directory, then record timing/pass data for Evergreen.
os.chdir("tests/performance")

run_started = datetime.now()
run_command("python manage.py test")
handle_perf(run_started)

tests/performance/perftest/tests.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,9 +21,9 @@
2121
OUTPUT_FILE = os.environ.get("OUTPUT_FILE")
2222

2323
NUM_ITERATIONS = 10
24-
MIN_ITERATION_TIME = 30
25-
MAX_ITERATION_TIME = 60
26-
NUM_DOCS = 10000
24+
MIN_ITERATION_TIME = 5
25+
MAX_ITERATION_TIME = 10
26+
NUM_DOCS = 10
2727

2828
result_data: list = []
2929

0 commit comments

Comments
 (0)