replace flaky test retry utility #790

Merged
merged 8 commits on Jan 29, 2025
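In short, the flaky-test retry plugin moves from ``pytest-rerunfailures`` to ``pytest-retry``, so every ``pytest.mark.flaky`` usage switches keyword arguments accordingly. A minimal sketch of the marker change (the test names and bodies below are illustrative only, not taken from this repository; only one of the two plugins would be installed at a time):

    import pytest

    # before (pytest-rerunfailures): rerun up to 2 times, waiting 1 second between attempts
    @pytest.mark.flaky(reruns=2, reruns_delay=1)
    def test_unstable_old_marker():
        assert True

    # after (pytest-retry): the equivalent retry policy, expressed with this plugin's keyword names
    @pytest.mark.flaky(retries=2, delay=1)
    def test_unstable_new_marker():
        assert True
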
23 changes: 14 additions & 9 deletions .github/workflows/tests.yml
@@ -131,15 +131,20 @@ jobs:
- name: Run Tests
run: make stop ${{ matrix.test-case }}

# disable due to confusing and unhelpful messages created by codecov
# (see https://github.com/codecov/feedback/issues/304#issuecomment-2492675117)
# - name: Upload test results to Codecov
# if: ${{ !cancelled() && success() && matrix.test-case == 'test-coverage-only' }}
# uses: codecov/test-results-action@v1
# with:
# files: reports/coverage-junit.xml,!./cache
# flags: ${{ matrix.python-version }}
# token: ${{ secrets.CODECOV_TOKEN }}
# manually invoke report generation in case of test failure so that reports are still produced
# otherwise, they are generated automatically following a successful coverage run
- name: Handle Failed Coverage Report
if: ${{ failure() && matrix.test-case == 'test-coverage-only' }}
run: make coverage-reports
# flaky test analysis, which includes failed tests if applicable
- name: Upload test results to Codecov
if: ${{ !cancelled() && matrix.test-case == 'test-coverage-only' }}
uses: codecov/test-results-action@v1
with:
files: reports/coverage-junit.xml,!./cache
flags: ${{ matrix.python-version }}
token: ${{ secrets.CODECOV_TOKEN }}
# coverage test analysis
- name: Upload coverage report
uses: codecov/codecov-action@v2
if: ${{ success() && matrix.test-case == 'test-coverage-only' }}
3 changes: 2 additions & 1 deletion CHANGES.rst
@@ -16,7 +16,8 @@ Changes:

Fixes:
------
- No change.
- Fix ``weaver.cli.RequestAuthHandler`` and its derived classes erroneously invoking the ``request_auth`` method when
  both the ``url`` and ``token`` are omitted, leading to an invalid ``requests`` call under ``weaver.utils.request_extra``.

.. _changes_6.1.1:

34 changes: 23 additions & 11 deletions Makefile
@@ -381,6 +381,7 @@ ifeq ($(filter $(TEST_VERBOSITY),"--capture"),)
TEST_VERBOSITY := $(TEST_VERBOSITY) --capture tee-sys
endif
endif
TEST_XARGS ?=

# autogen tests variants with pre-install of dependencies using the '-only' target references
TESTS := unit func cli workflow online offline no-tb14 spec coverage
@@ -397,56 +398,56 @@ test-all: install-dev test-only ## run all tests (including long running tests)
.PHONY: test-only
test-only: mkdir-reports ## run all tests but without prior validation of installed dependencies
@echo "Running all tests (including slow and online tests)..."
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) \
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) $(TEST_XARGS) \
--junitxml "$(REPORTS_DIR)/test-results.xml"'

.PHONY: test-unit-only
test-unit-only: mkdir-reports ## run unit tests (skip long running and online tests)
@echo "Running unit tests (skip slow and online tests)..."
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) \
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) $(TEST_XARGS) \
-m "not slow and not online and not functional" --junitxml "$(REPORTS_DIR)/test-results.xml"'

.PHONY: test-func-only
test-func-only: mkdir-reports ## run functional tests (online and usage specific)
@echo "Running functional tests..."
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) \
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) $(TEST_XARGS) \
-m "functional" --junitxml "$(REPORTS_DIR)/test-results.xml"'

.PHONY: test-cli-only
test-cli-only: mkdir-reports ## run WeaverClient and CLI tests
@echo "Running CLI tests..."
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) \
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) $(TEST_XARGS) \
-m "cli" --junitxml "$(REPORTS_DIR)/test-results.xml"'

.PHONY: test-workflow-only
test-workflow-only: mkdir-reports ## run EMS workflow End-2-End tests
@echo "Running workflow tests..."
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) \
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) $(TEST_XARGS) \
-m "workflow" --junitxml "$(REPORTS_DIR)/test-results.xml"'

.PHONY: test-online-only
test-online-only: mkdir-reports ## run online tests (running instance required)
@echo "Running online tests (running instance required)..."
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) \
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) $(TEST_XARGS) \
-m "online" --junitxml "$(REPORTS_DIR)/test-results.xml"'

.PHONY: test-offline-only
test-offline-only: mkdir-reports ## run offline tests (not marked as online)
@echo "Running offline tests (not marked as online)..."
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) \
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) $(TEST_XARGS) \
-m "not online" --junitxml "$(REPORTS_DIR)/test-results.xml"'

.PHONY: test-no-tb14-only
test-no-tb14-only: mkdir-reports ## run all tests except ones marked for 'Testbed-14'
@echo "Running all tests except ones marked for 'Testbed-14'..."
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) \
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) $(TEST_XARGS) \
-m "not testbed14" --junitxml "$(REPORTS_DIR)/test-results.xml"'

.PHONY: test-spec-only
test-spec-only: mkdir-reports ## run tests with custom specification (pytest format) [make SPEC='<spec>' test-spec]
@echo "Running custom tests from input specification..."
@[ "${SPEC}" ] || ( echo ">> 'SPEC' is not set"; exit 1 )
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) \
@bash -c '$(CONDA_CMD) pytest tests $(TEST_VERBOSITY) $(TEST_XARGS) \
-k "${SPEC}" --junitxml "$(REPORTS_DIR)/test-results.xml"'

.PHONY: test-smoke
@@ -455,11 +456,22 @@ test-smoke: docker-test ## alias to 'docker-test' executing smoke test of bu
.PHONY: test-docker
test-docker: docker-test ## alias to 'docker-test' executing smoke test of built docker images

# NOTE:
# if any test fails during the coverage run, the pytest exit code is propagated so that the failure is reported
# this early exit also skips the generation of the coverage analysis reports
# if coverage reports are still needed despite failed tests, call the 'coverage-reports' target separately
.PHONY: test-coverage-only
test-coverage-only: mkdir-reports ## run all tests using coverage analysis
test-coverage-only: mkdir-reports coverage-run coverage-reports ## run all tests with coverage analysis and reports

.PHONY: coverage-run
coverage-run: mkdir-reports ## run all tests using coverage analysis
@echo "Running coverage analysis..."
@bash -c '$(CONDA_CMD) coverage run --rcfile="$(APP_ROOT)/setup.cfg" \
"$$(which pytest)" "$(APP_ROOT)/tests" --junitxml="$(REPORTS_DIR)/coverage-junit.xml" || true'
"$$(which pytest)" "$(APP_ROOT)/tests" $(TEST_XARGS) --junitxml="$(REPORTS_DIR)/coverage-junit.xml"'

.PHONY: coverage-reports
coverage-reports: mkdir-reports ## generate coverage reports
@echo "Generate coverage reports..."
@bash -c '$(CONDA_CMD) coverage xml --rcfile="$(APP_ROOT)/setup.cfg" -i -o "$(REPORTS_DIR)/coverage.xml"'
@bash -c '$(CONDA_CMD) coverage report --rcfile="$(APP_ROOT)/setup.cfg" -i -m'
@bash -c '$(CONDA_CMD) coverage html --rcfile="$(APP_ROOT)/setup.cfg" -d "$(REPORTS_DIR)/coverage"'
1 change: 1 addition & 0 deletions docs/source/conf.py
@@ -460,6 +460,7 @@ def doc_redirect_include(file_path):
"data-types", # https://spec.openapis.org/oas/v3.1.0
"defusedxmllxml", # https://github.com/tiran/defusedxml/tree/main
"ncml-to-stac", # https://github.com/crim-ca/ncml2stac/tree/main#ncml-to-stac
"issuecomment-[0-9]+", # links to specific GitHub comments
]
linkcheck_request_headers = {
"https://github.com/": {
11 changes: 7 additions & 4 deletions requirements-dev.txt
@@ -4,7 +4,10 @@
# pylint>=2.5 requires astroid>=2.4
# install/update sometimes fails randomly, so enforce it
astroid
bandit
# pin bandit, many issues in latest versions
# - https://github.com/PyCQA/bandit/issues/1226
# - https://github.com/PyCQA/bandit/issues/1227
bandit<1.8
bump2version
codacy-coverage
coverage
@@ -21,11 +24,11 @@ mypy
parameterized
path!=16.12.0,!=17.0.0 # patch pytest-shutil (https://github.com/man-group/pytest-plugins/issues/224)
pluggy>=0.7
# FIXME: bad interpolation of 'setup.cfg' for pytest 'log_format' (https://github.com/pytest-dev/pytest/issues/10019)
pytest<7
pytest
pytest-httpserver>=1.0.7 # support werkzeug>=3
pytest-server-fixtures
pytest-rerunfailures
#pytest-rerunfailures
pytest-retry
pydocstyle
# FIXME: pylint-quotes failing with pylint==3 (https://github.com/edaniszewski/pylint-quotes/issues/29)
# FIXME: use temporary unofficial version working with pylint>3 (https://github.com/edaniszewski/pylint-quotes/pull/30)
10 changes: 8 additions & 2 deletions tests/functional/test_cli.py
@@ -15,6 +15,7 @@

import mock
import pytest
import responses
import yaml
from owslib.ows import DEFAULT_OWS_NAMESPACE
from owslib.wps import WPSException
@@ -562,7 +563,7 @@ def test_execute_inputs_invalid(self):
]:
self.run_execute_inputs_schema_variant(invalid_inputs_schema, expect_success=False)

@pytest.mark.flaky(reruns=2, reruns_delay=1)
@pytest.mark.flaky(retries=2, delay=1)
def test_execute_manual_monitor_status_and_download_results(self):
"""
Test a typical case of :term:`Job` execution, result retrieval and download, but with manual monitoring.
@@ -1097,7 +1098,7 @@ def add_docker_pull_ref(cwl, ref):
cwl["requirements"][CWL_REQUIREMENT_APP_DOCKER] = {"dockerPull": ref}
return cwl

@pytest.mark.flaky(reruns=2, reruns_delay=1)
@pytest.mark.flaky(retries=2, delay=1)
def test_deploy_docker_auth_username_password_valid(self):
"""
Test that username and password arguments can be provided simultaneously for docker login.
@@ -1869,13 +1870,18 @@ def test_execute_subscriber_options(self):
"""
proc = self.test_process["Echo"]
with contextlib.ExitStack() as stack_exec:
req_mock = stack_exec.enter_context(responses.RequestsMock())
for mock_exec_proc in mocked_execute_celery():
stack_exec.enter_context(mock_exec_proc)

test_email_started = "[email protected]"
test_email_failed = "[email protected]"
test_callback_started = "https://server.com/started"
test_callback_success = "https://server.com/success"

req_mock.add_callback(responses.POST, test_callback_started, callback=lambda _: (200, {}, ""))
req_mock.add_callback(responses.POST, test_callback_success, callback=lambda _: (200, {}, ""))

lines = mocked_sub_requests(
self.app, run_command,
[
2 changes: 1 addition & 1 deletion tests/functional/test_wps_package.py
@@ -2671,7 +2671,7 @@ def test_execute_with_browsable_directory(self):
assert all(file.startswith(cwl_stage_dir) for file in output_listing)
assert all(any(file.endswith(dir_file) for file in output_listing) for dir_file in expect_http_files)

@pytest.mark.flaky(reruns=2, reruns_delay=1)
@pytest.mark.flaky(retries=2, delay=1)
def test_execute_with_json_listing_directory(self):
"""
Test that HTTP returning JSON list of directory contents retrieves children files for the process.
2 changes: 1 addition & 1 deletion tests/processes/test_wps_package.py
@@ -162,7 +162,7 @@ def __init__(self, shell_command, arguments=None, with_message_input=True):
super(MockProcess, self).__init__(body)


@pytest.mark.flaky(reruns=2, reruns_delay=1)
@pytest.mark.flaky(retries=2, delay=1)
def test_stdout_stderr_logging_for_commandline_tool_success(caplog):
"""
Execute a process and assert that stdout is correctly logged to log file upon successful process execution.
33 changes: 29 additions & 4 deletions tests/test_cli.py
@@ -27,6 +27,7 @@
WeaverClient,
main as weaver_cli
)
from weaver.exceptions import AuthenticationError
from weaver.formats import ContentEncoding, ContentType


@@ -408,7 +409,7 @@ def test_auth_handler_basic():

def test_auth_handler_bearer():
req = WebTestRequest({})
auth = BearerAuthHandler(identity=str(uuid.uuid4()))
auth = BearerAuthHandler(identity=str(uuid.uuid4()), url="https://example.com")
token = str(uuid.uuid4())
with mock.patch(
"requests.Session.request",
@@ -435,7 +436,7 @@ def test_auth_handler_bearer_explicit_token_matches_request_token():
req_request_token = WebTestRequest({})
token = str(uuid.uuid4())
auth_explicit_token = BearerAuthHandler(token=token)
auth_request_token = BearerAuthHandler(identity=str(uuid.uuid4()))
auth_request_token = BearerAuthHandler(identity=str(uuid.uuid4()), url="https://example.com")
with mock.patch(
"requests.Session.request",
side_effect=lambda *_, **__: mocked_auth_response("access_token", token)
@@ -450,7 +451,7 @@

def test_auth_handler_cookie():
req = WebTestRequest({})
auth = CookieAuthHandler(identity=str(uuid.uuid4()))
auth = CookieAuthHandler(identity=str(uuid.uuid4()), url="https://example.com")
token = str(uuid.uuid4())
with mock.patch(
"requests.Session.request",
@@ -506,7 +507,7 @@ def test_auth_handler_cookie_explicit_token_matches_request_token():
req_request_token = WebTestRequest({})
token = str(uuid.uuid4())
auth_explicit_token = CookieAuthHandler(token=token)
auth_request_token = CookieAuthHandler(identity=str(uuid.uuid4()))
auth_request_token = CookieAuthHandler(identity=str(uuid.uuid4()), url="https://example.com")
with mock.patch(
"requests.Session.request",
side_effect=lambda *_, **__: mocked_auth_response("access_token", token)
@@ -519,6 +520,30 @@
assert resp_explicit_token.headers["Cookie"] == resp_request_token.headers["Cookie"]


def test_auth_request_handler_no_url_or_token_init():
with pytest.raises(AuthenticationError):
BearerAuthHandler(identity=str(uuid.uuid4()))

try:
BearerAuthHandler(token=str(uuid.uuid4())) # OK
BearerAuthHandler(url="https://example.com") # OK
except Exception as exc:
        pytest.fail(f"Expected no init error from valid combinations. Got [{exc}]")


def test_auth_request_handler_no_url_ignored_request():
req = WebTestRequest({})
auth = BearerAuthHandler(
identity=str(uuid.uuid4()),
url="https://example.com", # URL must be passed to avoid error
)
auth.url = None # reset after init check
with mock.patch("requests.Session.request") as mock_request:
resp = auth(req) # type: ignore
mock_request.assert_not_called()
assert not resp.headers, "No headers should have been added since URL could not be resolved."


def test_upload_file_not_found():
with tempfile.NamedTemporaryFile() as tmp_file_deleted:
pass # delete on close
2 changes: 1 addition & 1 deletion tests/test_utils.py
@@ -655,7 +655,7 @@ def mock_sleep(delay):
assert all(called == expect for called, expect in zip(sleep_counter["called_with"], intervals))


@pytest.mark.flaky(reruns=2, reruns_delay=1)
@pytest.mark.flaky(retries=2, delay=1)
def test_request_extra_zero_values():
"""
Test that zero-value ``retries`` and ``backoff`` are not ignored.
30 changes: 16 additions & 14 deletions tests/wps_restapi/test_status_codes.py
@@ -2,6 +2,7 @@
import uuid

import pytest
from parameterized import parameterized

from tests.utils import get_test_weaver_app, setup_config_with_mongodb
from weaver.formats import ContentType
@@ -45,23 +46,24 @@ class StatusCodeTestCase(unittest.TestCase):

headers = {"Accept": ContentType.APP_JSON}

def setUp(self):
@classmethod
def setUpClass(cls):
config = setup_config_with_mongodb()
self.testapp = get_test_weaver_app(config)
cls.testapp = get_test_weaver_app(config)

def test_200(self):
for uri in TEST_PUBLIC_ROUTES:
resp = self.testapp.get(uri, expect_errors=True, headers=self.headers)
self.assertEqual(200, resp.status_code, f"route {uri} did not return 200")
@parameterized.expand(TEST_PUBLIC_ROUTES)
def test_200(self, uri):
resp = self.testapp.get(uri, expect_errors=True, headers=self.headers)
self.assertEqual(200, resp.status_code, f"route {uri} did not return 200")

@pytest.mark.xfail(reason="Not working if not behind proxy. Protected implementation to be done.")
@parameterized.expand(TEST_FORBIDDEN_ROUTES)
@unittest.expectedFailure
def test_401(self):
for uri in TEST_FORBIDDEN_ROUTES:
resp = self.testapp.get(uri, expect_errors=True, headers=self.headers)
self.assertEqual(401, resp.status_code, f"route {uri} did not return 401")
def test_401(self, uri):
resp = self.testapp.get(uri, expect_errors=True, headers=self.headers)
self.assertEqual(401, resp.status_code, f"route {uri} did not return 401")

def test_404(self):
for uri in TEST_NOTFOUND_ROUTES:
resp = self.testapp.get(uri, expect_errors=True, headers=self.headers)
self.assertEqual(404, resp.status_code, f"route {uri} did not return 404")
@parameterized.expand(TEST_NOTFOUND_ROUTES)
def test_404(self, uri):
resp = self.testapp.get(uri, expect_errors=True, headers=self.headers)
self.assertEqual(404, resp.status_code, f"route {uri} did not return 404")