From 2acb1dfe9982efc346bab50f53c46b1900b9070a Mon Sep 17 00:00:00 2001 From: Duncan Watson Date: Fri, 24 Nov 2023 13:45:46 +0000 Subject: [PATCH 1/5] EES-4117 - enabling UI test retries in the pipeline. Updated run_tests.py script to take a "rerun-attempts" argument and enable X number of failing suite retries before stopping. Updated pipeline to use new "rerun-attempts" option. Updated the tracking of failing test suites to be file-based rather than variable-based to allow for reruns using consistent set of failing suites, and fed this into Slack reports also. Updated the use of SeleniumLibrary and element_finder to refetch to allow fresh values to be obtained in subsequent reruns. --- .gitignore | 1 + azure-pipelines-ui-tests.dfe.yml | 40 +-- tests/robot-tests/run_tests.py | 320 ++++++++++-------- .../robot-tests/scripts/run_tests_pipeline.py | 13 +- .../robot-tests/tests/libs/admin-utilities.py | 26 +- tests/robot-tests/tests/libs/fail_fast.py | 26 +- .../robot-tests/tests/libs/file_operations.py | 7 +- .../tests/libs/public-utilities.py | 40 ++- .../tests/libs/selenium_elements.py | 14 + tests/robot-tests/tests/libs/slack.py | 43 +-- tests/robot-tests/tests/libs/utilities.py | 82 ++--- tests/robot-tests/tests/libs/visual.py | 31 +- 12 files changed, 329 insertions(+), 314 deletions(-) create mode 100644 tests/robot-tests/tests/libs/selenium_elements.py diff --git a/.gitignore b/.gitignore index 070be9a1033..a8b41c3524f 100644 --- a/.gitignore +++ b/.gitignore @@ -40,6 +40,7 @@ src/explore-education-statistics-frontend/*.xml !/tests/robot-tests/.env.example /tests/robot-tests/IDENTITY_* /tests/robot-tests/.pabotsuitenames +/tests/robot-tests/.failing_suites /tests/robot-tests/geckodriver.log /tests/robot-tests/debug.log **/*/jwt diff --git a/azure-pipelines-ui-tests.dfe.yml b/azure-pipelines-ui-tests.dfe.yml index b3bf0489a31..8c617990c29 100644 --- a/azure-pipelines-ui-tests.dfe.yml +++ b/azure-pipelines-ui-tests.dfe.yml @@ -32,7 +32,6 @@ jobs: - task: 
UsePythonVersion@0 displayName: Use Python 3.10 - # retryCountOnTaskFailure: 2 timeoutInMinutes: 5 inputs: versionSpec: 3.10 @@ -49,19 +48,12 @@ jobs: displayName: Public UI tests inputs: scriptPath: tests/robot-tests/scripts/run_tests_pipeline.py - arguments: --admin-pass "test" --analyst-pass "test" --expiredinvite-pass "test" --env "dev" --file "tests/general_public/" --processes 2 + arguments: --admin-pass "test" --analyst-pass "test" --expiredinvite-pass "test" --env "dev" --file "tests/general_public/" --processes 2 --rerun-attempts 3 workingDirectory: tests/robot-tests env: SLACK_BOT_TOKEN: $(ees-test-SLACK-BOT-TOKEN) SLACK_TEST_REPORT_WEBHOOK_URL: $(ees-test-SLACK-TEST-REPORT-WEBHOOK-URL) - #- task: PythonScript@0 - # displayName: Public UI tests - rerun failed suites - # condition: not(succeeded()) - # inputs: - # scriptPath: tests/robot-tests/scripts/run_tests_pipeline.py - # arguments: --admin-pass "test" --analyst-pass "test" --expiredinvite-pass "test" --env "dev" --file "tests/general_public/" --processes 2 --rerun-failed-suites - # workingDirectory: tests/robot-tests - task: PublishTestResults@2 displayName: Publish Test Results @@ -92,7 +84,6 @@ jobs: - task: UsePythonVersion@0 displayName: Use Python 3.10 - # retryCountOnTaskFailure: 2 timeoutInMinutes: 5 inputs: versionSpec: 3.10 @@ -110,20 +101,13 @@ jobs: condition: succeededOrFailed() inputs: scriptPath: tests/robot-tests/scripts/run_tests_pipeline.py - arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin_and_public_2" --processes 2 + arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin_and_public_2" --processes 2 --rerun-attempts 3 # The magic incantation '"$(variable)"'was added by Mark to resolve an issue with 
Analyst password that contained ampersands. workingDirectory: tests/robot-tests env: SLACK_BOT_TOKEN: $(ees-test-SLACK-BOT-TOKEN) SLACK_TEST_REPORT_WEBHOOK_URL: $(ees-test-SLACK-TEST-REPORT-WEBHOOK-URL) - #- task: PythonScript@0 - # displayName: 'Publish release and amend UI tests - rerun failed suites' - # condition: not(succeeded()) - # inputs: - # scriptPath: tests/robot-tests/scripts/run_tests_pipeline.py - # arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin_and_public_2" --processes 2 --rerun-failed-suites - # workingDirectory: tests/robot-tests - task: PublishTestResults@2 displayName: Publish Test Results @@ -153,7 +137,6 @@ jobs: - task: UsePythonVersion@0 displayName: Use Python 3.10 - # retryCountOnTaskFailure: 2 timeoutInMinutes: 5 inputs: versionSpec: 3.10 @@ -171,19 +154,12 @@ jobs: condition: succeededOrFailed() inputs: scriptPath: tests/robot-tests/scripts/run_tests_pipeline.py - arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin" --processes 2 + arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin" --processes 2 --rerun-attempts 3 workingDirectory: tests/robot-tests env: SLACK_BOT_TOKEN: $(ees-test-SLACK-BOT-TOKEN) SLACK_TEST_REPORT_WEBHOOK_URL: $(ees-test-SLACK-TEST-REPORT-WEBHOOK-URL) - #- task: PythonScript@0 - # displayName: Admin UI tests - rerun failed suites - # condition: not(succeeded()) - # inputs: - # scriptPath: tests/robot-tests/scripts/run_tests_pipeline.py - # arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass 
'"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin" --processes 2 --rerun-failed-suites - # workingDirectory: tests/robot-tests - task: PublishTestResults@2 displayName: Publish Test Results @@ -215,7 +191,6 @@ jobs: - task: UsePythonVersion@0 displayName: Use Python 3.10 - # retryCountOnTaskFailure: 2 timeoutInMinutes: 5 inputs: versionSpec: 3.10 @@ -232,19 +207,12 @@ jobs: displayName: Admin public UI tests inputs: scriptPath: tests/robot-tests/scripts/run_tests_pipeline.py - arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin_and_public" --processes 2 + arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin_and_public" --processes 2 --rerun-attempts 3 workingDirectory: tests/robot-tests env: SLACK_BOT_TOKEN: $(ees-test-SLACK-BOT-TOKEN) SLACK_TEST_REPORT_WEBHOOK_URL: $(ees-test-SLACK-TEST-REPORT-WEBHOOK-URL) - #- task: PythonScript@0 - # displayName: Admin public UI tests - rerun failed suites - # condition: not(succeeded()) - # inputs: - # scriptPath: tests/robot-tests/scripts/run_tests_pipeline.py - # arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin_and_public" --processes 2 --rerun-failed-suites - # workingDirectory: tests/robot-tests - task: PublishTestResults@2 displayName: Publish Test Results diff --git a/tests/robot-tests/run_tests.py b/tests/robot-tests/run_tests.py index 51580bb6419..37618264570 100755 --- a/tests/robot-tests/run_tests.py +++ b/tests/robot-tests/run_tests.py @@ -17,15 +17,19 @@ import requests from dotenv import load_dotenv -from pabot.pabot import main as pabot_run_cli +from pabot.pabot import 
main_program as pabot_run_cli from robot import rebot_cli as robot_rebot_cli from robot import run_cli as robot_run_cli from scripts.get_webdriver import get_webdriver from tests.libs.create_emulator_release_files import ReleaseFilesGenerator +from tests.libs.fail_fast import failing_suites_filename from tests.libs.logger import get_logger from tests.libs.setup_auth_variables import setup_auth_variables from tests.libs.slack import SlackService +pabot_suite_names_filename = ".pabotsuitenames" +results_foldername = "test-results" + current_dir = Path(__file__).absolute().parent os.chdir(current_dir) @@ -117,6 +121,7 @@ action="store_true", help="rerun failed test suites and merge results into original run results", ) +parser.add_argument("--rerun-attempts", dest="rerun_attempts", type=int, default=0, help="Number of rerun attempts") parser.add_argument( "--print-keywords", dest="print_keywords", @@ -200,48 +205,6 @@ # Install chromedriver and add it to PATH get_webdriver(args.chromedriver_version or None) -output_file = "rerun.xml" if args.rerun_failed_tests or args.rerun_failed_suites else "output.xml" - - -# Set robotArgs -robotArgs = [ - "--outputdir", - "test-results/", - "--output", - output_file, - "--exclude", - "Failing", - "--exclude", - "UnderConstruction", - "--exclude", - "BootstrapData", - "--exclude", - "VisualTesting", -] - -robotArgs += ["-v", f"timeout:{os.getenv('TIMEOUT')}", "-v", f"implicit_wait:{os.getenv('IMPLICIT_WAIT')}"] - -if args.fail_fast: - robotArgs += ["--exitonfailure"] - -if args.rerun_failed_tests: - robotArgs += ["--rerunfailed", "test-results/output.xml"] - -if args.rerun_failed_suites: - robotArgs += ["--rerunfailedsuites", "test-results/output.xml"] - -if args.tags: - robotArgs += ["--include", args.tags] - -if args.print_keywords: - robotArgs += ["--listener", "listeners/KeywordListener.py"] - -if args.ci: - robotArgs += ["--xunit", "xunit"] - # NOTE(mark): Ensure secrets aren't visible in CI logs/reports - robotArgs += 
["--removekeywords", "name:operatingsystem.environment variable should be set"] - robotArgs += ["--removekeywords", "name:common.user goes to url"] # To hide basic auth credentials - def admin_request(method, endpoint, body=None): assert method and endpoint @@ -301,10 +264,28 @@ def create_test_theme(): return admin_request("POST", "/api/themes", {"title": "Test theme", "summary": "Test theme summary"}) -def create_test_topic(): +def create_test_topic(run_id: str): + setup_authentication() + + if args.env in ["local", "dev"]: + get_themes_resp = get_test_themes() + test_theme_id = None + test_theme_name = "Test theme" + + for theme in get_themes_resp.json(): + if theme["title"] == test_theme_name: + test_theme_id = theme["id"] + break + if not test_theme_id: + create_theme_resp = create_test_theme() + test_theme_id = create_theme_resp.json()["id"] + + os.environ["TEST_THEME_NAME"] = test_theme_name + os.environ["TEST_THEME_ID"] = test_theme_id + assert os.getenv("TEST_THEME_ID") is not None - topic_name = f'UI test topic {os.getenv("RUN_IDENTIFIER")}' + topic_name = f"UI test topic {run_id}" resp = admin_request("POST", "/api/topics", {"title": topic_name, "themeId": os.getenv("TEST_THEME_ID")}) os.environ["TEST_TOPIC_NAME"] = topic_name @@ -338,118 +319,183 @@ def setup_authentication(clear_existing=False): ) -# Auth not required with general_public tests -if args.tests and "general_public" not in args.tests: - setup_authentication() +def create_robot_arguments(rerunning_failed: bool) -> []: + robot_args = [ + "--outputdir", + "test-results/", + "--exclude", + "Failing", + "--exclude", + "UnderConstruction", + "--exclude", + "BootstrapData", + "--exclude", + "VisualTesting", + ] + robot_args += ["-v", f"timeout:{os.getenv('TIMEOUT')}", "-v", f"implicit_wait:{os.getenv('IMPLICIT_WAIT')}"] + if args.fail_fast: + robot_args += ["--exitonfailure"] + if args.tags: + robot_args += ["--include", args.tags] + if args.print_keywords: + robot_args += ["--listener", 
"listeners/KeywordListener.py"] + if args.ci: + robot_args += ["--xunit", "xunit"] + # NOTE(mark): Ensure secrets aren't visible in CI logs/reports + robot_args += ["--removekeywords", "name:operatingsystem.environment variable should be set"] + robot_args += ["--removekeywords", "name:common.user goes to url"] # To hide basic auth credentials + if args.env == "local": + robot_args += ["--include", "Local"] + robot_args += ["--exclude", "NotAgainstLocal"] + # seed Azure storage emulator release files + generator = ReleaseFilesGenerator() + generator.create_public_release_files() + generator.create_private_release_files() + if args.env == "dev": + robot_args += ["--include", "Dev"] + robot_args += ["--exclude", "NotAgainstDev"] + if args.env == "test": + robot_args += ["--include", "Test", "--exclude", "NotAgainstTest", "--exclude", "AltersData"] + # fmt off + if args.env == "preprod": + robot_args += ["--include", "Preprod", "--exclude", "AltersData", "--exclude", "NotAgainstPreProd"] + # fmt on + if args.env == "prod": + robot_args += ["--include", "Prod", "--exclude", "AltersData", "--exclude", "NotAgainstProd"] + if args.visual: + robot_args += ["-v", "headless:0"] + else: + robot_args += ["-v", "headless:1"] + if os.getenv("RELEASE_COMPLETE_WAIT"): + robot_args += ["-v", f"release_complete_wait:{os.getenv('RELEASE_COMPLETE_WAIT')}"] + if os.getenv("FAIL_TEST_SUITES_FAST"): + robot_args += ["-v", f"FAIL_TEST_SUITES_FAST:{os.getenv('FAIL_TEST_SUITES_FAST')}"] + if args.prompt_to_continue: + robot_args += ["-v", "prompt_to_continue_on_failure:1"] + if args.debug: + robot_args += ["--loglevel", "DEBUG"] + robot_args += ["-v", "browser:" + args.browser] + # We want to add arguments on the first rerun attempt, but on subsequent attempts, we just want + # to change rerunfailedsuites xml file we use + if rerunning_failed: + robot_args += ["--rerunfailedsuites", f"test-results/output.xml", "--output", "rerun.xml"] + else: + robot_args += ["--output", "output.xml"] - # 
Tests that alter data only occur on local and dev environments - if args.env in ["local", "dev"]: - # add randomness to prevent multiple simultaneous run_tests.py generating the same runIdentifier value - randomStr = "".join([random.choice(string.ascii_lowercase + string.digits) for n in range(6)]) - runIdentifier = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S-" + randomStr) + robot_args += [args.tests] - os.environ["RUN_IDENTIFIER"] = runIdentifier - logger.info(f"Starting tests with RUN_IDENTIFIER: {runIdentifier}") + return robot_args - get_themes_resp = get_test_themes() - test_theme_id = None - test_theme_name = "Test theme" - for theme in get_themes_resp.json(): - if theme["title"] == test_theme_name: - test_theme_id = theme["id"] - break - if not test_theme_id: - create_theme_resp = create_test_theme() - test_theme_id = create_theme_resp.json()["id"] +def get_failing_suites() -> []: + if Path(failing_suites_filename).exists(): + return open(failing_suites_filename, "r").readlines() + return [] - os.environ["TEST_THEME_NAME"] = test_theme_name - os.environ["TEST_THEME_ID"] = test_theme_id - create_test_topic() - -if args.env == "local": - robotArgs += ["--include", "Local"] - robotArgs += ["--exclude", "NotAgainstLocal"] - # seed Azure storage emulator release files - generator = ReleaseFilesGenerator() - generator.create_public_release_files() - generator.create_private_release_files() - -if args.env == "dev": - robotArgs += ["--include", "Dev"] - robotArgs += ["--exclude", "NotAgainstDev"] - -if args.env == "test": - robotArgs += ["--include", "Test", "--exclude", "NotAgainstTest", "--exclude", "AltersData"] -# fmt off -if args.env == "preprod": - robotArgs += ["--include", "Preprod", "--exclude", "AltersData", "--exclude", "NotAgainstPreProd"] -# fmt on -if args.env == "prod": - robotArgs += ["--include", "Prod", "--exclude", "AltersData", "--exclude", "NotAgainstProd"] - -if args.visual: - robotArgs += ["-v", "headless:0"] -else: - robotArgs += 
["-v", "headless:1"] +if not os.path.exists("test-results/downloads"): + os.makedirs("test-results/downloads") -if os.getenv("RELEASE_COMPLETE_WAIT"): - robotArgs += ["-v", f"release_complete_wait:{os.getenv('RELEASE_COMPLETE_WAIT')}"] -if os.getenv("FAIL_TEST_SUITES_FAST"): - robotArgs += ["-v", f"FAIL_TEST_SUITES_FAST:{os.getenv('FAIL_TEST_SUITES_FAST')}"] +def create_run_identifier(): + # Add randomness to prevent multiple simultaneous run_tests.py generating the same run_identifier value + random_str = "".join([random.choice(string.ascii_lowercase + string.digits) for n in range(6)]) + return datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S-" + random_str) -if args.prompt_to_continue: - robotArgs += ["-v", "prompt_to_continue_on_failure:1"] -if args.debug: - robotArgs += ["--loglevel", "DEBUG"] +def merge_test_reports(): + merge_args = [ + "--outputdir", + "test-results/", + "-o", + "output.xml", + "--prerebotmodifier", + "report-modifiers/CheckForAtLeastOnePassingRunPrerebotModifier.py", + "--merge", + "test-results/output.xml", + "test-results/rerun.xml", + ] + robot_rebot_cli(merge_args, exit=False) -robotArgs += ["-v", "browser:" + args.browser] -robotArgs += [args.tests] +def clear_files_before_test_run(rerunning_failures: bool): + # Remove any existing test results if running from scratch. Leave in place if re-running failures + # as we'll need the old results to merge in with the rerun results. + if not rerunning_failures and Path(results_foldername).exists(): + shutil.rmtree(results_foldername) -# Remove any existing test results if running from scratch -if not args.rerun_failed_tests and not args.rerun_failed_suites and Path("test-results").exists(): - shutil.rmtree("test-results") + # Remove any prior failing suites so the new test run is not marking any running test suites as + # failed already. 
+ if Path(failing_suites_filename).exists(): + os.remove(failing_suites_filename) -if not os.path.exists("test-results/downloads"): - os.makedirs("test-results/downloads") -try: - # Run tests + # If running with Pabot, remove any existing Pabot suites file as a pre-existing one can otherwise + # cause single suites to run multiple times in parallel. + if Path(pabot_suite_names_filename).exists(): + os.remove(pabot_suite_names_filename) + + +def run_tests(rerunning_failures: bool): + logger.info(f"Starting tests with RUN_IDENTIFIER: {run_identifier}") if args.interp == "robot": - robot_run_cli(robotArgs) + robot_run_cli(create_robot_arguments(rerunning_failures), exit=False) elif args.interp == "pabot": - if args.processes: - robotArgs = ["--processes", int(args.processes)] + robotArgs + pabot_run_cli(create_robot_arguments(rerunning_failures)) - pabot_run_cli(robotArgs) + +test_run_index = -1 + +try: + # Run tests + while args.rerun_attempts is None or test_run_index < args.rerun_attempts: + test_run_index += 1 + + rerunning_failed_suites = args.rerun_failed_suites or test_run_index > 0 + + # Perform any cleanup before the test run. + clear_files_before_test_run(rerunning_failed_suites) + + # Create a unique run identifier so that this test run's data will be unique. + run_identifier = create_run_identifier() + os.environ["RUN_IDENTIFIER"] = run_identifier + + # Create a Test Topic under which all of this test run's data will be created. + needs_test_topic = args.tests and "general_public" not in args.tests + + if needs_test_topic: + create_test_topic(run_identifier) + + # Run the tests. + run_tests(rerunning_failed_suites) + + # If we're rerunning failures, merge the former run's results with this run's + # results. + if rerunning_failed_suites: + merge_test_reports() + + # Tear down any data created by this test run unless we've disabled teardown. 
+ if needs_test_topic and not args.disable_teardown: + logger.info("Tearing down tests...") + delete_test_topic() + + # If all tests passed, return early. + if not get_failing_suites(): + break finally: - if not args.disable_teardown: - logger.info("Tearing down tests...") - delete_test_topic() - - if args.rerun_failed_tests or args.rerun_failed_suites: - logger.info("Combining rerun test results with original test results") - merge_options = [ - "--outputdir", - "test-results/", - "-o", - "output.xml", - "--prerebotmodifier", - "report-modifiers/CheckForAtLeastOnePassingRunPrerebotModifier.py", - "--merge", - "test-results/output.xml", - "test-results/rerun.xml", - ] - robot_rebot_cli(merge_options, exit=False) - - logger.info(f"\nLog available at: file://{os.getcwd()}{os.sep}test-results{os.sep}log.html") + logger.info(f"Log available at: file://{os.getcwd()}{os.sep}test-results{os.sep}log.html") logger.info(f"Report available at: file://{os.getcwd()}{os.sep}test-results{os.sep}report.html") - logger.info("\nTests finished!") + + logger.info(f"Number of test runs: {test_run_index + 1}") + + failing_suites = get_failing_suites() + + if failing_suites: + logger.info(f"Failing suites:") + [logger.info(r" * file://" + suite) for suite in failing_suites] + else: + logger.info("\nAll tests passed!") if args.enable_slack_notifications: slack_service = SlackService() - slack_service.send_test_report(args.env, args.tests) + slack_service.send_test_report(args.env, args.tests, failing_suites, test_run_index) diff --git a/tests/robot-tests/scripts/run_tests_pipeline.py b/tests/robot-tests/scripts/run_tests_pipeline.py index b703e46db84..701a24d3642 100644 --- a/tests/robot-tests/scripts/run_tests_pipeline.py +++ b/tests/robot-tests/scripts/run_tests_pipeline.py @@ -30,8 +30,8 @@ def run_tests_pipeline(): run_tests_command = f"pipenv run python run_tests.py --admin-pass {args.admin_pass} --analyst-pass {args.analyst_pass} --expiredinvite-pass {args.expiredinvite_pass} --env 
{args.env} --file {args.file} --ci --processes {args.processes} --enable-slack-notifications" - if args.rerun_failed_suites: - run_tests_command += " --rerun-failed-suites" + if args.rerun_attempts: + run_tests_command += f" --rerun-attempts {str(args.rerun_attempts)}" subprocess.check_call(run_tests_command, shell=True) @@ -58,13 +58,6 @@ def run_tests_pipeline(): parser.add_argument("--processes", dest="processes", help="number of processes to run", required=True) - parser.add_argument( - "--rerun-failed-suites", - dest="rerun_failed_suites", - help="rerun any failed suites from a previous run", - required=False, - action="store_true", - ) - + parser.add_argument("--rerun-attempts", dest="rerun_attempts", type=int, help="Number of rerun attempts") args = parser.parse_args() run_tests_pipeline() diff --git a/tests/robot-tests/tests/libs/admin-utilities.py b/tests/robot-tests/tests/libs/admin-utilities.py index 2e57d1eadd0..5e230bad718 100644 --- a/tests/robot-tests/tests/libs/admin-utilities.py +++ b/tests/robot-tests/tests/libs/admin-utilities.py @@ -2,20 +2,18 @@ import time import requests -from robot.libraries.BuiltIn import BuiltIn from selenium.common import NoSuchElementException from selenium.webdriver.common.by import By from tests.libs.logger import get_logger +from tests.libs.selenium_elements import sl from tests.libs.setup_auth_variables import setup_auth_variables from tests.libs.utilities import set_cookie_from_json, set_to_local_storage logger = get_logger(__name__) -sl = BuiltIn().get_library_instance("SeleniumLibrary") - def raise_assertion_error(err_msg): - sl.failure_occurred() + sl().failure_occurred() logger.warn(err_msg) raise AssertionError(err_msg) @@ -55,7 +53,7 @@ def user_signs_in_as(user: str): user, email=os.getenv(f"{user}_EMAIL"), password=os.getenv(f"{user}_PASSWORD"), - driver=sl.driver, + driver=sl().driver, identity_provider=os.getenv("IDENTITY_PROVIDER"), ) @@ -69,13 +67,13 @@ def user_signs_in_as(user: str): ) 
set_cookie_from_json(cookie_token) - sl.go_to(admin_url) + sl().go_to(admin_url) except Exception as e: raise_assertion_error(e) def get_theme_id_from_url(): - url = sl.get_location() + url = sl().get_location() assert "/themes/" in url, "URL does not contain /themes" result = url[len(os.getenv("ADMIN_URL")) :].lstrip("/").split("/") assert result[0] == "themes", 'String "themes" should be 1st element in list' @@ -90,7 +88,7 @@ def get_release_guid_from_release_status_page_url(url): def data_csv_number_contains_xpath(num, xpath): try: - elem = sl.driver.find_element(By.XPATH, f'//*[@id="dataFileUploadForm"]/dl[{num}]') + elem = sl().driver.find_element(By.XPATH, f'//*[@id="dataFileUploadForm"]/dl[{num}]') except BaseException: raise_assertion_error(f'Cannot find data file number "{num}"') try: @@ -101,7 +99,7 @@ def data_csv_number_contains_xpath(num, xpath): def data_file_number_contains_xpath(num, xpath): try: - elem = sl.driver.find_element(By.XPATH, f'//*[@id="fileUploadForm"]/dl[{num}]') + elem = sl().driver.find_element(By.XPATH, f'//*[@id="fileUploadForm"]/dl[{num}]') except BaseException: raise_assertion_error(f'Cannot find data file number "{num}"') try: @@ -114,23 +112,23 @@ def user_waits_for_release_process_status_to_be(status, timeout): max_time = time.time() + int(timeout) while time.time() < max_time: try: - sl.driver.find_element(By.ID, f"release-process-status-Failed") + sl().driver.find_element(By.ID, f"release-process-status-Failed") raise_assertion_error("Release process status FAILED!") except BaseException: pass try: - sl.driver.find_element(By.ID, f"release-process-status-{status}") + sl().driver.find_element(By.ID, f"release-process-status-{status}") return except BaseException: - sl.reload_page() # Necessary if release previously scheduled + sl().reload_page() # Necessary if release previously scheduled time.sleep(3) raise_assertion_error(f"Release process status wasn't {status} after {timeout} seconds!") def 
user_checks_dashboard_theme_topic_dropdowns_exist(): try: - sl.driver.find_element(By.ID, "publicationsReleases-themeTopic-themeId") - sl.driver.find_element(By.ID, "publicationsReleases-themeTopic-topicId") + sl().driver.find_element(By.ID, "publicationsReleases-themeTopic-themeId") + sl().driver.find_element(By.ID, "publicationsReleases-themeTopic-topicId") except NoSuchElementException: return False diff --git a/tests/robot-tests/tests/libs/fail_fast.py b/tests/robot-tests/tests/libs/fail_fast.py index 0249b99d940..2c015b88f06 100644 --- a/tests/robot-tests/tests/libs/fail_fast.py +++ b/tests/robot-tests/tests/libs/fail_fast.py @@ -4,17 +4,28 @@ scripts, firstly to record that a test suite is failing, and then again on subsequent Tests starting to see if they should continue to run or if they should fail immediately and therefore fail the test suite immediately. """ +import os.path + from robot.libraries.BuiltIn import BuiltIn from tests.libs.logger import get_logger +from tests.libs.selenium_elements import sl + +failing_suites_filename = ".failing_suites" -sl = BuiltIn().get_library_instance("SeleniumLibrary") -FAILING_SUITES = set() logger = get_logger(__name__) +def _get_current_test_suite() -> str: + return BuiltIn().get_variable_value("${SUITE SOURCE}") + + def current_test_suite_failing_fast() -> bool: test_suite = _get_current_test_suite() - return test_suite in FAILING_SUITES + if os.path.isfile(failing_suites_filename): + file = open(failing_suites_filename, "r") + failing_suites = file.readlines() + return test_suite in failing_suites + return False def record_failing_test_suite(): @@ -22,7 +33,8 @@ def record_failing_test_suite(): logger.warn( f"Recording test suite '{test_suite}' as failing - subsequent tests will automatically fail in this suite" ) - FAILING_SUITES.add(test_suite) + file = open(failing_suites_filename, "w") + file.writelines([test_suite]) def fail_test_fast_if_required(): @@ -30,10 +42,6 @@ def fail_test_fast_if_required(): 
_raise_assertion_error(f"Test suite {_get_current_test_suite()} is already failing. Failing this test fast.") -def _get_current_test_suite() -> str: - return BuiltIn().get_variable_value("${SUITE SOURCE}") - - def _raise_assertion_error(err_msg): - sl.failure_occurred() + sl().failure_occurred() raise AssertionError(err_msg) diff --git a/tests/robot-tests/tests/libs/file_operations.py b/tests/robot-tests/tests/libs/file_operations.py index 0d125311f0b..1ff8f8a42ed 100644 --- a/tests/robot-tests/tests/libs/file_operations.py +++ b/tests/robot-tests/tests/libs/file_operations.py @@ -2,10 +2,7 @@ import zipfile import requests -from robot.libraries.BuiltIn import BuiltIn - -sl = BuiltIn().get_library_instance("SeleniumLibrary") - +from tests.libs.selenium_elements import sl requests.sessions.HTTPAdapter(pool_connections=50, pool_maxsize=50, max_retries=3) session = requests.Session() @@ -14,7 +11,7 @@ def download_file(link_locator, file_name): if not os.path.exists("test-results/downloads"): os.makedirs("test-results/downloads") - link_url = sl.get_element_attribute(link_locator, "href") + link_url = sl().get_element_attribute(link_locator, "href") r = session.get(link_url, allow_redirects=True, stream=True) with open(f"test-results/downloads/{file_name}", "wb") as f: f.write(r.content) diff --git a/tests/robot-tests/tests/libs/public-utilities.py b/tests/robot-tests/tests/libs/public-utilities.py index 7d006b5c8c7..77ebf19343e 100644 --- a/tests/robot-tests/tests/libs/public-utilities.py +++ b/tests/robot-tests/tests/libs/public-utilities.py @@ -1,56 +1,54 @@ import os import re -from robot.libraries.BuiltIn import BuiltIn from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.common.by import By - -sl = BuiltIn().get_library_instance("SeleniumLibrary") +from tests.libs.selenium_elements import sl def raise_assertion_error(err_msg): - sl.capture_page_screenshot() + sl().capture_page_screenshot() raise AssertionError(err_msg) def 
cookie_should_not_exist(name): - for cookie in sl.driver.get_cookies(): + for cookie in sl().driver.get_cookies(): if cookie["name"] == name: raise_assertion_error(f"Cookie {name} exists when it shouldn't!") def cookie_should_have_value(name, value): - for cookie in sl.driver.get_cookies(): + for cookie in sl().driver.get_cookies(): if cookie["name"] == name and cookie["value"] == value: return raise_assertion_error(f"Couldn't find cookie {name} with value {value}") def cookie_names_should_be_on_page(): - cookies = sl.driver.get_cookies() + cookies = sl().driver.get_cookies() for cookie in cookies: if cookie["name"] in ["_hjIncludedInSample", "_hjid"]: continue try: - sl.page_should_contain(cookie["name"]) + sl().page_should_contain(cookie["name"]) except BaseException: raise_assertion_error(f"Page should contain text \"{cookie['name']}\"!") def user_checks_number_of_other_releases_is_correct(number): - elems = sl.driver.find_elements(By.XPATH, '(.//*[@data-testid="other-release-item"])') + elems = sl().driver.find_elements(By.XPATH, '(.//*[@data-testid="other-release-item"])') if len(elems) != int(number): raise_assertion_error(f'Found "{len(elems)}" other releases, not "{int(number)}"') def user_checks_other_release_is_shown_in_position(release_name, position): try: - sl.driver.find_element(By.XPATH, f'.//*[@data-testid="other-release-item" and a/text()="{release_name}"]') + sl().driver.find_element(By.XPATH, f'.//*[@data-testid="other-release-item" and a/text()="{release_name}"]') except BaseException: raise_assertion_error(f'No other release "{release_name}" found') try: - elem = sl.driver.find_element(By.XPATH, f'(.//a[../@data-testid="other-release-item"])[{position}]') + elem = sl().driver.find_element(By.XPATH, f'(.//a[../@data-testid="other-release-item"])[{position}]') except BaseException: raise_assertion_error(f"There are less than {position} other releases listed!") @@ -62,7 +60,7 @@ def user_checks_other_release_is_shown_in_position(release_name, 
position): # Table tool def user_checks_generated_permalink_is_valid(): - elem = sl.driver.find_element(By.CSS_SELECTOR, '[data-testid="permalink-generated-url"]') + elem = sl().driver.find_element(By.CSS_SELECTOR, '[data-testid="permalink-generated-url"]') url_without_basic_auth = re.sub(r".*@", "", os.environ["PUBLIC_URL"]) url_without_http = re.sub(r"^https?:\/\/", "", url_without_basic_auth) current_url_without_http = re.sub(r"^https?:\/\/", "", elem.get_attribute("value")) @@ -77,23 +75,23 @@ def user_reorders_table_headers(drag_selector, drop_selector): drop_elem = None if drag_selector.startswith("css:"): drag_selector = drag_selector[4:] - sl.wait_until_page_contains_element(f"css:{drag_selector}") - drag_elem = sl.driver.find_element(By.CSS_SELECTOR, drag_selector) + sl().wait_until_page_contains_element(f"css:{drag_selector}") + drag_elem = sl().driver.find_element(By.CSS_SELECTOR, drag_selector) if drop_selector.startswith("css:"): drop_selector = drop_selector[4:] - sl.wait_until_page_contains_element(f"css:{drop_selector}") - drop_elem = sl.driver.find_element(By.CSS_SELECTOR, drop_selector) + sl().wait_until_page_contains_element(f"css:{drop_selector}") + drop_elem = sl().driver.find_element(By.CSS_SELECTOR, drop_selector) if drag_selector.startswith("xpath:"): drag_selector = drag_selector[6:] - sl.wait_until_page_contains_element(f"xpath:{drag_selector}") - drag_elem = sl.driver.find_element(By.XPATH, drag_selector) + sl().wait_until_page_contains_element(f"xpath:{drag_selector}") + drag_elem = sl().driver.find_element(By.XPATH, drag_selector) if drop_selector.startswith("xpath:"): drop_selector = drop_selector[6:] - sl.wait_until_page_contains_element(f"xpath:{drop_selector}") - drop_elem = sl.driver.find_element(By.XPATH, drop_selector) + sl().wait_until_page_contains_element(f"xpath:{drop_selector}") + drop_elem = sl().driver.find_element(By.XPATH, drop_selector) # https://github.com/react-dnd/react-dnd/issues/1195#issuecomment-456370983 - action 
= ActionChains(sl.driver) + action = ActionChains(sl().driver) action.click_and_hold(drag_elem).perform() action.move_to_element(drop_elem).perform() action.move_by_offset(0, 0).pause(0.01).perform() diff --git a/tests/robot-tests/tests/libs/selenium_elements.py b/tests/robot-tests/tests/libs/selenium_elements.py new file mode 100644 index 00000000000..17740136ab3 --- /dev/null +++ b/tests/robot-tests/tests/libs/selenium_elements.py @@ -0,0 +1,14 @@ +from robot.libraries.BuiltIn import BuiltIn +from SeleniumLibrary.keywords.waiting import WaitingKeywords + + +def sl(): + return BuiltIn().get_library_instance("SeleniumLibrary") + + +def element_finder(): + return sl()._element_finder + + +def waiting(): + return WaitingKeywords(sl()) diff --git a/tests/robot-tests/tests/libs/slack.py b/tests/robot-tests/tests/libs/slack.py index 90bf5845897..bacbd188c80 100644 --- a/tests/robot-tests/tests/libs/slack.py +++ b/tests/robot-tests/tests/libs/slack.py @@ -24,49 +24,38 @@ def __init__(self): if env_var is None: raise AssertionError(f"{env_var} is not set") - def _build_attachments(self, env: str, suite: str): + def _build_attachments(self, env: str, suites_ran: str, suites_failed: [], run_index: int): with open(f"{PATH}{os.sep}output.xml", "rb") as report: contents = report.read() soup = BeautifulSoup(contents, features="xml") - test = soup.find("total").find("stat") + tests = soup.find("total").find("stat") - failed_tests = int(test["fail"]) - passed_tests = int(test["pass"]) + failed_tests = int(tests["fail"]) + passed_tests = int(tests["pass"]) - failed_tests_field = ({},) + failed_test_suites_field = ({},) - if failed_tests > 0: - failed_tests_field = {"title": "Failed tests", "value": failed_tests} + if suites_failed: + failed_test_suites_field = {"title": "Failed test suites", "value": "\n * ".join(suites_failed)} return [ { "pretext": "All results", - "color": "danger" if failed_tests else "good", - "mrkdwn_in": ["pretext"], + "color": "danger" if suites_failed 
else "good", + "mrkdwn_in": ["pretext", "Failed test suites"], "fields": [ {"title": "Environment", "value": env}, - {"title": "Suite", "value": suite.replace("tests/", "")}, + {"title": "Suite", "value": suites_ran.replace("tests/", "")}, {"title": "Total test cases", "value": passed_tests + failed_tests}, - failed_tests_field, + {"title": "Total runs", "value": run_index + 1}, + failed_test_suites_field, ], } ] - def _tests_failed(self): - with open(f"{PATH}{os.sep}output.xml", "rb") as report: - contents = report.read() - - soup = BeautifulSoup(contents, features="xml") - test = soup.find("total").find("stat") - - failed_tests = int(test["fail"]) - - if failed_tests > 0: - return True - - def send_test_report(self, env: str, suite: str): - attachments = self._build_attachments(env, suite) + def send_test_report(self, env: str, suites_ran: str, suites_failed: [], run_index: int): + attachments = self._build_attachments(env, suites_ran, suites_failed, run_index) webhook_url = self.report_webhook_url slack_bot_token = self.slack_bot_token @@ -78,12 +67,12 @@ def send_test_report(self, env: str, suite: str): logger.info("Sent UI test statistics to #build") - if self._tests_failed(): + if suites_failed: client = WebClient(token=slack_bot_token) date = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S") - report_name = f"UI-test-report-{suite.replace('tests/', '')}-{env}-{date}.zip" + report_name = f"UI-test-report-{suites_ran.replace('tests/', '')}-{env}-{date}.zip" shutil.make_archive(report_name.replace(".zip", ""), "zip", PATH) try: diff --git a/tests/robot-tests/tests/libs/utilities.py b/tests/robot-tests/tests/libs/utilities.py index 57d24ee0eb2..eca9471891f 100644 --- a/tests/robot-tests/tests/libs/utilities.py +++ b/tests/robot-tests/tests/libs/utilities.py @@ -12,13 +12,10 @@ from robot.libraries.BuiltIn import BuiltIn from selenium.webdriver.common.by import By from selenium.webdriver.remote.webelement import WebElement -from 
SeleniumLibrary.keywords.waiting import WaitingKeywords from SeleniumLibrary.utils import is_noney from tests.libs.logger import get_logger +from tests.libs.selenium_elements import element_finder, sl, waiting -sl = BuiltIn().get_library_instance("SeleniumLibrary") -element_finder = sl._element_finder -waiting = WaitingKeywords(sl) logger = get_logger(__name__) # Should only initialise some parts once e.g. registration @@ -49,8 +46,8 @@ def _find_by_testid(parent_locator: object, criteria: str, tag: str, constraints # Register locator strategies - element_finder.register("label", _find_by_label, persist=True) - element_finder.register("testid", _find_by_testid, persist=True) + element_finder().register("label", _find_by_label, persist=True) + element_finder().register("testid", _find_by_testid, persist=True) utilities_init.initialised = True @@ -63,19 +60,26 @@ def enable_basic_auth_headers(): if public_auth_user and public_auth_password: token = base64.b64encode(f"{public_auth_user}:{public_auth_password}".encode()) - sl.driver.execute_cdp_cmd("Network.enable", {}) - sl.driver.execute_cdp_cmd( - "Network.setExtraHTTPHeaders", {"headers": {"Authorization": f"Basic {token.decode()}"}} - ) + try: + # Must refetch sl() on rerun or sl().driver is None! 
+ # sl() = BuiltIn().get_library_instance("SeleniumLibrary") + assert sl().driver is not None, "sl().driver is None" + sl().driver.execute_cdp_cmd("Network.enable", {}) + + sl().driver.execute_cdp_cmd( + "Network.setExtraHTTPHeaders", {"headers": {"Authorization": f"Basic {token.decode()}"}} + ) + except Exception as e: + BuiltIn().log_to_console("Exception: ", e) def disable_basic_auth_headers(): # Must be disabled to visit admin frontend - sl.driver.execute_cdp_cmd("Network.disable", {}) + sl().driver.execute_cdp_cmd("Network.disable", {}) def raise_assertion_error(err_msg): - sl.failure_occurred() + sl().failure_occurred() raise AssertionError(err_msg) @@ -87,10 +91,10 @@ def user_waits_until_parent_contains_element( def parent_contains_matching_element() -> bool: parent_el = _get_parent_webelement_from_locator(parent_locator, timeout, error) - return element_finder.find(child_locator, required=False, parent=parent_el) is not None + return element_finder().find(child_locator, required=False, parent=parent_el) is not None if is_noney(count): - return waiting._wait_until( + return waiting()._wait_until( parent_contains_matching_element, "Parent '%s' did not contain '%s' in ." % (parent_locator, child_locator), timeout, @@ -101,9 +105,9 @@ def parent_contains_matching_element() -> bool: def parent_contains_matching_elements() -> bool: parent_el = _get_parent_webelement_from_locator(parent_locator, timeout, error) - return len(sl.find_elements(child_locator, parent=parent_el)) == count + return len(sl().find_elements(child_locator, parent=parent_el)) == count - waiting._wait_until( + waiting()._wait_until( parent_contains_matching_elements, "Parent '%s' did not contain %s '%s' element(s) within ." 
% (parent_locator, count, child_locator), timeout, @@ -125,10 +129,10 @@ def user_waits_until_parent_does_not_contain_element( def parent_does_not_contain_matching_element() -> bool: parent_el = _get_parent_webelement_from_locator(parent_locator, timeout, error) - return element_finder.find(child_locator, required=False, parent=parent_el) is None + return element_finder().find(child_locator, required=False, parent=parent_el) is None if is_noney(count): - return waiting._wait_until( + return waiting()._wait_until( parent_does_not_contain_matching_element, "Parent '%s' should not have contained '%s' in ." % (parent_locator, child_locator), timeout, @@ -139,9 +143,9 @@ def parent_does_not_contain_matching_element() -> bool: def parent_does_not_contain_matching_elements() -> bool: parent_el = _get_parent_webelement_from_locator(parent_locator, timeout, error) - return len(sl.find_elements(child_locator, parent=parent_el)) != count + return len(sl().find_elements(child_locator, parent=parent_el)) != count - waiting._wait_until( + waiting()._wait_until( parent_does_not_contain_matching_elements, "Parent '%s' should not have contained %s '%s' element(s) within ." 
% (parent_locator, count, child_locator), @@ -188,26 +192,26 @@ def get_child_elements(parent_locator: object, child_locator: str): try: child_locator = _normalise_child_locator(child_locator) parent_el = _get_parent_webelement_from_locator(parent_locator) - return element_finder.find_elements(child_locator, parent=parent_el) + return element_finder().find_elements(child_locator, parent=parent_el) except Exception as err: logger.warn(f"Error whilst executing utilities.py get_child_elements() - {err}") raise_assertion_error(err) def user_sets_focus_to_element(selector): - sl.wait_until_page_contains_element(selector) - sl.set_focus_to_element(selector) + sl().wait_until_page_contains_element(selector) + sl().set_focus_to_element(selector) def set_to_local_storage(key: str, value: str): - sl.execute_javascript(f"localStorage.setItem('{key}', '{value}');") + sl().execute_javascript(f"localStorage.setItem('{key}', '{value}');") def set_cookie_from_json(cookie_json): cookie_dict = json.loads(cookie_json) del cookie_dict["domain"] - sl.driver.add_cookie(cookie_dict) + sl().driver.add_cookie(cookie_dict) def format_uk_to_local_datetime(uk_local_datetime: str, strf: str) -> str: @@ -231,7 +235,7 @@ def format_datetime(datetime: datetime, strf: str) -> str: def user_should_be_at_top_of_page(): - (x, y) = sl.get_window_position() + (x, y) = sl().get_window_position() if y != 0: raise_assertion_error(f"Windows position Y is {y} not 0! 
User should be at the top of the page!") @@ -255,12 +259,12 @@ def capture_screenshots_and_html(): def capture_html(): - html = sl.get_source() + html = sl().get_source() current_time_millis = round(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) html_file = open(f"test-results/captured-html-{current_time_millis}.html", "w", encoding="utf-8") html_file.write(html) html_file.close() - logger.warn(f"Captured HTML of {sl.get_location()} HTML saved to file://{os.path.realpath(html_file.name)}") + logger.warn(f"Captured HTML of {sl().get_location()} HTML saved to file://{os.path.realpath(html_file.name)}") def user_gets_row_number_with_heading(heading: str, table_locator: str = "css:table"): @@ -270,7 +274,7 @@ def user_gets_row_number_with_heading(heading: str, table_locator: str = "css:ta def user_gets_row_with_group_and_indicator(group: str, indicator: str, table_selector: str = "css:table"): - table_elem = sl.get_webelement(table_selector) + table_elem = sl().get_webelement(table_selector) elems = table_elem.find_elements( By.XPATH, f'.//tbody/tr/th[text()="{group}"]/../self::tr | .//tbody/tr/th[text()="{group}"]/../following-sibling::tr', @@ -305,12 +309,12 @@ def remove_substring_from_right_of_string(string, substring): def user_clicks_element_if_exists(selector): - if element_finder.find(selector, required=False) is not None: - sl.click_element(selector) + if element_finder().find(selector, required=False) is not None: + sl().click_element(selector) def user_is_on_admin_dashboard(admin_url: str) -> bool: - current_url = sl.get_location() + current_url = sl().get_location() url_parts = urlparse(current_url) left_part = f"{url_parts.scheme}://{url_parts.netloc}{url_parts.path}" if left_part.endswith("/"): @@ -321,10 +325,10 @@ def user_is_on_admin_dashboard(admin_url: str) -> bool: def user_is_on_admin_dashboard_with_theme_and_topic_selected(admin_url: str, theme: str, topic: str) -> bool: if not user_is_on_admin_dashboard(admin_url): return False - 
selected_theme = sl.get_selected_list_label("id:publicationsReleases-themeTopic-themeId") + selected_theme = sl().get_selected_list_label("id:publicationsReleases-themeTopic-themeId") if selected_theme != theme: return False - selected_topic = sl.get_selected_list_label("id:publicationsReleases-themeTopic-topicId") + selected_topic = sl().get_selected_list_label("id:publicationsReleases-themeTopic-topicId") return selected_topic == topic @@ -333,7 +337,7 @@ def user_navigates_to_admin_dashboard_if_needed(admin_url: str): if user_is_on_admin_dashboard(admin_url): return - sl.go_to(admin_url) + sl().go_to(admin_url) def is_webelement(variable: object) -> bool: @@ -343,10 +347,10 @@ def is_webelement(variable: object) -> bool: def _normalise_child_locator(child_locator: str) -> str: if isinstance(child_locator, str): # the below substitution is necessary in order to correctly find the parent's descendants. Without the - # preceding dot, the double forward slash breaks out of the parent container and returns the xpath query + # preceding dot, the double forward slash breaks out of the parent container and returns the xpath query # to the root of the DOM, leading to false positives or incorrectly found DOM elements. The below # substitution covers both child selectors beginning with "xpath://" and "//", as the double forward - # slashes without the "xpath:" prefix are inferred as being xpath expressions. + # slashes without the "xpath:" prefix are inferred as being xpath expressions. 
return re.sub(r"^(xpath:)?//", "xpath:.//", child_locator) raise_assertion_error(f"Child locator was not a str - {child_locator}") @@ -354,8 +358,8 @@ def _normalise_child_locator(child_locator: str) -> str: def _get_parent_webelement_from_locator(parent_locator: object, timeout: int = None, error: str = "") -> WebElement: if isinstance(parent_locator, str): - sl.wait_until_page_contains_element(parent_locator, timeout=timeout, error=error) - return sl.find_element(parent_locator) + sl().wait_until_page_contains_element(parent_locator, timeout=timeout, error=error) + return sl().find_element(parent_locator) elif isinstance(parent_locator, WebElement): return parent_locator else: diff --git a/tests/robot-tests/tests/libs/visual.py b/tests/robot-tests/tests/libs/visual.py index 26d3667827b..ba3417669a3 100644 --- a/tests/robot-tests/tests/libs/visual.py +++ b/tests/robot-tests/tests/libs/visual.py @@ -1,62 +1,61 @@ import os -from robot.libraries.BuiltIn import BuiltIn from selenium.webdriver.remote.webelement import WebElement from tests.libs.logger import get_logger +from tests.libs.selenium_elements import sl -sl = BuiltIn().get_library_instance("SeleniumLibrary") logger = get_logger(__name__) def with_no_overflow(func): def wrapper(*args, **kwargs): - head_html = sl.driver.execute_script("return document.head.innerHTML;") - sl.driver.execute_script("document.head.innerHTML += ''") + head_html = sl().driver.execute_script("return document.head.innerHTML;") + sl().driver.execute_script("document.head.innerHTML += ''") try: return func(*args, **kwargs) finally: - sl.driver.execute_script("document.head.innerHTML = arguments[0];", head_html) + sl().driver.execute_script("document.head.innerHTML = arguments[0];", head_html) return wrapper def with_maximised_browser(func): def wrapper(*args, **kwargs): - currentWindow = sl.get_window_size() - page_width = sl.driver.execute_script("return document.documentElement.scrollWidth;") + 100 - page_height = 
sl.driver.execute_script("return document.documentElement.scrollHeight;") + 100 + currentWindow = sl().get_window_size() + page_width = sl().driver.execute_script("return document.documentElement.scrollWidth;") + 100 + page_height = sl().driver.execute_script("return document.documentElement.scrollHeight;") + 100 original_width = currentWindow[0] original_height = currentWindow[1] - sl.set_window_size(page_width, page_height) + sl().set_window_size(page_width, page_height) try: return func(*args, **kwargs) finally: - sl.set_window_size(original_width, original_height) + sl().set_window_size(original_width, original_height) return wrapper def highlight_element(element: WebElement): - sl.driver.execute_script("arguments[0].scrollIntoView();", element) - sl.driver.execute_script("arguments[0].style.border = 'red 4px solid';", element) + sl().driver.execute_script("arguments[0].scrollIntoView();", element) + sl().driver.execute_script("arguments[0].style.border = 'red 4px solid';", element) def capture_screenshot(): - screenshot_location = sl.capture_page_screenshot() + screenshot_location = sl().capture_page_screenshot() logger.warn( - f"Captured current screenshot at URL '{sl.get_location()}' Screenshot saved to file://{screenshot_location}" + f"Captured current screenshot at URL '{sl().get_location()}' Screenshot saved to file://{screenshot_location}" ) @with_maximised_browser def capture_large_screenshot(): - screenshot_location = sl.capture_page_screenshot() + screenshot_location = sl().capture_page_screenshot() logger.warn( - f"Captured enlarged screenshot at URL '{sl.get_location()}' Screenshot saved to file://{screenshot_location}" + f"Captured enlarged screenshot at URL '{sl().get_location()}' Screenshot saved to file://{screenshot_location}" ) From 7aa06b28e1137ebbafadc519472778995ec3ec0f Mon Sep 17 00:00:00 2001 From: Duncan Watson Date: Fri, 24 Nov 2023 12:03:57 +0000 Subject: [PATCH 2/5] EES-4117 - rewrite of run_tests.py to break it into more manageable 
modules --- .../Security/PermissionsControllerTests.cs | 37 +- .../Fixtures/CacheServiceTestFixture.cs | 2 +- tests/robot-tests/admin_api.py | 220 ++++++++ tests/robot-tests/args_and_variables.py | 178 +++++++ tests/robot-tests/run_tests.py | 473 +++++------------- 5 files changed, 534 insertions(+), 376 deletions(-) create mode 100644 tests/robot-tests/admin_api.py create mode 100644 tests/robot-tests/args_and_variables.py diff --git a/src/GovUk.Education.ExploreEducationStatistics.Admin.Tests/Controllers/Api/Security/PermissionsControllerTests.cs b/src/GovUk.Education.ExploreEducationStatistics.Admin.Tests/Controllers/Api/Security/PermissionsControllerTests.cs index 4e7b7dec496..70282096d07 100644 --- a/src/GovUk.Education.ExploreEducationStatistics.Admin.Tests/Controllers/Api/Security/PermissionsControllerTests.cs +++ b/src/GovUk.Education.ExploreEducationStatistics.Admin.Tests/Controllers/Api/Security/PermissionsControllerTests.cs @@ -12,10 +12,11 @@ namespace GovUk.Education.ExploreEducationStatistics.Admin.Tests.Controllers.Api.Security; +[Collection(CacheServiceTestFixture.CacheServiceTests)] public class PermissionsControllerTests : IClassFixture> { private readonly WebApplicationFactory _testApp; - + public PermissionsControllerTests(TestApplicationFactory testApp) { _testApp = testApp; @@ -27,9 +28,9 @@ public async Task GetGlobalPermissions_AuthenticatedUser() var client = _testApp .SetUser(AuthenticatedUser()) .CreateClient(); - + var response = await client.GetAsync("/api/permissions/access"); - + response.AssertOk(new GlobalPermissionsViewModel( CanAccessSystem: true, CanAccessAnalystPages: false, @@ -39,16 +40,16 @@ public async Task GetGlobalPermissions_AuthenticatedUser() IsBauUser: false, IsApprover: false)); } - + [Fact] public async Task GetGlobalPermissions_BauUser() { var client = _testApp .SetUser(BauUser()) .CreateClient(); - + var response = await client.GetAsync("/api/permissions/access"); - + response.AssertOk(new 
GlobalPermissionsViewModel( CanAccessSystem: true, CanAccessAnalystPages: true, @@ -60,7 +61,7 @@ public async Task GetGlobalPermissions_BauUser() // individual Approver roles on Releases or Publications. IsApprover: false)); } - + [Fact] public async Task GetGlobalPermissions_AnalystUser_NotReleaseOrPublicationApprover() { @@ -76,7 +77,7 @@ public async Task GetGlobalPermissions_AnalystUser_NotReleaseOrPublicationApprov UserId = user.GetUserId(), Role = ReleaseRole.Contributor }); - + // Add test data that gives the user access to a Publication without being an Approver. context.UserPublicationRoles.Add(new UserPublicationRole { @@ -85,9 +86,9 @@ public async Task GetGlobalPermissions_AnalystUser_NotReleaseOrPublicationApprov }); }) .CreateClient(); - + var response = await client.GetAsync("/api/permissions/access"); - + response.AssertOk(new GlobalPermissionsViewModel( CanAccessSystem: true, CanAccessAnalystPages: true, @@ -98,7 +99,7 @@ public async Task GetGlobalPermissions_AnalystUser_NotReleaseOrPublicationApprov // Expect this to be false if the user isn't an approver of any kind IsApprover: false)); } - + [Fact] public async Task GetGlobalPermissions_AnalystUser_ReleaseApprover() { @@ -115,9 +116,9 @@ public async Task GetGlobalPermissions_AnalystUser_ReleaseApprover() }); }) .CreateClient(); - + var response = await client.GetAsync("/api/permissions/access"); - + response.AssertOk(new GlobalPermissionsViewModel( CanAccessSystem: true, CanAccessAnalystPages: true, @@ -145,9 +146,9 @@ public async Task GetGlobalPermissions_AnalystUser_PublicationApprover() }); }) .CreateClient(); - + var response = await client.GetAsync("/api/permissions/access"); - + response.AssertOk(new GlobalPermissionsViewModel( CanAccessSystem: true, CanAccessAnalystPages: true, @@ -165,9 +166,9 @@ public async Task GetGlobalPermissions_PreReleaseUser() var client = _testApp .SetUser(PreReleaseUser()) .CreateClient(); - + var response = await client.GetAsync("/api/permissions/access"); 
- + response.AssertOk(new GlobalPermissionsViewModel( CanAccessSystem: true, CanAccessAnalystPages: false, @@ -177,7 +178,7 @@ public async Task GetGlobalPermissions_PreReleaseUser() IsBauUser: false, IsApprover: false)); } - + [Fact] public async Task GetGlobalPermissions_UnauthenticatedUser() { diff --git a/src/GovUk.Education.ExploreEducationStatistics.Common.Tests/Fixtures/CacheServiceTestFixture.cs b/src/GovUk.Education.ExploreEducationStatistics.Common.Tests/Fixtures/CacheServiceTestFixture.cs index fe56728a7b4..55dedf6ac03 100644 --- a/src/GovUk.Education.ExploreEducationStatistics.Common.Tests/Fixtures/CacheServiceTestFixture.cs +++ b/src/GovUk.Education.ExploreEducationStatistics.Common.Tests/Fixtures/CacheServiceTestFixture.cs @@ -12,7 +12,7 @@ namespace GovUk.Education.ExploreEducationStatistics.Common.Tests.Fixtures /// public class CacheServiceTestFixture : IDisposable { - protected const string CacheServiceTests = "Cache service tests"; + public const string CacheServiceTests = "Cache service tests"; protected static readonly Mock BlobCacheService = new(MockBehavior.Strict); protected static readonly Mock PublicBlobCacheService = new(MockBehavior.Strict); diff --git a/tests/robot-tests/admin_api.py b/tests/robot-tests/admin_api.py new file mode 100644 index 00000000000..9cb6f29b2aa --- /dev/null +++ b/tests/robot-tests/admin_api.py @@ -0,0 +1,220 @@ +import json +import os +from pathlib import Path +from typing import Tuple + +import requests +from scripts.get_auth_tokens import get_identity_info +from tests.libs.logger import get_logger + +# TODO - there's plenty of duplication between this file and admin_api.py and setup_auth_variables.py in tests/libs. +# Would be good to attempt to consolidate. +# There is also a web of dependencies between Python scripts in tests, tests/scripts and tests/libs. Would +# be good to consolidate these into a better structure. 
+ +logger = get_logger(__name__) + +test_theme_name = "Test theme" + + +def send_admin_request(method, endpoint, body=None, fail_on_reauthenticate=False): + """ + This method makes an authenticated request to the Admin API. + + If no prior authentication tokens are available when this method is called, they will + be obtained using the BAU user's credentials. + + If authentication tokens exist already but are no longer valid, new tokens will be + acquired and the request retried. + + If an error HTTP status code is encountered, an error will be thrown. + """ + + assert method and endpoint + assert os.getenv("ADMIN_URL") is not None + + if method == "POST": + assert body is not None, "POST requests require a body" + + requests.sessions.HTTPAdapter(pool_connections=50, pool_maxsize=50, max_retries=3) + session = requests.Session() + + # To prevent InsecureRequestWarning + requests.packages.urllib3.disable_warnings() + + response = send_request_with_retry_on_auth_failure(session, method, endpoint, body, fail_on_reauthenticate) + + if response.status_code == 400 and response.text.find("SlugNotUnique") != -1: + raise Exception(f"SlugNotUnique for {body}") + else: + assert response.status_code < 300, f"Admin request responded with {response.status_code} and {response.text}" + return response + + +def send_authenticated_api_request(session, method, endpoint, body): + """ + This method makes a request to the Admin API, and requires that authentication tokens + have been fetched prior to being called. 
+ """ + + jwt_token = json.loads(os.getenv("IDENTITY_LOCAL_STORAGE_ADMIN"))["access_token"] + return session.request( + method, + url=f'{os.getenv("ADMIN_URL")}{endpoint}', + headers={ + "Content-Type": "application/json", + "Authorization": f"Bearer {jwt_token}", + }, + stream=True, + json=body, + verify=False, + ) + + +def send_request_with_retry_on_auth_failure(session, method, endpoint, body, fail_on_reauthenticate=True): + """ + This method makes an request to the given Admin API endpoint. + + If no prior authentication tokens are available when this method is called, they will + be obtained using the BAU user's credentials. + + If authentication tokens exist already but are no longer valid, new tokens will be + acquired and the requrest retried. + + If an error HTTP status code is encountered, an error will be thrown. + """ + + if os.getenv("IDENTITY_LOCAL_STORAGE_ADMIN") is None: + setup_bau_authentication(clear_existing=True) + + response = send_authenticated_api_request(session, method, endpoint, body) + + if response.status_code not in {401, 403}: + return response + + logger.info("Attempting re-authentication...") + + # Delete identify files and re-attempt to fetch them + setup_bau_authentication(clear_existing=True) + response = send_authenticated_api_request(session, method, endpoint, body) + + if fail_on_reauthenticate: + assert response.status_code not in {401, 403}, "Failed to reauthenticate." 
+ + return response + + +def get_test_themes(): + return send_admin_request("GET", "/api/themes") + + +def create_test_theme(): + return send_admin_request("POST", "/api/themes", {"title": test_theme_name, "summary": "Test theme summary"}) + + +def get_test_theme_id(): + get_themes_resp = get_test_themes() + + for theme in get_themes_resp.json(): + if theme["title"] == test_theme_name: + return theme["id"] + + return None + + +def create_test_topic(run_id: str): + test_theme_id = get_test_theme_id() + + if not test_theme_id: + create_theme_resp = create_test_theme() + test_theme_id = create_theme_resp.json()["id"] + + os.environ["TEST_THEME_NAME"] = test_theme_name + os.environ["TEST_THEME_ID"] = test_theme_id + + topic_name = f"UI test topic {run_id}" + response = send_admin_request("POST", "/api/topics", {"title": topic_name, "themeId": os.getenv("TEST_THEME_ID")}) + + os.environ["TEST_TOPIC_NAME"] = topic_name + os.environ["TEST_TOPIC_ID"] = response.json()["id"] + + +def delete_test_topic(): + if os.getenv("TEST_TOPIC_ID") is not None: + send_admin_request("DELETE", f'/api/topics/{os.getenv("TEST_TOPIC_ID")}') + + +def setup_bau_authentication(clear_existing=False): + setup_auth_variables( + user="ADMIN", + email=os.getenv("ADMIN_EMAIL"), + password=os.getenv("ADMIN_PASSWORD"), + clear_existing=clear_existing, + identity_provider=os.getenv("IDENTITY_PROVIDER"), + ) + + +def setup_analyst_authentication(clear_existing=False): + setup_auth_variables( + user="ANALYST", + email=os.getenv("ANALYST_EMAIL"), + password=os.getenv("ANALYST_PASSWORD"), + clear_existing=clear_existing, + identity_provider=os.getenv("IDENTITY_PROVIDER"), + ) + + +def setup_auth_variables( + user, email, password, identity_provider, clear_existing=False, driver=None +) -> Tuple[str, str]: + assert user, "user param must be set" + assert email, "email param must be set" + assert password, "password param must be set" + + local_storage_name = f"IDENTITY_LOCAL_STORAGE_{user}" + cookie_name = 
f"IDENTITY_COOKIE_{user}" + + local_storage_file = Path(f"{local_storage_name}.json") + cookie_file = Path(f"{cookie_name}.json") + + if clear_existing: + local_storage_file.unlink(True) + cookie_file.unlink(True) + + admin_url = os.getenv("ADMIN_URL") + assert admin_url, "ADMIN_URL env variable must be set" + + authenticated = False + + if local_storage_file.exists() and cookie_file.exists(): + os.environ[local_storage_name] = local_storage_file.read_text() + os.environ[cookie_name] = cookie_file.read_text() + + response = send_admin_request("GET", "/api/permissions/access", fail_on_reauthenticate=False) + + if response.status_code == 200: + authenticated = True + else: + authenticated = False + logger.warn("Found invalid authentication information in local files! Attempting to reauthenticate.") + + if not authenticated: + logger.info(f"Logging in to obtain {user} authentication information...") + + os.environ[local_storage_name], os.environ[cookie_name] = get_identity_info( + url=admin_url, email=email, password=password, driver=driver, identity_provider=identity_provider + ) + + # Cache auth info to files for efficiency + local_storage_file.write_text(os.environ[local_storage_name]) + cookie_file.write_text(os.environ[cookie_name]) + + logger.info("Done!") + + local_storage_token = os.getenv(local_storage_name) + cookie_token = os.getenv(cookie_name) + + assert local_storage_token, f"{local_storage_name} env variable was not set" + assert cookie_token, f"{cookie_name} env variable was not set" + + return local_storage_token, cookie_token diff --git a/tests/robot-tests/args_and_variables.py b/tests/robot-tests/args_and_variables.py new file mode 100644 index 00000000000..f227a08e99d --- /dev/null +++ b/tests/robot-tests/args_and_variables.py @@ -0,0 +1,178 @@ +import argparse +import os + +from dotenv import load_dotenv + + +# Create a parser for our CLI arguments. 
+def create_argument_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + prog="pipenv run python run_tests.py", + description="Use this script to run the UI tests, locally or as part of the CI pipeline, against the environment of your choosing", + ) + parser.add_argument( + "-b", + "--browser", + dest="browser", + default="chrome", + choices=["chrome", "firefox", "ie"], + help="name of the browser you wish to run the tests with (NOTE: Only chromedriver is automatically installed!)", + ) + parser.add_argument( + "-i", + "--interp", + dest="interp", + default="pabot", + choices=["pabot", "robot"], + help="interpreter to use to run the tests", + ) + parser.add_argument( + "--processes", dest="processes", help="how many processes should be used when using the pabot interpreter" + ) + parser.add_argument( + "-e", + "--env", + dest="env", + default="test", + choices=["local", "dev", "test", "preprod", "prod", "ci"], + help="the environment to run the tests against", + ) + parser.add_argument( + "-f", + "--file", + dest="tests", + metavar="{file/dir}", + default="tests/", + help="test suite or folder of tests suites you wish to run", + ) + parser.add_argument( + "-t", "--tags", dest="tags", nargs="?", metavar="{tag(s)}", help="specify tests you wish to run by tag" + ) + parser.add_argument( + "-v", "--visual", dest="visual", action="store_true", help="display browser window that the tests run in" + ) + parser.add_argument( + "--ci", dest="ci", action="store_true", help="specify that the test are running as part of the CI pipeline" + ) + parser.add_argument( + "--chromedriver", + dest="chromedriver_version", + metavar="{version}", + help="specify which version of chromedriver to use", + ) + parser.add_argument( + "--disable-teardown", + dest="disable_teardown", + help="disable tearing down of any test data after completion", + action="store_true", + ) + parser.add_argument( + "--rerun-failed-tests", + dest="rerun_failed_tests", + action="store_true", 
+ help="rerun individual failed tests and merge results into original run results", + ) + parser.add_argument( + "--rerun-failed-suites", + dest="rerun_failed_suites", + action="store_true", + help="rerun failed test suites and merge results into original run results", + ) + parser.add_argument("--rerun-attempts", dest="rerun_attempts", type=int, default=0, help="Number of rerun attempts") + parser.add_argument( + "--print-keywords", + dest="print_keywords", + action="store_true", + help="choose to print out keywords as they are started", + ) + parser.add_argument( + "--enable-slack-notifications", + dest="enable_slack_notifications", + action="store_true", + help="enable Slack notifications to be sent for test reports", + ) + parser.add_argument( + "--prompt-to-continue", + dest="prompt_to_continue", + action="store_true", + help="get prompted to continue with test execution upon a failure", + ) + parser.add_argument("--fail-fast", dest="fail_fast", action="store_true", help="stop test execution on failure") + parser.add_argument( + "--custom-env", + dest="custom_env", + default=None, + help="load a custom .env file (must be in ~/robot-tests directory)", + ) + parser.add_argument( + "--debug", + dest="debug", + action="store_true", + help="get debug-level logging in report.html, including Python tracebacks", + ) + + """ + NOTE(mark): The admin and analyst passwords to access the Admin app are + stored in the CI pipeline as secret variables, which means they cannot be accessed as normal + environment variables, and instead must be passed as an argument to this script. 
+ """ + parser.add_argument("--admin-pass", dest="admin_pass", default=None, help="manually specify the admin password") + parser.add_argument( + "--analyst-pass", dest="analyst_pass", default=None, help="manually specify the analyst password" + ) + parser.add_argument( + "--expiredinvite-pass", + dest="expiredinvite_pass", + default=None, + help="manually specify the expiredinvite user password", + ) + + return parser + + +def initialise() -> argparse.Namespace: + args = create_argument_parser().parse_args() + load_environment_variables(args) + store_credential_environment_variables(args) + return args + + +def load_environment_variables(arguments: argparse.Namespace): + if arguments.custom_env: + load_dotenv(arguments.custom_env) + else: + load_dotenv(".env." + arguments.env) + + validate_environment_variables() + + +def validate_environment_variables(): + required_env_vars = [ + "TIMEOUT", + "IMPLICIT_WAIT", + "PUBLIC_URL", + "ADMIN_URL", + "PUBLIC_AUTH_USER", + "PUBLIC_AUTH_PASSWORD", + "RELEASE_COMPLETE_WAIT", + "WAIT_MEDIUM", + "WAIT_LONG", + "WAIT_SMALL", + "FAIL_TEST_SUITES_FAST", + "IDENTITY_PROVIDER", + "WAIT_CACHE_EXPIRY", + "EXPIRED_INVITE_USER_EMAIL", + "PUBLISHER_FUNCTIONS_URL", + ] + + for env_var in required_env_vars: + assert os.getenv(env_var) is not None, f"Environment variable {env_var} is not set" + + +def store_credential_environment_variables(arguments: argparse.Namespace): + if arguments.admin_pass: + os.environ["ADMIN_PASSWORD"] = arguments.admin_pass + if arguments.analyst_pass: + os.environ["ANALYST_PASSWORD"] = arguments.analyst_pass + if arguments.expiredinvite_pass: + os.environ["EXPIRED_INVITE_USER_PASSWORD"] = arguments.expiredinvite_pass diff --git a/tests/robot-tests/run_tests.py b/tests/robot-tests/run_tests.py index 37618264570..319d5fcbbf1 100755 --- a/tests/robot-tests/run_tests.py +++ b/tests/robot-tests/run_tests.py @@ -8,15 +8,14 @@ import argparse import datetime -import json import os import random import shutil import 
string from pathlib import Path -import requests -from dotenv import load_dotenv +import admin_api as admin_api +import args_and_variables as args_and_variables from pabot.pabot import main_program as pabot_run_cli from robot import rebot_cli as robot_rebot_cli from robot import run_cli as robot_run_cli @@ -24,305 +23,35 @@ from tests.libs.create_emulator_release_files import ReleaseFilesGenerator from tests.libs.fail_fast import failing_suites_filename from tests.libs.logger import get_logger -from tests.libs.setup_auth_variables import setup_auth_variables from tests.libs.slack import SlackService pabot_suite_names_filename = ".pabotsuitenames" results_foldername = "test-results" -current_dir = Path(__file__).absolute().parent -os.chdir(current_dir) - - logger = get_logger(__name__) -# This is super awkward but we have to explicitly -# add the current directory to PYTHONPATH otherwise -# the subprocesses started by pabot will not be able -# to locate lib modules correctly for some reason. 
-pythonpath = os.getenv("PYTHONPATH") - -if pythonpath: - os.environ["PYTHONPATH"] += f":{str(current_dir)}" -else: - os.environ["PYTHONPATH"] = str(current_dir) - - -# Parse arguments -parser = argparse.ArgumentParser( - prog="pipenv run python run_tests.py", - description="Use this script to run the UI tests, locally or as part of the CI pipeline, against the environment of your choosing", -) -parser.add_argument( - "-b", - "--browser", - dest="browser", - default="chrome", - choices=["chrome", "firefox", "ie"], - help="name of the browser you wish to run the tests with (NOTE: Only chromedriver is automatically installed!)", -) -parser.add_argument( - "-i", - "--interp", - dest="interp", - default="pabot", - choices=["pabot", "robot"], - help="interpreter to use to run the tests", -) -parser.add_argument( - "--processes", dest="processes", help="how many processes should be used when using the pabot interpreter" -) -parser.add_argument( - "-e", - "--env", - dest="env", - default="test", - choices=["local", "dev", "test", "preprod", "prod", "ci"], - help="the environment to run the tests against", -) -parser.add_argument( - "-f", - "--file", - dest="tests", - metavar="{file/dir}", - default="tests/", - help="test suite or folder of tests suites you wish to run", -) -parser.add_argument( - "-t", "--tags", dest="tags", nargs="?", metavar="{tag(s)}", help="specify tests you wish to run by tag" -) -parser.add_argument( - "-v", "--visual", dest="visual", action="store_true", help="display browser window that the tests run in" -) -parser.add_argument( - "--ci", dest="ci", action="store_true", help="specify that the test are running as part of the CI pipeline" -) -parser.add_argument( - "--chromedriver", - dest="chromedriver_version", - metavar="{version}", - help="specify which version of chromedriver to use", -) -parser.add_argument( - "--disable-teardown", - dest="disable_teardown", - help="disable tearing down of any test data after completion", - 
action="store_true", -) -parser.add_argument( - "--rerun-failed-tests", - dest="rerun_failed_tests", - action="store_true", - help="rerun individual failed tests and merge results into original run results", -) -parser.add_argument( - "--rerun-failed-suites", - dest="rerun_failed_suites", - action="store_true", - help="rerun failed test suites and merge results into original run results", -) -parser.add_argument("--rerun-attempts", dest="rerun_attempts", type=int, default=0, help="Number of rerun attempts") -parser.add_argument( - "--print-keywords", - dest="print_keywords", - action="store_true", - help="choose to print out keywords as they are started", -) -parser.add_argument( - "--enable-slack-notifications", - dest="enable_slack_notifications", - action="store_true", - help="enable Slack notifications to be sent for test reports", -) -parser.add_argument( - "--prompt-to-continue", - dest="prompt_to_continue", - action="store_true", - help="get prompted to continue with test execution upon a failure", -) -parser.add_argument("--fail-fast", dest="fail_fast", action="store_true", help="stop test execution on failure") -parser.add_argument( - "--custom-env", dest="custom_env", default=None, help="load a custom .env file (must be in ~/robot-tests directory)" -) -parser.add_argument( - "--debug", - dest="debug", - action="store_true", - help="get debug-level logging in report.html, including Python tracebacks", -) -""" -NOTE(mark): The admin and analyst passwords to access the Admin app are -stored in the CI pipeline as secret variables, which means they cannot be accessed as normal -environment variables, and instead must be passed as an argument to this script. 
-""" -parser.add_argument("--admin-pass", dest="admin_pass", default=None, help="manually specify the admin password") -parser.add_argument("--analyst-pass", dest="analyst_pass", default=None, help="manually specify the analyst password") -parser.add_argument( - "--expiredinvite-pass", - dest="expiredinvite_pass", - default=None, - help="manually specify the expiredinvite user password", -) -args = parser.parse_args() - -if args.custom_env: - load_dotenv(args.custom_env) -else: - load_dotenv(".env." + args.env) - - -required_env_vars = [ - "TIMEOUT", - "IMPLICIT_WAIT", - "PUBLIC_URL", - "ADMIN_URL", - "PUBLIC_AUTH_USER", - "PUBLIC_AUTH_PASSWORD", - "RELEASE_COMPLETE_WAIT", - "WAIT_MEDIUM", - "WAIT_LONG", - "WAIT_SMALL", - "FAIL_TEST_SUITES_FAST", - "IDENTITY_PROVIDER", - "WAIT_CACHE_EXPIRY", - "EXPIRED_INVITE_USER_EMAIL", - "PUBLISHER_FUNCTIONS_URL", -] - -for env_var in required_env_vars: - assert os.getenv(env_var) is not None, f"Environment variable {env_var} is not set" - -if args.admin_pass: - os.environ["ADMIN_PASSWORD"] = args.admin_pass - -if args.analyst_pass: - os.environ["ANALYST_PASSWORD"] = args.analyst_pass - -if args.expiredinvite_pass: - os.environ["EXPIRED_INVITE_USER_PASSWORD"] = args.expiredinvite_pass - -# Install chromedriver and add it to PATH -get_webdriver(args.chromedriver_version or None) - - -def admin_request(method, endpoint, body=None): - assert method and endpoint - assert os.getenv("ADMIN_URL") is not None - assert os.getenv("IDENTITY_LOCAL_STORAGE_ADMIN") is not None - - if method == "POST": - assert body is not None, "POST requests require a body" - - requests.sessions.HTTPAdapter(pool_connections=50, pool_maxsize=50, max_retries=3) - session = requests.Session() - - # To prevent InsecureRequestWarning - requests.packages.urllib3.disable_warnings() - - jwt_token = json.loads(os.getenv("IDENTITY_LOCAL_STORAGE_ADMIN"))["access_token"] - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {jwt_token}", - } - 
response = session.request( - method, url=f'{os.getenv("ADMIN_URL")}{endpoint}', headers=headers, stream=True, json=body, verify=False - ) - - if response.status_code in {401, 403}: - logger.info("Attempting re-authentication...") - - # Delete identify files and re-attempt to fetch them - setup_authentication(clear_existing=True) - jwt_token = json.loads(os.environ["IDENTITY_LOCAL_STORAGE_ADMIN"])["access_token"] - response = session.request( - method, - url=f'{os.getenv("ADMIN_URL")}{endpoint}', - headers={ - "Content-Type": "application/json", - "Authorization": f"Bearer {jwt_token}", - }, - stream=True, - json=body, - verify=False, - ) - - assert response.status_code not in {401, 403}, "Failed to reauthenticate." - - if response.status_code == 400 and response.text.find("SlugNotUnique") != -1: - raise Exception(f"SlugNotUnique for {body}") +def setup_python_path(): + # This is super awkward but we have to explicitly + # add the current directory to PYTHONPATH otherwise + # the subprocesses started by pabot will not be able + # to locate lib modules correctly for some reason. 
+ pythonpath = os.getenv("PYTHONPATH") + if pythonpath: + os.environ["PYTHONPATH"] += f":{str(current_dir)}" else: - assert response.status_code < 300, f"Admin request responded with {response.status_code} and {response.text}" - return response - - -def get_test_themes(): - return admin_request("GET", "/api/themes") - - -def create_test_theme(): - return admin_request("POST", "/api/themes", {"title": "Test theme", "summary": "Test theme summary"}) - - -def create_test_topic(run_id: str): - setup_authentication() - - if args.env in ["local", "dev"]: - get_themes_resp = get_test_themes() - test_theme_id = None - test_theme_name = "Test theme" - - for theme in get_themes_resp.json(): - if theme["title"] == test_theme_name: - test_theme_id = theme["id"] - break - if not test_theme_id: - create_theme_resp = create_test_theme() - test_theme_id = create_theme_resp.json()["id"] - - os.environ["TEST_THEME_NAME"] = test_theme_name - os.environ["TEST_THEME_ID"] = test_theme_id - - assert os.getenv("TEST_THEME_ID") is not None - - topic_name = f"UI test topic {run_id}" - resp = admin_request("POST", "/api/topics", {"title": topic_name, "themeId": os.getenv("TEST_THEME_ID")}) - - os.environ["TEST_TOPIC_NAME"] = topic_name - os.environ["TEST_TOPIC_ID"] = resp.json()["id"] - + os.environ["PYTHONPATH"] = str(current_dir) -def delete_test_topic(): - if os.getenv("TEST_TOPIC_ID") is not None: - admin_request("DELETE", f'/api/topics/{os.getenv("TEST_TOPIC_ID")}') +def install_chromedriver(chromedriver_version: str): + # Install chromedriver and add it to PATH + get_webdriver(chromedriver_version) -def setup_authentication(clear_existing=False): - # Don't need BAU user if running general_public tests - if "general_public" not in args.tests: - setup_auth_variables( - user="ADMIN", - email=os.getenv("ADMIN_EMAIL"), - password=os.getenv("ADMIN_PASSWORD"), - clear_existing=clear_existing, - identity_provider=os.getenv("IDENTITY_PROVIDER"), - ) - # Don't need analyst user if running 
admin/bau or admin_and_public/bau tests - if f"{os.sep}bau" not in args.tests: - setup_auth_variables( - user="ANALYST", - email=os.getenv("ANALYST_EMAIL"), - password=os.getenv("ANALYST_PASSWORD"), - clear_existing=clear_existing, - identity_provider=os.getenv("IDENTITY_PROVIDER"), - ) - - -def create_robot_arguments(rerunning_failed: bool) -> []: +def create_robot_arguments(arguments: argparse.Namespace, rerunning_failed: bool) -> []: robot_args = [ "--outputdir", - "test-results/", + f"{results_foldername}/", "--exclude", "Failing", "--exclude", @@ -333,36 +62,36 @@ def create_robot_arguments(rerunning_failed: bool) -> []: "VisualTesting", ] robot_args += ["-v", f"timeout:{os.getenv('TIMEOUT')}", "-v", f"implicit_wait:{os.getenv('IMPLICIT_WAIT')}"] - if args.fail_fast: + if arguments.fail_fast: robot_args += ["--exitonfailure"] - if args.tags: - robot_args += ["--include", args.tags] - if args.print_keywords: + if arguments.tags: + robot_args += ["--include", arguments.tags] + if arguments.print_keywords: robot_args += ["--listener", "listeners/KeywordListener.py"] - if args.ci: + if arguments.ci: robot_args += ["--xunit", "xunit"] # NOTE(mark): Ensure secrets aren't visible in CI logs/reports robot_args += ["--removekeywords", "name:operatingsystem.environment variable should be set"] robot_args += ["--removekeywords", "name:common.user goes to url"] # To hide basic auth credentials - if args.env == "local": + if arguments.env == "local": robot_args += ["--include", "Local"] robot_args += ["--exclude", "NotAgainstLocal"] # seed Azure storage emulator release files generator = ReleaseFilesGenerator() generator.create_public_release_files() generator.create_private_release_files() - if args.env == "dev": + if arguments.env == "dev": robot_args += ["--include", "Dev"] robot_args += ["--exclude", "NotAgainstDev"] - if args.env == "test": + if arguments.env == "test": robot_args += ["--include", "Test", "--exclude", "NotAgainstTest", "--exclude", "AltersData"] # fmt 
off - if args.env == "preprod": + if arguments.env == "preprod": robot_args += ["--include", "Preprod", "--exclude", "AltersData", "--exclude", "NotAgainstPreProd"] # fmt on - if args.env == "prod": + if arguments.env == "prod": robot_args += ["--include", "Prod", "--exclude", "AltersData", "--exclude", "NotAgainstProd"] - if args.visual: + if arguments.visual: robot_args += ["-v", "headless:0"] else: robot_args += ["-v", "headless:1"] @@ -370,19 +99,19 @@ def create_robot_arguments(rerunning_failed: bool) -> []: robot_args += ["-v", f"release_complete_wait:{os.getenv('RELEASE_COMPLETE_WAIT')}"] if os.getenv("FAIL_TEST_SUITES_FAST"): robot_args += ["-v", f"FAIL_TEST_SUITES_FAST:{os.getenv('FAIL_TEST_SUITES_FAST')}"] - if args.prompt_to_continue: + if arguments.prompt_to_continue: robot_args += ["-v", "prompt_to_continue_on_failure:1"] - if args.debug: + if arguments.debug: robot_args += ["--loglevel", "DEBUG"] - robot_args += ["-v", "browser:" + args.browser] + robot_args += ["-v", "browser:" + arguments.browser] # We want to add arguments on the first rerun attempt, but on subsequent attempts, we just want # to change rerunfailedsuites xml file we use if rerunning_failed: - robot_args += ["--rerunfailedsuites", f"test-results/output.xml", "--output", "rerun.xml"] + robot_args += ["--rerunfailedsuites", f"{results_foldername}/output.xml", "--output", "rerun.xml"] else: robot_args += ["--output", "output.xml"] - robot_args += [args.tests] + robot_args += [arguments.tests] return robot_args @@ -393,10 +122,6 @@ def get_failing_suites() -> []: return [] -if not os.path.exists("test-results/downloads"): - os.makedirs("test-results/downloads") - - def create_run_identifier(): # Add randomness to prevent multiple simultaneous run_tests.py generating the same run_identifier value random_str = "".join([random.choice(string.ascii_lowercase + string.digits) for n in range(6)]) @@ -406,14 +131,14 @@ def create_run_identifier(): def merge_test_reports(): merge_args = [ 
"--outputdir", - "test-results/", + f"{results_foldername}/", "-o", "output.xml", "--prerebotmodifier", "report-modifiers/CheckForAtLeastOnePassingRunPrerebotModifier.py", "--merge", - "test-results/output.xml", - "test-results/rerun.xml", + f"{results_foldername}/output.xml", + f"{results_foldername}/rerun.xml", ] robot_rebot_cli(merge_args, exit=False) @@ -435,67 +160,101 @@ def clear_files_before_test_run(rerunning_failures: bool): os.remove(pabot_suite_names_filename) -def run_tests(rerunning_failures: bool): - logger.info(f"Starting tests with RUN_IDENTIFIER: {run_identifier}") - if args.interp == "robot": - robot_run_cli(create_robot_arguments(rerunning_failures), exit=False) - elif args.interp == "pabot": - pabot_run_cli(create_robot_arguments(rerunning_failures)) +def execute_tests(arguments: argparse.Namespace, rerunning_failures: bool): + if arguments.interp == "robot": + robot_run_cli(create_robot_arguments(arguments, rerunning_failures), exit=False) + elif arguments.interp == "pabot": + pabot_run_cli(create_robot_arguments(arguments, rerunning_failures)) -test_run_index = -1 +def setup_user_authentication(tests: str): + if not tests or f"{os.sep}admin" in tests: + admin_api.setup_bau_authentication() + admin_api.setup_analyst_authentication() -try: - # Run tests - while args.rerun_attempts is None or test_run_index < args.rerun_attempts: - test_run_index += 1 - rerunning_failed_suites = args.rerun_failed_suites or test_run_index > 0 +def run(): + args = args_and_variables.initialise() - # Perform any cleanup before the test run. - clear_files_before_test_run(rerunning_failed_suites) + # If running all tests, or admin, admin_and_public or admin_and_public_2 suites, these + # change data on environments and require test themes, test topics and user authentication. + data_changing_tests = args.tests == f"tests{os.sep}" or f"{os.sep}admin" in args.tests - # Create a unique run identifier so that this test run's data will be unique. 
- run_identifier = create_run_identifier() - os.environ["RUN_IDENTIFIER"] = run_identifier + if data_changing_tests and args.env not in ["local", "dev"]: + raise Exception(f"Cannot run tests that change data on environment {args.env}") - # Create a Test Topic under which all of this test run's data will be created. - needs_test_topic = args.tests and "general_public" not in args.tests + install_chromedriver(args.chromedriver_version) - if needs_test_topic: - create_test_topic(run_identifier) + if data_changing_tests: + setup_user_authentication(args.tests) - # Run the tests. - run_tests(rerunning_failed_suites) + test_run_index = -1 - # If we're rerunning failures, merge the former run's results with this run's - # results. - if rerunning_failed_suites: - merge_test_reports() + logger.info(f"Running Robot tests with {args.rerun_attempts} rerun attempts for any failing suites") - # Tear down any data created by this test run unless we've disabled teardown. - if needs_test_topic and not args.disable_teardown: - logger.info("Tearing down tests...") - delete_test_topic() + try: + # Run tests + while args.rerun_attempts is None or test_run_index < args.rerun_attempts: + test_run_index += 1 - # If all tests passed, return early. - if not get_failing_suites(): - break + rerunning_failed_suites = args.rerun_failed_suites or test_run_index > 0 -finally: - logger.info(f"Log available at: file://{os.getcwd()}{os.sep}test-results{os.sep}log.html") - logger.info(f"Report available at: file://{os.getcwd()}{os.sep}test-results{os.sep}report.html") + # Perform any cleanup before the test run. + clear_files_before_test_run(rerunning_failed_suites) - logger.info(f"Number of test runs: {test_run_index + 1}") + if not Path(f"{results_foldername}/downloads").exists(): + os.makedirs(f"{results_foldername}/downloads") - failing_suites = get_failing_suites() + # Create a unique run identifier so that this test run's data will be unique. 
+ run_identifier = create_run_identifier() + os.environ["RUN_IDENTIFIER"] = run_identifier - if failing_suites: - logger.info(f"Failing suites:") - [logger.info(r" * file://" + suite) for suite in failing_suites] - else: - logger.info("\nAll tests passed!") + # Create a Test Topic under which all of this test run's data will be created. + if data_changing_tests: + admin_api.create_test_topic(run_identifier) + + # Run the tests. + logger.info(f"Performing test run {test_run_index + 1} with unique identifier {run_identifier}") + execute_tests(args, rerunning_failed_suites) + + # If we're rerunning failures, merge the former run's results with this run's + # results. + if rerunning_failed_suites: + logger.info(f"Merging results from test run {test_run_index + 1} with previous run's report") + merge_test_reports() + + # Tear down any data created by this test run unless we've disabled teardown. + if data_changing_tests and not args.disable_teardown: + logger.info("Tearing down test data...") + admin_api.delete_test_topic() + + # If all tests passed, return early. 
+            if not get_failing_suites(): +                break + +    finally: +        logger.info(f"Log available at: file://{os.getcwd()}{os.sep}{results_foldername}{os.sep}log.html") +        logger.info(f"Report available at: file://{os.getcwd()}{os.sep}{results_foldername}{os.sep}report.html") + +        logger.info(f"Number of test runs: {test_run_index + 1}") + +        failing_suites = get_failing_suites() + +        if failing_suites: +            logger.info(f"Failing suites:") +            [logger.info(r"   * file://" + suite) for suite in failing_suites] +        else: +            logger.info("\nAll tests passed!") + +        if args.enable_slack_notifications: +            slack_service = SlackService() +            slack_service.send_test_report(args.env, args.tests, failing_suites, test_run_index) + + +current_dir = Path(__file__).absolute().parent +os.chdir(current_dir) + +setup_python_path() -    if args.enable_slack_notifications: -        slack_service = SlackService() -        slack_service.send_test_report(args.env, args.tests, failing_suites, test_run_index) +# Run the tests! +run() From 86956aa4e1d886a39219bf1240d0844a43187d45 Mon Sep 17 00:00:00 2001 From: Duncan Watson Date: Fri, 24 Nov 2023 14:01:42 +0000 Subject: [PATCH 3/5] EES-4117 - updating Slack UI test notifications to correct markdown issue in failed suites listing, and increasing number of rerun attempts in pipeline to establish better stability.
--- azure-pipelines-ui-tests.dfe.yml | 8 ++++---- tests/robot-tests/run_tests.py | 9 --------- tests/robot-tests/tests/libs/slack.py | 4 ++-- 3 files changed, 6 insertions(+), 15 deletions(-) diff --git a/azure-pipelines-ui-tests.dfe.yml b/azure-pipelines-ui-tests.dfe.yml index 8c617990c29..ff0ae6e0164 100644 --- a/azure-pipelines-ui-tests.dfe.yml +++ b/azure-pipelines-ui-tests.dfe.yml @@ -48,7 +48,7 @@ jobs: displayName: Public UI tests inputs: scriptPath: tests/robot-tests/scripts/run_tests_pipeline.py - arguments: --admin-pass "test" --analyst-pass "test" --expiredinvite-pass "test" --env "dev" --file "tests/general_public/" --processes 2 --rerun-attempts 3 + arguments: --admin-pass "test" --analyst-pass "test" --expiredinvite-pass "test" --env "dev" --file "tests/general_public/" --processes 2 --rerun-attempts 5 workingDirectory: tests/robot-tests env: SLACK_BOT_TOKEN: $(ees-test-SLACK-BOT-TOKEN) @@ -101,7 +101,7 @@ jobs: condition: succeededOrFailed() inputs: scriptPath: tests/robot-tests/scripts/run_tests_pipeline.py - arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin_and_public_2" --processes 2 --rerun-attempts 3 + arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin_and_public_2" --processes 2 --rerun-attempts 5 # The magic incantation '"$(variable)"'was added by Mark to resolve an issue with Analyst password that contained ampersands. 
workingDirectory: tests/robot-tests env: @@ -154,7 +154,7 @@ jobs: condition: succeededOrFailed() inputs: scriptPath: tests/robot-tests/scripts/run_tests_pipeline.py - arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin" --processes 2 --rerun-attempts 3 + arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin" --processes 2 --rerun-attempts 5 workingDirectory: tests/robot-tests env: SLACK_BOT_TOKEN: $(ees-test-SLACK-BOT-TOKEN) @@ -207,7 +207,7 @@ jobs: displayName: Admin public UI tests inputs: scriptPath: tests/robot-tests/scripts/run_tests_pipeline.py - arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin_and_public" --processes 2 --rerun-attempts 3 + arguments: --admin-pass '"$(ees-test-ADMIN-PASSWORD)"' --analyst-pass '"$(ees-test-ANALYST-PASSWORD)"' --expiredinvite-pass '"$(ees-test-expiredinvite-password)"' --env "dev" --file "tests/admin_and_public" --processes 2 --rerun-attempts 5 workingDirectory: tests/robot-tests env: SLACK_BOT_TOKEN: $(ees-test-SLACK-BOT-TOKEN) diff --git a/tests/robot-tests/run_tests.py b/tests/robot-tests/run_tests.py index 319d5fcbbf1..1373bae0b3e 100755 --- a/tests/robot-tests/run_tests.py +++ b/tests/robot-tests/run_tests.py @@ -167,12 +167,6 @@ def execute_tests(arguments: argparse.Namespace, rerunning_failures: bool): pabot_run_cli(create_robot_arguments(arguments, rerunning_failures)) -def setup_user_authentication(tests: str): - if not tests or f"{os.sep}admin" in tests: - admin_api.setup_bau_authentication() - admin_api.setup_analyst_authentication() - - def run(): args = args_and_variables.initialise() @@ 
-185,9 +179,6 @@ def run(): install_chromedriver(args.chromedriver_version) - if data_changing_tests: - setup_user_authentication(args.tests) - test_run_index = -1 logger.info(f"Running Robot tests with {args.rerun_attempts} rerun attempts for any failing suites") diff --git a/tests/robot-tests/tests/libs/slack.py b/tests/robot-tests/tests/libs/slack.py index bacbd188c80..95a2aa55a8a 100644 --- a/tests/robot-tests/tests/libs/slack.py +++ b/tests/robot-tests/tests/libs/slack.py @@ -38,12 +38,12 @@ def _build_attachments(self, env: str, suites_ran: str, suites_failed: [], run_i failed_test_suites_field = ({},) if suites_failed: - failed_test_suites_field = {"title": "Failed test suites", "value": "\n * ".join(suites_failed)} + failed_test_suites_field = {"title": "Failed test suites", "value": "\n".join(suites_failed)} return [ { "pretext": "All results", "color": "danger" if suites_failed else "good", - "mrkdwn_in": ["pretext", "Failed test suites"], + "mrkdwn_in": ["pretext"], "fields": [ {"title": "Environment", "value": env}, {"title": "Suite", "value": suites_ran.replace("tests/", "")}, From 5d36371c9c8fad5036e6dc4fd1e1a315ce578203 Mon Sep 17 00:00:00 2001 From: Duncan Watson Date: Fri, 24 Nov 2023 16:40:45 +0000 Subject: [PATCH 4/5] EES-4117 - got all Related Dashboard interactions to use specific autosaving text block keywords, to avoid issues with our FormEditor.tsx onBlur() delay. Added additional descriptions to keywords and test steps around autosaving blocks. 
--- .../tests/admin/bau/release_status.robot | 3 +-- .../admin_and_public/bau/publish_content.robot | 11 ++++++----- .../tests/libs/admin/manage-content-common.robot | 16 +++++++++++++++- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/tests/robot-tests/tests/admin/bau/release_status.robot b/tests/robot-tests/tests/admin/bau/release_status.robot index 8ef7454db38..52b0d19a165 100644 --- a/tests/robot-tests/tests/admin/bau/release_status.robot +++ b/tests/robot-tests/tests/admin/bau/release_status.robot @@ -114,8 +114,7 @@ Add text block with content to Test section two Add content to text block in Related dashboards section user opens accordion section View related dashboard(s) id:data-accordion - user adds content to autosaving text block id:related-dashboards-content - ... Related dashboards test text + user adds content to related dashboards text block Related dashboards test text Validate checklist errors and warnings after adding content to text blocks user edits release status diff --git a/tests/robot-tests/tests/admin_and_public/bau/publish_content.robot b/tests/robot-tests/tests/admin_and_public/bau/publish_content.robot index 0e7e13c0c86..956bf72b268 100644 --- a/tests/robot-tests/tests/admin_and_public/bau/publish_content.robot +++ b/tests/robot-tests/tests/admin_and_public/bau/publish_content.robot @@ -6,6 +6,7 @@ Resource ../../libs/public-common.robot Suite Setup user signs in as bau1 Suite Teardown user closes the browser +Test Setup fail test fast if required Force Tags Admin Local Dev AltersData @@ -36,11 +37,7 @@ Add Related dashboards section to release content user waits until page contains accordion section View related dashboard(s) user opens accordion section View related dashboard(s) id:data-accordion - user starts editing text block id:related-dashboards-content - user presses keys Related dashboards test text - user clicks button Save & close - user waits until page finishes loading - user waits until element contains 
id:related-dashboards-content Edit block + user adds content to related dashboards text block Related dashboards test text Add an accordion section to release content user clicks button Add new section @@ -58,6 +55,10 @@ Add text block with link to absence glossary entry to accordion section user clicks element id:glossarySearch-option-0 user clicks button Insert user waits until modal is not visible Insert glossary link + # Note that the way that interacting with the Glossary Search popup affects the user's focus means + # that the usual behaviour of the autosaving CK Editor text blocks does not lend itself to using + # the "user saves autosaving text block" keyword here - therefore we're OK just to use a standard + # button click to save here. user clicks button Save & close user waits until page finishes loading user waits until parent contains button ${block} Edit block diff --git a/tests/robot-tests/tests/libs/admin/manage-content-common.robot b/tests/robot-tests/tests/libs/admin/manage-content-common.robot index 1921d80463d..fda5e26e593 100644 --- a/tests/robot-tests/tests/libs/admin/manage-content-common.robot +++ b/tests/robot-tests/tests/libs/admin/manage-content-common.robot @@ -313,6 +313,10 @@ user adds content to headlines text block [Arguments] ${content} user adds content to autosaving text block id:releaseHeadlines ${content} +user adds content to related dashboards text block + [Arguments] ${content} + user adds content to autosaving text block id:related-dashboards-content ${content} + user adds content to accordion section text block [Arguments] ... ${section_name} @@ -426,12 +430,22 @@ user saves autosaving text block # EES-3501 - moving focus out of the autosave textarea to give the onBlur() with the 100ms timeout in # FormEditor.tsx a chance to process prior to processing the form submission when we click "Save & close". 
+ # + # A problem would occur if a user was able to click the "Save & close" button in the time between us + # losing focus from the text block (which triggers the delayed onBlur()) and the user clicking the + # "Save & close" button. If they were able to do that within the 100ms delay between the focus leaving the + # text block and the delayed onBlur() occurring, it would leave the page saying "1 content block has + # unsaved changes" and with an internal state that indicates we still have dirty text blocks, despite + # actually not having any unsaved changes. + # + # Given this, we wait for 500ms in order to give the onBlur() a chance to execute. user presses keys TAB - sleep 0.2 + sleep 0.5 user clicks button Save & close ${parent} user waits until page finishes loading user waits until parent does not contain button ${parent} Save & close %{WAIT_SMALL} + user waits until element contains ${parent} Edit block %{WAIT_SMALL} user checks accordion section text block contains [Arguments] From 2085598ee7d30577852eddbecc8a9c092fa09415 Mon Sep 17 00:00:00 2001 From: Duncan Watson Date: Fri, 24 Nov 2023 17:24:34 +0000 Subject: [PATCH 5/5] EES-4117 - slight change to refetching selenium elements behaviour to make it a bit clearer as to why we need to reinitialise the selenium elements prior to each test run --- tests/robot-tests/run_tests.py | 6 +++++ .../tests/libs/selenium_elements.py | 26 ++++++++++++++++--- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/tests/robot-tests/run_tests.py b/tests/robot-tests/run_tests.py index 1373bae0b3e..fa4d9e8c0d0 100755 --- a/tests/robot-tests/run_tests.py +++ b/tests/robot-tests/run_tests.py @@ -16,6 +16,7 @@ import admin_api as admin_api import args_and_variables as args_and_variables +import tests.libs.selenium_elements as selenium_elements from pabot.pabot import main_program as pabot_run_cli from robot import rebot_cli as robot_rebot_cli from robot import run_cli as robot_run_cli @@ -188,6 +189,11 @@ def run(): 
while args.rerun_attempts is None or test_run_index < args.rerun_attempts: test_run_index += 1 +        # Ensure all SeleniumLibrary elements and keywords are updated to use a brand new +        # Selenium instance for every test (re)run. +        if test_run_index > 0: +            selenium_elements.clear_instances() +         rerunning_failed_suites = args.rerun_failed_suites or test_run_index > 0          # Perform any cleanup before the test run. diff --git a/tests/robot-tests/tests/libs/selenium_elements.py b/tests/robot-tests/tests/libs/selenium_elements.py index 17740136ab3..ecc9fc531c4 100644 --- a/tests/robot-tests/tests/libs/selenium_elements.py +++ b/tests/robot-tests/tests/libs/selenium_elements.py @@ -1,14 +1,34 @@ from robot.libraries.BuiltIn import BuiltIn from SeleniumLibrary.keywords.waiting import WaitingKeywords +sl_instance = None +element_finder_instance = None +waiting_instance = None + def sl(): -    return BuiltIn().get_library_instance("SeleniumLibrary") +    global sl_instance +    if sl_instance is None: +        sl_instance = BuiltIn().get_library_instance("SeleniumLibrary") +    return sl_instance def element_finder(): -    return sl()._element_finder +    global element_finder_instance +    if element_finder_instance is None: +        element_finder_instance = sl()._element_finder +    return element_finder_instance def waiting(): -    return WaitingKeywords(sl()) +    global waiting_instance +    if waiting_instance is None: +        waiting_instance = WaitingKeywords(sl()) +    return waiting_instance + + +def clear_instances(): +    global sl_instance, element_finder_instance, waiting_instance +    sl_instance = None +    element_finder_instance = None +    waiting_instance = None