diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 186d803a3..c0a538ff1 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -30,7 +30,6 @@ jobs:
   release-doc:
     name: "Publish documentation for release ${{github.ref_name}}"
     runs-on: ubuntu-latest
-    needs: [release-coverage]
     steps:
       - uses: actions/checkout@v4
         with:
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f12dda006..f54fcb50c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -91,6 +91,7 @@ repos:
         name: Check typing with mypy
         args:
           - --config-file=pyproject.toml
+          - --explicit-package-bases
         additional_dependencies:
           - anta[cli]
           - types-PyYAML
diff --git a/anta/cli/debug/utils.py b/anta/cli/debug/utils.py
index c8ead5a5c..ea9680a2e 100644
--- a/anta/cli/debug/utils.py
+++ b/anta/cli/debug/utils.py
@@ -42,13 +42,11 @@ def debug_options(f: Callable[..., Any]) -> Callable[..., Any]:
     @functools.wraps(f)
     def wrapper(
         ctx: click.Context,
-        *args: tuple[Any],
+        *args: Any,  # noqa: ANN401
         inventory: AntaInventory,
         device: str,
-        **kwargs: Any,
-    ) -> Any:
-        # TODO: @gmuloc - tags come from context https://github.com/aristanetworks/anta/issues/584
-        # ruff: noqa: ARG001
+        **kwargs: Any,  # noqa: ANN401
+    ) -> Callable[..., Any]:
         if (d := inventory.get(device)) is None:
             logger.error("Device '%s' does not exist in Inventory", device)
             ctx.exit(ExitCode.USAGE_ERROR)
diff --git a/anta/cli/get/commands.py b/anta/cli/get/commands.py
index e34be2cc9..f6d4331be 100644
--- a/anta/cli/get/commands.py
+++ b/anta/cli/get/commands.py
@@ -7,10 +7,9 @@ from __future__ import annotations
 
 import asyncio
-import json
 import logging
 from pathlib import Path
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING
 
 import click
 import requests
@@ -115,8 +114,6 @@ def from_ansible(ctx: click.Context, output: Path, ansible_group: str, ansible_i
 @click.option("--connected/--not-connected", help="Display inventory after connection has been created", default=False, required=False)
 def inventory(inventory: AntaInventory, tags: set[str] | None, *, connected: bool) -> None:
     """Show inventory loaded in ANTA."""
-    # TODO: @gmuloc - tags come from context - we cannot have everything..
-    # ruff: noqa: ARG001
     logger.debug("Requesting devices for tags: %s", tags)
     console.print("Current inventory content is:", style="white on blue")
@@ -129,13 +126,13 @@ def inventory(inventory: AntaInventory, tags: set[str] | None, *, connected: boo
 
 @click.command
 @inventory_options
-def tags(inventory: AntaInventory, **kwargs: Any) -> None:
+def tags(inventory: AntaInventory, tags: set[str] | None) -> None:  # noqa: ARG001
     """Get list of configured tags in user inventory."""
-    tags: set[str] = set()
+    t: set[str] = set()
     for device in inventory.values():
-        tags.update(device.tags)
-    console.print("Tags found:")
-    console.print_json(json.dumps(sorted(tags), indent=2))
+        t.update(device.tags)
+    console.print("Tags defined in inventory:")
+    console.print_json(data=sorted(t), indent=2)
 
 
 @click.command
diff --git a/anta/cli/get/utils.py b/anta/cli/get/utils.py
index 42fb4d89b..32965eb41 100644
--- a/anta/cli/get/utils.py
+++ b/anta/cli/get/utils.py
@@ -59,11 +59,11 @@ def inventory_output_options(f: Callable[..., Any]) -> Callable[..., Any]:
     @functools.wraps(f)
     def wrapper(
         ctx: click.Context,
-        *args: tuple[Any],
+        *args: Any,  # noqa: ANN401
         output: Path,
         overwrite: bool,
-        **kwargs: dict[str, Any],
-    ) -> Any:
+        **kwargs: Any,  # noqa: ANN401
+    ) -> Callable[..., Any]:
         # Boolean to check if the file is empty
         output_is_not_empty = output.exists() and output.stat().st_size != 0
         # Check overwrite when file is not empty
diff --git a/anta/cli/nrfu/commands.py b/anta/cli/nrfu/commands.py
index d1a72a01f..eed5b2d64 100644
--- a/anta/cli/nrfu/commands.py
+++ b/anta/cli/nrfu/commands.py
@@ -27,10 +27,19 @@
     help="Group result by test or device.",
     required=False,
 )
-def table(ctx: click.Context, group_by: Literal["device", "test"] | None) -> None:
+@click.option(
+    "--expand-atomic",
+    "-x",
+    default=False,
+    show_envvar=True,
+    is_flag=True,
+    show_default=True,
+    help="Flag to indicate if atomic results should be rendered",
+)
+def table(ctx: click.Context, group_by: Literal["device", "test"] | None, expand_atomic: bool) -> None:
     """ANTA command to check network state with table results."""
     run_tests(ctx)
-    print_table(ctx, group_by=group_by)
+    print_table(ctx, expand_atomic, group_by)
     exit_with_code(ctx)
 
 
@@ -53,10 +62,19 @@ def json(ctx: click.Context, output: pathlib.Path | None) -> None:
 
 @click.command()
 @click.pass_context
-def text(ctx: click.Context) -> None:
+@click.option(
+    "--expand-atomic",
+    "-x",
+    default=False,
+    show_envvar=True,
+    is_flag=True,
+    show_default=True,
+    help="Flag to indicate if atomic results should be rendered",
+)
+def text(ctx: click.Context, expand_atomic: bool) -> None:
     """ANTA command to check network state with text results."""
     run_tests(ctx)
-    print_text(ctx)
+    print_text(ctx, expand_atomic)
     exit_with_code(ctx)
 
 
diff --git a/anta/cli/nrfu/utils.py b/anta/cli/nrfu/utils.py
index 60c0d2976..776a6ad63 100644
--- a/anta/cli/nrfu/utils.py
+++ b/anta/cli/nrfu/utils.py
@@ -80,7 +80,7 @@ def print_settings(
     console.print()
 
 
-def print_table(ctx: click.Context, group_by: Literal["device", "test"] | None = None) -> None:
+def print_table(ctx: click.Context, expand_atomic: bool, group_by: Literal["device", "test"] | None) -> None:
     """Print result in a table."""
     reporter = ReportTable()
     console.print()
@@ -90,8 +90,10 @@
         console.print(reporter.report_summary_devices(results))
     elif group_by == "test":
         console.print(reporter.report_summary_tests(results))
+    elif expand_atomic:
+        console.print(reporter.report_expanded(results))
     else:
-        console.print(reporter.report_all(results))
+        console.print(reporter.report(results))
 
 
 def print_json(ctx: click.Context, output: pathlib.Path | None = None) -> None:
@@ -112,16 +114,18 @@ def print_json(ctx: click.Context, output: pathlib.Path | None = None) -> None:
     ctx.exit(ExitCode.USAGE_ERROR)
 
 
-def print_text(ctx: click.Context) -> None:
+def print_text(ctx: click.Context, expand_atomic: bool) -> None:
     """Print results as simple text."""
     console.print()
-    for test in _get_result_manager(ctx).results:
-        if len(test.messages) <= 1:
-            message = test.messages[0] if len(test.messages) == 1 else ""
-            console.print(f"{test.name} :: {test.test} :: [{test.result}]{test.result.upper()}[/{test.result}]({message})", highlight=False)
-        else:  # len(test.messages) > 1
-            console.print(f"{test.name} :: {test.test} :: [{test.result}]{test.result.upper()}[/{test.result}]", highlight=False)
-            console.print("\n".join(f" {message}" for message in test.messages), highlight=False)
+    for result in _get_result_manager(ctx).results:
+        console.print(f"{result.name} :: {result.test} :: [{result.result}]{result.result.upper()}[/{result.result}]", highlight=False)
+        if result.messages and not expand_atomic:
+            console.print("\n".join(f" {message}" for message in result.messages), highlight=False)
+        if expand_atomic:
+            for r in result.atomic_results:
+                console.print(f" {r.description} :: [{r.result}]{r.result.upper()}[/{r.result}]", highlight=False)
+                if r.messages:
+                    console.print("\n".join(f" {message}" for message in r.messages), highlight=False)
 
 
 def print_jinja(results: ResultManager, template: pathlib.Path, output: pathlib.Path | None = None) -> None:
diff --git a/anta/cli/utils.py b/anta/cli/utils.py
index e740f8c56..852b26e34 100644
--- a/anta/cli/utils.py
+++ b/anta/cli/utils.py
@@ -90,8 +90,8 @@ class AliasedGroup(click.Group):
     From Click documentation.
""" - def get_command(self, ctx: click.Context, cmd_name: str) -> Any: - """Todo: document code.""" + def get_command(self, ctx: click.Context, cmd_name: str) -> click.Command | None: + """Try to find a command name based on a prefix.""" rv = click.Group.get_command(self, ctx, cmd_name) if rv is not None: return rv @@ -103,12 +103,11 @@ def get_command(self, ctx: click.Context, cmd_name: str) -> Any: ctx.fail(f"Too many matches: {', '.join(sorted(matches))}") return None - def resolve_command(self, ctx: click.Context, args: Any) -> Any: - """Todo: document code.""" - # always return the full command name + def resolve_command(self, ctx: click.Context, args: list[str]) -> tuple[str | None, click.Command | None, list[str]]: + """Return the full command name as first tuple element.""" _, cmd, args = super().resolve_command(ctx, args) if not cmd: - return None, None, None + return None, None, [] return cmd.name, cmd, args @@ -194,7 +193,7 @@ def core_options(f: Callable[..., Any]) -> Callable[..., Any]: @functools.wraps(f) def wrapper( ctx: click.Context, - *args: tuple[Any], + *args: Any, # noqa: ANN401 inventory: Path, username: str, password: str | None, @@ -204,8 +203,8 @@ def wrapper( timeout: float, insecure: bool, disable_cache: bool, - **kwargs: dict[str, Any], - ) -> Any: + **kwargs: Any, # noqa: ANN401 + ) -> Callable[..., Any]: # If help is invoke somewhere, do not parse inventory if ctx.obj.get("_anta_help"): return f(*args, inventory=None, **kwargs) @@ -266,10 +265,10 @@ def inventory_options(f: Callable[..., Any]) -> Callable[..., Any]: @functools.wraps(f) def wrapper( ctx: click.Context, - *args: tuple[Any], + *args: Any, # noqa: ANN401 tags: set[str] | None, - **kwargs: dict[str, Any], - ) -> Any: + **kwargs: Any, # noqa: ANN401 + ) -> Callable[..., Any]: # If help is invoke somewhere, do not parse inventory if ctx.obj.get("_anta_help"): return f(*args, tags=tags, **kwargs) @@ -308,11 +307,11 @@ def catalog_options(f: Callable[..., Any]) -> Callable[..., Any]: @functools.wraps(f) def wrapper( ctx: click.Context, - *args: tuple[Any], + *args: Any, # noqa: ANN401 catalog: Path, catalog_format: str, - **kwargs: dict[str, Any], - ) -> Any: + **kwargs: Any, # noqa: ANN401 + ) -> Callable[..., Any]: # If help is invoke somewhere, do not parse catalog if ctx.obj.get("_anta_help"): return f(*args, catalog=None, **kwargs) diff --git a/anta/constants.py b/anta/constants.py index ae131dd1a..28ab947f7 100644 --- a/anta/constants.py +++ b/anta/constants.py @@ -43,5 +43,6 @@ r".* does not support IP", r"IS-IS (.*) is disabled because: .*", r"No source interface .*", + r"There seem to be no power supplies connected.", ] """List of known EOS errors that should set a test status to 'failure' with the error message.""" diff --git a/anta/input_models/connectivity.py b/anta/input_models/connectivity.py index 53581ea3c..0ebb91f09 100644 --- a/anta/input_models/connectivity.py +++ b/anta/input_models/connectivity.py @@ -18,6 +18,8 @@ class Host(BaseModel): """Model for a remote host to ping.""" model_config = ConfigDict(extra="forbid") + description: str | None = None + """Description of the remote destination.""" destination: IPv4Address """IPv4 address to ping.""" source: IPv4Address | Interface @@ -32,15 +34,8 @@ class Host(BaseModel): """Enable do not fragment bit in IP header. Defaults to False.""" def __str__(self) -> str: - """Return a human-readable string representation of the Host for reporting. 
-
-        Examples
-        --------
-        Host 10.1.1.1 (src: 10.2.2.2, vrf: mgmt, size: 100B, repeat: 2)
-
-        """
-        df_status = ", df-bit: enabled" if self.df_bit else ""
-        return f"Host {self.destination} (src: {self.source}, vrf: {self.vrf}, size: {self.size}B, repeat: {self.repeat}{df_status})"
+        """Return a human-readable string representation of the Host for reporting."""
+        return f"Destination {self.destination}{f' ({self.description})' if self.description is not None else ''} from {self.source} in VRF {self.vrf}"
 
 
 class LLDPNeighbor(BaseModel):
diff --git a/anta/inventory/__init__.py b/anta/inventory/__init__.py
index f98c42f29..7a9b4ec40 100644
--- a/anta/inventory/__init__.py
+++ b/anta/inventory/__init__.py
@@ -60,7 +60,7 @@ def _update_disable_cache(kwargs: dict[str, Any], *, inventory_disable_cache: bo
 def _parse_hosts(
     inventory_input: AntaInventoryInput,
     inventory: AntaInventory,
-    **kwargs: dict[str, Any],
+    **kwargs: Any,  # noqa: ANN401
 ) -> None:
     """Parse the host section of an AntaInventoryInput and add the devices to the inventory.
@@ -92,7 +92,7 @@ def _parse_networks(
     inventory_input: AntaInventoryInput,
     inventory: AntaInventory,
-    **kwargs: dict[str, Any],
+    **kwargs: Any,  # noqa: ANN401
 ) -> None:
     """Parse the network section of an AntaInventoryInput and add the devices to the inventory.
@@ -129,7 +129,7 @@ def _parse_ranges(
     inventory_input: AntaInventoryInput,
     inventory: AntaInventory,
-    **kwargs: dict[str, Any],
+    **kwargs: Any,  # noqa: ANN401
 ) -> None:
     """Parse the range section of an AntaInventoryInput and add the devices to the inventory.
diff --git a/anta/models.py b/anta/models.py
index 71f33ebd9..8d2789716 100644
--- a/anta/models.py
+++ b/anta/models.py
@@ -19,7 +19,7 @@
 from anta.constants import KNOWN_EOS_ERRORS
 from anta.custom_types import REGEXP_EOS_BLACKLIST_CMDS, Revision
 from anta.logger import anta_log_exception, exc_to_str
-from anta.result_manager.models import AntaTestStatus, TestResult
+from anta.result_manager.models import TestResult
 
 if TYPE_CHECKING:
     from collections.abc import Coroutine
@@ -448,15 +448,16 @@ def __init__(
         self.device: AntaDevice = device
         self.inputs: AntaTest.Input
         self.instance_commands: list[AntaCommand] = []
-        self.result: TestResult = TestResult(
-            name=device.name,
-            test=self.name,
-            categories=self.categories,
-            description=self.description,
-        )
+        self.result: TestResult = TestResult(name=device.name, test=self.name, categories=self.categories, description=self.description)
         self._init_inputs(inputs)
-        if self.result.result == AntaTestStatus.UNSET:
+        if hasattr(self, "inputs"):
             self._init_commands(eos_data)
+            if res_ow := self.inputs.result_overwrite:
+                if res_ow.categories:
+                    self.result.categories = res_ow.categories
+                if res_ow.description:
+                    self.result.description = res_ow.description
+                self.result.custom_field = res_ow.custom_field
 
     def _init_inputs(self, inputs: dict[str, Any] | AntaTest.Input | None) -> None:
         """Instantiate the `inputs` instance attribute with an `AntaTest.Input` instance to validate test inputs using the model.
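For context, the `result_overwrite` handling moved above maps user-supplied overrides onto the freshly created `TestResult`, and is now applied only when input validation succeeded (the `inputs` attribute exists). A minimal sketch of how such an input interacts with the resulting `TestResult` fields, based solely on the `ResultOverwrite` fields visible in this diff (`VerifyUptime`, the `minimum` input, and `device` are illustrative stand-ins):

``` python
# Illustrative sketch only, not part of the patch: values are hypothetical.
inputs = {
    "minimum": 666,  # a regular test-specific input
    "result_overwrite": {
        "categories": ["my-category"],            # replaces result.categories
        "description": "my custom description",   # replaces result.description
        "custom_field": "TICKET-1234",            # stored on result.custom_field
    },
}
test = VerifyUptime(device, inputs=inputs)  # device: an existing AntaDevice
assert test.result.categories == ["my-category"]
assert test.result.custom_field == "TICKET-1234"
```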
@@ -477,12 +478,7 @@ def _init_inputs(self, inputs: dict[str, Any] | AntaTest.Input | None) -> None:
             self.logger.error(message)
             self.result.is_error(message=message)
             return
-        if res_ow := self.inputs.result_overwrite:
-            if res_ow.categories:
-                self.result.categories = res_ow.categories
-            if res_ow.description:
-                self.result.description = res_ow.description
-            self.result.custom_field = res_ow.custom_field
+        self.result.inputs = self.inputs
 
     def _init_commands(self, eos_data: list[dict[Any, Any] | str] | None) -> None:
         """Instantiate the `instance_commands` instance attribute from the `commands` class attribute.
@@ -615,7 +611,7 @@ def anta_test(function: F) -> Callable[..., Coroutine[Any, Any, TestResult]]:
     async def wrapper(
         self: AntaTest,
         eos_data: list[dict[Any, Any] | str] | None = None,
-        **kwargs: dict[str, Any],
+        **kwargs: Any,  # noqa: ANN401
     ) -> TestResult:
         """Inner function for the anta_test decorator.
diff --git a/anta/reporter/__init__.py b/anta/reporter/__init__.py
index 696a499a6..07ebe8cdc 100644
--- a/anta/reporter/__init__.py
+++ b/anta/reporter/__init__.py
@@ -7,20 +7,23 @@
 from __future__ import annotations
 
 import logging
+import math
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any
 
 from jinja2 import Template
 from rich.table import Table
+from yaml import safe_dump, safe_load
 
 from anta import RICH_COLOR_PALETTE, RICH_COLOR_THEME
+from anta.result_manager.models import AtomicTestResult, TestResult
 from anta.tools import convert_categories
 
 if TYPE_CHECKING:
     import pathlib
 
     from anta.result_manager import ResultManager
-    from anta.result_manager.models import AntaTestStatus, TestResult
+    from anta.result_manager.models import AntaTestStatus
 
 logger = logging.getLogger(__name__)
@@ -102,35 +105,88 @@ def _color_result(self, status: AntaTestStatus) -> str:
         color = RICH_COLOR_THEME.get(str(status), "")
         return f"[{color}]{status}" if color != "" else str(status)
 
-    def report_all(self, manager: ResultManager, title: str = "All tests results") -> Table:
-        """Create a table report with all tests for one or all devices.
+    TITLE_ALL = "All tests results"
 
-        Create table with full output: Device | Test Name | Test Status | Message(s) | Test description | Test category
+    def report_expanded(self, manager: ResultManager) -> Table:
+        """Create a table report with all tests.
+
+        Create table with columns: Category | Test | Device | Description | Status | Message(s) | Inputs
 
         Parameters
         ----------
         manager
             A ResultManager instance.
-        title
-            Title for the report. Defaults to 'All tests results'.
 
         Returns
         -------
         Table
             A fully populated rich `Table`.
""" - table = Table(title=title, show_lines=True) - headers = ["Device", "Test Name", "Test Status", "Message(s)", "Test description", "Test category"] + table = Table(title=ReportTable.TITLE_ALL, show_lines=True) + headers = ["Category", "Test", "Device", "Description", "Status", "Message(s)", "Inputs"] table = self._build_headers(headers=headers, table=table) - def add_line(result: TestResult) -> None: + def add_line(result: TestResult | AtomicTestResult, name: str | None = None) -> None: + categories = device = test = None + if isinstance(result, TestResult): + categories = ", ".join(convert_categories(result.categories)) + device = str(result.name) + test = result.test + else: + test = name + state = self._color_result(result.result) + message = self._split_list_to_txt_list(result.messages) if len(result.messages) > 0 else "" + inputs = ( + safe_dump(safe_load(result.inputs.model_dump_json(exclude_none=True)), indent=2, width=math.inf) + if isinstance(result, AtomicTestResult) and result.inputs is not None + else None + ) # See anta.catalog.AntaCatalogFile.yaml() for explanation of this line of code. + table.add_row( + categories, + test, + device, + result.description, + state, + message, + inputs, + ) + + def add_result(result: TestResult) -> None: + add_line(result) + for index, atomic_res in enumerate(result.atomic_results): + add_line(atomic_res, f"{index+1}/{len(result.atomic_results)}") + + for result in manager.results_by_category: + add_result(result) + return table + + def report(self, manager: ResultManager) -> Table: + """Create a table report with all tests. + + Create table with columns: Category | Device | Test | Status | Message(s) + + Parameters + ---------- + manager + A ResultManager instance. + + Returns + ------- + Table + A fully populated rich `Table`. + """ + table = Table(title=ReportTable.TITLE_ALL, show_lines=True) + headers = ["Category", "Device", "Test", "Status", "Message(s)"] + table = self._build_headers(headers=headers, table=table) + + def add_result(result: TestResult) -> None: state = self._color_result(result.result) message = self._split_list_to_txt_list(result.messages) if len(result.messages) > 0 else "" categories = ", ".join(convert_categories(result.categories)) - table.add_row(str(result.name), result.test, state, message, result.description, categories) + table.add_row(categories, str(result.name), result.test, state, message) - for result in manager.results: - add_line(result) + for result in manager.results_by_category: + add_result(result) return table def report_summary_tests( diff --git a/anta/result_manager/__init__.py b/anta/result_manager/__init__.py index 17d445cf9..25b00e1b9 100644 --- a/anta/result_manager/__init__.py +++ b/anta/result_manager/__init__.py @@ -5,7 +5,6 @@ from __future__ import annotations -import json import logging import warnings from collections import defaultdict @@ -13,6 +12,8 @@ from itertools import chain from typing import Any +from pydantic import TypeAdapter + from anta.result_manager.models import AntaTestStatus, TestResult from .models import CategoryStats, DeviceStats, TestStats @@ -74,7 +75,7 @@ class ResultManager: ] """ - _result_entries: list[TestResult] + _results: list[TestResult] status: AntaTestStatus error_status: bool @@ -104,12 +105,13 @@ def __init__(self) -> None: If the status of the added test is error, the status is untouched and the error_status is set to True. 
""" + self.ta = TypeAdapter(list[TestResult]) self.reset() def reset(self) -> None: """Create or reset the attributes of the ResultManager instance.""" - self._result_entries: list[TestResult] = [] - self.status: AntaTestStatus = AntaTestStatus.UNSET + self._results = [] + self.status = AntaTestStatus.UNSET self.error_status = False # Initialize the statistics attributes @@ -117,12 +119,12 @@ def reset(self) -> None: def __len__(self) -> int: """Implement __len__ method to count number of results.""" - return len(self._result_entries) + return len(self._results) @property def results(self) -> list[TestResult]: """Get the list of TestResult.""" - return self._result_entries + return self._results @results.setter def results(self, value: list[TestResult]) -> None: @@ -136,12 +138,12 @@ def results(self, value: list[TestResult]) -> None: @property def dump(self) -> list[dict[str, Any]]: """Get a list of dictionary of the results.""" - return [result.model_dump() for result in self._result_entries] + return self.ta.dump_python(self._results) @property def json(self) -> str: """Get a JSON representation of the results.""" - return json.dumps(self.dump, indent=4) + return self.ta.dump_json(self._results, exclude_none=True, indent=4).decode() @property def device_stats(self) -> dict[str, DeviceStats]: @@ -183,7 +185,12 @@ def sorted_category_stats(self) -> dict[str, CategoryStats]: @cached_property def results_by_status(self) -> dict[AntaTestStatus, list[TestResult]]: """A cached property that returns the results grouped by status.""" - return {status: [result for result in self._result_entries if result.result == status] for status in AntaTestStatus} + return {status: [result for result in self._results if result.result == status] for status in AntaTestStatus} + + @cached_property + def results_by_category(self) -> list[TestResult]: + """A cached property that returns the results grouped by categories.""" + return sorted(self._results, key=lambda res: res.categories) def _update_status(self, test_status: AntaTestStatus) -> None: """Update the status of the ResultManager instance based on the test status. @@ -247,7 +254,7 @@ def _compute_stats(self) -> None: self._reset_stats() # Recompute stats for all results - for result in self._result_entries: + for result in self._results: self._update_stats(result) self._stats_in_sync = True @@ -268,12 +275,13 @@ def add(self, result: TestResult) -> None: result TestResult to add to the ResultManager instance. """ - self._result_entries.append(result) + self._results.append(result) self._update_status(result.result) self._stats_in_sync = False - # Every time a new result is added, we need to clear the cached property - self.__dict__.pop("results_by_status", None) + # Every time a new result is added, we need to clear the cached properties + for name in ["results_by_status", "results_by_category"]: + self.__dict__.pop(name, None) def get_results(self, status: set[AntaTestStatus] | None = None, sort_by: list[str] | None = None) -> list[TestResult]: """Get the results, optionally filtered by status and sorted by TestResult fields. @@ -293,7 +301,7 @@ def get_results(self, status: set[AntaTestStatus] | None = None, sort_by: list[s List of results. 
""" # Return all results if no status is provided, otherwise return results for multiple statuses - results = self._result_entries if status is None else list(chain.from_iterable(self.results_by_status.get(status, []) for status in status)) + results = self._results if status is None else list(chain.from_iterable(self.results_by_status.get(status, []) for status in status)) if sort_by: accepted_fields = TestResult.model_fields.keys() @@ -342,7 +350,7 @@ def sort(self, sort_by: list[str]) -> ResultManager: if not set(sort_by).issubset(set(accepted_fields)): msg = f"Invalid sort_by fields: {sort_by}. Accepted fields are: {list(accepted_fields)}" raise ValueError(msg) - self._result_entries.sort(key=lambda result: [getattr(result, field) for field in sort_by]) + self._results.sort(key=lambda result: [getattr(result, field) for field in sort_by]) return self def filter(self, hide: set[AntaTestStatus]) -> ResultManager: @@ -377,7 +385,7 @@ def filter_by_tests(self, tests: set[str]) -> ResultManager: A filtered `ResultManager`. """ manager = ResultManager() - manager.results = [result for result in self._result_entries if result.test in tests] + manager.results = [result for result in self._results if result.test in tests] return manager def filter_by_devices(self, devices: set[str]) -> ResultManager: @@ -394,7 +402,7 @@ def filter_by_devices(self, devices: set[str]) -> ResultManager: A filtered `ResultManager`. """ manager = ResultManager() - manager.results = [result for result in self._result_entries if result.name in devices] + manager.results = [result for result in self._results if result.name in devices] return manager def get_tests(self) -> set[str]: @@ -405,7 +413,7 @@ def get_tests(self) -> set[str]: set[str] Set of test names. """ - return {str(result.test) for result in self._result_entries} + return {str(result.test) for result in self._results} def get_devices(self) -> set[str]: """Get the set of all the device names. @@ -415,4 +423,4 @@ def get_devices(self) -> set[str]: set[str] Set of device names. """ - return {str(result.name) for result in self._result_entries} + return {str(result.name) for result in self._results} diff --git a/anta/result_manager/models.py b/anta/result_manager/models.py index a18ff579c..ffcf12641 100644 --- a/anta/result_manager/models.py +++ b/anta/result_manager/models.py @@ -5,10 +5,12 @@ from __future__ import annotations +from abc import ABC, abstractmethod from dataclasses import dataclass, field from enum import Enum +from typing import Any -from pydantic import BaseModel +from pydantic import BaseModel, InstanceOf, SerializeAsAny class AntaTestStatus(str, Enum): @@ -28,35 +30,12 @@ def __str__(self) -> str: return self.value -class TestResult(BaseModel): - """Describe the result of a test from a single device. - - Attributes - ---------- - name : str - Name of the device where the test was run. - test : str - Name of the test run on the device. - categories : list[str] - List of categories the TestResult belongs to. Defaults to the AntaTest categories. - description : str - Description of the TestResult. Defaults to the AntaTest description. - result : AntaTestStatus - Result of the test. Must be one of the AntaTestStatus Enum values: unset, success, failure, error or skipped. - messages : list[str] - Messages to report after the test, if any. - custom_field : str | None - Custom field to store a string for flexibility in integrating with ANTA. 
+class BaseTestResult(BaseModel, ABC):
+    """Base model for test results."""
 
-    """
-
-    name: str
-    test: str
-    categories: list[str]
-    description: str
-    result: AntaTestStatus = AntaTestStatus.UNSET
-    messages: list[str] = []
-    custom_field: str | None = None
+    @abstractmethod
+    def _set_status(self, status: AntaTestStatus, message: str | None = None) -> None:
+        pass
 
     def is_success(self, message: str | None = None) -> None:
         """Set status to success.
@@ -102,25 +81,126 @@ def is_error(self, message: str | None = None) -> None:
         """
         self._set_status(AntaTestStatus.ERROR, message)
 
+
+class AtomicTestResult(BaseTestResult):
+    """Describe the result of an atomic test part of a larger test related to a TestResult instance.
+
+    Attributes
+    ----------
+    description : str | None
+        Description of the AtomicTestResult.
+    inputs: BaseModel | None
+        If this AtomicTestResult is related to a specific parent test input, this field must be set.
+    result : AntaTestStatus
+        Result of the atomic test.
+    messages : list[str]
+        Messages reported by the test.
+    """
+
+    _parent: TestResult
+    description: str
+    inputs: SerializeAsAny[InstanceOf[BaseModel] | None] = None
+    result: AntaTestStatus = AntaTestStatus.UNSET
+    messages: list[str] = []
+
+    def __init__(self, **data: Any) -> None:  # noqa: ANN401
+        """Instantiate the parent TestResult private attribute."""
+        if "parent" not in data:
+            msg = "An AtomicTestResult instance must have a parent."
+            raise RuntimeError(msg)
+        parent = data.pop("parent")
+        super().__init__(**data)
+        self._parent = parent
+
     def _set_status(self, status: AntaTestStatus, message: str | None = None) -> None:
         """Set status and insert optional message.
 
+        If the parent TestResult status is UNSET and this AtomicTestResult status is SUCCESS, the parent TestResult status will be set as a SUCCESS.
+        If this AtomicTestResult status is FAILURE or ERROR, the parent TestResult status will be set with the same status.
+
         Parameters
         ----------
         status
             Status of the test.
         message
             Optional message.
-
         """
         self.result = status
+        if (self._parent.result == AntaTestStatus.UNSET and status == AntaTestStatus.SUCCESS) or status in [AntaTestStatus.FAILURE, AntaTestStatus.ERROR]:
+            self._parent.result = status
         if message is not None:
             self.messages.append(message)
+            self._parent.messages.append(message)
+
+
+class TestResult(BaseTestResult):
+    """Describe the result of a test from a single device.
+
+    Attributes
+    ----------
+    name : str
+        Name of the device on which the test was run.
+    test : str
+        Name of the AntaTest subclass.
+    categories : list[str]
+        List of categories the TestResult belongs to. Defaults to the AntaTest subclass categories.
+    description : str
+        Description of the TestResult. Defaults to the AntaTest subclass description.
+    inputs: BaseModel
+        Inputs of the AntaTest instance.
+    custom_field : str | None
+        Custom field to store a string for flexibility in integrating with ANTA.
+    result : AntaTestStatus
+        Result of the test.
+    messages : list[str]
+        Messages reported by the test.
+    atomic_results: list[AtomicTestResult]
+        A list of AtomicTestResult instances which can be used to store atomic results during the test execution.
+        It can then be leveraged in the report to render atomic results over the test global TestResult.
+ """ + + name: str + test: str + categories: list[str] + description: str + inputs: SerializeAsAny[InstanceOf[BaseModel]] | None = None # A TestResult inputs can be None in case of inputs validation error + custom_field: str | None = None + result: AntaTestStatus = AntaTestStatus.UNSET + messages: list[str] = [] + atomic_results: list[AtomicTestResult] = [] def __str__(self) -> str: """Return a human readable string of this TestResult.""" return f"Test '{self.test}' (on '{self.name}'): Result '{self.result}'\nMessages: {self.messages}" + def add(self, description: str | None = None, inputs: BaseModel | None = None) -> AtomicTestResult: + """Create and add a new AtomicTestResult to this TestResult instance. + + Parameters + ---------- + description : str | None + Description of the AtomicTestResult. + inputs: BaseModel | None + If this AtomicTestResult is related to a specific parent test input, this field must be set. + """ + res = AtomicTestResult(description=description, inputs=inputs, parent=self) + self.atomic_results.append(res) + return res + + def _set_status(self, status: AntaTestStatus, message: str | None = None) -> None: + """Set status and insert optional message. + + Parameters + ---------- + status + Status of the test. + message + Optional message. + """ + self.result = status + if message is not None: + self.messages.append(message) + # Pylint does not treat dataclasses differently: https://github.com/pylint-dev/pylint/issues/9058 # pylint: disable=too-many-instance-attributes diff --git a/anta/tests/connectivity.py b/anta/tests/connectivity.py index 245baf46d..8966462cd 100644 --- a/anta/tests/connectivity.py +++ b/anta/tests/connectivity.py @@ -66,11 +66,11 @@ def render(self, template: AntaTemplate) -> list[AntaCommand]: @AntaTest.anta_test def test(self) -> None: """Main test function for VerifyReachability.""" - self.result.is_success() - for command, host in zip(self.instance_commands, self.inputs.hosts): + host_result = self.result.add(description=str(host), inputs=host) + host_result.is_success() if f"{host.repeat} received" not in command.json_output["messages"][0]: - self.result.is_failure(f"{host} - Unreachable") + host_result.is_failure(f"Unreachable {host}") class VerifyLLDPNeighbors(AntaTest): diff --git a/docs/contribution.md b/docs/contribution.md index 3d5704bfb..a2438c6be 100644 --- a/docs/contribution.md +++ b/docs/contribution.md @@ -94,43 +94,63 @@ All submodule should have its own pytest section under `tests/units/anta_tests/< ### How to write a unit test for an AntaTest subclass -The Python modules in the `tests/units/anta_tests` folder define test parameters for AntaTest subclasses unit tests. -A generic test function is written for all unit tests in `tests.units.anta_tests` module. +The Python modules in the `tests.units.anta_tests` package define test parameters for AntaTest subclasses unit tests. +A generic test function is written for all unit tests of the `AntaTest` subclasses. +In order for your unit tests to be correctly collected, you need to import the generic test function even if not used in the Python module. The `pytest_generate_tests` function definition in `conftest.py` is called during test collection. -The `pytest_generate_tests` function will parametrize the generic test function based on the `DATA` data structure defined in `tests.units.anta_tests` modules. +The `pytest_generate_tests` function will parametrize the generic test function based on the `DATA` constant defined in modules in the `tests.units.anta_tests` package. 
 See https://docs.pytest.org/en/7.3.x/how-to/parametrize.html#basic-pytest-generate-tests-example
 
-The `DATA` structure is a list of dictionaries used to parametrize the test. The list elements have the following keys:
+The `DATA` constant is a list of dictionaries used to parametrize the test. A `TypedDict` has been defined to ease the writing of such a constant and to leverage `mypy` type checking.
 
-- `name` (str): Test name as displayed by Pytest.
-- `test` (AntaTest): An AntaTest subclass imported in the test module - e.g. VerifyUptime.
-- `eos_data` (list[dict]): List of data mocking EOS returned data to be passed to the test.
-- `inputs` (dict): Dictionary to instantiate the `test` inputs as defined in the class from `test`.
-- `expected` (dict): Expected test result structure, a dictionary containing a key
-  `result` containing one of the allowed status (`Literal['success', 'failure', 'unset', 'skipped', 'error']`) and optionally a key `messages` which is a list(str) and each message is expected to be a substring of one of the actual messages in the TestResult object.
+``` python
+class AtomicResult(TypedDict):
+    """Expected atomic result of a unit test of an AntaTest subclass."""
 
-In order for your unit tests to be correctly collected, you need to import the generic test function even if not used in the Python module.
+    result: Literal["success", "failure", "skipped"]  # The expected status of this atomic result.
+    description: str  # The expected description of this atomic result.
+    messages: NotRequired[list[str]]  # The expected messages of this atomic result. The strings can be substrings of the actual messages.
+    inputs: NotRequired[dict[str, Any]]  # The inputs registered with this atomic result.
+
+
+class Expected(TypedDict):
+    """Expected result of a unit test of an AntaTest subclass."""
+
+    result: Literal["success", "failure", "skipped"]  # The expected status of this unit test.
+    messages: NotRequired[list[str]]  # The expected messages of the test. The strings can be substrings of the actual messages.
+    atomic_results: NotRequired[list[AtomicResult]]  # The list of expected atomic results.
+
+
+class AntaUnitTest(TypedDict):
+    """The parameters required for a unit test of an AntaTest subclass."""
+
+    name: str  # Test name as displayed by Pytest.
+    test: type[AntaTest]  # An AntaTest subclass imported in the test module - e.g. VerifyUptime.
+    inputs: NotRequired[dict[str, Any]]  # The test inputs of this unit test.
+    eos_data: list[dict[str, Any] | str]  # List of command outputs used to mock EOS commands during this unit test.
+    expected: Expected  # The expected result of this unit test.
+```
 
 Test example for `anta.tests.system.VerifyUptime` AntaTest.
 
 ``` python
-# Import the generic test function
-from tests.units.anta_tests import test
-
 # Import your AntaTest
 from anta.tests.system import VerifyUptime
 
+# Import the generic test function
+from tests.units.anta_tests import test
+
 # Define test parameters
-DATA: list[dict[str, Any]] = [
+DATA: list[AntaUnitTest] = [
     {
-        # Arbitrary test name
+        # Arbitrary test name.
"name": "success", - # Must be an AntaTest definition + # Must be an AntaTest subclass definition "test": VerifyUptime, - # Data returned by EOS on which the AntaTest is tested + # JSON output of the 'show uptime' EOS command as defined in VerifyUptime.commands "eos_data": [{"upTime": 1186689.15, "loadAvg": [0.13, 0.12, 0.09], "users": 1, "currentTime": 1683186659.139859}], # Dictionary to instantiate VerifyUptime.Input "inputs": {"minimum": 666}, @@ -143,12 +163,94 @@ DATA: list[dict[str, Any]] = [ "eos_data": [{"upTime": 665.15, "loadAvg": [0.13, 0.12, 0.09], "users": 1, "currentTime": 1683186659.139859}], "inputs": {"minimum": 666}, # If the test returns messages, it needs to be expected otherwise test will fail. - # NB: expected messages only needs to be included in messages returned by the test. Exact match is not required. + # The expected message can be a substring of the actual message. "expected": {"result": "failure", "messages": ["Device uptime is 665.15 seconds"]}, }, ] ``` +Test example for `anta.tests.connectivity.VerifyReachability` AntaTest that contains atomic results. + +``` python +from anta.tests.connectivity import VerifyReachability +from tests.units.anta_tests import test + +DATA: list[AntaUnitTest] = [ + + { + "name": "failure-ip", + "test": VerifyReachability, + "inputs": {"hosts": [{"destination": "10.0.0.11", "source": "10.0.0.5"}, {"destination": "10.0.0.2", "source": "10.0.0.5"}]}, + "eos_data": [ + { + "messages": [ + """ping: sendmsg: Network is unreachable + ping: sendmsg: Network is unreachable + PING 10.0.0.11 (10.0.0.11) from 10.0.0.5 : 72(100) bytes of data. + + --- 10.0.0.11 ping statistics --- + 2 packets transmitted, 0 received, 100% packet loss, time 10ms + + + """, + ], + }, + { + "messages": [ + """PING 10.0.0.2 (10.0.0.2) from 10.0.0.5 : 72(100) bytes of data. + 80 bytes from 10.0.0.2: icmp_seq=1 ttl=64 time=0.247 ms + 80 bytes from 10.0.0.2: icmp_seq=2 ttl=64 time=0.072 ms + + --- 10.0.0.2 ping statistics --- + 2 packets transmitted, 2 received, 0% packet loss, time 0ms + rtt min/avg/max/mdev = 0.072/0.159/0.247/0.088 ms, ipg/ewma 0.370/0.225 ms + + """, + ], + }, + ], + "expected": { + "result": "failure", + "messages": ["Unreachable Destination 10.0.0.11 from 10.0.0.5 in VRF default"], + # This test has implemented atomic results. + # Expected atomic results must be specified or the test will fail. + "atomic_results": [ + { + # Expected atomic result description + "description": "Destination 10.0.0.11 from 10.0.0.5 in VRF default", + # If the atomic result is tied to a subset of the test inputs, it needs to be expected here. + "inputs": { + "destination": "10.0.0.11", + "df_bit": False, + "repeat": 2, + "size": 100, + "source": "10.0.0.5", + "vrf": "default", + }, + # Expected atomic result status + "result": "failure", + # If the atomic result returns messages, it needs to be expected otherwise test will fail. + # The expected message can be a substring of the actual message. 
+ "messages": ["Unreachable Destination 10.0.0.11 from 10.0.0.5 in VRF default"], + + }, + { + "description": "Destination 10.0.0.2 from 10.0.0.5 in VRF default", + "inputs": { + "destination": "10.0.0.2", + "df_bit": False, + "repeat": 2, + "size": 100, + "source": "10.0.0.5", + "vrf": "default", + }, + "result": "success", + }, + ], + }, + } +``` + ## Git Pre-commit hook ```bash diff --git a/pyproject.toml b/pyproject.toml index 1e85b01f0..16d436ba8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -391,6 +391,7 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In "tests/*" = [ "S101", # Complains about asserts in units and libs. "SLF001", # Lots of private member accessed for test purposes + "FBT002", # Sometimes we need to define boolean kwargs in fixtures ] "tests/units/*" = [ "ARG002", # Sometimes we need to declare unused arguments when a parameter is not used but declared in @pytest.mark.parametrize @@ -417,7 +418,7 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In ] "anta/cli/*" = [ "PLR0913", # CLI has many arguments defined in functions - "ANN401", # TODO: Check if we can update the Any type hints in the CLI + "FBT001", # CLI can have boolean as argument ] "anta/tests/field_notices.py" = [ "PLR2004", # Magic value used in comparison, consider replacing 2131 with a constant variable - Field notice IDs are magic values @@ -467,6 +468,7 @@ disable = [ # Any rule listed here can be disabled: https://github.com/astral-sh "reimported", "wrong-import-order", "wrong-import-position", + "too-many-public-methods", "unnecessary-lambda", "abstract-class-instantiated", # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-instantiation-of-abstract-classes-abstract "unexpected-keyword-arg", # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-arguments-in-calls-call-arg and other rules diff --git a/tests/benchmark/test_reporter.py b/tests/benchmark/test_reporter.py index c6d0b295f..9c72ffa8e 100644 --- a/tests/benchmark/test_reporter.py +++ b/tests/benchmark/test_reporter.py @@ -24,7 +24,7 @@ def test_table_all(results: ResultManager) -> None: """Benchmark ReportTable.report_all().""" reporter = ReportTable() - reporter.report_all(results) + reporter.report(results) @pytest.mark.benchmark diff --git a/tests/benchmark/test_runner.py b/tests/benchmark/test_runner.py index 9aa54df27..d1aca4092 100644 --- a/tests/benchmark/test_runner.py +++ b/tests/benchmark/test_runner.py @@ -41,9 +41,10 @@ def test_get_coroutines(benchmark: BenchmarkFixture, catalog: AntaCatalog, inven selected_tests = prepare_tests(inventory=inventory, catalog=catalog, tests=None, tags=None) assert selected_tests is not None + results = ResultManager() def bench() -> list[Coroutine[Any, Any, TestResult]]: - coros = get_coroutines(selected_tests=selected_tests, manager=ResultManager()) + coros = get_coroutines(selected_tests=selected_tests, manager=results) for c in coros: c.close() return coros diff --git a/tests/benchmark/utils.py b/tests/benchmark/utils.py index 2a8443033..45b8f7d39 100644 --- a/tests/benchmark/utils.py +++ b/tests/benchmark/utils.py @@ -90,7 +90,7 @@ def import_test_modules() -> Generator[ModuleType, None, None]: for test_data in module.DATA: test = test_data["test"] result_overwrite = AntaTest.Input.ResultOverwrite(custom_field=test_data["name"]) - if test_data["inputs"] is None: + if "inputs" not in test_data or test_data["inputs"] is None: inputs = 
         else:
             inputs = test.Input(**test_data["inputs"], result_overwrite=result_overwrite)
diff --git a/tests/units/anta_tests/README.md b/tests/units/anta_tests/README.md
index fa88b8fc2..90da9c5a1 100644
--- a/tests/units/anta_tests/README.md
+++ b/tests/units/anta_tests/README.md
@@ -4,4 +4,4 @@
   ~ that can be found in the LICENSE file.
   -->
 
-A guide explaining how to write the unit test can be found in the [contribution guide](../../../docs/contribution.md#unit-tests)
+A guide explaining how to write a unit test can be found in the [contribution guide](../../../docs/contribution.md#unit-tests)
diff --git a/tests/units/anta_tests/__init__.py b/tests/units/anta_tests/__init__.py
index 9bfb5f815..a975ba935 100644
--- a/tests/units/anta_tests/__init__.py
+++ b/tests/units/anta_tests/__init__.py
@@ -3,10 +3,43 @@
 # that can be found in the LICENSE file.
 """Tests for anta.tests module."""
 
+from __future__ import annotations
+
 import asyncio
-from typing import Any
+from typing import TYPE_CHECKING, Any, Literal, TypedDict
+
+if TYPE_CHECKING:
+    from typing_extensions import NotRequired  # NOTE: required to support Python < 3.11 https://peps.python.org/pep-0655/#usage-in-python-3-11
+
+    from anta.device import AntaDevice
+    from anta.models import AntaTest
+
+
+class AtomicResult(TypedDict):
+    """Expected atomic result of a unit test of an AntaTest subclass."""
+
+    result: Literal["success", "failure", "skipped"]  # TODO: Refactor tests and use AntaTestStatus
+    description: str
+    messages: NotRequired[list[str]]
+    inputs: NotRequired[dict[str, Any]]
+
 
-from anta.device import AntaDevice
+class Expected(TypedDict):
+    """Expected result of a unit test of an AntaTest subclass."""
+
+    result: Literal["success", "failure", "skipped"]  # TODO: Refactor tests and use AntaTestStatus
+    messages: NotRequired[list[str]]
+    atomic_results: NotRequired[list[AtomicResult]]
+
+
+class AntaUnitTest(TypedDict):
+    """The parameters required for a unit test of an AntaTest subclass."""
+
+    name: str  # TODO: Refactor tests and change the DATA constant type as dictionary instead of list[AntaUnitTest] to avoid test duplicates.
+    test: type[AntaTest]
+    inputs: NotRequired[dict[str, Any]]
+    eos_data: list[dict[str, Any] | str]
+    expected: Expected
 
 
 def test(device: AntaDevice, data: dict[str, Any]) -> None:
@@ -17,6 +50,8 @@ def test(device: AntaDevice, data: dict[str, Any]) -> None:
 
     See `tests/units/anta_tests/README.md` for more information on how to use it.
""" # Instantiate the AntaTest subclass + if "inputs" not in data: + data["inputs"] = None test_instance = data["test"](device, inputs=data["inputs"], eos_data=data["eos_data"]) # Run the test() method asyncio.run(test_instance.test()) @@ -24,10 +59,28 @@ def test(device: AntaDevice, data: dict[str, Any]) -> None: assert test_instance.result.result == data["expected"]["result"], f"Expected '{data['expected']['result']}' result, got '{test_instance.result.result}'" if "messages" in data["expected"]: # We expect messages in test result - assert len(test_instance.result.messages) == len(data["expected"]["messages"]) + assert len(test_instance.result.messages) == len( + data["expected"]["messages"] + ), f"Expected {len(data['expected']['messages'])} messages, got {len(test_instance.result.messages)}" # Test will pass if the expected message is included in the test result message for message, expected in zip(test_instance.result.messages, data["expected"]["messages"]): # NOTE: zip(strict=True) has been added in Python 3.10 assert expected in message else: # Test result should not have messages - assert test_instance.result.messages == [] + assert test_instance.result.messages == [], "There are untested messages" + + if "atomic_results" in data["expected"]: + assert len(test_instance.result.atomic_results) == len( + data["expected"]["atomic_results"] + ), f"Expected {len(data['expected']['atomic_results'])} atomic results, got {len(test_instance.result.atomic_results)}" + for atomic_result_model, expected_atomic_result in zip(test_instance.result.atomic_results, data["expected"]["atomic_results"]): + atomic_result = atomic_result_model.model_dump(mode="json", exclude_none=True) + if len(atomic_result["messages"]): + for message, expected in zip(atomic_result["messages"], expected_atomic_result["messages"]): # NOTE: zip(strict=True) has been added in Python 3.10 + assert expected in message + else: + del atomic_result["messages"] + assert atomic_result == expected_atomic_result + else: + # Test result should not have atomic results + assert test_instance.result.atomic_results == [], "There are untested atomic results" diff --git a/tests/units/anta_tests/routing/test_bgp.py b/tests/units/anta_tests/routing/test_bgp.py index 4d9e3c026..72f1a7b1b 100644 --- a/tests/units/anta_tests/routing/test_bgp.py +++ b/tests/units/anta_tests/routing/test_bgp.py @@ -6,7 +6,7 @@ # pylint: disable=C0302 from __future__ import annotations -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING import pytest @@ -32,6 +32,9 @@ ) from tests.units.anta_tests import test +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + @pytest.mark.parametrize( ("input_dict", "expected"), @@ -49,7 +52,7 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo assert _check_bgp_neighbor_capability(input_dict) == expected -DATA: list[dict[str, Any]] = [ +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyBGPPeerCount, diff --git a/tests/units/anta_tests/routing/test_generic.py b/tests/units/anta_tests/routing/test_generic.py index a24b80b2c..526856320 100644 --- a/tests/units/anta_tests/routing/test_generic.py +++ b/tests/units/anta_tests/routing/test_generic.py @@ -6,7 +6,7 @@ from __future__ import annotations import sys -from typing import Any +from typing import TYPE_CHECKING import pytest from pydantic import ValidationError @@ -14,7 +14,10 @@ from anta.tests.routing.generic import VerifyIPv4RouteType, VerifyRoutingProtocolModel, VerifyRoutingTableEntry, 
 from tests.units.anta_tests import test
 
-DATA: list[dict[str, Any]] = [
+if TYPE_CHECKING:
+    from tests.units.anta_tests import AntaUnitTest
+
+DATA: list[AntaUnitTest] = [
     {
         "name": "success",
         "test": VerifyRoutingProtocolModel,
diff --git a/tests/units/anta_tests/routing/test_isis.py b/tests/units/anta_tests/routing/test_isis.py
index 9c379eae3..348e9b3f4 100644
--- a/tests/units/anta_tests/routing/test_isis.py
+++ b/tests/units/anta_tests/routing/test_isis.py
@@ -7,7 +7,7 @@
 
 from __future__ import annotations
 
-from typing import Any
+from typing import TYPE_CHECKING, Any
 
 import pytest
 
@@ -22,7 +22,10 @@
 )
 from tests.units.anta_tests import test
 
-DATA: list[dict[str, Any]] = [
+if TYPE_CHECKING:
+    from tests.units.anta_tests import AntaUnitTest
+
+DATA: list[AntaUnitTest] = [
     {
         "name": "success only default vrf",
         "test": VerifyISISNeighborState,
@@ -64,7 +67,6 @@
                 }
             },
         ],
-        "inputs": None,
         "expected": {"result": "success"},
     },
     {
@@ -116,7 +118,6 @@
                 }
             },
         ],
-        "inputs": None,
         "expected": {"result": "success"},
     },
     {
@@ -160,7 +161,6 @@
                 }
             },
         ],
-        "inputs": None,
         "expected": {
             "result": "failure",
             "messages": ["Some neighbors are not in the correct state (UP): [{'vrf': 'default', 'instance': 'CORE-ISIS', 'neighbor': 's1-p01', 'state': 'down'}]."],
@@ -172,7 +172,6 @@
         "eos_data": [
             {"vrfs": {"default": {"isisInstances": {"CORE-ISIS": {"neighbors": {}}}}}},
         ],
-        "inputs": None,
         "expected": {
             "result": "skipped",
             "messages": ["No IS-IS neighbor detected"],
diff --git a/tests/units/anta_tests/routing/test_ospf.py b/tests/units/anta_tests/routing/test_ospf.py
index 644cd76fa..c738bb879 100644
--- a/tests/units/anta_tests/routing/test_ospf.py
+++ b/tests/units/anta_tests/routing/test_ospf.py
@@ -5,12 +5,15 @@
 
 from __future__ import annotations
 
-from typing import Any
+from typing import TYPE_CHECKING
 
 from anta.tests.routing.ospf import VerifyOSPFMaxLSA, VerifyOSPFNeighborCount, VerifyOSPFNeighborState
 from tests.units.anta_tests import test
 
-DATA: list[dict[str, Any]] = [
+if TYPE_CHECKING:
+    from tests.units.anta_tests import AntaUnitTest
+
+DATA: list[AntaUnitTest] = [
     {
         "name": "success",
         "test": VerifyOSPFNeighborState,
@@ -63,7 +66,6 @@
             },
         },
     ],
-        "inputs": None,
         "expected": {"result": "success"},
     },
     {
@@ -118,7 +120,6 @@
             },
         },
     ],
-        "inputs": None,
         "expected": {
             "result": "failure",
             "messages": [
@@ -135,7 +136,6 @@
             "vrfs": {},
         },
     ],
-        "inputs": None,
         "expected": {"result": "skipped", "messages": ["no OSPF neighbor found"]},
     },
     {
@@ -341,7 +341,6 @@
             },
         },
     ],
-        "inputs": None,
         "expected": {"result": "success"},
     },
     {
@@ -391,7 +390,6 @@
             },
         },
     ],
-        "inputs": None,
         "expected": {
             "result": "failure",
             "messages": ["OSPF Instances ['1', '10'] crossed the maximum LSA threshold."],
@@ -405,7 +403,6 @@
             "vrfs": {},
         },
     ],
-        "inputs": None,
         "expected": {"result": "skipped", "messages": ["No OSPF instance found."]},
     },
 ]
diff --git a/tests/units/anta_tests/test_aaa.py b/tests/units/anta_tests/test_aaa.py
index 8589b5955..e0dd8cbad 100644
--- a/tests/units/anta_tests/test_aaa.py
+++ b/tests/units/anta_tests/test_aaa.py
@@ -5,7 +5,7 @@
 
 from __future__ import annotations
 
-from typing import Any
+from typing import TYPE_CHECKING
 
 from anta.tests.aaa import (
     VerifyAcctConsoleMethods,
@@ -18,7 +18,10 @@
 )
 from tests.units.anta_tests import test
 
-DATA: list[dict[str, Any]] = [
+if TYPE_CHECKING:
+    from tests.units.anta_tests import AntaUnitTest
+
+DATA: list[AntaUnitTest] = [
     {
         "name": "success",
         "test": VerifyTacacsSourceIntf,
diff --git a/tests/units/anta_tests/test_avt.py b/tests/units/anta_tests/test_avt.py
index bb6c6b903..e908d5b88 100644
--- a/tests/units/anta_tests/test_avt.py
+++ b/tests/units/anta_tests/test_avt.py
@@ -5,12 +5,15 @@
 
 from __future__ import annotations
 
-from typing import Any
+from typing import TYPE_CHECKING
 
 from anta.tests.avt import VerifyAVTPathHealth, VerifyAVTRole, VerifyAVTSpecificPath
 from tests.units.anta_tests import test
 
-DATA: list[dict[str, Any]] = [
+if TYPE_CHECKING:
+    from tests.units.anta_tests import AntaUnitTest
+
+DATA: list[AntaUnitTest] = [
     {
         "name": "success",
         "test": VerifyAVTPathHealth,
diff --git a/tests/units/anta_tests/test_bfd.py b/tests/units/anta_tests/test_bfd.py
index af1329f94..7b3aa385a 100644
--- a/tests/units/anta_tests/test_bfd.py
+++ b/tests/units/anta_tests/test_bfd.py
@@ -6,12 +6,15 @@
 # pylint: disable=C0302
 from __future__ import annotations
 
-from typing import Any
+from typing import TYPE_CHECKING
 
 from anta.tests.bfd import VerifyBFDPeersHealth, VerifyBFDPeersIntervals, VerifyBFDPeersRegProtocols, VerifyBFDSpecificPeers
 from tests.units.anta_tests import test
 
-DATA: list[dict[str, Any]] = [
+if TYPE_CHECKING:
+    from tests.units.anta_tests import AntaUnitTest
+
+DATA: list[AntaUnitTest] = [
     {
         "name": "success",
         "test": VerifyBFDPeersIntervals,
@@ -353,7 +356,6 @@
             "utcTime": 1703658481.8778424,
         },
     ],
-        "inputs": None,
         "expected": {
             "result": "failure",
             "messages": ["No IPv4 BFD peers are configured for any VRF."],
diff --git a/tests/units/anta_tests/test_configuration.py b/tests/units/anta_tests/test_configuration.py
index 9e676a93f..34635326b 100644
--- a/tests/units/anta_tests/test_configuration.py
+++ b/tests/units/anta_tests/test_configuration.py
@@ -5,38 +5,37 @@
 
 from __future__ import annotations
 
-from typing import Any
+from typing import TYPE_CHECKING
 
 from anta.tests.configuration import VerifyRunningConfigDiffs, VerifyRunningConfigLines, VerifyZeroTouch
 from tests.units.anta_tests import test
 
-DATA: list[dict[str, Any]] = [
+if TYPE_CHECKING:
+    from tests.units.anta_tests import AntaUnitTest
+
+DATA: list[AntaUnitTest] = [
     {
         "name": "success",
         "test": VerifyZeroTouch,
         "eos_data": [{"mode": "disabled"}],
-        "inputs": None,
         "expected": {"result": "success"},
     },
     {
         "name": "failure",
         "test": VerifyZeroTouch,
         "eos_data": [{"mode": "enabled"}],
-        "inputs": None,
         "expected": {"result": "failure", "messages": ["ZTP is NOT disabled"]},
     },
     {
         "name": "success",
         "test": VerifyRunningConfigDiffs,
         "eos_data": [""],
-        "inputs": None,
         "expected": {"result": "success"},
     },
     {
         "name": "failure",
         "test": VerifyRunningConfigDiffs,
         "eos_data": ["blah blah"],
-        "inputs": None,
         "expected": {"result": "failure", "messages": ["blah blah"]},
     },
     {
diff --git a/tests/units/anta_tests/test_connectivity.py b/tests/units/anta_tests/test_connectivity.py
index 0e37e053b..21dcc3b27 100644
--- a/tests/units/anta_tests/test_connectivity.py
+++ b/tests/units/anta_tests/test_connectivity.py
@@ -5,12 +5,15 @@
 
 from __future__ import annotations
 
-from typing import Any
+from typing import TYPE_CHECKING
 
 from anta.tests.connectivity import VerifyLLDPNeighbors, VerifyReachability
 from tests.units.anta_tests import test
 
-DATA: list[dict[str, Any]] = [
+if TYPE_CHECKING:
+    from tests.units.anta_tests import AntaUnitTest
+
+DATA: list[AntaUnitTest] = [
     {
         "name": "success-ip",
         "test": VerifyReachability,
@@ -43,7 +46,28 @@
             ],
         },
     ],
-        "expected": {"result": "success"},
+        "expected": {
+            "result": "success",
+            "atomic_results": [
+                {
+                    "result": "success",
+                    "description": "Destination 10.0.0.1 from 10.0.0.5 in VRF default",
+                    "inputs": {"destination": "10.0.0.1", "source": "10.0.0.5", "vrf": "default", "repeat": 2, "size": 100, "df_bit": False},
{"destination": "10.0.0.1", "source": "10.0.0.5", "vrf": "default", "repeat": 2, "size": 100, "df_bit": False}, + }, + { + "description": "Destination 10.0.0.2 from 10.0.0.5 in VRF default", + "inputs": { + "destination": "10.0.0.2", + "df_bit": False, + "repeat": 2, + "size": 100, + "source": "10.0.0.5", + "vrf": "default", + }, + "result": "success", + }, + ], + }, }, { "name": "success-interface", @@ -77,7 +101,96 @@ ], }, ], - "expected": {"result": "success"}, + "expected": { + "result": "success", + "atomic_results": [ + { + "result": "success", + "description": "Destination 10.0.0.1 from Management0 in VRF default", + "inputs": {"destination": "10.0.0.1", "source": "Management0", "vrf": "default", "repeat": 2, "size": 100, "df_bit": False}, + }, + { + "description": "Destination 10.0.0.2 from Management0 in VRF default", + "inputs": { + "destination": "10.0.0.2", + "df_bit": False, + "repeat": 2, + "size": 100, + "source": "Management0", + "vrf": "default", + }, + "result": "success", + }, + ], + }, + }, + { + "name": "success-description", + "test": VerifyReachability, + "inputs": { + "hosts": [ + {"description": "spine1 Ethernet49/1", "destination": "10.0.0.1", "source": "Management0"}, + {"destination": "10.0.0.2", "source": "Management0"}, + ] + }, + "eos_data": [ + { + "messages": [ + """PING 10.0.0.1 (10.0.0.1) from 10.0.0.5 : 72(100) bytes of data. + 80 bytes from 10.0.0.1: icmp_seq=1 ttl=64 time=0.247 ms + 80 bytes from 10.0.0.1: icmp_seq=2 ttl=64 time=0.072 ms + + --- 10.0.0.1 ping statistics --- + 2 packets transmitted, 2 received, 0% packet loss, time 0ms + rtt min/avg/max/mdev = 0.072/0.159/0.247/0.088 ms, ipg/ewma 0.370/0.225 ms + + """, + ], + }, + { + "messages": [ + """PING 10.0.0.2 (10.0.0.2) from 10.0.0.5 : 72(100) bytes of data. 
+ 80 bytes from 10.0.0.2: icmp_seq=1 ttl=64 time=0.247 ms + 80 bytes from 10.0.0.2: icmp_seq=2 ttl=64 time=0.072 ms + + --- 10.0.0.2 ping statistics --- + 2 packets transmitted, 2 received, 0% packet loss, time 0ms + rtt min/avg/max/mdev = 0.072/0.159/0.247/0.088 ms, ipg/ewma 0.370/0.225 ms + + """, + ], + }, + ], + "expected": { + "result": "success", + "atomic_results": [ + { + "result": "success", + "description": "Destination 10.0.0.1 (spine1 Ethernet49/1) from Management0 in VRF default", + "inputs": { + "description": "spine1 Ethernet49/1", + "destination": "10.0.0.1", + "source": "Management0", + "vrf": "default", + "repeat": 2, + "size": 100, + "df_bit": False, + }, + }, + { + "description": "Destination 10.0.0.2 from Management0 in VRF default", + "inputs": { + "destination": "10.0.0.2", + "df_bit": False, + "repeat": 2, + "size": 100, + "source": "Management0", + "vrf": "default", + }, + "result": "success", + }, + ], + }, }, { "name": "success-repeat", @@ -97,7 +210,23 @@ ], }, ], - "expected": {"result": "success"}, + "expected": { + "result": "success", + "atomic_results": [ + { + "description": "Destination 10.0.0.1 from Management0 in VRF default", + "inputs": { + "destination": "10.0.0.1", + "df_bit": False, + "repeat": 1, + "size": 100, + "source": "Management0", + "vrf": "default", + }, + "result": "success", + }, + ], + }, }, { "name": "success-df-bit-size", @@ -119,7 +248,23 @@ ], }, ], - "expected": {"result": "success"}, + "expected": { + "result": "success", + "atomic_results": [ + { + "description": "Destination 10.0.0.1 from Management0 in VRF default", + "inputs": { + "destination": "10.0.0.1", + "df_bit": True, + "repeat": 5, + "size": 1500, + "source": "Management0", + "vrf": "default", + }, + "result": "success", + }, + ], + }, }, { "name": "failure-ip", @@ -153,7 +298,37 @@ ], }, ], - "expected": {"result": "failure", "messages": ["Host 10.0.0.11 (src: 10.0.0.5, vrf: default, size: 100B, repeat: 2) - Unreachable"]}, + "expected": { + "result": "failure", + "messages": ["Unreachable Destination 10.0.0.11 from 10.0.0.5 in VRF default"], + "atomic_results": [ + { + "result": "failure", + "description": "Destination 10.0.0.11 from 10.0.0.5 in VRF default", + "inputs": { + "destination": "10.0.0.11", + "df_bit": False, + "repeat": 2, + "size": 100, + "source": "10.0.0.5", + "vrf": "default", + }, + "messages": ["Unreachable Destination 10.0.0.11 from 10.0.0.5 in VRF default"], + }, + { + "description": "Destination 10.0.0.2 from 10.0.0.5 in VRF default", + "inputs": { + "destination": "10.0.0.2", + "df_bit": False, + "repeat": 2, + "size": 100, + "source": "10.0.0.5", + "vrf": "default", + }, + "result": "success", + }, + ], + }, }, { "name": "failure-interface", @@ -187,7 +362,37 @@ ], }, ], - "expected": {"result": "failure", "messages": ["Host 10.0.0.11 (src: Management0, vrf: default, size: 100B, repeat: 2) - Unreachable"]}, + "expected": { + "result": "failure", + "messages": ["Unreachable Destination 10.0.0.11 from Management0 in VRF default"], + "atomic_results": [ + { + "description": "Destination 10.0.0.11 from Management0 in VRF default", + "inputs": { + "destination": "10.0.0.11", + "df_bit": False, + "repeat": 2, + "size": 100, + "source": "Management0", + "vrf": "default", + }, + "messages": ["Unreachable Destination 10.0.0.11 from Management0 in VRF default"], + "result": "failure", + }, + { + "description": "Destination 10.0.0.2 from Management0 in VRF default", + "inputs": { + "destination": "10.0.0.2", + "df_bit": False, + "repeat": 2, + "size": 
100, + "source": "Management0", + "vrf": "default", + }, + "result": "success", + }, + ], + }, }, { "name": "failure-size", @@ -209,7 +414,25 @@ ], }, ], - "expected": {"result": "failure", "messages": ["Host 10.0.0.1 (src: Management0, vrf: default, size: 1501B, repeat: 5, df-bit: enabled) - Unreachable"]}, + "expected": { + "result": "failure", + "messages": ["Unreachable Destination 10.0.0.1 from Management0 in VRF default"], + "atomic_results": [ + { + "description": "Destination 10.0.0.1 from Management0 in VRF default", + "inputs": { + "destination": "10.0.0.1", + "df_bit": True, + "repeat": 5, + "size": 1501, + "source": "Management0", + "vrf": "default", + }, + "messages": ["Unreachable Destination 10.0.0.1 from Management0 in VRF default"], + "result": "failure", + }, + ], + }, }, { "name": "success", diff --git a/tests/units/anta_tests/test_cvx.py b/tests/units/anta_tests/test_cvx.py index 6d0d2421d..1e5ca8573 100644 --- a/tests/units/anta_tests/test_cvx.py +++ b/tests/units/anta_tests/test_cvx.py @@ -5,17 +5,19 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.cvx import VerifyActiveCVXConnections, VerifyCVXClusterStatus, VerifyManagementCVX, VerifyMcsClientMounts, VerifyMcsServerMounts from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyMcsClientMounts, "eos_data": [{"mountStates": [{"path": "mcs/v1/toSwitch/28-99-3a-8f-93-7b", "type": "Mcs::DeviceConfigV1", "state": "mountStateMountComplete"}]}], - "inputs": None, "expected": {"result": "success"}, }, { @@ -29,7 +31,6 @@ ] }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -43,7 +44,6 @@ ] }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -52,14 +52,12 @@ "eos_data": [ {"mountStates": []}, ], - "inputs": None, "expected": {"result": "failure", "messages": ["MCS Client mount states are not present"]}, }, { "name": "failure-mountStatePreservedUnmounted", "test": VerifyMcsClientMounts, "eos_data": [{"mountStates": [{"path": "mcs/v1/toSwitch/28-99-3a-8f-93-7b", "type": "Mcs::DeviceConfigV1", "state": "mountStatePreservedUnmounted"}]}], - "inputs": None, "expected": {"result": "failure", "messages": ["MCS Client mount states are not valid: mountStatePreservedUnmounted"]}, }, { @@ -73,7 +71,6 @@ ] }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["MCS Client mount states are not valid: mountStatePreservedUnmounted"]}, }, { @@ -87,7 +84,6 @@ ] }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["MCS Client mount states are not valid: mountStatePreservedUnmounted"]}, }, { @@ -96,7 +92,6 @@ "eos_data": [ {"mountStates": [{"path": "blah/blah/blah", "type": "blah::blahState", "state": "mountStatePreservedUnmounted"}]}, ], - "inputs": None, "expected": {"result": "failure", "messages": ["MCS Client mount states are not present"]}, }, { @@ -110,7 +105,6 @@ ] }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["MCS Client mount states are not valid: mountStatePreservedUnmounted"]}, }, { diff --git a/tests/units/anta_tests/test_field_notices.py b/tests/units/anta_tests/test_field_notices.py index 13dd66095..906ed2544 100644 --- a/tests/units/anta_tests/test_field_notices.py +++ b/tests/units/anta_tests/test_field_notices.py @@ -5,12 +5,15 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from 
anta.tests.field_notices import VerifyFieldNotice44Resolution, VerifyFieldNotice72Resolution from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyFieldNotice44Resolution, @@ -25,7 +28,6 @@ }, }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -42,7 +44,6 @@ }, }, ], - "inputs": None, "expected": { "result": "failure", "messages": ["device is running incorrect version of aboot (4.0.1)"], @@ -62,7 +63,6 @@ }, }, ], - "inputs": None, "expected": { "result": "failure", "messages": ["device is running incorrect version of aboot (4.1.0)"], @@ -82,7 +82,6 @@ }, }, ], - "inputs": None, "expected": { "result": "failure", "messages": ["device is running incorrect version of aboot (6.0.1)"], @@ -102,7 +101,6 @@ }, }, ], - "inputs": None, "expected": { "result": "failure", "messages": ["device is running incorrect version of aboot (6.1.1)"], @@ -122,7 +120,6 @@ }, }, ], - "inputs": None, "expected": { "result": "skipped", "messages": ["device is not impacted by FN044"], @@ -142,7 +139,6 @@ }, }, ], - "inputs": None, "expected": { "result": "failure", "messages": ["Aboot component not found"], @@ -161,7 +157,6 @@ }, }, ], - "inputs": None, "expected": {"result": "success", "messages": ["FN72 is mitigated"]}, }, { @@ -177,7 +172,6 @@ }, }, ], - "inputs": None, "expected": {"result": "success", "messages": ["FN72 is mitigated"]}, }, { @@ -193,7 +187,6 @@ }, }, ], - "inputs": None, "expected": {"result": "success", "messages": ["FN72 is mitigated"]}, }, { @@ -209,7 +202,6 @@ }, }, ], - "inputs": None, "expected": {"result": "success", "messages": ["FN72 is mitigated"]}, }, { @@ -225,7 +217,6 @@ }, }, ], - "inputs": None, "expected": {"result": "skipped", "messages": ["Device not exposed"]}, }, { @@ -241,7 +232,6 @@ }, }, ], - "inputs": None, "expected": { "result": "skipped", "messages": ["Platform is not impacted by FN072"], @@ -260,7 +250,6 @@ }, }, ], - "inputs": None, "expected": {"result": "skipped", "messages": ["Device not exposed"]}, }, { @@ -276,7 +265,6 @@ }, }, ], - "inputs": None, "expected": {"result": "skipped", "messages": ["Device not exposed"]}, }, { @@ -292,7 +280,6 @@ }, }, ], - "inputs": None, "expected": {"result": "skipped", "messages": ["Device not exposed"]}, }, { @@ -308,7 +295,6 @@ }, }, ], - "inputs": None, "expected": {"result": "skipped", "messages": ["Device not exposed"]}, }, { @@ -324,7 +310,6 @@ }, }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["Device is exposed to FN72"]}, }, { @@ -340,7 +325,6 @@ }, }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["Device is exposed to FN72"]}, }, { @@ -356,7 +340,6 @@ }, }, ], - "inputs": None, "expected": { "result": "failure", "messages": ["Error in running test - Component FixedSystemvrm1 not found in 'show version'"], diff --git a/tests/units/anta_tests/test_flow_tracking.py b/tests/units/anta_tests/test_flow_tracking.py index 19f4d325b..fcb00f908 100644 --- a/tests/units/anta_tests/test_flow_tracking.py +++ b/tests/units/anta_tests/test_flow_tracking.py @@ -5,12 +5,15 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.flow_tracking import VerifyHardwareFlowTrackerStatus from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": 
"success", "test": VerifyHardwareFlowTrackerStatus, diff --git a/tests/units/anta_tests/test_greent.py b/tests/units/anta_tests/test_greent.py index 3afb240e2..6b5b8aa0b 100644 --- a/tests/units/anta_tests/test_greent.py +++ b/tests/units/anta_tests/test_greent.py @@ -5,24 +5,25 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.greent import VerifyGreenT, VerifyGreenTCounters from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyGreenTCounters, "eos_data": [{"sampleRcvd": 0, "sampleDiscarded": 0, "multiDstSampleRcvd": 0, "grePktSent": 1, "sampleSent": 0}], - "inputs": None, "expected": {"result": "success"}, }, { "name": "failure", "test": VerifyGreenTCounters, "eos_data": [{"sampleRcvd": 0, "sampleDiscarded": 0, "multiDstSampleRcvd": 0, "grePktSent": 0, "sampleSent": 0}], - "inputs": None, "expected": {"result": "failure", "messages": ["GreenT counters are not incremented"]}, }, { @@ -36,7 +37,6 @@ }, }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -49,7 +49,6 @@ }, }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["No GreenT policy is created"]}, }, ] diff --git a/tests/units/anta_tests/test_hardware.py b/tests/units/anta_tests/test_hardware.py index d6993c5f2..11db0ae69 100644 --- a/tests/units/anta_tests/test_hardware.py +++ b/tests/units/anta_tests/test_hardware.py @@ -5,7 +5,7 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.hardware import ( VerifyAdverseDrops, @@ -18,7 +18,10 @@ ) from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyTransceiversManufacturers, @@ -60,7 +63,6 @@ "recoveryModeOnOverheat": "recoveryModeNA", }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -76,7 +78,6 @@ "recoveryModeOnOverheat": "recoveryModeNA", }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["Device temperature exceeds acceptable limits. 
Current system status: 'temperatureKO'"]}, }, { @@ -106,7 +107,6 @@ "cardSlots": [], }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -136,7 +136,6 @@ "cardSlots": [], }, ], - "inputs": None, "expected": { "result": "failure", "messages": [ @@ -173,7 +172,6 @@ "cardSlots": [], }, ], - "inputs": None, "expected": { "result": "failure", "messages": [ @@ -203,7 +201,6 @@ "systemStatus": "coolingOk", }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -226,7 +223,6 @@ "systemStatus": "coolingKo", }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["Device system cooling is not OK: 'coolingKo'"]}, }, { @@ -906,14 +902,12 @@ "name": "success", "test": VerifyAdverseDrops, "eos_data": [{"totalAdverseDrops": 0}], - "inputs": None, "expected": {"result": "success"}, }, { "name": "failure", "test": VerifyAdverseDrops, "eos_data": [{"totalAdverseDrops": 10}], - "inputs": None, "expected": {"result": "failure", "messages": ["Device totalAdverseDrops counter is: '10'"]}, }, ] diff --git a/tests/units/anta_tests/test_interfaces.py b/tests/units/anta_tests/test_interfaces.py index 9e5a87190..29194c301 100644 --- a/tests/units/anta_tests/test_interfaces.py +++ b/tests/units/anta_tests/test_interfaces.py @@ -6,7 +6,7 @@ # pylint: disable=C0302 from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.interfaces import ( VerifyIllegalLACP, @@ -29,7 +29,10 @@ ) from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyInterfaceUtilization, @@ -812,7 +815,6 @@ }, }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -826,7 +828,6 @@ }, }, ], - "inputs": None, "expected": { "result": "failure", "messages": [ @@ -847,7 +848,6 @@ }, }, ], - "inputs": None, "expected": { "result": "failure", "messages": [ @@ -867,7 +867,6 @@ }, }, ], - "inputs": None, "expected": { "result": "failure", "messages": [ @@ -889,7 +888,6 @@ "outDiscardsTotal": 0, }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -905,7 +903,6 @@ "outDiscardsTotal": 0, }, ], - "inputs": None, "expected": { "result": "failure", "messages": [ @@ -929,7 +926,6 @@ }, }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -947,7 +943,6 @@ }, }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["The following interfaces are in error disabled state: ['Management1', 'Ethernet8']"]}, }, { @@ -1240,7 +1235,6 @@ }, }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -1259,7 +1253,6 @@ }, }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["The following interfaces have none 0 storm-control drop counters {'Ethernet1': {'broadcast': 666}}"]}, }, { @@ -1282,7 +1275,6 @@ }, }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -1305,7 +1297,6 @@ }, }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["The following port-channels have inactive port(s): ['Port-Channel42']"]}, }, { @@ -1332,7 +1323,6 @@ "orphanPorts": {}, }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -1359,7 +1349,6 @@ "orphanPorts": {}, }, ], - "inputs": None, "expected": { "result": "failure", "messages": ["The following port-channels have received illegal LACP packets on the following ports: [{'Port-Channel42': 'Ethernet8'}]"], @@ -1466,7 +1455,6 @@ }, }, ], - "inputs": None, "expected": {"result": "success"}, 
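An aside on the `VerifyReachability` expectations earlier in this diff: each expected result now carries an `atomic_results` list with one entry per destination, echoing that host's resolved inputs. A minimal sketch of how such a result is assembled, reusing only the `TestResult(...)` and `.add(...)` call shapes that appear verbatim in the `tests/units/result_manager/conftest.py` changes further down; the device name, category, and host values here are purely illustrative:

```python
from anta.result_manager.models import TestResult

# Illustrative values only; the constructor and add() keywords mirror their
# use in tests/units/result_manager/conftest.py later in this diff.
res = TestResult(
    name="DC1-SPINE1",             # device name (made up for this sketch)
    test="VerifyReachability",
    categories=["connectivity"],
    description="Reachability sketch",
    custom_field=None,
)
# One atomic result per destination; its description and inputs are exactly
# what the "atomic_results" entries in the expectations above assert against.
res.add(
    description="Destination 10.0.0.1 from 10.0.0.5 in VRF default",
    inputs={"destination": "10.0.0.1", "source": "10.0.0.5", "vrf": "default", "repeat": 2, "size": 100, "df_bit": False},
)
```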
}, { @@ -1486,7 +1474,6 @@ }, }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["The following SVIs are not up: ['Vlan42']"]}, }, { diff --git a/tests/units/anta_tests/test_lanz.py b/tests/units/anta_tests/test_lanz.py index 99a57712c..cdea7f834 100644 --- a/tests/units/anta_tests/test_lanz.py +++ b/tests/units/anta_tests/test_lanz.py @@ -5,24 +5,25 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.lanz import VerifyLANZ from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyLANZ, "eos_data": [{"lanzEnabled": True}], - "inputs": None, "expected": {"result": "success"}, }, { "name": "failure", "test": VerifyLANZ, "eos_data": [{"lanzEnabled": False}], - "inputs": None, "expected": {"result": "failure", "messages": ["LANZ is not enabled"]}, }, ] diff --git a/tests/units/anta_tests/test_logging.py b/tests/units/anta_tests/test_logging.py index 6aeac4a21..3f3efc324 100644 --- a/tests/units/anta_tests/test_logging.py +++ b/tests/units/anta_tests/test_logging.py @@ -5,7 +5,7 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.logging import ( VerifyLoggingAccounting, @@ -19,7 +19,10 @@ ) from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyLoggingPersistent, @@ -33,7 +36,6 @@ """, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -49,7 +51,6 @@ """, ], - "inputs": None, "expected": {"result": "failure", "messages": ["Persistent logging is disabled"]}, }, { @@ -65,7 +66,6 @@ """, ], - "inputs": None, "expected": {"result": "failure", "messages": ["No persistent logs are saved in flash"]}, }, { @@ -166,14 +166,12 @@ "2023-05-10T13:54:21.463497-05:00 NW-CORE.example.org ConfigAgent: %SYS-6-LOGMSG_INFO: " "Message from arista on command-api (10.22.1.107): ANTA VerifyLoggingLogsGeneration validation\n", ], - "inputs": None, "expected": {"result": "success"}, }, { "name": "failure", "test": VerifyLoggingLogsGeneration, "eos_data": ["", "Log Buffer:\n"], - "inputs": None, "expected": {"result": "failure", "messages": ["Logs are not generated"]}, }, { @@ -185,7 +183,6 @@ "2023-05-10T15:41:44.701810-05:00 NW-CORE.example.org ConfigAgent: %SYS-6-LOGMSG_INFO: " "Message from arista on command-api (10.22.1.107): ANTA VerifyLoggingHostname validation\n", ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -197,7 +194,6 @@ "2023-05-10T13:54:21.463497-05:00 NW-CORE ConfigAgent: %SYS-6-LOGMSG_INFO: " "Message from arista on command-api (10.22.1.107): ANTA VerifyLoggingLogsHostname validation\n", ], - "inputs": None, "expected": {"result": "failure", "messages": ["Logs are not generated with the device FQDN"]}, }, { @@ -210,7 +206,6 @@ "2023-05-10T15:42:44.680813-05:00 NW-CORE.example.org ConfigAgent: %SYS-6-LOGMSG_INFO: " "Other log\n", ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -223,7 +218,6 @@ "2023-05-10T15:42:44.680813+05:00 NW-CORE.example.org ConfigAgent: %SYS-6-LOGMSG_INFO: " "Other log\n", ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -234,7 +228,6 @@ "May 10 13:54:22 NE-CORE.example.org ConfigAgent: %SYS-6-LOGMSG_INFO: " "Message from arista on command-api (10.22.1.107): ANTA 
VerifyLoggingTimestamp validation\n", ], - "inputs": None, "expected": {"result": "failure", "messages": ["Logs are not generated with the appropriate timestamp format"]}, }, { @@ -244,28 +237,24 @@ "", "May 10 13:54:22 NE-CORE.example.org ConfigAgent: %SYS-6-LOGMSG_INFO: Message from arista on command-api (10.22.1.107): BLAH\n", ], - "inputs": None, "expected": {"result": "failure", "messages": ["Logs are not generated with the appropriate timestamp format"]}, }, { "name": "success", "test": VerifyLoggingAccounting, "eos_data": ["2023 May 10 15:50:31 arista command-api 10.22.1.107 stop service=shell priv-lvl=15 cmd=show aaa accounting logs | tail\n"], - "inputs": None, "expected": {"result": "success"}, }, { "name": "failure", "test": VerifyLoggingAccounting, "eos_data": ["2023 May 10 15:52:26 arista vty14 10.22.1.107 stop service=shell priv-lvl=15 cmd=show bgp summary\n"], - "inputs": None, "expected": {"result": "failure", "messages": ["AAA accounting logs are not generated"]}, }, { "name": "success", "test": VerifyLoggingErrors, "eos_data": [""], - "inputs": None, "expected": {"result": "success"}, }, { @@ -274,7 +263,6 @@ "eos_data": [ "Aug 2 19:57:42 DC1-LEAF1A Mlag: %FWK-3-SOCKET_CLOSE_REMOTE: Connection to Mlag (pid:27200) at tbt://192.168.0.1:4432/+n closed by peer (EOF)", ], - "inputs": None, "expected": {"result": "failure", "messages": ["Device has reported syslog messages with a severity of ERRORS or higher"]}, }, ] diff --git a/tests/units/anta_tests/test_mlag.py b/tests/units/anta_tests/test_mlag.py index 387c88979..d1e302788 100644 --- a/tests/units/anta_tests/test_mlag.py +++ b/tests/units/anta_tests/test_mlag.py @@ -5,17 +5,19 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.mlag import VerifyMlagConfigSanity, VerifyMlagDualPrimary, VerifyMlagInterfaces, VerifyMlagPrimaryPriority, VerifyMlagReloadDelay, VerifyMlagStatus from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyMlagStatus, "eos_data": [{"state": "active", "negStatus": "connected", "peerLinkStatus": "up", "localIntfStatus": "up"}], - "inputs": None, "expected": {"result": "success"}, }, { @@ -26,14 +28,12 @@ "state": "disabled", }, ], - "inputs": None, "expected": {"result": "skipped", "messages": ["MLAG is disabled"]}, }, { "name": "failure", "test": VerifyMlagStatus, "eos_data": [{"state": "active", "negStatus": "connected", "peerLinkStatus": "down", "localIntfStatus": "up"}], - "inputs": None, "expected": { "result": "failure", "messages": ["MLAG status is not OK: {'state': 'active', 'negStatus': 'connected', 'localIntfStatus': 'up', 'peerLinkStatus': 'down'}"], @@ -48,7 +48,6 @@ "mlagPorts": {"Disabled": 0, "Configured": 0, "Inactive": 0, "Active-partial": 0, "Active-full": 1}, }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -59,7 +58,6 @@ "state": "disabled", }, ], - "inputs": None, "expected": {"result": "skipped", "messages": ["MLAG is disabled"]}, }, { @@ -71,7 +69,6 @@ "mlagPorts": {"Disabled": 0, "Configured": 0, "Inactive": 0, "Active-partial": 1, "Active-full": 1}, }, ], - "inputs": None, "expected": { "result": "failure", "messages": ["MLAG status is not OK: {'Disabled': 0, 'Configured': 0, 'Inactive': 0, 'Active-partial': 1, 'Active-full': 1}"], @@ -86,7 +83,6 @@ "mlagPorts": {"Disabled": 0, "Configured": 0, "Inactive": 1, "Active-partial": 1, "Active-full": 1}, }, 
], - "inputs": None, "expected": { "result": "failure", "messages": ["MLAG status is not OK: {'Disabled': 0, 'Configured': 0, 'Inactive': 1, 'Active-partial': 1, 'Active-full': 1}"], @@ -96,7 +92,6 @@ "name": "success", "test": VerifyMlagConfigSanity, "eos_data": [{"globalConfiguration": {}, "interfaceConfiguration": {}, "mlagActive": True, "mlagConnected": True}], - "inputs": None, "expected": {"result": "success"}, }, { @@ -107,7 +102,6 @@ "mlagActive": False, }, ], - "inputs": None, "expected": {"result": "skipped", "messages": ["MLAG is disabled"]}, }, { @@ -121,7 +115,6 @@ "mlagConnected": True, }, ], - "inputs": None, "expected": { "result": "failure", "messages": [ @@ -143,7 +136,6 @@ "mlagConnected": True, }, ], - "inputs": None, "expected": { "result": "failure", "messages": [ diff --git a/tests/units/anta_tests/test_multicast.py b/tests/units/anta_tests/test_multicast.py index 753cd10a6..7c72141ff 100644 --- a/tests/units/anta_tests/test_multicast.py +++ b/tests/units/anta_tests/test_multicast.py @@ -5,12 +5,15 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.multicast import VerifyIGMPSnoopingGlobal, VerifyIGMPSnoopingVlans from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success-enabled", "test": VerifyIGMPSnoopingVlans, diff --git a/tests/units/anta_tests/test_path_selection.py b/tests/units/anta_tests/test_path_selection.py index 08377e675..7a42f1357 100644 --- a/tests/units/anta_tests/test_path_selection.py +++ b/tests/units/anta_tests/test_path_selection.py @@ -5,12 +5,15 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.path_selection import VerifyPathsHealth, VerifySpecificPath from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyPathsHealth, diff --git a/tests/units/anta_tests/test_profiles.py b/tests/units/anta_tests/test_profiles.py index 81ef4f9f5..4b6fe3f4c 100644 --- a/tests/units/anta_tests/test_profiles.py +++ b/tests/units/anta_tests/test_profiles.py @@ -5,12 +5,15 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.profiles import VerifyTcamProfile, VerifyUnifiedForwardingTableMode from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyUnifiedForwardingTableMode, diff --git a/tests/units/anta_tests/test_ptp.py b/tests/units/anta_tests/test_ptp.py index 112e33475..e46760f27 100644 --- a/tests/units/anta_tests/test_ptp.py +++ b/tests/units/anta_tests/test_ptp.py @@ -5,12 +5,15 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.ptp import VerifyPtpGMStatus, VerifyPtpLockStatus, VerifyPtpModeStatus, VerifyPtpOffset, VerifyPtpPortModeStatus from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyPtpModeStatus, @@ -31,21 +34,18 @@ "ptpIntfSummaries": {}, } ], - "inputs": None, "expected": {"result": "success"}, }, { "name": "failure", "test": 
VerifyPtpModeStatus, "eos_data": [{"ptpMode": "ptpDisabled", "ptpIntfSummaries": {}}], - "inputs": None, "expected": {"result": "failure", "messages": ["The device is not configured as a PTP Boundary Clock: 'ptpDisabled'"]}, }, { "name": "skipped", "test": VerifyPtpModeStatus, "eos_data": [{"ptpIntfSummaries": {}}], - "inputs": None, "expected": {"result": "skipped", "messages": ["PTP is not configured"]}, }, { @@ -133,7 +133,6 @@ }, } ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -157,14 +156,12 @@ }, } ], - "inputs": None, "expected": {"result": "failure", "messages": ["The device lock is more than 60s old: 157s"]}, }, { "name": "skipped", "test": VerifyPtpLockStatus, "eos_data": [{"ptpIntfSummaries": {}}], - "inputs": None, "expected": { "result": "skipped", "messages": [ @@ -201,7 +198,6 @@ ], } ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -233,7 +229,6 @@ ], } ], - "inputs": None, "expected": { "result": "failure", "messages": [("The device timing offset from master is greater than +/- 1000ns: {'Ethernet27/1': [1200, -1300]}")], @@ -248,7 +243,6 @@ "ptpMonitorData": [], }, ], - "inputs": None, "expected": {"result": "skipped", "messages": ["PTP is not configured"]}, }, { @@ -291,14 +285,12 @@ }, } ], - "inputs": None, "expected": {"result": "success"}, }, { "name": "failure-no-interfaces", "test": VerifyPtpPortModeStatus, "eos_data": [{"ptpIntfSummaries": {}}], - "inputs": None, "expected": {"result": "failure", "messages": ["No interfaces are PTP enabled"]}, }, { @@ -334,7 +326,6 @@ }, } ], - "inputs": None, "expected": {"result": "failure", "messages": ["The following interface(s) are not in a valid PTP state: '['Ethernet53', 'Ethernet1']'"]}, }, ] diff --git a/tests/units/anta_tests/test_security.py b/tests/units/anta_tests/test_security.py index 4d51c96fc..3f3aae8e6 100644 --- a/tests/units/anta_tests/test_security.py +++ b/tests/units/anta_tests/test_security.py @@ -5,7 +5,7 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING, Any import pytest from pydantic import ValidationError @@ -29,26 +29,26 @@ ) from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifySSHStatus, "eos_data": ["SSHD status for Default VRF is disabled\nSSH connection limit is 50\nSSH per host connection limit is 20\nFIPS status: disabled\n\n"], - "inputs": None, "expected": {"result": "success"}, }, { "name": "error-missing-ssh-status", "test": VerifySSHStatus, "eos_data": ["SSH per host connection limit is 20\nFIPS status: disabled\n\n"], - "inputs": None, "expected": {"result": "failure", "messages": ["Could not find SSH status in returned output."]}, }, { "name": "failure-ssh-enabled", "test": VerifySSHStatus, "eos_data": ["SSHD status for Default VRF is enabled\nSSH connection limit is 50\nSSH per host connection limit is 20\nFIPS status: disabled\n\n"], - "inputs": None, "expected": {"result": "failure", "messages": ["SSHD status for Default VRF is enabled"]}, }, { @@ -58,7 +58,6 @@ "User certificate authentication methods: none (neither trusted CA nor SSL profile configured)\n" "SSHD status for Default VRF: disabled\nSSH connection limit: 50\nSSH per host connection limit: 20\nFIPS status: disabled\n\n" ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -68,7 +67,6 @@ "User certificate authentication methods: none (neither trusted CA nor SSL profile 
configured)\n" "SSHD status for Default VRF: enabled\nSSH connection limit: 50\nSSH per host connection limit: 20\nFIPS status: disabled\n\n" ], - "inputs": None, "expected": {"result": "failure", "messages": ["SSHD status for Default VRF: enabled"]}, }, { @@ -117,14 +115,12 @@ "name": "success", "test": VerifyTelnetStatus, "eos_data": [{"serverState": "disabled", "vrfName": "default", "maxTelnetSessions": 20, "maxTelnetSessionsPerHost": 20}], - "inputs": None, "expected": {"result": "success"}, }, { "name": "failure", "test": VerifyTelnetStatus, "eos_data": [{"serverState": "enabled", "vrfName": "default", "maxTelnetSessions": 20, "maxTelnetSessionsPerHost": 20}], - "inputs": None, "expected": {"result": "failure", "messages": ["Telnet status for Default VRF is enabled"]}, }, { @@ -141,7 +137,6 @@ "tlsProtocol": ["1.2"], }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -158,7 +153,6 @@ "tlsProtocol": ["1.2"], }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["eAPI HTTP server is enabled globally"]}, }, { diff --git a/tests/units/anta_tests/test_services.py b/tests/units/anta_tests/test_services.py index 439b8ea4f..60e32d7fe 100644 --- a/tests/units/anta_tests/test_services.py +++ b/tests/units/anta_tests/test_services.py @@ -5,12 +5,15 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.services import VerifyDNSLookup, VerifyDNSServers, VerifyErrdisableRecovery, VerifyHostname from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyHostname, diff --git a/tests/units/anta_tests/test_snmp.py b/tests/units/anta_tests/test_snmp.py index 195ef298e..7f67beaab 100644 --- a/tests/units/anta_tests/test_snmp.py +++ b/tests/units/anta_tests/test_snmp.py @@ -5,7 +5,7 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.snmp import ( VerifySnmpContact, @@ -18,7 +18,10 @@ ) from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifySnmpStatus, diff --git a/tests/units/anta_tests/test_software.py b/tests/units/anta_tests/test_software.py index f0e5ea94d..3c184e939 100644 --- a/tests/units/anta_tests/test_software.py +++ b/tests/units/anta_tests/test_software.py @@ -5,12 +5,15 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.software import VerifyEOSExtensions, VerifyEOSVersion, VerifyTerminAttrVersion from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyEOSVersion, @@ -87,7 +90,6 @@ {"extensions": {}, "extensionStoredDir": "flash:", "warnings": ["No extensions are available"]}, {"extensions": []}, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -97,7 +99,6 @@ {"extensions": {}, "extensionStoredDir": "flash:", "warnings": ["No extensions are available"]}, {"extensions": [""]}, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -107,7 +108,6 @@ {"extensions": {}, "extensionStoredDir": "flash:", "warnings": ["No extensions are available"]}, {"extensions": ["dummy"]}, ], - "inputs": None, "expected": {"result": 
"failure", "messages": ["Missing EOS extensions: installed [] / configured: ['dummy']"]}, }, ] diff --git a/tests/units/anta_tests/test_stp.py b/tests/units/anta_tests/test_stp.py index 5de5df468..a00cb8398 100644 --- a/tests/units/anta_tests/test_stp.py +++ b/tests/units/anta_tests/test_stp.py @@ -5,7 +5,7 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.stp import ( VerifySTPBlockedPorts, @@ -18,7 +18,10 @@ ) from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifySTPMode, @@ -66,21 +69,18 @@ "name": "success", "test": VerifySTPBlockedPorts, "eos_data": [{"spanningTreeInstances": {}}], - "inputs": None, "expected": {"result": "success"}, }, { "name": "failure", "test": VerifySTPBlockedPorts, "eos_data": [{"spanningTreeInstances": {"MST0": {"spanningTreeBlockedPorts": ["Ethernet10"]}, "MST10": {"spanningTreeBlockedPorts": ["Ethernet10"]}}}], - "inputs": None, "expected": {"result": "failure", "messages": ["The following ports are blocked by STP: {'MST0': ['Ethernet10'], 'MST10': ['Ethernet10']}"]}, }, { "name": "success", "test": VerifySTPCounters, "eos_data": [{"interfaces": {"Ethernet10": {"bpduSent": 99, "bpduReceived": 0, "bpduTaggedError": 0, "bpduOtherError": 0, "bpduRateLimitCount": 0}}}], - "inputs": None, "expected": {"result": "success"}, }, { @@ -94,7 +94,6 @@ }, }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["The following interfaces have STP BPDU packet errors: ['Ethernet10', 'Ethernet11']"]}, }, { diff --git a/tests/units/anta_tests/test_stun.py b/tests/units/anta_tests/test_stun.py index 1001af8a2..57b6e658e 100644 --- a/tests/units/anta_tests/test_stun.py +++ b/tests/units/anta_tests/test_stun.py @@ -5,12 +5,15 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.stun import VerifyStunClientTranslation, VerifyStunServer from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyStunClientTranslation, diff --git a/tests/units/anta_tests/test_system.py b/tests/units/anta_tests/test_system.py index 858b793d1..fd989f182 100644 --- a/tests/units/anta_tests/test_system.py +++ b/tests/units/anta_tests/test_system.py @@ -5,7 +5,7 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.system import ( VerifyAgentLogs, @@ -20,7 +20,10 @@ ) from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyUptime, @@ -39,7 +42,6 @@ "name": "success-no-reload", "test": VerifyReloadCause, "eos_data": [{"kernelCrashData": [], "resetCauses": [], "full": False}], - "inputs": None, "expected": {"result": "success"}, }, { @@ -58,7 +60,6 @@ "full": False, }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -73,42 +74,36 @@ "full": False, }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["Reload cause is: 'Reload after crash.'"]}, }, { "name": "success-without-minidump", "test": VerifyCoredump, "eos_data": [{"mode": "compressedDeferred", "coreFiles": []}], - "inputs": None, "expected": {"result": "success"}, }, { "name": 
"success-with-minidump", "test": VerifyCoredump, "eos_data": [{"mode": "compressedDeferred", "coreFiles": ["minidump"]}], - "inputs": None, "expected": {"result": "success"}, }, { "name": "failure-without-minidump", "test": VerifyCoredump, "eos_data": [{"mode": "compressedDeferred", "coreFiles": ["core.2344.1584483862.Mlag.gz", "core.23101.1584483867.Mlag.gz"]}], - "inputs": None, "expected": {"result": "failure", "messages": ["Core dump(s) have been found: ['core.2344.1584483862.Mlag.gz', 'core.23101.1584483867.Mlag.gz']"]}, }, { "name": "failure-with-minidump", "test": VerifyCoredump, "eos_data": [{"mode": "compressedDeferred", "coreFiles": ["minidump", "core.2344.1584483862.Mlag.gz", "core.23101.1584483867.Mlag.gz"]}], - "inputs": None, "expected": {"result": "failure", "messages": ["Core dump(s) have been found: ['core.2344.1584483862.Mlag.gz', 'core.23101.1584483867.Mlag.gz']"]}, }, { "name": "success", "test": VerifyAgentLogs, "eos_data": [""], - "inputs": None, "expected": {"result": "success"}, }, { @@ -127,7 +122,6 @@ EntityManager::doBackoff waiting for remote sysdb version ...................ok """, ], - "inputs": None, "expected": { "result": "failure", "messages": [ @@ -162,7 +156,6 @@ }, }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -189,7 +182,6 @@ }, }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["Device has reported a high CPU utilization: 75.2%"]}, }, { @@ -205,7 +197,6 @@ "version": "4.27.3F", }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -221,7 +212,6 @@ "version": "4.27.3F", }, ], - "inputs": None, "expected": {"result": "failure", "messages": ["Device has reported a high memory usage: 95.56%"]}, }, { @@ -235,7 +225,6 @@ /dev/loop0 461M 461M 0 100% /rootfs-i386 """, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -249,7 +238,6 @@ /dev/loop0 461M 461M 0 100% /rootfs-i386 """, ], - "inputs": None, "expected": { "result": "failure", "messages": [ @@ -266,7 +254,6 @@ poll interval unknown """, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -277,7 +264,6 @@ poll interval unknown """, ], - "inputs": None, "expected": {"result": "failure", "messages": ["The device is not synchronized with the configured NTP server(s): 'unsynchronised'"]}, }, { diff --git a/tests/units/anta_tests/test_vlan.py b/tests/units/anta_tests/test_vlan.py index e68bd06dc..b09a8197f 100644 --- a/tests/units/anta_tests/test_vlan.py +++ b/tests/units/anta_tests/test_vlan.py @@ -5,12 +5,15 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.vlan import VerifyVlanInternalPolicy from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", "test": VerifyVlanInternalPolicy, diff --git a/tests/units/anta_tests/test_vxlan.py b/tests/units/anta_tests/test_vxlan.py index ce9973e71..6d7b66a41 100644 --- a/tests/units/anta_tests/test_vxlan.py +++ b/tests/units/anta_tests/test_vxlan.py @@ -5,45 +5,43 @@ from __future__ import annotations -from typing import Any +from typing import TYPE_CHECKING from anta.tests.vxlan import VerifyVxlan1ConnSettings, VerifyVxlan1Interface, VerifyVxlanConfigSanity, VerifyVxlanVniBinding, VerifyVxlanVtep from tests.units.anta_tests import test -DATA: list[dict[str, Any]] = [ +if TYPE_CHECKING: + from tests.units.anta_tests import AntaUnitTest + +DATA: list[AntaUnitTest] = [ { "name": "success", 
"test": VerifyVxlan1Interface, "eos_data": [{"interfaceDescriptions": {"Vxlan1": {"lineProtocolStatus": "up", "interfaceStatus": "up"}}}], - "inputs": None, "expected": {"result": "success"}, }, { "name": "skipped", "test": VerifyVxlan1Interface, "eos_data": [{"interfaceDescriptions": {"Loopback0": {"lineProtocolStatus": "up", "interfaceStatus": "up"}}}], - "inputs": None, "expected": {"result": "skipped", "messages": ["Vxlan1 interface is not configured"]}, }, { "name": "failure-down-up", "test": VerifyVxlan1Interface, "eos_data": [{"interfaceDescriptions": {"Vxlan1": {"lineProtocolStatus": "down", "interfaceStatus": "up"}}}], - "inputs": None, "expected": {"result": "failure", "messages": ["Vxlan1 interface is down/up"]}, }, { "name": "failure-up-down", "test": VerifyVxlan1Interface, "eos_data": [{"interfaceDescriptions": {"Vxlan1": {"lineProtocolStatus": "up", "interfaceStatus": "down"}}}], - "inputs": None, "expected": {"result": "failure", "messages": ["Vxlan1 interface is up/down"]}, }, { "name": "failure-down-down", "test": VerifyVxlan1Interface, "eos_data": [{"interfaceDescriptions": {"Vxlan1": {"lineProtocolStatus": "down", "interfaceStatus": "down"}}}], - "inputs": None, "expected": {"result": "failure", "messages": ["Vxlan1 interface is down/down"]}, }, { @@ -108,7 +106,6 @@ "warnings": [], }, ], - "inputs": None, "expected": {"result": "success"}, }, { @@ -173,7 +170,6 @@ "warnings": ["Your configuration contains warnings. This does not mean misconfigurations. But you may wish to re-check your configurations."], }, ], - "inputs": None, "expected": { "result": "failure", "messages": [ @@ -191,7 +187,6 @@ "name": "skipped", "test": VerifyVxlanConfigSanity, "eos_data": [{"categories": {}}], - "inputs": None, "expected": {"result": "skipped", "messages": ["VXLAN is not configured"]}, }, { diff --git a/tests/units/reporter/conftest.py b/tests/units/reporter/conftest.py index 0baa5c39b..b539c4054 100644 --- a/tests/units/reporter/conftest.py +++ b/tests/units/reporter/conftest.py @@ -3,6 +3,6 @@ # that can be found in the LICENSE file. 
"""See https://docs.pytest.org/en/stable/reference/fixtures.html#conftest-py-sharing-fixtures-across-multiple-files.""" -from tests.units.result_manager.conftest import list_result_factory, result_manager, result_manager_factory, test_result_factory +from tests.units.result_manager.conftest import result_manager_factory_fixture, result_manager_fixture, test_result_factory_fixture -__all__ = ["list_result_factory", "result_manager", "result_manager_factory", "test_result_factory"] +__all__ = ["result_manager_factory_fixture", "result_manager_fixture", "test_result_factory_fixture"] diff --git a/tests/units/reporter/test__init__.py b/tests/units/reporter/test__init__.py index cc34cce4f..1b3b54c71 100644 --- a/tests/units/reporter/test__init__.py +++ b/tests/units/reporter/test__init__.py @@ -84,65 +84,67 @@ def test__color_result(self, status: AntaTestStatus, expected_status: str) -> No assert report._color_result(status) == expected_status @pytest.mark.parametrize( - ("title", "number_of_tests", "expected_length"), + ("results_size"), [ - pytest.param(None, 5, 5, id="all results"), - pytest.param(None, 0, 0, id="result for host1 when no host1 test"), - pytest.param(None, 5, 5, id="result for test VerifyTest3"), - pytest.param("Custom title", 5, 5, id="Change table title"), + pytest.param(5, id="5 results"), + pytest.param(0, id="no results"), ], ) - def test_report_all( + def test_report( self, - result_manager_factory: Callable[[int], ResultManager], - title: str | None, - number_of_tests: int, - expected_length: int, + result_manager_factory: Callable[..., ResultManager], + results_size: int, ) -> None: - """Test report_all.""" - manager = result_manager_factory(number_of_tests) + """Test report table.""" + manager = result_manager_factory(size=results_size) report = ReportTable() - kwargs = {"title": title} - kwargs = {k: v for k, v in kwargs.items() if v is not None} - res = report.report_all(manager, **kwargs) # type: ignore[arg-type] + res = report.report(manager) assert isinstance(res, Table) - assert res.title == (title or "All tests results") - assert res.row_count == expected_length + assert res.row_count == results_size @pytest.mark.parametrize( - ("test", "title", "number_of_tests", "expected_length"), + ("results_size", "atomic_results_size"), [ - pytest.param(None, None, 5, 5, id="all results"), - pytest.param("VerifyTest3", None, 5, 1, id="result for test VerifyTest3"), - pytest.param(None, "Custom title", 5, 5, id="Change table title"), + pytest.param(5, 0, id="5 results no atomic"), + pytest.param(0, 0, id="no results"), + pytest.param(5, 5, id="5 results 5 atomic"), ], ) - def test_report_summary_tests( + def test_report_expanded( self, - result_manager_factory: Callable[[int], ResultManager], - test: str | None, - title: str | None, - number_of_tests: int, - expected_length: int, + result_manager_factory: Callable[..., ResultManager], + results_size: int, + atomic_results_size: int, + ) -> None: + """Test report table.""" + manager = result_manager_factory(size=results_size, nb_atomic_results=atomic_results_size) + + report = ReportTable() + res = report.report_expanded(manager) + + assert isinstance(res, Table) + assert res.row_count == results_size + results_size * atomic_results_size + + @pytest.mark.parametrize( + ("results_size", "expected_length", "distinct", "tests_filter"), + [ + pytest.param(5, 1, False, None, id="5 results, same test"), + pytest.param(5, 5, True, None, id="5 results, different tests"), + pytest.param(0, 0, False, None, id="no results"), + ], + ) 
+ def test_report_summary_tests( + self, result_manager_factory: Callable[..., ResultManager], results_size: int, expected_length: int, distinct: bool, tests_filter: list[str] | None ) -> None: """Test report_summary_tests.""" - # TODO: refactor this later... this is injecting double test results by modyfing the device name - # should be a fixture - manager = result_manager_factory(number_of_tests) - new_results = [result.model_copy() for result in manager.results] - for result in new_results: - result.name = "test_device" - result.result = AntaTestStatus.FAILURE + manager = result_manager_factory(size=results_size, distinct_tests=distinct) report = ReportTable() - kwargs = {"tests": [test] if test is not None else None, "title": title} - kwargs = {k: v for k, v in kwargs.items() if v is not None} - res = report.report_summary_tests(manager, **kwargs) # type: ignore[arg-type] + res = report.report_summary_tests(manager, tests=tests_filter) assert isinstance(res, Table) - assert res.title == (title or "Summary per test") assert res.row_count == expected_length @pytest.mark.parametrize( diff --git a/tests/units/result_manager/conftest.py b/tests/units/result_manager/conftest.py index 0586d63cb..ba3f77d42 100644 --- a/tests/units/result_manager/conftest.py +++ b/tests/units/result_manager/conftest.py @@ -12,26 +12,38 @@ from anta.device import AntaDevice from anta.result_manager import ResultManager from anta.result_manager.models import TestResult +from tests.units.test_models import FakeTestWithInput TEST_RESULTS: Path = Path(__file__).parent.resolve() / "test_files" / "test_md_report_results.json" -@pytest.fixture -def result_manager_factory(list_result_factory: Callable[[int], list[TestResult]]) -> Callable[[int], ResultManager]: - """Return a ResultManager factory that takes as input a number of tests.""" - # pylint: disable=redefined-outer-name - - def _factory(number: int = 0) -> ResultManager: - """Create a factory for list[TestResult] entry of size entries.""" +@pytest.fixture(name="result_manager_factory") +def result_manager_factory_fixture(test_result_factory: Callable[[int, int, bool, bool], TestResult]) -> Callable[[int, int, bool, bool], ResultManager]: + """Return a function that creates a ResultManager instance.""" + + def _create(size: int = 0, nb_atomic_results: int = 0, distinct_tests: bool = False, distinct_devices: bool = False) -> ResultManager: + """ResultManager factory. + + Parameters + ---------- + size + Size of the ResultManager. + nb_atomic_results + Number of atomic results for each TestResult instance. + distinct_tests + Whether or not to use the index in the test name. + distinct_devices + Whether or not to use the index in the device name. + """ result_manager = ResultManager() - result_manager.results = list_result_factory(number) + result_manager.results = [test_result_factory(i, nb_atomic_results, distinct_tests, distinct_devices) for i in range(size)] return result_manager - return _factory + return _create -@pytest.fixture -def result_manager() -> ResultManager: +@pytest.fixture(name="result_manager") +def result_manager_fixture() -> ResultManager: """Return a ResultManager with 30 random tests loaded from a JSON file. 
Devices: DC1-SPINE1, DC1-LEAF1A @@ -55,31 +67,35 @@ def result_manager() -> ResultManager: return manager -@pytest.fixture -def test_result_factory(device: AntaDevice) -> Callable[[int], TestResult]: - """Return a anta.result_manager.models.TestResult object.""" - # pylint: disable=redefined-outer-name - - def _create(index: int = 0) -> TestResult: - """Actual Factory.""" - return TestResult( - name=device.name, - test=f"VerifyTest{index}", +@pytest.fixture(name="test_result_factory") +def test_result_factory_fixture(device: AntaDevice) -> Callable[[int, int, bool, bool], TestResult]: + """Return a function that creates a TestResult instance.""" + + def _create(index: int = 0, nb_atomic_results: int = 0, distinct_tests: bool = False, distinct_devices: bool = False) -> TestResult: + """TestResult factory. + + Parameters + ---------- + index + Index of the TestResult instance, used to create distinct device and test names (if applicable) and a unique input for the test. + nb_atomic_results + Number of atomic results for each TestResult instance. + distinct_tests + Whether or not to use the index in the test name. + distinct_devices + Whether or not to use the index in the device name. + """ + test = FakeTestWithInput(device=device, inputs={"string": f"Test instance {index}"}) + res = TestResult( + name=device.name if not distinct_devices else f"{device.name}{index}", + test=test.name if not distinct_tests else f"{test.name}{index}", + inputs=test.inputs, categories=["test"], - description=f"Verifies Test {index}", + description=test.description, custom_field=None, ) + for i in range(nb_atomic_results): + res.add(description=f"{test.name}{index}AtomicTestResult{i}", inputs=test.inputs) + return res return _create - - -@pytest.fixture -def list_result_factory(test_result_factory: Callable[[int], TestResult]) -> Callable[[int], list[TestResult]]: - """Return a list[TestResult] with 'size' TestResult instantiated using the test_result_factory fixture.""" - # pylint: disable=redefined-outer-name - - def _factory(size: int = 0) -> list[TestResult]: - """Create a factory for list[TestResult] entry of size entries.""" - return [test_result_factory(i) for i in range(size)] - - return _factory diff --git a/tests/units/result_manager/test__init__.py b/tests/units/result_manager/test__init__.py index c84e39b9a..f6329ee8f 100644 --- a/tests/units/result_manager/test__init__.py +++ b/tests/units/result_manager/test__init__.py @@ -20,20 +20,16 @@ from anta.result_manager.models import TestResult -# pylint: disable=too-many-public-methods class TestResultManager: """Test ResultManager class.""" - # not testing __init__ as nothing is going on there + # TODO: test __init__() and reset() - def test__len__(self, list_result_factory: Callable[[int], list[TestResult]]) -> None: + def test__len__(self, result_manager_factory: Callable[[int], ResultManager]) -> None: """Test __len__.""" - list_result = list_result_factory(3) - result_manager = ResultManager() - assert len(result_manager) == 0 for i in range(3): - result_manager.add(list_result[i]) - assert len(result_manager) == i + 1 + result_manager = result_manager_factory(i) + assert len(result_manager) == i def test_results_getter(self, result_manager_factory: Callable[[int], ResultManager]) -> None: """Test ResultManager.results property getter.""" @@ -44,19 +40,19 @@ def test_results_getter(self, result_manager_factory: Callable[[int], ResultMana for e in res: assert isinstance(e, models.TestResult) - def test_results_setter(self, list_result_factory: 
Callable[[int], list[TestResult]], result_manager_factory: Callable[[int], ResultManager]) -> None: + def test_results_setter(self, test_result_factory: Callable[..., TestResult], result_manager_factory: Callable[[int], ResultManager]) -> None: """Test ResultManager.results property setter.""" result_manager = result_manager_factory(3) assert len(result_manager) == 3 - tests = list_result_factory(5) + tests = [test_result_factory(i) for i in range(5)] result_manager.results = tests assert len(result_manager) == 5 - def test_json(self, list_result_factory: Callable[[int], list[TestResult]]) -> None: + def test_json(self, test_result_factory: Callable[..., TestResult]) -> None: """Test ResultManager.json property.""" result_manager = ResultManager() - success_list = list_result_factory(3) + success_list = [test_result_factory(i) for i in range(3)] for test in success_list: test.result = AntaTestStatus.SUCCESS result_manager.results = success_list @@ -74,10 +70,10 @@ def test_json(self, list_result_factory: Callable[[int], list[TestResult]]) -> N assert test.get("custom_field") is None assert test.get("result") == "success" - def test_sorted_category_stats(self, list_result_factory: Callable[[int], list[TestResult]]) -> None: + def test_sorted_category_stats(self, test_result_factory: Callable[..., TestResult]) -> None: """Test ResultManager.sorted_category_stats.""" result_manager = ResultManager() - results = list_result_factory(4) + results = [test_result_factory(i) for i in range(4)] # Modify the categories to have a mix of different acronym categories results[0].categories = ["ospf"] @@ -149,7 +145,7 @@ def test_sorted_category_stats(self, list_result_factory: Callable[[int], list[T ) def test_add( self, - test_result_factory: Callable[[], TestResult], + test_result_factory: Callable[..., TestResult], starting_status: str, test_status: str, expected_status: str, @@ -171,7 +167,7 @@ def test_add( assert result_manager.status == expected_status assert len(result_manager) == 1 - def test_add_clear_cache(self, result_manager: ResultManager, test_result_factory: Callable[[], TestResult]) -> None: + def test_add_clear_cache(self, result_manager: ResultManager, test_result_factory: Callable[..., TestResult]) -> None: """Test ResultManager.add and make sure the cache is reset after adding a new test.""" # Check the cache is empty assert "results_by_status" not in result_manager.__dict__ @@ -234,9 +230,7 @@ def test_get_results_sort_by(self, result_manager: ResultManager) -> None: # Check all results with bad sort_by with pytest.raises( ValueError, - match=re.escape( - "Invalid sort_by fields: ['bad_field']. 
Accepted fields are: ['name', 'test', 'categories', 'description', 'result', 'messages', 'custom_field']", - ), + match=re.escape("Invalid sort_by fields: ['bad_field']."), ): all_results = result_manager.get_results(sort_by=["bad_field"]) @@ -278,11 +272,11 @@ def test_get_status( assert result_manager.get_status(ignore_error=ignore_error) == expected_status - def test_filter(self, test_result_factory: Callable[[], TestResult], list_result_factory: Callable[[int], list[TestResult]]) -> None: + def test_filter(self, test_result_factory: Callable[..., TestResult]) -> None: """Test ResultManager.filter.""" result_manager = ResultManager() - success_list = list_result_factory(3) + success_list = [test_result_factory(i) for i in range(3)] for test in success_list: test.result = AntaTestStatus.SUCCESS result_manager.results = success_list @@ -307,7 +301,7 @@ def test_filter(self, test_result_factory: Callable[[], TestResult], list_result assert len(result_manager.filter({AntaTestStatus.FAILURE, AntaTestStatus.ERROR, AntaTestStatus.SKIPPED})) == 3 assert len(result_manager.filter({AntaTestStatus.SUCCESS, AntaTestStatus.FAILURE, AntaTestStatus.ERROR, AntaTestStatus.SKIPPED})) == 0 - def test_get_by_tests(self, test_result_factory: Callable[[], TestResult], result_manager_factory: Callable[[int], ResultManager]) -> None: + def test_get_by_tests(self, test_result_factory: Callable[..., TestResult], result_manager_factory: Callable[[int], ResultManager]) -> None: """Test ResultManager.get_by_tests.""" result_manager = result_manager_factory(3) @@ -329,7 +323,7 @@ def test_get_by_tests(self, test_result_factory: Callable[[], TestResult], resul assert len(rm) == 3 assert len(rm.filter_by_tests({"Test1"})) == 1 - def test_get_by_devices(self, test_result_factory: Callable[[], TestResult], result_manager_factory: Callable[[int], ResultManager]) -> None: + def test_get_by_devices(self, test_result_factory: Callable[..., TestResult], result_manager_factory: Callable[[int], ResultManager]) -> None: """Test ResultManager.get_by_devices.""" result_manager = result_manager_factory(3) @@ -351,11 +345,11 @@ def test_get_by_devices(self, test_result_factory: Callable[[], TestResult], res assert len(rm) == 3 assert len(rm.filter_by_devices({"Device1"})) == 1 - def test_get_tests(self, test_result_factory: Callable[[], TestResult], list_result_factory: Callable[[int], list[TestResult]]) -> None: + def test_get_tests(self, test_result_factory: Callable[..., TestResult]) -> None: """Test ResultManager.get_tests.""" result_manager = ResultManager() - tests = list_result_factory(3) + tests = [test_result_factory(i) for i in range(3)] for test in tests: test.test = "Test1" result_manager.results = tests @@ -367,11 +361,11 @@ def test_get_tests(self, test_result_factory: Callable[[], TestResult], list_res assert len(result_manager.get_tests()) == 2 assert all(t in result_manager.get_tests() for t in ["Test1", "Test2"]) - def test_get_devices(self, test_result_factory: Callable[[], TestResult], list_result_factory: Callable[[int], list[TestResult]]) -> None: + def test_get_devices(self, test_result_factory: Callable[..., TestResult]) -> None: """Test ResultManager.get_tests.""" result_manager = ResultManager() - tests = list_result_factory(3) + tests = [test_result_factory(i) for i in range(3)] for test in tests: test.name = "Device1" result_manager.results = tests @@ -383,7 +377,7 @@ def test_get_devices(self, test_result_factory: Callable[[], TestResult], list_r assert len(result_manager.get_devices()) == 2 assert 

-    def test_stats_computation_methods(self, test_result_factory: Callable[[], TestResult], caplog: pytest.LogCaptureFixture) -> None:
+    def test_stats_computation_methods(self, test_result_factory: Callable[..., TestResult], caplog: pytest.LogCaptureFixture) -> None:
         """Test ResultManager internal stats computation methods."""
         result_manager = ResultManager()
@@ -433,7 +427,7 @@ def test_stats_computation_methods(self, test_result_factory: Callable[[], TestR
         assert result_manager._test_stats["test1"].devices_success_count == 1
         assert result_manager._test_stats["test2"].devices_failure_count == 1

-    def test_stats_property_computation(self, test_result_factory: Callable[[], TestResult], caplog: pytest.LogCaptureFixture) -> None:
+    def test_stats_property_computation(self, test_result_factory: Callable[..., TestResult], caplog: pytest.LogCaptureFixture) -> None:
         """Test that stats are computed only once when accessed via properties."""
         result_manager = ResultManager()
@@ -482,7 +476,7 @@ def test_stats_property_computation(self, test_result_factory: Callable[[], Test
         assert "Computing statistics for all results" in caplog.text
         assert result_manager._stats_in_sync is True

-    def test_sort_by_result(self, test_result_factory: Callable[[], TestResult]) -> None:
+    def test_sort_by_result(self, test_result_factory: Callable[..., TestResult]) -> None:
         """Test sorting by result."""
         result_manager = ResultManager()
         test1 = test_result_factory()
@@ -496,7 +490,7 @@ def test_sort_by_result(self, test_result_factory: Callable[[], TestResult]) ->
         sorted_manager = result_manager.sort(["result"])
         assert [r.result for r in sorted_manager.results] == ["error", "failure", "success"]

-    def test_sort_by_name(self, test_result_factory: Callable[[], TestResult]) -> None:
+    def test_sort_by_name(self, test_result_factory: Callable[..., TestResult]) -> None:
         """Test sorting by name."""
         result_manager = ResultManager()
         test1 = test_result_factory()
@@ -510,7 +504,7 @@ def test_sort_by_name(self, test_result_factory: Callable[[], TestResult]) -> No
         sorted_manager = result_manager.sort(["name"])
         assert [r.name for r in sorted_manager.results] == ["Device1", "Device2", "Device3"]

-    def test_sort_by_categories(self, test_result_factory: Callable[[], TestResult]) -> None:
+    def test_sort_by_categories(self, test_result_factory: Callable[..., TestResult]) -> None:
         """Test sorting by categories."""
         result_manager = ResultManager()
         test1 = test_result_factory()
@@ -528,7 +522,7 @@ def test_sort_by_categories(self, test_result_factory: Callable[[], TestResult])
         assert results[1].categories == ["VXLAN", "networking"]
         assert results[2].categories == ["system", "hardware"]

-    def test_sort_multiple_fields(self, test_result_factory: Callable[[], TestResult]) -> None:
+    def test_sort_multiple_fields(self, test_result_factory: Callable[..., TestResult]) -> None:
         """Test sorting by multiple fields."""
         result_manager = ResultManager()
         test1 = test_result_factory()
@@ -558,7 +552,7 @@ def test_sort_invalid_field(self) -> None:
         with pytest.raises(
             ValueError,
             match=re.escape(
-                "Invalid sort_by fields: ['bad_field']. Accepted fields are: ['name', 'test', 'categories', 'description', 'result', 'messages', 'custom_field']",
+                "Invalid sort_by fields: ['bad_field'].",
             ),
         ):
             result_manager.sort(["bad_field"])
diff --git a/tests/units/result_manager/test_models.py b/tests/units/result_manager/test_models.py
index 1846af435..88f64ca4e 100644
--- a/tests/units/result_manager/test_models.py
+++ b/tests/units/result_manager/test_models.py
@@ -66,4 +66,4 @@ def test____str__(self, test_result_factory: Callable[[int], Result], target: An
         assert len(testresult.messages) == 0
         testresult._set_status(target, message)
         assert testresult.result == target
-        assert str(testresult) == f"Test 'VerifyTest1' (on '{DEVICE_NAME}'): Result '{target}'\nMessages: {[message]}"
+        assert str(testresult) == f"Test 'FakeTestWithInput' (on '{DEVICE_NAME}'): Result '{target}'\nMessages: {[message]}"
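
Note on the fixture change threaded through the hunks above: the suite drops the old list_result_factory fixture (a Callable[[int], list[TestResult]] returning a ready-made list) and instead builds lists inline with [test_result_factory(i) for i in range(n)], so test_result_factory widens from Callable[[], TestResult] to Callable[..., TestResult] and accepts an optional index. The conftest.py defining these fixtures is not part of this diff; the sketch below is a minimal illustration of the shape the tests appear to assume. The field defaults (device-{index}, the description string, the category) are invented for illustration; only FakeTestWithInput is taken from the test_models.py hunk above.

# Hypothetical conftest.py sketch -- not the project's actual fixture code.
from collections.abc import Callable

import pytest

from anta.result_manager import ResultManager
from anta.result_manager.models import TestResult


@pytest.fixture
def test_result_factory() -> Callable[..., TestResult]:
    """Return a callable minting a distinct TestResult per index."""

    def _factory(index: int = 0) -> TestResult:
        return TestResult(
            name=f"device-{index}",  # device name; assumed naming scheme
            test="FakeTestWithInput",  # test name seen in the test_models.py hunk
            categories=["test"],  # assumed default category
            description=f"ANTA test result {index}",  # assumed description
        )

    return _factory


@pytest.fixture
def result_manager_factory(test_result_factory: Callable[..., TestResult]) -> Callable[[int], ResultManager]:
    """Return a callable building a ResultManager pre-filled with `size` results."""

    def _factory(size: int = 0) -> ResultManager:
        manager = ResultManager()
        # Reuse the per-index factory so each stored result is distinct.
        manager.results = [test_result_factory(i) for i in range(size)]
        return manager

    return _factory

Under fixtures shaped like this, a test such as test_get_by_devices can seed a manager with result_manager_factory(3) and still mint extra standalone results via test_result_factory(), which is exactly the combination the updated signatures allow.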