diff --git a/.github/workflows/code-testing.yml b/.github/workflows/code-testing.yml index 1d2473bdc..de2e6bc64 100644 --- a/.github/workflows/code-testing.yml +++ b/.github/workflows/code-testing.yml @@ -119,6 +119,23 @@ jobs: run: pip install tox tox-gh-actions - name: "Run pytest via tox for ${{ matrix.python }}" run: tox + test-python-windows: + name: Pytest on 3.12 for windows + runs-on: windows-2022 + needs: [lint-python, type-python] + env: + # Required to prevent asyncssh to fail. + USERNAME: WindowsUser + steps: + - uses: actions/checkout@v4 + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + - name: Install dependencies + run: pip install tox tox-gh-actions + - name: Run pytest via tox for 3.12 on Windows + run: tox test-documentation: name: Build offline documentation for testing runs-on: ubuntu-20.04 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index de75925ab..02bed0388 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -46,7 +46,7 @@ repos: - '' - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.1 + rev: v0.8.3 hooks: - id: ruff name: Run Ruff linter diff --git a/anta/constants.py b/anta/constants.py index 175a4adcc..4dcef3050 100644 --- a/anta/constants.py +++ b/anta/constants.py @@ -17,3 +17,12 @@ - [Summary Totals Per Category](#summary-totals-per-category) - [Test Results](#test-results)""" """Table of Contents for the Markdown report.""" + +KNOWN_EOS_ERRORS = [ + r"BGP inactive", + r"VRF '.*' is not active", + r".* does not support IP", + r"IS-IS (.*) is disabled because: .*", + r"No source interface .*", +] +"""List of known EOS errors that should set a test status to 'failure' with the error message.""" diff --git a/anta/decorators.py b/anta/decorators.py index f5608ef26..043162323 100644 --- a/anta/decorators.py +++ b/anta/decorators.py @@ -17,7 +17,8 @@ F = TypeVar("F", bound=Callable[..., Any]) -def deprecated_test(new_tests: list[str] | None = None) -> Callable[[F], F]: +# TODO: Remove this decorator in ANTA v2.0.0 in favor of deprecated_test_class +def deprecated_test(new_tests: list[str] | None = None) -> Callable[[F], F]: # pragma: no cover """Return a decorator to log a message of WARNING severity when a test is deprecated. Parameters @@ -62,6 +63,57 @@ async def wrapper(*args: Any, **kwargs: Any) -> Any: return decorator +def deprecated_test_class(new_tests: list[str] | None = None, removal_in_version: str | None = None) -> Callable[[type[AntaTest]], type[AntaTest]]: + """Return a decorator to log a message of WARNING severity when a test is deprecated. + + Parameters + ---------- + new_tests + A list of new test classes that should replace the deprecated test. + removal_in_version + A string indicating the version in which the test will be removed. + + Returns + ------- + Callable[[type], type] + A decorator that can be used to wrap test functions. + + """ + + def decorator(cls: type[AntaTest]) -> type[AntaTest]: + """Actual decorator that logs the message. + + Parameters + ---------- + cls + The cls to be decorated. + + Returns + ------- + cls + The decorated cls. + """ + orig_init = cls.__init__ + + def new_init(*args: Any, **kwargs: Any) -> None: + """Overload __init__ to generate a warning message for deprecation.""" + if new_tests: + new_test_names = ", ".join(new_tests) + logger.warning("%s test is deprecated. 
Consider using the following new tests: %s.", cls.name, new_test_names) + else: + logger.warning("%s test is deprecated.", cls.name) + orig_init(*args, **kwargs) + + if removal_in_version is not None: + cls.__removal_in_version = removal_in_version + + # NOTE: we are ignoring mypy warning as we want to assign to a method here + cls.__init__ = new_init # type: ignore[method-assign] + return cls + + return decorator + + def skip_on_platforms(platforms: list[str]) -> Callable[[F], F]: """Return a decorator to skip a test based on the device's hardware model. diff --git a/anta/device.py b/anta/device.py index d7d2b0de2..561323f96 100644 --- a/anta/device.py +++ b/anta/device.py @@ -255,7 +255,7 @@ class AsyncEOSDevice(AntaDevice): """ - def __init__( + def __init__( # noqa: PLR0913 self, host: str, username: str, @@ -372,7 +372,7 @@ def _keys(self) -> tuple[Any, ...]: """ return (self._session.host, self._session.port) - async def _collect(self, command: AntaCommand, *, collection_id: str | None = None) -> None: # noqa: C901 function is too complex - because of many required except blocks + async def _collect(self, command: AntaCommand, *, collection_id: str | None = None) -> None: """Collect device command output from EOS using aio-eapi. Supports outformat `json` and `text` as output structure. @@ -409,15 +409,7 @@ async def _collect(self, command: AntaCommand, *, collection_id: str | None = No command.output = response[-1] except asynceapi.EapiCommandError as e: # This block catches exceptions related to EOS issuing an error. - command.errors = e.errors - if command.requires_privileges: - logger.error( - "Command '%s' requires privileged mode on %s. Verify user permissions and if the `enable` option is required.", command.command, self.name - ) - if command.supported: - logger.error("Command '%s' failed on %s: %s", command.command, self.name, e.errors[0] if len(e.errors) == 1 else e.errors) - else: - logger.debug("Command '%s' is not supported on '%s' (%s)", command.command, self.name, self.hw_model) + self._log_eapi_command_error(command, e) except TimeoutException as e: # This block catches Timeout exceptions. command.errors = [exc_to_str(e)] @@ -446,6 +438,18 @@ async def _collect(self, command: AntaCommand, *, collection_id: str | None = No anta_log_exception(e, f"An error occurred while issuing an eAPI request to {self.name}", logger) logger.debug("%s: %s", self.name, command) + def _log_eapi_command_error(self, command: AntaCommand, e: asynceapi.EapiCommandError) -> None: + """Appropriately log the eapi command error.""" + command.errors = e.errors + if command.requires_privileges: + logger.error("Command '%s' requires privileged mode on %s. Verify user permissions and if the `enable` option is required.", command.command, self.name) + if not command.supported: + logger.debug("Command '%s' is not supported on '%s' (%s)", command.command, self.name, self.hw_model) + elif command.returned_known_eos_error: + logger.debug("Command '%s' returned a known error '%s': %s", command.command, self.name, command.errors) + else: + logger.error("Command '%s' failed on %s: %s", command.command, self.name, e.errors[0] if len(e.errors) == 1 else e.errors) + async def refresh(self) -> None: """Update attributes of an AsyncEOSDevice instance. diff --git a/anta/input_models/avt.py b/anta/input_models/avt.py new file mode 100644 index 000000000..9219c2fc8 --- /dev/null +++ b/anta/input_models/avt.py @@ -0,0 +1,36 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. 
+# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Module containing input models for AVT tests.""" + +from __future__ import annotations + +from ipaddress import IPv4Address + +from pydantic import BaseModel, ConfigDict + + +class AVTPath(BaseModel): + """AVT (Adaptive Virtual Topology) model representing path details and associated information.""" + + model_config = ConfigDict(extra="forbid") + vrf: str = "default" + """VRF context. Defaults to `default`.""" + avt_name: str + """The name of the Adaptive Virtual Topology (AVT).""" + destination: IPv4Address + """The IPv4 address of the destination peer in the AVT.""" + next_hop: IPv4Address + """The IPv4 address of the next hop used to reach the AVT peer.""" + path_type: str | None = None + """Specifies the type of path for the AVT. If not specified, both types 'direct' and 'multihop' are considered.""" + + def __str__(self) -> str: + """Return a human-readable string representation of the AVTPath for reporting. + + Examples + -------- + AVT CONTROL-PLANE-PROFILE VRF: default (Destination: 10.101.255.2, Next-hop: 10.101.255.1) + + """ + return f"AVT {self.avt_name} VRF: {self.vrf} (Destination: {self.destination}, Next-hop: {self.next_hop})" diff --git a/anta/input_models/cvx.py b/anta/input_models/cvx.py new file mode 100644 index 000000000..4f937498c --- /dev/null +++ b/anta/input_models/cvx.py @@ -0,0 +1,19 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Module containing input models for CVX tests.""" + +from __future__ import annotations + +from typing import Literal + +from pydantic import BaseModel + +from anta.custom_types import Hostname + + +class CVXPeers(BaseModel): + """Model for a CVX Cluster Peer.""" + + peer_name: Hostname + registration_state: Literal["Connecting", "Connected", "Registration error", "Registration complete", "Unexpected peer state"] = "Registration complete" diff --git a/anta/input_models/interfaces.py b/anta/input_models/interfaces.py index 5036156de..9e33a2c54 100644 --- a/anta/input_models/interfaces.py +++ b/anta/input_models/interfaces.py @@ -7,17 +7,42 @@ from typing import Literal -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict -from anta.custom_types import Interface +from anta.custom_types import Interface, PortChannelInterface class InterfaceState(BaseModel): """Model for an interface state.""" + model_config = ConfigDict(extra="forbid") name: Interface """Interface to validate.""" - status: Literal["up", "down", "adminDown"] - """Expected status of the interface.""" + status: Literal["up", "down", "adminDown"] | None = None + """Expected status of the interface. Required field in the `VerifyInterfacesStatus` test.""" line_protocol_status: Literal["up", "down", "testing", "unknown", "dormant", "notPresent", "lowerLayerDown"] | None = None - """Expected line protocol status of the interface.""" + """Expected line protocol status of the interface. Optional field in the `VerifyInterfacesStatus` test.""" + portchannel: PortChannelInterface | None = None + """Port-Channel in which the interface is bundled. Required field in the `VerifyLACPInterfacesStatus` test.""" + lacp_rate_fast: bool = False + """Specifies the LACP timeout mode for the link aggregation group. + + Options: + - True: Also referred to as fast mode. + - False: The default mode, also known as slow mode. 
+ + Can be enabled in the `VerifyLACPInterfacesStatus` tests. + """ + + def __str__(self) -> str: + """Return a human-readable string representation of the InterfaceState for reporting. + + Examples + -------- + - Interface: Ethernet1 Port-Channel: Port-Channel100 + - Interface: Ethernet1 + """ + base_string = f"Interface: {self.name}" + if self.portchannel is not None: + base_string += f" Port-Channel: {self.portchannel}" + return base_string diff --git a/anta/input_models/security.py b/anta/input_models/security.py new file mode 100644 index 000000000..373d89735 --- /dev/null +++ b/anta/input_models/security.py @@ -0,0 +1,61 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Module containing input models for security tests.""" + +from __future__ import annotations + +from ipaddress import IPv4Address +from typing import Any +from warnings import warn + +from pydantic import BaseModel, ConfigDict + + +class IPSecPeer(BaseModel): + """IPSec (Internet Protocol Security) model represents the details of an IPv4 security peer.""" + + model_config = ConfigDict(extra="forbid") + peer: IPv4Address + """The IPv4 address of the security peer.""" + vrf: str = "default" + """VRF context. Defaults to `default`.""" + connections: list[IPSecConn] | None = None + """A list of IPv4 security connections associated with the peer. Defaults to None.""" + + def __str__(self) -> str: + """Return a string representation of the IPSecPeer model. Used in failure messages. + + Examples + -------- + - Peer: 1.1.1.1 VRF: default + """ + return f"Peer: {self.peer} VRF: {self.vrf}" + + +class IPSecConn(BaseModel): + """Details of an IPv4 security connection for a peer.""" + + model_config = ConfigDict(extra="forbid") + source_address: IPv4Address + """The IPv4 address of the source in the security connection.""" + destination_address: IPv4Address + """The IPv4 address of the destination in the security connection.""" + + +class IPSecPeers(IPSecPeer): # pragma: no cover + """Alias for the IPSecPeers model to maintain backward compatibility. + + When initialized, it will emit a deprecation warning and call the IPSecPeer model. + + TODO: Remove this class in ANTA v2.0.0. + """ + + def __init__(self, **data: Any) -> None: # noqa: ANN401 + """Initialize the IPSecPeer class, emitting a deprecation warning.""" + warn( + message="IPSecPeers model is deprecated and will be removed in ANTA v2.0.0. Use the IPSecPeer model instead.", + category=DeprecationWarning, + stacklevel=2, + ) + super().__init__(**data) diff --git a/anta/input_models/stun.py b/anta/input_models/stun.py new file mode 100644 index 000000000..d1af40508 --- /dev/null +++ b/anta/input_models/stun.py @@ -0,0 +1,35 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. 
+"""Module containing input models for services tests.""" + +from __future__ import annotations + +from ipaddress import IPv4Address + +from pydantic import BaseModel, ConfigDict + +from anta.custom_types import Port + + +class StunClientTranslation(BaseModel): + """STUN (Session Traversal Utilities for NAT) model represents the configuration of an IPv4-based client translations.""" + + model_config = ConfigDict(extra="forbid") + source_address: IPv4Address + """The IPv4 address of the STUN client""" + source_port: Port = 4500 + """The port number used by the STUN client for communication. Defaults to 4500.""" + public_address: IPv4Address | None = None + """The public-facing IPv4 address of the STUN client, discovered via the STUN server.""" + public_port: Port | None = None + """The public-facing port number of the STUN client, discovered via the STUN server.""" + + def __str__(self) -> str: + """Return a human-readable string representation of the StunClientTranslation for reporting. + + Examples + -------- + Client 10.0.0.1 Port: 4500 + """ + return f"Client {self.source_address} Port: {self.source_port}" diff --git a/anta/models.py b/anta/models.py index e5f0b58cb..a7a5adcfc 100644 --- a/anta/models.py +++ b/anta/models.py @@ -16,6 +16,7 @@ from pydantic import BaseModel, ConfigDict, ValidationError, create_model from anta import GITHUB_SUGGESTION +from anta.constants import KNOWN_EOS_ERRORS from anta.custom_types import REGEXP_EOS_BLACKLIST_CMDS, Revision from anta.logger import anta_log_exception, exc_to_str from anta.result_manager.models import AntaTestStatus, TestResult @@ -240,10 +241,29 @@ def requires_privileges(self) -> bool: @property def supported(self) -> bool: - """Return True if the command is supported on the device hardware platform, False otherwise. + """Indicates if the command is supported on the device. + + Returns + ------- + bool + True if the command is supported on the device hardware platform, False otherwise. Raises ------ + RuntimeError + If the command has not been collected and has not returned an error. + AntaDevice.collect() must be called before this property. + """ + if not self.collected and not self.error: + msg = f"Command '{self.command}' has not been collected and has not returned an error. Call AntaDevice.collect()." + + raise RuntimeError(msg) + return all("not supported on this hardware platform" not in e for e in self.errors) + + @property + def returned_known_eos_error(self) -> bool: + """Return True if the command returned a known_eos_error on the device, False otherwise. + RuntimeError If the command has not been collected and has not returned an error. AntaDevice.collect() must be called before this property. @@ -251,7 +271,7 @@ def supported(self) -> bool: if not self.collected and not self.error: msg = f"Command '{self.command}' has not been collected and has not returned an error. Call AntaDevice.collect()." 
raise RuntimeError(msg) - return not any("not supported on this hardware platform" in e for e in self.errors) + return any(any(re.match(pattern, e) for e in self.errors) for pattern in KNOWN_EOS_ERRORS) class AntaTemplateRenderError(RuntimeError): @@ -328,6 +348,8 @@ def test(self) -> None: # Optional class attributes name: ClassVar[str] description: ClassVar[str] + __removal_in_version: ClassVar[str] + """Internal class variable set by the `deprecated_test_class` decorator.""" # Mandatory class attributes # TODO: find a way to tell mypy these are mandatory for child classes @@ -629,14 +651,9 @@ async def wrapper( AntaTest.update_progress() return self.result - if cmds := self.failed_commands: - unsupported_commands = [f"'{c.command}' is not supported on {self.device.hw_model}" for c in cmds if not c.supported] - if unsupported_commands: - msg = f"Test {self.name} has been skipped because it is not supported on {self.device.hw_model}: {GITHUB_SUGGESTION}" - self.logger.warning(msg) - self.result.is_skipped("\n".join(unsupported_commands)) - else: - self.result.is_error(message="\n".join([f"{c.command} has failed: {', '.join(c.errors)}" for c in cmds])) + if self.failed_commands: + self._handle_failed_commands() + AntaTest.update_progress() return self.result @@ -656,6 +673,28 @@ async def wrapper( return wrapper + def _handle_failed_commands(self) -> None: + """Handle failed commands inside a test. + + There can be 3 types: + * unsupported on hardware commands which set the test status to 'skipped' + * known EOS error which set the test status to 'failure' + * unknown failure which set the test status to 'error' + """ + cmds = self.failed_commands + unsupported_commands = [f"'{c.command}' is not supported on {self.device.hw_model}" for c in cmds if not c.supported] + if unsupported_commands: + msg = f"Test {self.name} has been skipped because it is not supported on {self.device.hw_model}: {GITHUB_SUGGESTION}" + self.logger.warning(msg) + self.result.is_skipped("\n".join(unsupported_commands)) + return + returned_known_eos_error = [f"'{c.command}' failed on {self.device.name}: {', '.join(c.errors)}" for c in cmds if c.returned_known_eos_error] + if returned_known_eos_error: + self.result.is_failure("\n".join(returned_known_eos_error)) + return + + self.result.is_error(message="\n".join([f"{c.command} has failed: {', '.join(c.errors)}" for c in cmds])) + @classmethod def update_progress(cls: type[AntaTest]) -> None: """Update progress bar for all AntaTest objects if it exists.""" diff --git a/anta/py.typed b/anta/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/anta/reporter/csv_reporter.py b/anta/reporter/csv_reporter.py index 4554e6f60..3f5592388 100644 --- a/anta/reporter/csv_reporter.py +++ b/anta/reporter/csv_reporter.py @@ -107,7 +107,7 @@ def generate(cls, results: ResultManager, csv_filename: pathlib.Path) -> None: ] try: - with csv_filename.open(mode="w", encoding="utf-8") as csvfile: + with csv_filename.open(mode="w", encoding="utf-8", newline="") as csvfile: csvwriter = csv.writer( csvfile, delimiter=",", diff --git a/anta/result_manager/__init__.py b/anta/result_manager/__init__.py index 73fd6c86c..7a4b3c0db 100644 --- a/anta/result_manager/__init__.py +++ b/anta/result_manager/__init__.py @@ -6,15 +6,20 @@ from __future__ import annotations import json +import logging from collections import defaultdict from functools import cached_property from itertools import chain +from typing import Any from anta.result_manager.models import AntaTestStatus, TestResult 
from .models import CategoryStats, DeviceStats, TestStats +logger = logging.getLogger(__name__) + +# pylint: disable=too-many-instance-attributes class ResultManager: """Helper to manage Test Results and generate reports. @@ -68,6 +73,15 @@ class ResultManager: ] """ + _result_entries: list[TestResult] + status: AntaTestStatus + error_status: bool + + _device_stats: defaultdict[str, DeviceStats] + _category_stats: defaultdict[str, CategoryStats] + _test_stats: defaultdict[str, TestStats] + _stats_in_sync: bool + def __init__(self) -> None: """Class constructor. @@ -89,13 +103,16 @@ def __init__(self) -> None: If the status of the added test is error, the status is untouched and the error_status is set to True. """ + self.reset() + + def reset(self) -> None: + """Create or reset the attributes of the ResultManager instance.""" self._result_entries: list[TestResult] = [] self.status: AntaTestStatus = AntaTestStatus.UNSET self.error_status = False - self.device_stats: defaultdict[str, DeviceStats] = defaultdict(DeviceStats) - self.category_stats: defaultdict[str, CategoryStats] = defaultdict(CategoryStats) - self.test_stats: defaultdict[str, TestStats] = defaultdict(TestStats) + # Initialize the statistics attributes + self._reset_stats() def __len__(self) -> int: """Implement __len__ method to count number of results.""" @@ -110,26 +127,43 @@ def results(self) -> list[TestResult]: def results(self, value: list[TestResult]) -> None: """Set the list of TestResult.""" # When setting the results, we need to reset the state of the current instance - self._result_entries = [] - self.status = AntaTestStatus.UNSET - self.error_status = False - - # Also reset the stats attributes - self.device_stats = defaultdict(DeviceStats) - self.category_stats = defaultdict(CategoryStats) - self.test_stats = defaultdict(TestStats) + self.reset() for result in value: self.add(result) + @property + def dump(self) -> list[dict[str, Any]]: + """Get a list of dictionary of the results.""" + return [result.model_dump() for result in self._result_entries] + @property def json(self) -> str: """Get a JSON representation of the results.""" - return json.dumps([result.model_dump() for result in self._result_entries], indent=4) + return json.dumps(self.dump, indent=4) + + @property + def device_stats(self) -> defaultdict[str, DeviceStats]: + """Get the device statistics.""" + self._ensure_stats_in_sync() + return self._device_stats + + @property + def category_stats(self) -> defaultdict[str, CategoryStats]: + """Get the category statistics.""" + self._ensure_stats_in_sync() + return self._category_stats + + @property + def test_stats(self) -> defaultdict[str, TestStats]: + """Get the test statistics.""" + self._ensure_stats_in_sync() + return self._test_stats @property def sorted_category_stats(self) -> dict[str, CategoryStats]: """A property that returns the category_stats dictionary sorted by key name.""" + self._ensure_stats_in_sync() return dict(sorted(self.category_stats.items())) @cached_property @@ -158,6 +192,13 @@ def _update_status(self, test_status: AntaTestStatus) -> None: elif self.status == "success" and test_status == "failure": self.status = AntaTestStatus.FAILURE + def _reset_stats(self) -> None: + """Create or reset the statistics attributes.""" + self._device_stats = defaultdict(DeviceStats) + self._category_stats = defaultdict(CategoryStats) + self._test_stats = defaultdict(TestStats) + self._stats_in_sync = False + def _update_stats(self, result: TestResult) -> None: """Update the statistics based on the 
test result. @@ -169,7 +210,7 @@ def _update_stats(self, result: TestResult) -> None: count_attr = f"tests_{result.result}_count" # Update device stats - device_stats: DeviceStats = self.device_stats[result.name] + device_stats: DeviceStats = self._device_stats[result.name] setattr(device_stats, count_attr, getattr(device_stats, count_attr) + 1) if result.result in ("failure", "error"): device_stats.tests_failure.add(result.test) @@ -179,16 +220,34 @@ def _update_stats(self, result: TestResult) -> None: # Update category stats for category in result.categories: - category_stats: CategoryStats = self.category_stats[category] + category_stats: CategoryStats = self._category_stats[category] setattr(category_stats, count_attr, getattr(category_stats, count_attr) + 1) # Update test stats count_attr = f"devices_{result.result}_count" - test_stats: TestStats = self.test_stats[result.test] + test_stats: TestStats = self._test_stats[result.test] setattr(test_stats, count_attr, getattr(test_stats, count_attr) + 1) if result.result in ("failure", "error"): test_stats.devices_failure.add(result.name) + def _compute_stats(self) -> None: + """Compute all statistics from the current results.""" + logger.info("Computing statistics for all results.") + + # Reset all stats + self._reset_stats() + + # Recompute stats for all results + for result in self._result_entries: + self._update_stats(result) + + self._stats_in_sync = True + + def _ensure_stats_in_sync(self) -> None: + """Ensure statistics are in sync with current results.""" + if not self._stats_in_sync: + self._compute_stats() + def add(self, result: TestResult) -> None: """Add a result to the ResultManager instance. @@ -202,7 +261,7 @@ def add(self, result: TestResult) -> None: """ self._result_entries.append(result) self._update_status(result.result) - self._update_stats(result) + self._stats_in_sync = False # Every time a new result is added, we need to clear the cached property self.__dict__.pop("results_by_status", None) diff --git a/anta/runner.py b/anta/runner.py index 7b0eadf75..4c6da928a 100644 --- a/anta/runner.py +++ b/anta/runner.py @@ -8,7 +8,7 @@ import asyncio import logging import os -import resource +import sys from collections import defaultdict from typing import TYPE_CHECKING, Any @@ -26,35 +26,38 @@ from anta.result_manager import ResultManager from anta.result_manager.models import TestResult -logger = logging.getLogger(__name__) +if os.name == "posix": + import resource -DEFAULT_NOFILE = 16384 + DEFAULT_NOFILE = 16384 + def adjust_rlimit_nofile() -> tuple[int, int]: + """Adjust the maximum number of open file descriptors for the ANTA process. -def adjust_rlimit_nofile() -> tuple[int, int]: - """Adjust the maximum number of open file descriptors for the ANTA process. + The limit is set to the lower of the current hard limit and the value of the ANTA_NOFILE environment variable. - The limit is set to the lower of the current hard limit and the value of the ANTA_NOFILE environment variable. + If the `ANTA_NOFILE` environment variable is not set or is invalid, `DEFAULT_NOFILE` is used. - If the `ANTA_NOFILE` environment variable is not set or is invalid, `DEFAULT_NOFILE` is used. + Returns + ------- + tuple[int, int] + The new soft and hard limits for open file descriptors. 
+ """ + try: + nofile = int(os.environ.get("ANTA_NOFILE", DEFAULT_NOFILE)) + except ValueError as exception: + logger.warning("The ANTA_NOFILE environment variable value is invalid: %s\nDefault to %s.", exc_to_str(exception), DEFAULT_NOFILE) + nofile = DEFAULT_NOFILE + + limits = resource.getrlimit(resource.RLIMIT_NOFILE) + logger.debug("Initial limit numbers for open file descriptors for the current ANTA process: Soft Limit: %s | Hard Limit: %s", limits[0], limits[1]) + nofile = min(limits[1], nofile) + logger.debug("Setting soft limit for open file descriptors for the current ANTA process to %s", nofile) + resource.setrlimit(resource.RLIMIT_NOFILE, (nofile, limits[1])) + return resource.getrlimit(resource.RLIMIT_NOFILE) - Returns - ------- - tuple[int, int] - The new soft and hard limits for open file descriptors. - """ - try: - nofile = int(os.environ.get("ANTA_NOFILE", DEFAULT_NOFILE)) - except ValueError as exception: - logger.warning("The ANTA_NOFILE environment variable value is invalid: %s\nDefault to %s.", exc_to_str(exception), DEFAULT_NOFILE) - nofile = DEFAULT_NOFILE - limits = resource.getrlimit(resource.RLIMIT_NOFILE) - logger.debug("Initial limit numbers for open file descriptors for the current ANTA process: Soft Limit: %s | Hard Limit: %s", limits[0], limits[1]) - nofile = min(limits[1], nofile) - logger.debug("Setting soft limit for open file descriptors for the current ANTA process to %s", nofile) - resource.setrlimit(resource.RLIMIT_NOFILE, (nofile, limits[1])) - return resource.getrlimit(resource.RLIMIT_NOFILE) +logger = logging.getLogger(__name__) def log_cache_statistics(devices: list[AntaDevice]) -> None: @@ -167,7 +170,8 @@ def prepare_tests( if total_test_count == 0: msg = ( - f"There are no tests{f' matching the tags {tags} ' if tags else ' '}to run in the current test catalog and device inventory, please verify your inputs." + f"There are no tests{f' matching the tags {tags} ' if tags else ' '}to run in the current " + "test catalog and device inventory, please verify your inputs." ) logger.warning(msg) return None @@ -175,7 +179,7 @@ def prepare_tests( return device_to_tests -def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinition]], manager: ResultManager) -> list[Coroutine[Any, Any, TestResult]]: +def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinition]], manager: ResultManager | None = None) -> list[Coroutine[Any, Any, TestResult]]: """Get the coroutines for the ANTA run. Parameters @@ -183,7 +187,7 @@ def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinitio selected_tests A mapping of devices to the tests to run. The selected tests are generated by the `prepare_tests` function. manager - A ResultManager + An optional ResultManager object to pre-populate with the test results. Used in dry-run mode. Returns ------- @@ -195,7 +199,8 @@ def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinitio for test in test_definitions: try: test_instance = test.test(device=device, inputs=test.inputs) - manager.add(test_instance.result) + if manager is not None: + manager.add(test_instance.result) coros.append(test_instance.test()) except Exception as e: # noqa: PERF203, BLE001 # An AntaTest instance is potentially user-defined code. 
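Reviewer note on the hunk above: the stdlib `resource` module does not exist off POSIX, so both the import and the rlimit tuning are now gated on `os.name == "posix"` — this is what lets the new `test-python-windows` CI job import `anta.runner` at all. A standalone sketch of the same guard pattern, runnable on both platforms (stdlib only; `DEFAULT_NOFILE` mirrors the value above, the final `print` is illustrative):

```python
import os
import sys

DEFAULT_NOFILE = 16384  # same default as anta/runner.py

if os.name == "posix":
    import resource  # not importable on Windows

    def adjust_rlimit_nofile() -> tuple[int, int]:
        """Raise the soft NOFILE limit, capped by the hard limit."""
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        new_soft = min(hard, DEFAULT_NOFILE)
        resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, hard))
        return resource.getrlimit(resource.RLIMIT_NOFILE)

    limits = adjust_rlimit_nofile()
else:
    # No rlimit concept to manage; treat the limit as effectively
    # unbounded, as main() does in the following hunk.
    limits = (sys.maxsize, sys.maxsize)

print(f"Soft limit for open file descriptors: {limits[0]}")
```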
@@ -211,7 +216,7 @@ def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinitio


 @cprofile()
-async def main(  # noqa: PLR0913
+async def main(
     manager: ResultManager,
     inventory: AntaInventory,
     catalog: AntaCatalog,
@@ -246,9 +251,6 @@ async def main(  # noqa: PLR0913
     dry_run
         Build the list of coroutines to run and stop before test execution.
     """
-    # Adjust the maximum number of open file descriptors for the ANTA process
-    limits = adjust_rlimit_nofile()
-
     if not catalog.tests:
         logger.info("The list of tests is empty, exiting")
         return
@@ -269,10 +271,19 @@ async def main(  # noqa: PLR0913
         "--- ANTA NRFU Run Information ---\n"
         f"Number of devices: {len(inventory)} ({len(selected_inventory)} established)\n"
         f"Total number of selected tests: {final_tests_count}\n"
-        f"Maximum number of open file descriptors for the current ANTA process: {limits[0]}\n"
-        "---------------------------------"
     )

+    if os.name == "posix":
+        # Adjust the maximum number of open file descriptors for the ANTA process
+        limits = adjust_rlimit_nofile()
+        run_info += f"Maximum number of open file descriptors for the current ANTA process: {limits[0]}\n"
+    else:
+        # Running on a non-POSIX system, cannot manage the resource.
+        limits = (sys.maxsize, sys.maxsize)
+        run_info += "Running on a non-POSIX system, cannot adjust the maximum number of file descriptors.\n"
+
+    run_info += "---------------------------------"
+
     logger.info(run_info)

     if final_tests_count > limits[0]:
@@ -282,7 +293,7 @@ async def main(  # noqa: PLR0913
             "Please consult the ANTA FAQ."
         )

-    coroutines = get_coroutines(selected_tests, manager)
+    coroutines = get_coroutines(selected_tests, manager if dry_run else None)

     if dry_run:
         logger.info("Dry-run mode, exiting before running the tests.")
@@ -294,6 +305,8 @@ async def main(  # noqa: PLR0913
         AntaTest.nrfu_task = AntaTest.progress.add_task("Running NRFU Tests...", total=len(coroutines))

     with Catchtime(logger=logger, message="Running ANTA tests"):
-        await asyncio.gather(*coroutines)
+        results = await asyncio.gather(*coroutines)
+        for result in results:
+            manager.add(result)

     log_cache_statistics(selected_inventory.devices)
diff --git a/anta/tests/avt.py b/anta/tests/avt.py
index 66b30babe..b0f1a465b 100644
--- a/anta/tests/avt.py
+++ b/anta/tests/avt.py
@@ -7,12 +7,10 @@
 # mypy: disable-error-code=attr-defined
 from __future__ import annotations

-from ipaddress import IPv4Address
 from typing import ClassVar

-from pydantic import BaseModel
-
 from anta.decorators import skip_on_platforms
+from anta.input_models.avt import AVTPath
 from anta.models import AntaCommand, AntaTemplate, AntaTest
 from anta.tools import get_value

@@ -71,14 +69,22 @@ def test(self) -> None:


 class VerifyAVTSpecificPath(AntaTest):
-    """Verifies the status and type of an Adaptive Virtual Topology (AVT) path for a specified VRF.
+    """Verifies the Adaptive Virtual Topology (AVT) path.
+
+    This test performs the following checks for each specified AVT path:
+
+    1. Confirming that the AVT paths are associated with the specified VRF.
+    2. Verifying that each AVT path is active and valid.
+    3. Ensuring that the AVT path matches the specified type (direct/multihop) if provided.

     Expected Results
     ----------------
-    * Success: The test will pass if all AVT paths for the specified VRF are active, valid, and match the specified type (direct/multihop) if provided.
-      If multiple paths are configured, the test will pass only if all the paths are valid and active. 
- * Failure: The test will fail if no AVT paths are configured for the specified VRF, or if any configured path is not active, valid, - or does not match the specified type. + * Success: The test will pass if all of the following conditions are met: + - All AVT paths for the specified VRF are active, valid, and match the specified path type (direct/multihop), if provided. + - If multiple paths are configured, the test will pass only if all paths meet these criteria. + * Failure: The test will fail if any of the following conditions are met: + - No AVT paths are configured for the specified VRF. + - Any configured path is inactive, invalid, or does not match the specified type. Examples -------- @@ -94,35 +100,16 @@ class VerifyAVTSpecificPath(AntaTest): ``` """ - description = "Verifies the status and type of an AVT path for a specified VRF." categories: ClassVar[list[str]] = ["avt"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [ - AntaTemplate(template="show adaptive-virtual-topology path vrf {vrf} avt {avt_name} destination {destination}") - ] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show adaptive-virtual-topology path", revision=1)] class Input(AntaTest.Input): """Input model for the VerifyAVTSpecificPath test.""" - avt_paths: list[AVTPaths] + avt_paths: list[AVTPath] """List of AVT paths to verify.""" - - class AVTPaths(BaseModel): - """Model for the details of AVT paths.""" - - vrf: str = "default" - """The VRF for the AVT path. Defaults to 'default' if not provided.""" - avt_name: str - """Name of the adaptive virtual topology.""" - destination: IPv4Address - """The IPv4 address of the AVT peer.""" - next_hop: IPv4Address - """The IPv4 address of the next hop for the AVT peer.""" - path_type: str | None = None - """The type of the AVT path. 
If not provided, both 'direct' and 'multihop' paths are considered.""" - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for each input AVT path/peer.""" - return [template.render(vrf=path.vrf, avt_name=path.avt_name, destination=path.destination) for path in self.inputs.avt_paths] + AVTPaths: ClassVar[type[AVTPath]] = AVTPath + """To maintain backward compatibility.""" @skip_on_platforms(["cEOSLab", "vEOS-lab"]) @AntaTest.anta_test @@ -131,59 +118,39 @@ def test(self) -> None: # Assume the test is successful until a failure is detected self.result.is_success() - # Process each command in the instance - for command, input_avt in zip(self.instance_commands, self.inputs.avt_paths): - # Extract the command output and parameters - vrf = command.params.vrf - avt_name = command.params.avt_name - peer = str(command.params.destination) - - command_output = command.json_output.get("vrfs", {}) - - # If no AVT is configured, mark the test as failed and skip to the next command - if not command_output: - self.result.is_failure(f"AVT configuration for peer '{peer}' under topology '{avt_name}' in VRF '{vrf}' is not found.") - continue - - # Extract the AVT paths - avt_paths = get_value(command_output, f"{vrf}.avts.{avt_name}.avtPaths") - next_hop, input_path_type = str(input_avt.next_hop), input_avt.path_type + command_output = self.instance_commands[0].json_output + for avt_path in self.inputs.avt_paths: + if (path_output := get_value(command_output, f"vrfs.{avt_path.vrf}.avts.{avt_path.avt_name}.avtPaths")) is None: + self.result.is_failure(f"{avt_path} - No AVT path configured") + return - nexthop_path_found = path_type_found = False + path_found = path_type_found = False # Check each AVT path - for path, path_data in avt_paths.items(): - # If the path does not match the expected next hop, skip to the next path - if path_data.get("nexthopAddr") != next_hop: - continue - - nexthop_path_found = True + for path, path_data in path_output.items(): + dest = path_data.get("destination") + nexthop = path_data.get("nexthopAddr") path_type = "direct" if get_value(path_data, "flags.directPath") else "multihop" - # If the path type does not match the expected path type, skip to the next path - if input_path_type and path_type != input_path_type: - continue - - path_type_found = True - valid = get_value(path_data, "flags.valid") - active = get_value(path_data, "flags.active") - - # Check the path status and type against the expected values - if not all([valid, active]): - failure_reasons = [] - if not get_value(path_data, "flags.active"): - failure_reasons.append("inactive") - if not get_value(path_data, "flags.valid"): - failure_reasons.append("invalid") - # Construct the failure message prefix - failed_log = f"AVT path '{path}' for topology '{avt_name}' in VRF '{vrf}'" - self.result.is_failure(f"{failed_log} is {', '.join(failure_reasons)}.") - - # If no matching next hop or path type was found, mark the test as failed - if not nexthop_path_found or not path_type_found: - self.result.is_failure( - f"No '{input_path_type}' path found with next-hop address '{next_hop}' for AVT peer '{peer}' under topology '{avt_name}' in VRF '{vrf}'." 
- ) + if not avt_path.path_type: + path_found = all([dest == str(avt_path.destination), nexthop == str(avt_path.next_hop)]) + + else: + path_type_found = all([dest == str(avt_path.destination), nexthop == str(avt_path.next_hop), path_type == avt_path.path_type]) + if path_type_found: + path_found = True + # Check the path status and type against the expected values + valid = get_value(path_data, "flags.valid") + active = get_value(path_data, "flags.active") + if not all([valid, active]): + self.result.is_failure(f"{avt_path} - Incorrect path {path} - Valid: {valid}, Active: {active}") + + # If no matching path found, mark the test as failed + if not path_found: + if avt_path.path_type and not path_type_found: + self.result.is_failure(f"{avt_path} Path Type: {avt_path.path_type} - Path not found") + else: + self.result.is_failure(f"{avt_path} - Path not found") class VerifyAVTRole(AntaTest): diff --git a/anta/tests/connectivity.py b/anta/tests/connectivity.py index f612e0fd9..3bd616a61 100644 --- a/anta/tests/connectivity.py +++ b/anta/tests/connectivity.py @@ -56,13 +56,12 @@ class Input(AntaTest.Input): def render(self, template: AntaTemplate) -> list[AntaCommand]: """Render the template for each host in the input list.""" - commands = [] - for host in self.inputs.hosts: - # df_bit includes leading space when enabled, empty string when disabled - df_bit = " df-bit" if host.df_bit else "" - command = template.render(destination=host.destination, source=host.source, vrf=host.vrf, repeat=host.repeat, size=host.size, df_bit=df_bit) - commands.append(command) - return commands + return [ + template.render( + destination=host.destination, source=host.source, vrf=host.vrf, repeat=host.repeat, size=host.size, df_bit=" df-bit" if host.df_bit else "" + ) + for host in self.inputs.hosts + ] @AntaTest.anta_test def test(self) -> None: diff --git a/anta/tests/cvx.py b/anta/tests/cvx.py index 63ec336a9..61600829c 100644 --- a/anta/tests/cvx.py +++ b/anta/tests/cvx.py @@ -7,12 +7,15 @@ # mypy: disable-error-code=attr-defined from __future__ import annotations -from typing import TYPE_CHECKING, ClassVar +from typing import TYPE_CHECKING, Any, ClassVar, Literal +from anta.custom_types import PositiveInteger from anta.models import AntaCommand, AntaTest +from anta.tools import get_value if TYPE_CHECKING: from anta.models import AntaTemplate +from anta.input_models.cvx import CVXPeers class VerifyMcsClientMounts(AntaTest): @@ -71,7 +74,7 @@ class VerifyManagementCVX(AntaTest): """ categories: ClassVar[list[str]] = ["cvx"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management cvx", revision=1)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show management cvx", revision=3)] class Input(AntaTest.Input): """Input model for the VerifyManagementCVX test.""" @@ -84,6 +87,197 @@ def test(self) -> None: """Main test function for VerifyManagementCVX.""" command_output = self.instance_commands[0].json_output self.result.is_success() - cluster_status = command_output["clusterStatus"] - if (cluster_state := cluster_status.get("enabled")) != self.inputs.enabled: + if (cluster_state := get_value(command_output, "clusterStatus.enabled")) != self.inputs.enabled: self.result.is_failure(f"Management CVX status is not valid: {cluster_state}") + + +class VerifyMcsServerMounts(AntaTest): + """Verify if all MCS server mounts are in a MountComplete state. 
+
+    Expected Results
+    ----------------
+    * Success: The test will pass if all MCS mount statuses on the MCS server are mountStateMountComplete.
+    * Failure: The test will fail if any MCS server mount status is not mountStateMountComplete.
+
+    Examples
+    --------
+    ```yaml
+    anta.tests.cvx:
+      - VerifyMcsServerMounts:
+          connections_count: 100
+    ```
+    """
+
+    categories: ClassVar[list[str]] = ["cvx"]
+    commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show cvx mounts", revision=1)]
+
+    mcs_path_types: ClassVar[list[str]] = ["Mcs::ApiConfigRedundancyStatus", "Mcs::ActiveFlows", "Mcs::Client::Status"]
+    """The list of expected MCS path types to verify."""
+
+    class Input(AntaTest.Input):
+        """Input model for the VerifyMcsServerMounts test."""
+
+        connections_count: int
+        """The expected number of active CVX connections with mountStateMountComplete."""
+
+    def validate_mount_states(self, mount: dict[str, Any], hostname: str) -> None:
+        """Validate the mount states of a given mount."""
+        mount_states = mount["mountStates"][0]
+
+        if (num_path_states := len(mount_states["pathStates"])) != (expected_num := len(self.mcs_path_types)):
+            self.result.is_failure(f"Incorrect number of mount path states for {hostname} - Expected: {expected_num}, Actual: {num_path_states}")
+
+        for path in mount_states["pathStates"]:
+            if (path_type := path.get("type")) not in self.mcs_path_types:
+                self.result.is_failure(f"Unexpected MCS path type for {hostname}: '{path_type}'.")
+            if (path_state := path.get("state")) != "mountStateMountComplete":
+                self.result.is_failure(f"MCS server mount state for path '{path_type}' is not valid for {hostname}: '{path_state}'.")
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Main test function for VerifyMcsServerMounts."""
+        command_output = self.instance_commands[0].json_output
+        self.result.is_success()
+        active_count = 0
+
+        if not (connections := command_output.get("connections")):
+            self.result.is_failure("CVX connections are not available.")
+            return
+
+        for connection in connections:
+            mounts = connection.get("mounts", [])
+            hostname = connection["hostname"]
+
+            mcs_mounts = [mount for mount in mounts if mount["service"] == "Mcs"]
+
+            if not mounts:
+                self.result.is_failure(f"No mount status for {hostname}")
+                continue
+
+            if not mcs_mounts:
+                self.result.is_failure(f"MCS mount state not detected for {hostname}")
+            else:
+                for mount in mcs_mounts:
+                    self.validate_mount_states(mount, hostname)
+                    active_count += 1
+
+        if active_count != self.inputs.connections_count:
+            self.result.is_failure(f"Incorrect CVX successful connections count. Expected: {self.inputs.connections_count}, Actual: {active_count}")
+
+
+class VerifyActiveCVXConnections(AntaTest):
+    """Verifies the number of active CVX Connections.
+
+    Expected Results
+    ----------------
+    * Success: The test will pass if the number of active connections is equal to the expected number of connections.
+    * Failure: The test will fail otherwise. 
+
+    Examples
+    --------
+    ```yaml
+    anta.tests.cvx:
+      - VerifyActiveCVXConnections:
+          connections_count: 100
+    ```
+    """
+
+    categories: ClassVar[list[str]] = ["cvx"]
+    # TODO: @gmuloc - cover "% Unavailable command (controller not ready)"
+    commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show cvx connections brief", revision=1)]
+
+    class Input(AntaTest.Input):
+        """Input model for the VerifyActiveCVXConnections test."""
+
+        connections_count: PositiveInteger
+        """The expected number of active CVX Connections."""
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Main test function for VerifyActiveCVXConnections."""
+        command_output = self.instance_commands[0].json_output
+        self.result.is_success()
+
+        if not (connections := command_output.get("connections")):
+            self.result.is_failure("CVX connections are not available.")
+            return
+
+        active_count = len([connection for connection in connections if connection.get("oobConnectionActive")])
+
+        if self.inputs.connections_count != active_count:
+            self.result.is_failure(f"Incorrect CVX active connections count. Expected: {self.inputs.connections_count}, Actual: {active_count}")
+
+
+class VerifyCVXClusterStatus(AntaTest):
+    """Verifies the CVX Server Cluster status.
+
+    Expected Results
+    ----------------
+    * Success: The test will pass if all of the following conditions are met:
+        - CVX Enabled state is true.
+        - Cluster Mode is true.
+        - Role is either Master or Standby.
+        - peer_status matches the defined state.
+    * Failure: The test will fail if any of the success conditions is not met.
+
+    Examples
+    --------
+    ```yaml
+    anta.tests.cvx:
+      - VerifyCVXClusterStatus:
+          role: Master
+          peer_status:
+            - peer_name: cvx-red-2
+              registration_state: Registration complete
+            - peer_name: cvx-red-3
+              registration_state: Registration error
+    ```
+    """
+
+    categories: ClassVar[list[str]] = ["cvx"]
+    commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show cvx", revision=1)]
+
+    class Input(AntaTest.Input):
+        """Input model for the VerifyCVXClusterStatus test."""
+
+        role: Literal["Master", "Standby", "Disconnected"] = "Master"
+        peer_status: list[CVXPeers]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Run the main test for VerifyCVXClusterStatus."""
+        command_output = self.instance_commands[0].json_output
+        self.result.is_success()
+
+        # Validate Server enabled status
+        if not command_output.get("enabled"):
+            self.result.is_failure("CVX Server status is not enabled")
+
+        # Validate cluster status and mode
+        if not (cluster_status := command_output.get("clusterStatus")) or not command_output.get("clusterMode"):
+            self.result.is_failure("CVX Server is not a cluster")
+            return
+
+        # Check cluster role
+        if (cluster_role := cluster_status.get("role")) != self.inputs.role:
+            self.result.is_failure(f"CVX Role is not valid: {cluster_role}")
+            return
+
+        # Validate peer status
+        peer_cluster = cluster_status.get("peerStatus", {})
+
+        # Check peer count
+        if (num_of_peers := len(peer_cluster)) != (expected_num_of_peers := len(self.inputs.peer_status)):
+            self.result.is_failure(f"Unexpected number of peers - Expected: {expected_num_of_peers}, Actual: {num_of_peers}")
+
+        # Check each peer
+        for peer in self.inputs.peer_status:
+            # Retrieve the peer status from the peer cluster
+            if (eos_peer_status := get_value(peer_cluster, peer.peer_name, separator="..")) is None:
+                self.result.is_failure(f"{peer.peer_name} is not present")
+                continue
+
+            # Validate the registration state of the peer
+            if (peer_reg_state := 
eos_peer_status.get("registrationState")) != peer.registration_state:
+                self.result.is_failure(f"{peer.peer_name} registration state is not valid: {peer_reg_state}")
diff --git a/anta/tests/interfaces.py b/anta/tests/interfaces.py
index dc6938110..b87c394f3 100644
--- a/anta/tests/interfaces.py
+++ b/anta/tests/interfaces.py
@@ -15,11 +15,11 @@
 from pydantic_extra_types.mac_address import MacAddress

 from anta import GITHUB_SUGGESTION
-from anta.custom_types import EthernetInterface, Interface, Percent, PortChannelInterface, PositiveInteger
+from anta.custom_types import EthernetInterface, Interface, Percent, PositiveInteger
 from anta.decorators import skip_on_platforms
 from anta.input_models.interfaces import InterfaceState
 from anta.models import AntaCommand, AntaTemplate, AntaTest
-from anta.tools import custom_division, get_failed_logs, get_item, get_value
+from anta.tools import custom_division, format_data, get_failed_logs, get_item, get_value

 BPS_GBPS_CONVERSIONS = 1000000000

@@ -848,17 +848,27 @@ def test(self) -> None:


 class VerifyLACPInterfacesStatus(AntaTest):
-    """Verifies the Link Aggregation Control Protocol (LACP) status of the provided interfaces.
+    """Verifies the Link Aggregation Control Protocol (LACP) status of the interfaces.

-    - Verifies that the interface is a member of the LACP port channel.
-    - Ensures that the synchronization is established.
-    - Ensures the interfaces are in the correct state for collecting and distributing traffic.
-    - Validates that LACP settings, such as timeouts, are correctly configured. (i.e The long timeout mode, also known as "slow" mode, is the default setting.)
+    This test performs the following checks for each specified interface:
+
+    1. Verifies that the interface is a member of the LACP port channel.
+    2. Verifies LACP port states and operational status:
+      - Activity: Active LACP mode (initiates)
+      - Timeout: Short (Fast Mode), Long (Slow Mode - default)
+      - Aggregation: Port aggregable
+      - Synchronization: Port in sync with partner
+      - Collecting: Incoming frames aggregating
+      - Distributing: Outgoing frames aggregating

     Expected Results
     ----------------
-    * Success: The test will pass if the provided interfaces are bundled in port channel and all specified parameters are correct.
-    * Failure: The test will fail if any interface is not bundled in port channel or any of specified parameter is not correct.
+    * Success: Interface is bundled and all LACP states match expected values for both actor and partner.
+    * Failure: If any of the following occur:
+        - Interface or port channel is not configured.
+        - Interface is not bundled in port channel.
+        - Actor or partner port LACP states don't match expected configuration.
+        - LACP rate (timeout) mismatch when fast mode is configured. 
Examples -------- @@ -872,25 +882,14 @@ class VerifyLACPInterfacesStatus(AntaTest): """ categories: ClassVar[list[str]] = ["interfaces"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show lacp interface {interface}", revision=1)] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaCommand(command="show lacp interface", revision=1)] class Input(AntaTest.Input): """Input model for the VerifyLACPInterfacesStatus test.""" - interfaces: list[LACPInterface] - """List of LACP member interface.""" - - class LACPInterface(BaseModel): - """Model for an LACP member interface.""" - - name: EthernetInterface - """Ethernet interface to validate.""" - portchannel: PortChannelInterface - """Port Channel in which the interface is bundled.""" - - def render(self, template: AntaTemplate) -> list[AntaCommand]: - """Render the template for each interface in the input list.""" - return [template.render(interface=interface.name) for interface in self.inputs.interfaces] + interfaces: list[InterfaceState] + """List of interfaces with their expected state.""" + InterfaceState: ClassVar[type[InterfaceState]] = InterfaceState @AntaTest.anta_test def test(self) -> None: @@ -900,21 +899,17 @@ def test(self) -> None: # Member port verification parameters. member_port_details = ["activity", "aggregation", "synchronization", "collecting", "distributing", "timeout"] - # Iterating over command output for different interfaces - for command, input_entry in zip(self.instance_commands, self.inputs.interfaces): - interface = input_entry.name - portchannel = input_entry.portchannel - + command_output = self.instance_commands[0].json_output + for interface in self.inputs.interfaces: # Verify if a PortChannel is configured with the provided interface - if not (interface_details := get_value(command.json_output, f"portChannels.{portchannel}.interfaces.{interface}")): - self.result.is_failure(f"Interface '{interface}' is not configured to be a member of LACP '{portchannel}'.") + if not (interface_details := get_value(command_output, f"portChannels..{interface.portchannel}..interfaces..{interface.name}", separator="..")): + self.result.is_failure(f"{interface} - Not configured") continue # Verify the interface is bundled in port channel. actor_port_status = interface_details.get("actorPortStatus") if actor_port_status != "bundled": - message = f"For Interface {interface}:\nExpected `bundled` as the local port status, but found `{actor_port_status}` instead.\n" - self.result.is_failure(message) + self.result.is_failure(f"{interface} - Not bundled - Port Status: {actor_port_status}") continue # Collecting actor and partner port details @@ -929,21 +924,12 @@ def test(self) -> None: # Forming expected interface details expected_details = {param: param != "timeout" for param in member_port_details} - expected_interface_output = {"actor_port_details": expected_details, "partner_port_details": expected_details} + # Updating the short LACP timeout, if expected. 
+ if interface.lacp_rate_fast: + expected_details["timeout"] = True - # Forming failure message - if actual_interface_output != expected_interface_output: - message = f"For Interface {interface}:\n" - actor_port_failed_log = get_failed_logs( - expected_interface_output.get("actor_port_details", {}), actual_interface_output.get("actor_port_details", {}) - ) - partner_port_failed_log = get_failed_logs( - expected_interface_output.get("partner_port_details", {}), actual_interface_output.get("partner_port_details", {}) - ) - - if actor_port_failed_log: - message += f"Actor port details:{actor_port_failed_log}\n" - if partner_port_failed_log: - message += f"Partner port details:{partner_port_failed_log}\n" - - self.result.is_failure(message) + if (act_port_details := actual_interface_output["actor_port_details"]) != expected_details: + self.result.is_failure(f"{interface} - Actor port details mismatch - {format_data(act_port_details)}") + + if (part_port_details := actual_interface_output["partner_port_details"]) != expected_details: + self.result.is_failure(f"{interface} - Partner port details mismatch - {format_data(part_port_details)}") diff --git a/anta/tests/routing/bgp.py b/anta/tests/routing/bgp.py index 4f55a0f4b..f44729ec2 100644 --- a/anta/tests/routing/bgp.py +++ b/anta/tests/routing/bgp.py @@ -273,6 +273,7 @@ def test(self) -> None: # Check if the BGP session is established if peer["state"] != "Established": self.result.is_failure(f"{address_family} Peer: {peer['peerAddress']} - Session state is not established - State: {peer['state']}") + continue # Check if the AFI/SAFI state is negotiated capability_status = get_value(peer, f"neighborCapabilities.multiprotocolCaps.{address_family.eos_key}") @@ -280,10 +281,11 @@ def test(self) -> None: self.result.is_failure(f"{address_family} Peer: {peer['peerAddress']} - AFI/SAFI state is not negotiated - {format_data(capability_status)}") # Check the TCP session message queues - inq = peer["peerTcpInfo"]["inputQueueLength"] - outq = peer["peerTcpInfo"]["outputQueueLength"] - if address_family.check_tcp_queues and (inq != 0 or outq != 0): - self.result.is_failure(f"{address_family} Peer: {peer['peerAddress']} - Session has non-empty message queues - InQ: {inq}, OutQ: {outq}") + if address_family.check_tcp_queues: + inq = peer["peerTcpInfo"]["inputQueueLength"] + outq = peer["peerTcpInfo"]["outputQueueLength"] + if inq != 0 or outq != 0: + self.result.is_failure(f"{address_family} Peer: {peer['peerAddress']} - Session has non-empty message queues - InQ: {inq}, OutQ: {outq}") class VerifyBGPSpecificPeers(AntaTest): @@ -374,6 +376,7 @@ def test(self) -> None: # Check if the BGP session is established if peer_data["state"] != "Established": self.result.is_failure(f"{address_family} Peer: {peer_ip} - Session state is not established - State: {peer_data['state']}") + continue # Check if the AFI/SAFI state is negotiated capability_status = get_value(peer_data, f"neighborCapabilities.multiprotocolCaps.{address_family.eos_key}") @@ -384,10 +387,11 @@ def test(self) -> None: self.result.is_failure(f"{address_family} Peer: {peer_ip} - AFI/SAFI state is not negotiated - {format_data(capability_status)}") # Check the TCP session message queues - inq = peer_data["peerTcpInfo"]["inputQueueLength"] - outq = peer_data["peerTcpInfo"]["outputQueueLength"] - if address_family.check_tcp_queues and (inq != 0 or outq != 0): - self.result.is_failure(f"{address_family} Peer: {peer_ip} - Session has non-empty message queues - InQ: {inq}, OutQ: {outq}") + if 
address_family.check_tcp_queues: + inq = peer_data["peerTcpInfo"]["inputQueueLength"] + outq = peer_data["peerTcpInfo"]["outputQueueLength"] + if inq != 0 or outq != 0: + self.result.is_failure(f"{address_family} Peer: {peer_ip} - Session has non-empty message queues - InQ: {inq}, OutQ: {outq}") class VerifyBGPExchangedRoutes(AntaTest): @@ -873,16 +877,17 @@ def test(self) -> None: if not evpn_routes: no_evpn_routes.append((address, vni)) continue - # Verify that each EVPN route has at least one valid and active path - for route, route_data in evpn_routes.items(): - has_active_path = False - for path in route_data["evpnRoutePaths"]: - if path["routeType"]["valid"] is True and path["routeType"]["active"] is True: - # At least one path is valid and active, no need to check the other paths + + # Verify that at least one EVPN route has at least one active/valid path across all learned routes from all RDs combined + has_active_path = False + for route_data in evpn_routes.values(): + for path in route_data.get("evpnRoutePaths", []): + route_type = path.get("routeType", {}) + if route_type.get("active") and route_type.get("valid"): has_active_path = True break - if not has_active_path: - bad_evpn_routes.append(route) + if not has_active_path: + bad_evpn_routes.extend(list(evpn_routes)) if no_evpn_routes: self.result.is_failure(f"The following VXLAN endpoint do not have any EVPN Type-2 route: {no_evpn_routes}") diff --git a/anta/tests/security.py b/anta/tests/security.py index bbfa2b0e7..38bf2409e 100644 --- a/anta/tests/security.py +++ b/anta/tests/security.py @@ -8,12 +8,12 @@ # Mypy does not understand AntaTest.Input typing # mypy: disable-error-code=attr-defined from datetime import datetime, timezone -from ipaddress import IPv4Address from typing import TYPE_CHECKING, ClassVar, get_args from pydantic import BaseModel, Field, model_validator from anta.custom_types import EcdsaKeySize, EncryptionAlgorithm, PositiveInteger, RsaKeySize +from anta.input_models.security import IPSecPeer, IPSecPeers from anta.models import AntaCommand, AntaTemplate, AntaTest from anta.tools import get_failed_logs, get_item, get_value @@ -692,15 +692,22 @@ def test(self) -> None: class VerifySpecificIPSecConn(AntaTest): - """Verifies the state of IPv4 security connections for a specified peer. + """Verifies the IPv4 security connections. - It optionally allows for the verification of a specific path for a peer by providing source and destination addresses. - If these addresses are not provided, it will verify all paths for the specified peer. + This test performs the following checks for each peer: + + 1. Validates that the VRF is configured. + 2. Checks for the presence of IPv4 security connections for the specified peer. + 3. For each relevant peer: + - If source and destination addresses are provided, verifies the security connection for the specific path exists and is `Established`. + - If no addresses are provided, verifies that all security connections associated with the peer are `Established`. Expected Results ---------------- - * Success: The test passes if the IPv4 security connection for a peer is established in the specified VRF. - * Failure: The test fails if IPv4 security is not configured, a connection is not found for a peer, or the connection is not established in the specified VRF. + * Success: If all checks pass for all specified IPv4 security connections. 
+ * Failure: If any of the following occur: + - No IPv4 security connections are found for the peer + - The security connection is not established for the specified path or any of the peer connections is not established when no path is specified. Examples -------- @@ -719,35 +726,16 @@ class VerifySpecificIPSecConn(AntaTest): ``` """ - description = "Verifies IPv4 security connections for a peer." categories: ClassVar[list[str]] = ["security"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show ip security connection vrf {vrf} path peer {peer}")] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show ip security connection vrf {vrf} path peer {peer}", revision=2)] class Input(AntaTest.Input): """Input model for the VerifySpecificIPSecConn test.""" - ip_security_connections: list[IPSecPeers] + ip_security_connections: list[IPSecPeer] """List of IP4v security peers.""" - - class IPSecPeers(BaseModel): - """Details of IPv4 security peers.""" - - peer: IPv4Address - """IPv4 address of the peer.""" - - vrf: str = "default" - """Optional VRF for the IP security peer.""" - - connections: list[IPSecConn] | None = None - """Optional list of IPv4 security connections of a peer.""" - - class IPSecConn(BaseModel): - """Details of IPv4 security connections for a peer.""" - - source_address: IPv4Address - """Source IPv4 address of the connection.""" - destination_address: IPv4Address - """Destination IPv4 address of the connection.""" + IPSecPeers: ClassVar[type[IPSecPeers]] = IPSecPeers + """To maintain backward compatibility.""" def render(self, template: AntaTemplate) -> list[AntaCommand]: """Render the template for each input IP Sec connection.""" @@ -757,15 +745,15 @@ def render(self, template: AntaTemplate) -> list[AntaCommand]: def test(self) -> None: """Main test function for VerifySpecificIPSecConn.""" self.result.is_success() + for command_output, input_peer in zip(self.instance_commands, self.inputs.ip_security_connections): conn_output = command_output.json_output["connections"] - peer = command_output.params.peer - vrf = command_output.params.vrf conn_input = input_peer.connections + vrf = input_peer.vrf # Check if IPv4 security connection is configured if not conn_output: - self.result.is_failure(f"No IPv4 security connection configured for peer `{peer}`.") + self.result.is_failure(f"{input_peer} - Not configured") continue # If connection details are not provided then check all connections of a peer @@ -775,10 +763,8 @@ def test(self) -> None: if state != "Established": source = conn_data.get("saddr") destination = conn_data.get("daddr") - vrf = conn_data.get("tunnelNs") self.result.is_failure( - f"Expected state of IPv4 security connection `source:{source} destination:{destination} vrf:{vrf}` for peer `{peer}` is `Established` " - f"but found `{state}` instead." + f"{input_peer} Source: {source} Destination: {destination} - Connection down - Expected: Established, Actual: {state}" ) continue @@ -794,14 +780,10 @@ def test(self) -> None: if (source_input, destination_input, vrf) in existing_connections: existing_state = existing_connections[(source_input, destination_input, vrf)] if existing_state != "Established": - self.result.is_failure( - f"Expected state of IPv4 security connection `source:{source_input} destination:{destination_input} vrf:{vrf}` " - f"for peer `{peer}` is `Established` but found `{existing_state}` instead." 
- ) + failure = f"Expected: Established, Actual: {existing_state}" + self.result.is_failure(f"{input_peer} Source: {source_input} Destination: {destination_input} - Connection down - {failure}") else: - self.result.is_failure( - f"IPv4 security connection `source:{source_input} destination:{destination_input} vrf:{vrf}` for peer `{peer}` is not found." - ) + self.result.is_failure(f"{input_peer} Source: {source_input} Destination: {destination_input} - Connection not found.") class VerifyHardwareEntropy(AntaTest): diff --git a/anta/tests/stun.py b/anta/tests/stun.py index 8b4f4fb2f..2be13c4b2 100644 --- a/anta/tests/stun.py +++ b/anta/tests/stun.py @@ -7,29 +7,36 @@ # mypy: disable-error-code=attr-defined from __future__ import annotations -from ipaddress import IPv4Address from typing import ClassVar -from pydantic import BaseModel - -from anta.custom_types import Port +from anta.decorators import deprecated_test_class +from anta.input_models.stun import StunClientTranslation from anta.models import AntaCommand, AntaTemplate, AntaTest -from anta.tools import get_failed_logs, get_value +from anta.tools import get_value + +class VerifyStunClientTranslation(AntaTest): + """Verifies the translation for a source address on a STUN client. -class VerifyStunClient(AntaTest): - """Verifies STUN client settings, including local IP/port and optionally public IP/port. + This test performs the following checks for each specified address family: + + 1. Validates that there is a translation for the source address on the STUN client. + 2. If public IP and port details are provided, validates their correctness against the configuration. Expected Results ---------------- - * Success: The test will pass if the STUN client is correctly configured with the specified IPv4 source address/port and public address/port. - * Failure: The test will fail if the STUN client is not configured or if the IPv4 source address, public address, or port details are incorrect. + * Success: If all of the following conditions are met: + - The test will pass if the source address translation is present. + - If public IP and port details are provided, they must also match the translation information. + * Failure: If any of the following occur: + - There is no translation for the source address on the STUN client. + - The public IP or port details, if specified, are incorrect. 
Examples -------- ```yaml anta.tests.stun: - - VerifyStunClient: + - VerifyStunClientTranslation: stun_clients: - source_address: 172.18.3.2 public_address: 172.18.3.21 @@ -43,24 +50,14 @@ class VerifyStunClient(AntaTest): """ categories: ClassVar[list[str]] = ["stun"] - commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show stun client translations {source_address} {source_port}")] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [AntaTemplate(template="show stun client translations {source_address} {source_port}", revision=1)] class Input(AntaTest.Input): - """Input model for the VerifyStunClient test.""" + """Input model for the VerifyStunClientTranslation test.""" - stun_clients: list[ClientAddress] - - class ClientAddress(BaseModel): - """Source and public address/port details of STUN client.""" - - source_address: IPv4Address - """IPv4 source address of STUN client.""" - source_port: Port = 4500 - """Source port number for STUN client.""" - public_address: IPv4Address | None = None - """Optional IPv4 public address of STUN client.""" - public_port: Port | None = None - """Optional public port number for STUN client.""" + stun_clients: list[StunClientTranslation] + """List of STUN clients.""" + StunClientTranslation: ClassVar[type[StunClientTranslation]] = StunClientTranslation def render(self, template: AntaTemplate) -> list[AntaCommand]: """Render the template for each STUN translation.""" @@ -68,48 +65,57 @@ def render(self, template: AntaTemplate) -> list[AntaCommand]: @AntaTest.anta_test def test(self) -> None: - """Main test function for VerifyStunClient.""" + """Main test function for VerifyStunClientTranslation.""" self.result.is_success() # Iterate over each command output and corresponding client input for command, client_input in zip(self.instance_commands, self.inputs.stun_clients): bindings = command.json_output["bindings"] - source_address = str(command.params.source_address) - source_port = command.params.source_port + input_public_address = client_input.public_address + input_public_port = client_input.public_port # If no bindings are found for the STUN client, mark the test as a failure and continue with the next client if not bindings: - self.result.is_failure(f"STUN client transaction for source `{source_address}:{source_port}` is not found.") + self.result.is_failure(f"{client_input} - STUN client translation not found.") continue - # Extract the public address and port from the client input - public_address = client_input.public_address - public_port = client_input.public_port - # Extract the transaction ID from the bindings transaction_id = next(iter(bindings.keys())) - # Prepare the actual and expected STUN data for comparison - actual_stun_data = { - "source ip": get_value(bindings, f"{transaction_id}.sourceAddress.ip"), - "source port": get_value(bindings, f"{transaction_id}.sourceAddress.port"), - } - expected_stun_data = {"source ip": source_address, "source port": source_port} - - # If public address is provided, add it to the actual and expected STUN data - if public_address is not None: - actual_stun_data["public ip"] = get_value(bindings, f"{transaction_id}.publicAddress.ip") - expected_stun_data["public ip"] = str(public_address) - - # If public port is provided, add it to the actual and expected STUN data - if public_port is not None: - actual_stun_data["public port"] = get_value(bindings, f"{transaction_id}.publicAddress.port") - expected_stun_data["public port"] = public_port - - # If the actual STUN data does not 
match the expected STUN data, mark the test as failure - if actual_stun_data != expected_stun_data: - failed_log = get_failed_logs(expected_stun_data, actual_stun_data) - self.result.is_failure(f"For STUN source `{source_address}:{source_port}`:{failed_log}") + # Verifying the public address if provided + if input_public_address and str(input_public_address) != (actual_public_address := get_value(bindings, f"{transaction_id}.publicAddress.ip")): + self.result.is_failure(f"{client_input} - Incorrect public-facing address - Expected: {input_public_address} Actual: {actual_public_address}") + + # Verifying the public port if provided + if input_public_port and input_public_port != (actual_public_port := get_value(bindings, f"{transaction_id}.publicAddress.port")): + self.result.is_failure(f"{client_input} - Incorrect public-facing port - Expected: {input_public_port} Actual: {actual_public_port}") + + +@deprecated_test_class(new_tests=["VerifyStunClientTranslation"], removal_in_version="v2.0.0") +class VerifyStunClient(VerifyStunClientTranslation): + """(Deprecated) Verifies the translation for a source address on a STUN client. + + Alias for the VerifyStunClientTranslation test to maintain backward compatibility. + When initialized, it will emit a deprecation warning and call the VerifyStunClientTranslation test. + + Examples + -------- + ```yaml + anta.tests.stun: + - VerifyStunClient: + stun_clients: + - source_address: 172.18.3.2 + public_address: 172.18.3.21 + source_port: 4500 + public_port: 6006 + ``` + """ + + # TODO: Remove this class in ANTA v2.0.0. + + # required to redefine name and description to overwrite parent class. + name = "VerifyStunClient" + description = "(Deprecated) Verifies the translation for a source address on a STUN client." class VerifyStunServer(AntaTest): diff --git a/asynceapi/device.py b/asynceapi/device.py index 7793ce519..c423c366c 100644 --- a/asynceapi/device.py +++ b/asynceapi/device.py @@ -121,7 +121,7 @@ async def check_connection(self) -> bool: """ return await port_check_url(self.base_url) - async def cli( # noqa: PLR0913 + async def cli( self, command: str | dict[str, Any] | None = None, commands: Sequence[str | dict[str, Any]] | None = None, @@ -195,7 +195,7 @@ async def cli( # noqa: PLR0913 return None raise - def _jsonrpc_command( # noqa: PLR0913 + def _jsonrpc_command( self, commands: Sequence[str | dict[str, Any]] | None = None, ofmt: str | None = None, diff --git a/docs/advanced_usages/as-python-lib.md b/docs/advanced_usages/as-python-lib.md index 49c010f80..fce5e7eea 100644 --- a/docs/advanced_usages/as-python-lib.md +++ b/docs/advanced_usages/as-python-lib.md @@ -6,8 +6,8 @@ ANTA is a Python library that can be used in user applications. This section describes how you can leverage ANTA Python modules to help you create your own NRFU solution. -!!! tip - If you are unfamiliar with asyncio, refer to the Python documentation relevant to your Python version - https://docs.python.org/3/library/asyncio.html +> [!TIP] +> If you are unfamiliar with asyncio, refer to the Python documentation relevant to your Python version - https://docs.python.org/3/library/asyncio.html ## [AntaDevice](../api/device.md#anta.device.AntaDevice) Abstract Class @@ -47,8 +47,10 @@ The [AntaInventory](../api/inventory.md#anta.inventory.AntaInventory) class is a --8<-- "parse_anta_inventory_file.py" ``` -!!! note "How to create your inventory file" - Please visit this [dedicated section](../usage-inventory-catalog.md) for how to use inventory and catalog files.
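Alongside the `parse_anta_inventory_file.py` snippet referenced above, here is a hedged sketch of the same workflow: parse an inventory file, open the connections, and count the reachable devices. The file name and credentials are placeholders, and the method names follow the `AntaInventory` API as used in this documentation:

```python
import asyncio

from anta.inventory import AntaInventory


async def main() -> None:
    # Parse the YAML inventory; path and credentials are hypothetical.
    inventory = AntaInventory.parse(filename="inventory.yaml", username="admin", password="admin")
    # Open a session to every device and refresh its state.
    await inventory.connect_inventory()
    reachable = inventory.get_inventory(established_only=True)
    print(f"{len(reachable)} device(s) reachable")


asyncio.run(main())
```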
+> [!NOTE] +> **How to create your inventory file** +> +> Please visit this [dedicated section](../usage-inventory-catalog.md) for how to use inventory and catalog files. ### Run EOS commands diff --git a/docs/advanced_usages/custom-tests.md b/docs/advanced_usages/custom-tests.md index 8b217998c..2fc61ccc4 100644 --- a/docs/advanced_usages/custom-tests.md +++ b/docs/advanced_usages/custom-tests.md @@ -4,8 +4,8 @@ ~ that can be found in the LICENSE file. --> -!!! info - This documentation applies for both creating tests in ANTA or creating your own test package. +> [!INFO] +> This documentation applies for both creating tests in ANTA or creating your own test package. ANTA is not only a Python library with a CLI and a collection of built-in tests, it is also a framework you can extend by building your own tests. @@ -15,7 +15,7 @@ A test is a Python class where a test function is defined and will be run by the ANTA provides an abstract class [AntaTest](../api/models.md#anta.models.AntaTest). This class does the heavy lifting and provide the logic to define, collect and test data. The code below is an example of a simple test in ANTA, which is an [AntaTest](../api/models.md#anta.models.AntaTest) subclass: -```python +````python from anta.models import AntaTest, AntaCommand from anta.decorators import skip_on_platforms @@ -49,7 +49,7 @@ class VerifyTemperature(AntaTest): self.result.is_success() else: self.result.is_failure(f"Device temperature exceeds acceptable limits. Current system status: '{temperature_status}'") -``` +```` [AntaTest](../api/models.md#anta.models.AntaTest) also provide more advanced capabilities like [AntaCommand](../api/models.md#anta.models.AntaCommand) templating using the [AntaTemplate](../api/models.md#anta.models.AntaTemplate) class or test inputs definition and validation using [AntaTest.Input](../api/models.md#anta.models.AntaTest.Input) [pydantic](https://docs.pydantic.dev/latest/) model. This will be discussed in the sections below. @@ -64,8 +64,8 @@ Full AntaTest API documentation is available in the [API documentation section]( - `categories` (`list[str]`): A list of categories in which the test belongs. - `commands` (`[list[AntaCommand | AntaTemplate]]`): A list of command to collect from devices. This list **must** be a list of [AntaCommand](../api/models.md#anta.models.AntaCommand) or [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances. Rendering [AntaTemplate](../api/models.md#anta.models.AntaTemplate) instances will be discussed later. -!!! info - All these class attributes are mandatory. If any attribute is missing, a `NotImplementedError` exception will be raised during class instantiation. +> [!INFO] +> All these class attributes are mandatory. If any attribute is missing, a `NotImplementedError` exception will be raised during class instantiation. ### Instance Attributes @@ -82,11 +82,15 @@ Full AntaTest API documentation is available in the [API documentation section]( show_root_toc_entry: false heading_level: 10 -!!! note "Logger object" - ANTA already provides comprehensive logging at every steps of a test execution. The [AntaTest](../api/models.md#anta.models.AntaTest) class also provides a `logger` attribute that is a Python logger specific to the test instance. See [Python documentation](https://docs.python.org/3/library/logging.html) for more information. - -!!! note "AntaDevice object" - Even if `device` is not a private attribute, you should not need to access this object in your code. 
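Building on the `VerifyTemperature` example shown earlier on this page, the sketch below illustrates how the new `deprecated_test_class` decorator from this change set can keep an old test name importable while logging a warning; `VerifyTemperatureLegacy` and its wording are hypothetical:

```python
from anta.decorators import deprecated_test_class


@deprecated_test_class(new_tests=["VerifyTemperature"], removal_in_version="v2.0.0")
class VerifyTemperatureLegacy(VerifyTemperature):  # hypothetical old test name
    """(Deprecated) Alias for VerifyTemperature kept for backward compatibility."""

    # Redefine name and description to overwrite the parent class values.
    name = "VerifyTemperatureLegacy"
    description = "(Deprecated) Verifies if the device temperature is within acceptable limits."
```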
+> [!NOTE] +> +> - **Logger object** +> +> ANTA already provides comprehensive logging at every steps of a test execution. The [AntaTest](../api/models.md#anta.models.AntaTest) class also provides a `logger` attribute that is a Python logger specific to the test instance. See [Python documentation](https://docs.python.org/3/library/logging.html) for more information. +> +> - **AntaDevice object** +> +> Even if `device` is not a private attribute, you should not need to access this object in your code. ### Test Inputs @@ -129,8 +133,8 @@ Full `ResultOverwrite` model documentation is available in [API documentation se show_root_toc_entry: false heading_level: 10 -!!! note - The pydantic model is configured using the [`extra=forbid`](https://docs.pydantic.dev/latest/usage/model_config/#extra-attributes) that will fail input validation if extra fields are provided. +> [!NOTE] +> The pydantic model is configured using the [`extra=forbid`](https://docs.pydantic.dev/latest/usage/model_config/#extra-attributes) that will fail input validation if extra fields are provided. ### Methods @@ -160,8 +164,8 @@ In this section, we will go into all the details of writing an [AntaTest](../api Import [anta.models.AntaTest](../api/models.md#anta.models.AntaTest) and define your own class. Define the mandatory class attributes using [anta.models.AntaCommand](../api/models.md#anta.models.AntaCommand), [anta.models.AntaTemplate](../api/models.md#anta.models.AntaTemplate) or both. -!!! info - Caching can be disabled per `AntaCommand` or `AntaTemplate` by setting the `use_cache` argument to `False`. For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](../advanced_usages/caching.md). +> [!NOTE] +> Caching can be disabled per `AntaCommand` or `AntaTemplate` by setting the `use_cache` argument to `False`. For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](../advanced_usages/caching.md). ```python from anta.models import AntaTest, AntaCommand, AntaTemplate @@ -193,21 +197,23 @@ class (AntaTest): ] ``` -!!! tip "Command revision and version" - * Most of EOS commands return a JSON structure according to a model (some commands may not be modeled hence the necessity to use `text` outformat sometimes. - * The model can change across time (adding feature, ... ) and when the model is changed in a non backward-compatible way, the **revision** number is bumped. The initial model starts with **revision** 1. - * A **revision** applies to a particular CLI command whereas a **version** is global to an eAPI call. The **version** is internally translated to a specific **revision** for each CLI command in the RPC call. The currently supported **version** values are `1` and `latest`. - * A **revision takes precedence over a version** (e.g. if a command is run with version="latest" and revision=1, the first revision of the model is returned) - * By default, eAPI returns the first revision of each model to ensure that when upgrading, integrations with existing tools are not broken. This is done by using by default `version=1` in eAPI calls. - - By default, ANTA uses `version="latest"` in AntaCommand, but when developing tests, the revision MUST be provided when the outformat of the command is `json`. As explained earlier, this is to ensure that the eAPI always returns the same output model and that the test remains always valid from the day it was created. For some commands, you may also want to run them with a different revision or version. 
- - For instance, the `VerifyBFDPeersHealth` test leverages the first revision of `show bfd peers`: - - ``` - # revision 1 as later revision introduce additional nesting for type - commands = [AntaCommand(command="show bfd peers", revision=1)] - ``` +> [!TIP] +> **Command revision and version** +> +> - Most of EOS commands return a JSON structure according to a model (some commands may not be modeled hence the necessity to use `text` outformat sometimes. +> - The model can change across time (adding feature, ... ) and when the model is changed in a non backward-compatible way, the **revision** number is bumped. The initial model starts with **revision** 1. +> - A **revision** applies to a particular CLI command whereas a **version** is global to an eAPI call. The **version** is internally translated to a specific **revision** for each CLI command in the RPC call. The currently supported **version** values are `1` and `latest`. +> - A **revision takes precedence over a version** (e.g. if a command is run with version="latest" and revision=1, the first revision of the model is returned) +> - By default, eAPI returns the first revision of each model to ensure that when upgrading, integrations with existing tools are not broken. This is done by using by default `version=1` in eAPI calls. +> +> By default, ANTA uses `version="latest"` in AntaCommand, but when developing tests, the revision MUST be provided when the outformat of the command is `json`. As explained earlier, this is to ensure that the eAPI always returns the same output model and that the test remains always valid from the day it was created. For some commands, you may also want to run them with a different revision or version. +> +> For instance, the `VerifyBFDPeersHealth` test leverages the first revision of `show bfd peers`: +> +> ```python +> # revision 1 as later revision introduce additional nesting for type +> commands = [AntaCommand(command="show bfd peers", revision=1)] +> ``` ### Inputs definition @@ -242,8 +248,8 @@ You can also leverage [anta.custom_types](../api/types.md) that provides reusabl Regarding required, optional and nullable fields, refer to this [documentation](https://docs.pydantic.dev/latest/migration/#required-optional-and-nullable-fields) on how to define them. -!!! note - All the `pydantic` features are supported. For instance you can define [validators](https://docs.pydantic.dev/latest/usage/validators/) for complex input validation. +> [!NOTE] +> All the `pydantic` features are supported. For instance you can define [validators](https://docs.pydantic.dev/latest/usage/validators/) for complex input validation. ### Template rendering @@ -338,10 +344,10 @@ class VerifyTemperature(AntaTest): ## Access your custom tests in the test catalog -!!! warning "" - This section is required only if you are not merging your development into ANTA. Otherwise, just follow [contribution guide](../contribution.md). +> [!WARNING] +> This section is required only if you are not merging your development into ANTA. Otherwise, just follow [contribution guide](../contribution.md). -For that, you need to create your own Python package as described in this [hitchhiker's guide](https://the-hitchhikers-guide-to-packaging.readthedocs.io/en/latest/) to package Python code. We assume it is well known and we won't focus on this aspect. Thus, your package must be impartable by ANTA hence available in the module search path `sys.path` (you can use `PYTHONPATH` for example). 
+For that, you need to create your own Python package as described in this [hitchhiker's guide](https://the-hitchhikers-guide-to-packaging.readthedocs.io/en/latest/) to package Python code. We assume it is well known and we won't focus on this aspect. Thus, your package must be importable by ANTA hence available in the module search path `sys.path` (you can use `PYTHONPATH` for example). It is very similar to what is documented in [catalog section](../usage-inventory-catalog.md) but you have to use your own package name. diff --git a/docs/api/tests.avt.md b/docs/api/tests.avt.md index f9e1acfdd..a55fcce14 100644 --- a/docs/api/tests.avt.md +++ b/docs/api/tests.avt.md @@ -7,7 +7,10 @@ anta_title: ANTA catalog for Adaptive Virtual Topology (AVT) tests ~ that can be found in the LICENSE file. --> +# Tests + ::: anta.tests.avt + options: show_root_heading: false show_root_toc_entry: false @@ -18,3 +21,18 @@ anta_title: ANTA catalog for Adaptive Virtual Topology (AVT) tests filters: - "!test" - "!render" + +# Input models + +::: anta.input_models.avt + + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + anta_hide_test_module_description: true + merge_init_into_class: false + show_labels: true + filters: + - "!^__init__" + - "!^__str__" diff --git a/docs/api/tests.routing.bgp.md b/docs/api/tests.routing.bgp.md index 30e4362a0..b40ff7b2d 100644 --- a/docs/api/tests.routing.bgp.md +++ b/docs/api/tests.routing.bgp.md @@ -7,6 +7,9 @@ anta_title: ANTA catalog for BGP tests ~ that can be found in the LICENSE file. --> +!!! info "`multi-agent` Service Routing Protocols Model Requirements" + The BGP tests in this section are only supported on switches running the `multi-agent` routing protocols model. Starting from EOS version 4.30.1F, `service routing protocols model` is set to `multi-agent` by default. These BGP commands may **not** be compatible with switches running the legacy `ribd` routing protocols model and may fail if attempted. + # Tests ::: anta.tests.routing.bgp @@ -39,3 +42,4 @@ anta_title: ANTA catalog for BGP tests - "!^__str__" - "!AFI_SAFI_EOS_KEY" - "!eos_key" + - "!BgpAfi" diff --git a/docs/api/tests.security.md b/docs/api/tests.security.md index fe008ba66..599783236 100644 --- a/docs/api/tests.security.md +++ b/docs/api/tests.security.md @@ -7,7 +7,10 @@ anta_title: ANTA catalog for security tests ~ that can be found in the LICENSE file. --> +# Tests + ::: anta.tests.security + options: show_root_heading: false show_root_toc_entry: false @@ -18,3 +21,18 @@ anta_title: ANTA catalog for security tests filters: - "!test" - "!render" + +# Input models + +::: anta.input_models.security + + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + merge_init_into_class: false + anta_hide_test_module_description: true + show_labels: true + filters: + - "!^__init__" + - "!^__str__" diff --git a/docs/api/tests.stun.md b/docs/api/tests.stun.md index b4274e9a7..6a73b8880 100644 --- a/docs/api/tests.stun.md +++ b/docs/api/tests.stun.md @@ -7,6 +7,8 @@ anta_title: ANTA catalog for STUN tests ~ that can be found in the LICENSE file.
--> +# Tests + ::: anta.tests.stun options: show_root_heading: false @@ -18,3 +20,18 @@ anta_title: ANTA catalog for STUN tests filters: - "!test" - "!render" + +# Input models + +::: anta.input_models.stun + + options: + show_root_heading: false + show_root_toc_entry: false + show_bases: false + merge_init_into_class: false + anta_hide_test_module_description: true + show_labels: true + filters: + - "!^__init__" + - "!^__str__" diff --git a/docs/cli/debug.md b/docs/cli/debug.md index 4c864db0f..45ad791f5 100644 --- a/docs/cli/debug.md +++ b/docs/cli/debug.md @@ -61,6 +61,7 @@ Options: --help Show this message and exit. ``` +> [!TIP] > `username`, `password`, `enable-password`, `enable`, `timeout` and `insecure` values are the same for all devices ### Example @@ -162,8 +163,8 @@ Run templated command 'show vlan {vlan_id}' with {'vlan_id': '10'} on DC1-LEAF1A ### Example of multiple arguments -!!! warning - If multiple arguments of the same key are provided, only the last argument value will be kept in the template parameters. +> [!WARNING] +> If multiple arguments of the same key are provided, only the last argument value will be kept in the template parameters. ```bash anta -log DEBUG debug run-template --template "ping {dst} source {src}" dst "8.8.8.8" src Loopback0 --device DC1-SPINE1     diff --git a/docs/cli/exec.md b/docs/cli/exec.md index 4f6d5d169..a7a0fe330 100644 --- a/docs/cli/exec.md +++ b/docs/cli/exec.md @@ -64,6 +64,7 @@ Options: --help Show this message and exit. ``` +> [!TIP] > `username`, `password`, `enable-password`, `enable`, `timeout` and `insecure` values are the same for all devices ### Example @@ -242,6 +243,7 @@ Options: --help Show this message and exit. ``` +> [!TIP] > `username`, `password`, `enable-password`, `enable`, `timeout` and `insecure` values are the same for all devices When executed, this command fetches tech-support files and downloads them locally into a device-specific subfolder within the designated folder. You can specify the output folder with the `--output` option. @@ -250,15 +252,17 @@ ANTA uses SCP to download files from devices and will not trust unknown SSH host The configuration `aaa authorization exec default` must be present on devices to be able to use SCP. -!!! warning Deprecation - ANTA can automatically configure `aaa authorization exec default local` using the `anta exec collect-tech-support --configure` option but this option is deprecated and will be removed in ANTA 2.0.0. +> [!CAUTION] +> **Deprecation** +> +> ANTA can automatically configure `aaa authorization exec default local` using the `anta exec collect-tech-support --configure` option but this option is deprecated and will be removed in ANTA 2.0.0. If you require specific AAA configuration for `aaa authorization exec default`, like `aaa authorization exec default none` or `aaa authorization exec default group tacacs+`, you will need to configure it manually. The `--latest` option allows retrieval of a specific number of the most recent tech-support files. -!!! warning - By default **all** the tech-support files present on the devices are retrieved. +> [!WARNING] +> By default **all** the tech-support files present on the devices are retrieved. ### Example diff --git a/docs/cli/get-inventory-information.md b/docs/cli/get-inventory-information.md index ab1bebcd3..d45cb6af3 100644 --- a/docs/cli/get-inventory-information.md +++ b/docs/cli/get-inventory-information.md @@ -52,8 +52,8 @@ Options: --help Show this message and exit. ``` -!!! 
tip - By default, `anta get inventory` only provides information that doesn't rely on a device connection. If you are interested in obtaining connection-dependent details, like the hardware model, use the `--connected` option. +> [!TIP] +> By default, `anta get inventory` only provides information that doesn't rely on a device connection. If you are interested in obtaining connection-dependent details, like the hardware model, use the `--connected` option. ### Example diff --git a/docs/cli/get-tests.md b/docs/cli/get-tests.md index 09933cb51..3c2b369c0 100644 --- a/docs/cli/get-tests.md +++ b/docs/cli/get-tests.md @@ -26,8 +26,8 @@ Options: --help Show this message and exit. ``` -!!! tip - By default, `anta get tests` will retrieve all tests available in ANTA. +> [!TIP] +> By default, `anta get tests` will retrieve all tests available in ANTA. ### Examples @@ -91,9 +91,8 @@ anta.tests.aaa: vrf: MGMT ``` -!!! tip - - You can filter tests by providing a prefix - ANTA will return all tests that start with your specified string. +> [!TIP] +> You can filter tests by providing a prefix - ANTA will return all tests that start with your specified string. ```yaml title="anta get tests --test VerifyTacacs" anta.tests.aaa: diff --git a/docs/cli/inv-from-ansible.md b/docs/cli/inv-from-ansible.md index f7cc54a1f..c891693fc 100644 --- a/docs/cli/inv-from-ansible.md +++ b/docs/cli/inv-from-ansible.md @@ -31,12 +31,13 @@ Options: --help Show this message and exit. ``` -!!! warning "Warnings" - - * `anta get from-ansible` does not support inline vaulted variables, comment them out to generate your inventory. - If the vaulted variable is necessary to build the inventory (e.g. `ansible_host`), it needs to be unvaulted for `from-ansible` command to work." - - * The current implementation only considers devices directly attached to a specific Ansible group and does not support inheritance when using the `--ansible-group` option. +> [!WARNING] +> +> - `anta get from-ansible` does not support inline vaulted variables, comment them out to generate your inventory. +> +> - If the vaulted variable is necessary to build the inventory (e.g. `ansible_host`), it needs to be unvaulted for `from-ansible` command to work." +> +> - The current implementation only considers devices directly attached to a specific Ansible group and does not support inheritance when using the `--ansible-group` option. By default, if user does not provide `--output` file, anta will save output to configured anta inventory (`anta --inventory`). If the output file has content, anta will ask user to overwrite when running in interactive console. This mechanism can be controlled by triggers in case of CI usage: `--overwrite` to force anta to overwrite file. If not set, anta will exit diff --git a/docs/cli/inv-from-cvp.md b/docs/cli/inv-from-cvp.md index 9717870ad..e08ffd616 100644 --- a/docs/cli/inv-from-cvp.md +++ b/docs/cli/inv-from-cvp.md @@ -52,8 +52,8 @@ anta_inventory: - pod2 ``` -!!! warning - The current implementation only considers devices directly attached to a specific container when using the `--cvp-container` option. +> [!WARNING] +> The current implementation only considers devices directly attached to a specific container when using the `--cvp-container` option. ## Creating an inventory from multiple containers diff --git a/docs/cli/nrfu.md b/docs/cli/nrfu.md index 0f2b42524..667eb5f9a 100644 --- a/docs/cli/nrfu.md +++ b/docs/cli/nrfu.md @@ -26,8 +26,8 @@ ANTA provides a set of commands for performing NRFU tests on devices. 
These comm All commands under the `anta nrfu` namespace require a catalog yaml file specified with the `--catalog` option and a device inventory file specified with the `--inventory` option. -!!! info - Issuing the command `anta nrfu` will run `anta nrfu table` without any option. +> [!TIP] +> Issuing the command `anta nrfu` will run `anta nrfu table` without any option. ### Tag management diff --git a/docs/cli/overview.md b/docs/cli/overview.md index f1247b7e2..be6b1f43e 100644 --- a/docs/cli/overview.md +++ b/docs/cli/overview.md @@ -45,9 +45,10 @@ Then, run the CLI without options: anta nrfu ``` -!!! note - All environment variables may not be needed for every commands. - Refer to ` --help` for the comprehensive environment variables names. +> [!NOTE] +> All environment variables may not be needed for every command. +> +> Refer to ` --help` for the comprehensive environment variable names. Below are the environment variables usable with the `anta nrfu` command: @@ -63,8 +64,8 @@ Below are the environment variables usable with the `anta nrfu` command: | ANTA_ENABLE | Whether it is necessary to go to enable mode on devices. | No | | ANTA_ENABLE_PASSWORD | The optional enable password, when this variable is set, ANTA_ENABLE or `--enable` is required. | No | -!!! info - Caching can be disabled with the global parameter `--disable-cache`. For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](../advanced_usages/caching.md). +> [!NOTE] +> Caching can be disabled with the global parameter `--disable-cache`. For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](../advanced_usages/caching.md). ## ANTA Exit Codes diff --git a/docs/cli/tag-management.md b/docs/cli/tag-management.md index 4108d75bb..b07e0c9e0 100644 --- a/docs/cli/tag-management.md +++ b/docs/cli/tag-management.md @@ -92,10 +92,11 @@ anta.tests.interfaces: tags: ['spine'] ``` -> A tag used to filter a test can also be a device name - -!!! tip "Use different input values for a specific test" - Leverage tags to define different input values for a specific test. See the `VerifyUptime` example above. +> [!TIP] +> +> - A tag used to filter a test can also be a device name +> +> - **Use different input values for a specific test**: Leverage tags to define different input values for a specific test. See the `VerifyUptime` example above. ## Using tags diff --git a/docs/contribution.md b/docs/contribution.md index 88f09c180..03923f695 100644 --- a/docs/contribution.md +++ b/docs/contribution.md @@ -86,9 +86,9 @@ Success: no issues found in 82 source files > NOTE: Typing is configured quite strictly, do not hesitate to reach out if you have any questions, struggles, nightmares. -## Unit tests +## Unit tests with Pytest -To keep high quality code, we require to provide a Pytest for every tests implemented in ANTA. +To keep code quality high, we require a **Pytest** unit test for every test implemented in ANTA. All submodule should have its own pytest section under `tests/units/anta_tests/.py`. diff --git a/docs/faq.md b/docs/faq.md index d6376811f..ee823b491 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -110,6 +110,17 @@ anta_title: Frequently Asked Questions (FAQ) pip install -U pyopenssl>22.0 ``` +## Caveat running on non-POSIX platforms (e.g. Windows) + +???+ faq "Caveat running on non-POSIX platforms (e.g. Windows)" + + While ANTA should in general work on non-POSIX platforms (e.g. 
Windows), + there are some known limitations: + + - On non-POSIX platforms, ANTA is not able to check and/or adjust the system limit of file descriptors. + + The ANTA test suite is run in CI on a Windows runner. + ## `__NSCFConstantString initialize` error on OSX ???+ faq "`__NSCFConstantString initialize` error on OSX" diff --git a/docs/requirements-and-installation.md b/docs/requirements-and-installation.md index 1b3575877..22faf7c12 100644 --- a/docs/requirements-and-installation.md +++ b/docs/requirements-and-installation.md @@ -25,9 +25,8 @@ The ANTA package and the cli require some packages that are not part of the Pyth pip install anta ``` -!!! Warning - - * This command alone **will not** install the ANTA CLI requirements. +> [!WARNING] +> This command alone **will not** install the ANTA CLI requirements. ### Install ANTA CLI as an application with `pipx` @@ -37,9 +36,8 @@ pip install anta pipx install anta[cli] ``` -!!! Info - - Please take the time to read through the installation instructions of `pipx` before getting started. +> [!INFO] +> Please take the time to read through the installation instructions of `pipx` before getting started. ### Install CLI from Pypi server @@ -80,8 +78,8 @@ which anta /home/tom/.pyenv/shims/anta ``` -!!! warning - Before running the `anta --version` command, please be aware that some users have reported issues related to the `urllib3` package. If you encounter an error at this step, please refer to our [FAQ](faq.md) page for guidance on resolving it. +> [!WARNING] +> Before running the `anta --version` command, please be aware that some users have reported issues related to the `urllib3` package. If you encounter an error at this step, please refer to our [FAQ](faq.md) page for guidance on resolving it. ```bash # Check ANTA version diff --git a/docs/templates/python/material/class.html.jinja b/docs/templates/python/material/class.html.jinja index cf016c92e..cbf9fac22 100644 --- a/docs/templates/python/material/class.html.jinja +++ b/docs/templates/python/material/class.html.jinja @@ -53,3 +53,21 @@ {{ super() }} {% endif %} {% endblock source %} + +{# overwrite block base to render some stuff on deprecation for anta_test #} +{% block bases %} +{{ super() }} + +{% for dec in class.decorators %} +{% if dec.value.function.name == "deprecated_test_class" %} +Static Badge +{% for arg in dec.value.arguments | selectattr("name", "equalto", "removal_in_version") | list %} +Static Badge +{% endfor %} +
+{% for arg in dec.value.arguments | selectattr("name", "equalto", "new_tests") | list %} +Replaced with: {{ arg.value.elements | map("replace", "'", "", 1) | map("replace", "'", "", 1) | join(", ") | safe }} +{% endfor %} +{% endif %} +{% endfor %} +{% endblock bases %} diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 25b061c84..a422f7cc0 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -33,9 +33,8 @@ To help document the issue in Github, it is important to capture some logs so th ANTA provides very verbose logs when using the `DEBUG` level. When using DEBUG log level with a log file, the DEBUG logging level is not sent to stdout, but only to the file. -!!! danger - - On real deployments, do not use DEBUG logging level without setting a log file at the same time. +> [!CAUTION] +> On real deployments, do not use DEBUG logging level without setting a log file at the same time. To save the logs to a file called `anta.log`, use the following flags: @@ -46,11 +45,10 @@ anta -l DEBUG –log-file anta.log See `anta --help` for more information. These have to precede the `nrfu` cmd. -!!! tip - - Remember that in ANTA, each level of command has its own options and they can only be set at this level. - so the `-l` and `--log-file` MUST be between `anta` and the `ANTA_COMMAND`. - similarly, all the `nrfu` options MUST be set between the `nrfu` and the `ANTA_NRFU_SUBCOMMAND` (`json`, `text`, `table` or `tpl-report`). +> [!TIP] +> Remember that in ANTA, each level of command has its own options and they can only be set at this level. +> so the `-l` and `--log-file` MUST be between `anta` and the `ANTA_COMMAND`. +> similarly, all the `nrfu` options MUST be set between the `nrfu` and the `ANTA_NRFU_SUBCOMMAND` (`json`, `text`, `table` or `tpl-report`). As an example, for the `nrfu` command, it would look like: @@ -60,9 +58,8 @@ anta -l DEBUG --log-file anta.log nrfu --enable --username username --password a ### `ANTA_DEBUG` environment variable -!!! warning - - Do not use this if you do not know why. This produces a lot of logs and can create confusion if you do not know what to look for. +> [!WARNING] +> Do not use this if you do not know why. This produces a lot of logs and can create confusion if you do not know what to look for. The environment variable `ANTA_DEBUG=true` enable ANTA Debug Mode. diff --git a/docs/usage-inventory-catalog.md b/docs/usage-inventory-catalog.md index e41321ae5..7baebfbee 100644 --- a/docs/usage-inventory-catalog.md +++ b/docs/usage-inventory-catalog.md @@ -47,8 +47,8 @@ The inventory file must start with the `anta_inventory` key then define one or m A full description of the inventory model is available in [API documentation](api/inventory.models.input.md) -!!! info - Caching can be disabled per device, network or range by setting the `disable_cache` key to `True` in the inventory file. For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](advanced_usages/caching.md). +> [!INFO] +> Caching can be disabled per device, network or range by setting the `disable_cache` key to `True` in the inventory file. For more details about how caching is implemented in ANTA, please refer to [Caching in ANTA](advanced_usages/caching.md). ### Example @@ -199,8 +199,8 @@ anta.tests.system: tags: ['leaf'] ``` -!!! info - When using the CLI, you can filter the NRFU execution using tags. Refer to [this section](cli/tag-management.md) of the CLI documentation. 
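The same tag filter is available when driving ANTA from Python; a hedged sketch, where the file paths are placeholders and the keyword arguments follow `anta.runner.main` as exercised elsewhere in this change set:

```python
import asyncio

from anta.catalog import AntaCatalog
from anta.inventory import AntaInventory
from anta.result_manager import ResultManager
from anta.runner import main

# Hypothetical inventory and catalog files.
inventory = AntaInventory.parse(filename="inventory.yaml", username="admin", password="admin")
catalog = AntaCatalog.parse("catalog.yaml")

manager = ResultManager()
# Select only devices and tests tagged 'leaf', mirroring `anta nrfu --tags leaf`.
asyncio.run(main(manager, inventory, catalog, tags={"leaf"}, dry_run=True))
print(manager.json)
```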
+> [!INFO] +> When using the CLI, you can filter the NRFU execution using tags. Refer to [this section](cli/tag-management.md) of the CLI documentation. ### Tests available in ANTA @@ -277,8 +277,10 @@ custom.tests.system: type: ['cEOS-LAB'] ``` -!!! tip "How to create custom tests" - To create your custom tests, you should refer to this [documentation](advanced_usages/custom-tests.md) +> [!TIP] +> **How to create custom tests** +> +> To create your custom tests, you should refer to this [documentation](advanced_usages/custom-tests.md) ### Customize test description and categories @@ -317,5 +319,5 @@ The following script reads all the files in `intended/test_catalogs/` with names --8<-- "merge_catalogs.py" ``` -!!! warning - The `AntaCatalog.merge()` method is deprecated and will be removed in ANTA v2.0. Please use the `AntaCatalog.merge_catalogs()` class method instead. +> [!WARNING] +> The `AntaCatalog.merge()` method is deprecated and will be removed in ANTA v2.0. Please use the `AntaCatalog.merge_catalogs()` class method instead. diff --git a/examples/tests.yaml b/examples/tests.yaml index 273d20a5b..86ed117a3 100644 --- a/examples/tests.yaml +++ b/examples/tests.yaml @@ -63,7 +63,7 @@ anta.tests.avt: # Verifies the AVT role of a device. role: edge - VerifyAVTSpecificPath: - # Verifies the status and type of an AVT path for a specified VRF. + # Verifies the Adaptive Virtual Topology (AVT) path. avt_paths: - avt_name: CONTROL-PLANE-PROFILE vrf: default @@ -135,11 +135,25 @@ anta.tests.connectivity: df_bit: True size: 100 anta.tests.cvx: + - VerifyActiveCVXConnections: + # Verifies the number of active CVX Connections. + connections_count: 100 + - VerifyCVXClusterStatus: + # Verifies the CVX Server Cluster status. + role: Master + peer_status: + - peer_name : cvx-red-2 + registration_state: Registration complete + - peer_name: cvx-red-3 + registration_state: Registration error - VerifyManagementCVX: # Verifies the management CVX global status. enabled: true - VerifyMcsClientMounts: # Verify if all MCS client mounts are in mountStateMountComplete. + - VerifyMcsServerMounts: + # Verify if all MCS server mounts are in a MountComplete state. + connections_count: 100 anta.tests.field_notices: - VerifyFieldNotice44Resolution: # Verifies that the device is using the correct Aboot version per FN0044. @@ -253,7 +267,7 @@ anta.tests.interfaces: specific_mtu: - Ethernet1: 2500 - VerifyLACPInterfacesStatus: - # Verifies the Link Aggregation Control Protocol (LACP) status of the provided interfaces. + # Verifies the Link Aggregation Control Protocol (LACP) status of the interface. interfaces: - name: Ethernet1 portchannel: Port-Channel100 @@ -640,7 +654,7 @@ anta.tests.security: - VerifySSHStatus: # Verifies if the SSHD agent is disabled in the default VRF. - VerifySpecificIPSecConn: - # Verifies IPv4 security connections for a peer. + # Verifies the IPv4 security connections. ip_security_connections: - peer: 10.255.0.1 - peer: 10.255.0.2 @@ -745,7 +759,14 @@ anta.tests.stp: threshold: 10 anta.tests.stun: - VerifyStunClient: - # Verifies STUN client settings, including local IP/port and optionally public IP/port. + # (Deprecated) Verifies the translation for a source address on a STUN client. + stun_clients: + - source_address: 172.18.3.2 + public_address: 172.18.3.21 + source_port: 4500 + public_port: 6006 + - VerifyStunClientTranslation: + # Verifies the translation for a source address on a STUN client. 
stun_clients: - source_address: 172.18.3.2 public_address: 172.18.3.21 diff --git a/mkdocs.yml b/mkdocs.yml index 7b1900771..9c05fb939 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -79,6 +79,7 @@ watch: - anta plugins: + - gh-admonitions - mkdocstrings: default_handler: python custom_templates: docs/templates diff --git a/pyproject.toml b/pyproject.toml index e1600b0be..c0d6b8c2a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,13 +23,13 @@ dependencies = [ "asyncssh>=2.16", "cvprac>=1.3.1", "eval-type-backport>=0.1.3", # Support newer typing features in older Python versions (required until Python 3.9 support is removed) + "httpx>=0.27.0", "Jinja2>=3.1.2", "pydantic>=2.7", "pydantic-extra-types>=2.3.0", "PyYAML>=6.0", "requests>=2.31.0", - "rich>=13.5.2,<14", - "httpx>=0.27.0" + "rich>=13.5.2,<14" ] keywords = ["test", "anta", "Arista", "network", "automation", "networking", "devops", "netdevops"] classifiers = [ @@ -70,7 +70,6 @@ dev = [ "pytest-cov>=4.1.0", "pytest-dependency", "pytest-codspeed>=2.2.0", - "respx", "pytest-html>=3.2.0", "pytest-httpx>=0.30.0", "pytest-metadata>=3.0.0", @@ -98,7 +97,8 @@ doc = [ "mkdocs-material>=9.5.34", "mkdocstrings[python]>=0.26.0", "mkdocstrings-python>=1.11.0", - "black>=24.10.0" + "black>=24.10.0", + "mkdocs-github-admonitions-plugin>=0.0.3" ] [project.urls] @@ -109,13 +109,13 @@ Contributing = "https://anta.arista.com/main/contribution/" [project.scripts] anta = "anta.cli:cli" -################################ -# Tools -################################ [tool.setuptools.packages.find] include = ["anta*", "asynceapi*"] namespaces = false +[tool.setuptools.package-data] +"anta" = ["py.typed"] + ################################ # Version ################################ @@ -259,6 +259,9 @@ extras = # tox -e -- path/to/my/test::test commands = pytest {posargs} +# To test on non-POSIX system +# https://github.com/tox-dev/tox/issues/1455 +passenv = USERNAME [testenv:lint] description = Check the code style @@ -367,6 +370,7 @@ convention = "numpy" # we have not removed pylint completely, these settings should be kept in sync with our pylintrc file. # https://github.com/astral-sh/ruff/issues/970 max-branches = 13 +max-args = 10 [tool.ruff.lint.mccabe] # Unlike Flake8, default to a complexity level of 10. 
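A note on the `passenv = USERNAME` addition above: tox strips most of the environment, and on Windows the local user name is only discoverable through environment variables. Python's `getpass.getuser()`, which libraries such as asyncssh presumably fall back to for a default SSH username (an assumption here), checks `LOGNAME`, `USER`, `LNAME`, and `USERNAME` in order and fails when none is set:

```python
# On POSIX systems getpass.getuser() can fall back to the password database;
# on Windows it depends entirely on one of the environment variables above
# being present, hence forwarding USERNAME into the tox environment.
import getpass

print(getpass.getuser())
```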
@@ -377,6 +381,7 @@ max-complexity = 10 "RICH_COLOR_PALETTE" ] + [tool.ruff.lint.flake8-type-checking] # These classes require that type annotations be available at runtime runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.Input"] @@ -391,7 +396,6 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In "tests/units/*" = [ "ARG002", # Sometimes we need to declare unused arguments when a parameter is not used but declared in @pytest.mark.parametrize "FBT001", # Boolean-typed positional argument in function definition - "PLR0913", # Too many arguments to function call "PLR2004", # Magic value used in comparison, consider replacing {value} with a constant variable "S105", # Passwords are indeed hardcoded in tests "S106", # Passwords are indeed hardcoded in tests @@ -413,7 +417,7 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In "T201", # Allow print statements ] "anta/cli/*" = [ - "PLR0913", # Allow more than 5 input arguments in CLI functions + "PLR0913", # CLI has many arguments defined in functions "ANN401", # TODO: Check if we can update the Any type hints in the CLI ] "anta/tests/field_notices.py" = [ @@ -430,13 +434,6 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In ] "anta/tools.py" = [ "ANN401", # Ok to use Any type hint in our custom get functions - "PLR0913", # Ok to have more than 5 arguments in our custom get functions -] -"anta/device.py" = [ - "PLR0913", # Ok to have more than 5 arguments in the AntaDevice classes -] -"anta/inventory/__init__.py" = [ - "PLR0913", # Ok to have more than 5 arguments in the AntaInventory class ] "examples/*.py" = [ # These are example scripts and linked in snippets "S105", # Possible hardcoded password @@ -471,9 +468,11 @@ disable = [ # Any rule listed here can be disabled: https://github.com/astral-sh "reimported", "wrong-import-order", "wrong-import-position", + "unnecessary-lambda", "abstract-class-instantiated", # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-instantiation-of-abstract-classes-abstract "unexpected-keyword-arg", # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-arguments-in-calls-call-arg and other rules - "no-value-for-parameter" # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-arguments-in-calls-call-arg + "no-value-for-parameter", # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-arguments-in-calls-call-arg + "import-outside-toplevel" ] max-statements=61 max-returns=8 diff --git a/tests/benchmark/conftest.py b/tests/benchmark/conftest.py index 61f2fa11c..04ce54c24 100644 --- a/tests/benchmark/conftest.py +++ b/tests/benchmark/conftest.py @@ -4,12 +4,14 @@ """Fixtures for benchmarking ANTA.""" import logging +from collections import defaultdict import pytest import respx from _pytest.terminal import TerminalReporter from anta.catalog import AntaCatalog +from anta.result_manager import ResultManager from .utils import AntaMockEnvironment @@ -17,6 +19,12 @@ TEST_CASE_COUNT = None +# Used to globally configure the benchmarks by specifying parameters for inventories +BENCHMARK_PARAMETERS = [ + pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"), + pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"), +] + @pytest.fixture(name="anta_mock_env", scope="session") # We want this fixture to have a scope set to session to avoid 
reparsing all the unit tests data. def anta_mock_env_fixture() -> AntaMockEnvironment: @@ -35,6 +43,22 @@ def catalog(anta_mock_env: AntaMockEnvironment) -> AntaCatalog: return anta_mock_env.catalog +@pytest.fixture(name="session_results", scope="session") # We want this fixture to be reused across test modules within tests/benchmark +def session_results_fixture() -> defaultdict[str, ResultManager]: + """Return a dictionary of ResultManager objects for the benchmarks. + + The key is the test id as defined in pytest_generate_tests in this module. + Used to pass a populated ResultManager from one benchmark to another. + """ + return defaultdict(lambda: ResultManager()) + + +@pytest.fixture +def results(request: pytest.FixtureRequest, session_results: defaultdict[str, ResultManager]) -> ResultManager: + """Return the unique ResultManager object for the current benchmark parameter.""" + return session_results[request.node.callspec.id] + + def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: """Display the total number of ANTA unit test cases used to benchmark.""" terminalreporter.write_sep("=", f"{TEST_CASE_COUNT} ANTA test cases") @@ -49,9 +73,12 @@ def pytest_generate_tests(metafunc: pytest.Metafunc) -> None: return metafunc.parametrize( "inventory", - [ - pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"), - pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"), - ], + BENCHMARK_PARAMETERS, + indirect=True, + ) + elif "results" in metafunc.fixturenames: + metafunc.parametrize( + "results", + BENCHMARK_PARAMETERS, indirect=True, ) diff --git a/tests/benchmark/test_anta.py b/tests/benchmark/test_anta.py index e82de645d..7d1f21c60 100644 --- a/tests/benchmark/test_anta.py +++ b/tests/benchmark/test_anta.py @@ -5,6 +5,7 @@ import asyncio import logging +from collections import defaultdict from unittest.mock import patch import pytest @@ -22,45 +23,61 @@ logger = logging.getLogger(__name__) -def test_anta_dry_run(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None: +def test_anta_dry_run( + benchmark: BenchmarkFixture, + event_loop: asyncio.AbstractEventLoop, + catalog: AntaCatalog, + inventory: AntaInventory, + request: pytest.FixtureRequest, + session_results: defaultdict[str, ResultManager], +) -> None: """Benchmark ANTA in Dry-Run Mode.""" # Disable logging during ANTA execution to avoid having these function time in benchmarks logging.disable() - def _() -> ResultManager: - manager = ResultManager() - catalog.clear_indexes() - event_loop.run_until_complete(main(manager, inventory, catalog, dry_run=True)) - return manager + results = session_results[request.node.callspec.id] - manager = benchmark(_) + @benchmark + def _() -> None: + results.reset() + catalog.clear_indexes() + event_loop.run_until_complete(main(results, inventory, catalog, dry_run=True)) logging.disable(logging.NOTSET) - if len(manager.results) != len(inventory) * len(catalog.tests): - pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} tests but got {len(manager.results)}", pytrace=False) - bench_info = "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Test count: {len(manager.results)}\n" "-----------------------------------------------" + + if len(results.results) != len(inventory) * len(catalog.tests): + pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} tests but got {len(results.results)}", pytrace=False) + bench_info = "\n--- ANTA NRFU 
Dry-Run Benchmark Information ---\n" f"Test count: {len(results.results)}\n" "-----------------------------------------------" logger.info(bench_info) @patch("anta.models.AntaTest.collect", collect) @patch("anta.device.AntaDevice.collect_commands", collect_commands) +@pytest.mark.dependency(name="anta_benchmark", scope="package") @respx.mock # Mock eAPI responses -def test_anta(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None: +def test_anta( + benchmark: BenchmarkFixture, + event_loop: asyncio.AbstractEventLoop, + catalog: AntaCatalog, + inventory: AntaInventory, + request: pytest.FixtureRequest, + session_results: defaultdict[str, ResultManager], +) -> None: """Benchmark ANTA.""" # Disable logging during ANTA execution to avoid having these function time in benchmarks logging.disable() - def _() -> ResultManager: - manager = ResultManager() - catalog.clear_indexes() - event_loop.run_until_complete(main(manager, inventory, catalog)) - return manager + results = session_results[request.node.callspec.id] - manager = benchmark(_) + @benchmark + def _() -> None: + results.reset() + catalog.clear_indexes() + event_loop.run_until_complete(main(results, inventory, catalog)) logging.disable(logging.NOTSET) - if len(catalog.tests) * len(inventory) != len(manager.results): + if len(catalog.tests) * len(inventory) != len(results.results): # This could mean duplicates exist. # TODO: consider removing this code and refactor unit test data as a dictionary with tuple keys instead of a list seen = set() @@ -74,17 +91,17 @@ def _() -> ResultManager: for test in dupes: msg = f"Found duplicate in test catalog: {test}" logger.error(msg) - pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(manager.results)}", pytrace=False) + pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(results.results)}", pytrace=False) bench_info = ( "\n--- ANTA NRFU Benchmark Information ---\n" - f"Test results: {len(manager.results)}\n" - f"Success: {manager.get_total_results({AntaTestStatus.SUCCESS})}\n" - f"Failure: {manager.get_total_results({AntaTestStatus.FAILURE})}\n" - f"Skipped: {manager.get_total_results({AntaTestStatus.SKIPPED})}\n" - f"Error: {manager.get_total_results({AntaTestStatus.ERROR})}\n" - f"Unset: {manager.get_total_results({AntaTestStatus.UNSET})}\n" + f"Test results: {len(results.results)}\n" + f"Success: {results.get_total_results({AntaTestStatus.SUCCESS})}\n" + f"Failure: {results.get_total_results({AntaTestStatus.FAILURE})}\n" + f"Skipped: {results.get_total_results({AntaTestStatus.SKIPPED})}\n" + f"Error: {results.get_total_results({AntaTestStatus.ERROR})}\n" + f"Unset: {results.get_total_results({AntaTestStatus.UNSET})}\n" "---------------------------------------" ) logger.info(bench_info) - assert manager.get_total_results({AntaTestStatus.ERROR}) == 0 - assert manager.get_total_results({AntaTestStatus.UNSET}) == 0 + assert results.get_total_results({AntaTestStatus.ERROR}) == 0 + assert results.get_total_results({AntaTestStatus.UNSET}) == 0 diff --git a/tests/benchmark/test_reporter.py b/tests/benchmark/test_reporter.py new file mode 100644 index 000000000..ea74fb5da --- /dev/null +++ b/tests/benchmark/test_reporter.py @@ -0,0 +1,71 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. 
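The `@pytest.mark.dependency(name="anta_benchmark", scope="package")` marker added above pairs with the `depends=["anta_benchmark"]` markers on the reporter benchmarks below: pytest-dependency skips the consumers whenever the producer did not pass, so the reporters are never benchmarked against an unpopulated ResultManager. A minimal sketch of the pattern, assuming pytest-dependency is installed (hypothetical test names):

import pytest

@pytest.mark.dependency(name="producer", scope="package")
def test_producer() -> None:
    assert True  # would populate the shared session state here

@pytest.mark.dependency(depends=["producer"], scope="package")
def test_consumer() -> None:
    pass  # skipped automatically when test_producer fails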
+"""Benchmark tests for anta.reporter.""" + +import json +import logging +from pathlib import Path + +import pytest + +from anta.reporter import ReportJinja, ReportTable +from anta.reporter.csv_reporter import ReportCsv +from anta.reporter.md_reporter import MDReportGenerator +from anta.result_manager import ResultManager + +logger = logging.getLogger(__name__) + +DATA_DIR: Path = Path(__file__).parents[1].resolve() / "data" + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_table_all(results: ResultManager) -> None: + """Benchmark ReportTable.report_all().""" + reporter = ReportTable() + reporter.report_all(results) + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_table_devices(results: ResultManager) -> None: + """Benchmark ReportTable.report_summary_devices().""" + reporter = ReportTable() + reporter.report_summary_devices(results) + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_table_tests(results: ResultManager) -> None: + """Benchmark ReportTable.report_summary_tests().""" + reporter = ReportTable() + reporter.report_summary_tests(results) + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_json(results: ResultManager) -> None: + """Benchmark JSON report.""" + assert isinstance(results.json, str) + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_jinja(results: ResultManager) -> None: + """Benchmark ReportJinja.""" + assert isinstance(ReportJinja(template_path=DATA_DIR / "template.j2").render(json.loads(results.json)), str) + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_csv(results: ResultManager, tmp_path: Path) -> None: + """Benchmark ReportCsv.generate().""" + ReportCsv.generate(results=results, csv_filename=tmp_path / "report.csv") + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_markdown(results: ResultManager, tmp_path: Path) -> None: + """Benchmark MDReportGenerator.generate().""" + MDReportGenerator.generate(results=results, md_filename=tmp_path / "report.md") diff --git a/tests/benchmark/test_runner.py b/tests/benchmark/test_runner.py index b020a85d0..a8639af3e 100644 --- a/tests/benchmark/test_runner.py +++ b/tests/benchmark/test_runner.py @@ -5,19 +5,21 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any from anta.result_manager import ResultManager from anta.runner import get_coroutines, prepare_tests if TYPE_CHECKING: from collections import defaultdict + from collections.abc import Coroutine from pytest_codspeed import BenchmarkFixture from anta.catalog import AntaCatalog, AntaTestDefinition from anta.device import AntaDevice from anta.inventory import AntaInventory + from anta.result_manager.models import TestResult def test_prepare_tests(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None: @@ -40,9 +42,13 @@ def test_get_coroutines(benchmark: BenchmarkFixture, catalog: AntaCatalog, inven assert selected_tests is not None - coroutines = benchmark(lambda: get_coroutines(selected_tests=selected_tests, manager=ResultManager())) - for coros in coroutines: - coros.close() + def bench() -> list[Coroutine[Any, Any, TestResult]]: + coros = get_coroutines(selected_tests=selected_tests, 
manager=ResultManager()) + for c in coros: + c.close() + return coros + + coroutines = benchmark(bench) count = sum(len(tests) for tests in selected_tests.values()) assert count == len(coroutines) diff --git a/tests/units/anta_tests/routing/test_bgp.py b/tests/units/anta_tests/routing/test_bgp.py index c5b8cedb6..2ae5e8e1b 100644 --- a/tests/units/anta_tests/routing/test_bgp.py +++ b/tests/units/anta_tests/routing/test_bgp.py @@ -525,19 +525,16 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo "peerAddress": "10.100.0.12", "state": "Idle", "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, - "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, { "peerAddress": "10.100.0.13", "state": "Idle", "neighborCapabilities": {"multiprotocolCaps": {"dps": {"advertised": True, "received": True, "enabled": True}}}, - "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, { "peerAddress": "10.100.0.14", - "state": "Idle", + "state": "Active", "neighborCapabilities": {"multiprotocolCaps": {"linkState": {"advertised": True, "received": True, "enabled": True}}}, - "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, ] }, @@ -545,9 +542,8 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo "peerList": [ { "peerAddress": "10.100.0.12", - "state": "Idle", + "state": "Active", "neighborCapabilities": {"multiprotocolCaps": {"ipv4SrTe": {"advertised": True, "received": True, "enabled": True}}}, - "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, ] }, @@ -566,9 +562,9 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo "result": "failure", "messages": [ "AFI: ipv4 SAFI: unicast VRF: default Peer: 10.100.0.12 - Session state is not established - State: Idle", - "AFI: ipv4 SAFI: sr-te VRF: MGMT Peer: 10.100.0.12 - Session state is not established - State: Idle", + "AFI: ipv4 SAFI: sr-te VRF: MGMT Peer: 10.100.0.12 - Session state is not established - State: Active", "AFI: path-selection Peer: 10.100.0.13 - Session state is not established - State: Idle", - "AFI: link-state Peer: 10.100.0.14 - Session state is not established - State: Idle", + "AFI: link-state Peer: 10.100.0.14 - Session state is not established - State: Active", ], }, }, @@ -582,19 +578,19 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo "peerList": [ { "peerAddress": "10.100.0.12", - "state": "Idle", + "state": "Established", "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": False, "received": False, "enabled": True}}}, "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, { "peerAddress": "10.100.0.13", - "state": "Idle", + "state": "Established", "neighborCapabilities": {"multiprotocolCaps": {"dps": {"advertised": True, "received": False, "enabled": False}}}, "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, { "peerAddress": "10.100.0.14", - "state": "Idle", + "state": "Established", "neighborCapabilities": {"multiprotocolCaps": {"linkState": {"advertised": False, "received": False, "enabled": False}}}, "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, @@ -604,7 +600,7 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo 
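Closing each coroutine inside `bench` above is deliberate: `get_coroutines` returns coroutine objects that this benchmark never awaits, and an un-awaited coroutine emits "RuntimeWarning: coroutine ... was never awaited" when it is garbage-collected. A tiny illustration:

async def work() -> int:
    return 42

coro = work()  # created but never awaited
coro.close()   # releases it cleanly; no never-awaited RuntimeWarning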
"peerList": [ { "peerAddress": "10.100.0.12", - "state": "Idle", + "state": "Established", "neighborCapabilities": {"multiprotocolCaps": {"ipv4SrTe": {"advertised": False, "received": False, "enabled": False}}}, "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, @@ -624,13 +620,9 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo "expected": { "result": "failure", "messages": [ - "AFI: ipv4 SAFI: unicast VRF: default Peer: 10.100.0.12 - Session state is not established - State: Idle", "AFI: ipv4 SAFI: unicast VRF: default Peer: 10.100.0.12 - AFI/SAFI state is not negotiated - Advertised: False, Received: False, Enabled: True", - "AFI: ipv4 SAFI: sr-te VRF: MGMT Peer: 10.100.0.12 - Session state is not established - State: Idle", "AFI: ipv4 SAFI: sr-te VRF: MGMT Peer: 10.100.0.12 - AFI/SAFI state is not negotiated - Advertised: False, Received: False, Enabled: False", - "AFI: path-selection Peer: 10.100.0.13 - Session state is not established - State: Idle", "AFI: path-selection Peer: 10.100.0.13 - AFI/SAFI state is not negotiated - Advertised: True, Received: False, Enabled: False", - "AFI: link-state Peer: 10.100.0.14 - Session state is not established - State: Idle", "AFI: link-state Peer: 10.100.0.14 - AFI/SAFI state is not negotiated - Advertised: False, Received: False, Enabled: False", ], }, @@ -645,19 +637,19 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo "peerList": [ { "peerAddress": "10.100.0.12", - "state": "Idle", + "state": "Established", "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 4, "inputQueueLength": 2}, }, { "peerAddress": "10.100.0.13", - "state": "Idle", + "state": "Established", "neighborCapabilities": {"multiprotocolCaps": {"dps": {"advertised": True, "received": True, "enabled": True}}}, "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 1, "inputQueueLength": 1}, }, { "peerAddress": "10.100.0.14", - "state": "Idle", + "state": "Established", "neighborCapabilities": {"multiprotocolCaps": {"linkState": {"advertised": True, "received": True, "enabled": True}}}, "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 2, "inputQueueLength": 3}, }, @@ -667,7 +659,7 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo "peerList": [ { "peerAddress": "10.100.0.12", - "state": "Idle", + "state": "Established", "neighborCapabilities": {"multiprotocolCaps": {"ipv4SrTe": {"advertised": True, "received": True, "enabled": True}}}, "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 1, "inputQueueLength": 5}, }, @@ -687,13 +679,9 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo "expected": { "result": "failure", "messages": [ - "AFI: ipv4 SAFI: unicast VRF: default Peer: 10.100.0.12 - Session state is not established - State: Idle", "AFI: ipv4 SAFI: unicast VRF: default Peer: 10.100.0.12 - Session has non-empty message queues - InQ: 2, OutQ: 4", - "AFI: ipv4 SAFI: sr-te VRF: MGMT Peer: 10.100.0.12 - Session state is not established - State: Idle", "AFI: ipv4 SAFI: sr-te VRF: MGMT Peer: 10.100.0.12 - Session has non-empty message queues - InQ: 5, OutQ: 1", - "AFI: path-selection Peer: 10.100.0.13 - Session state is not established - State: Idle", "AFI: path-selection Peer: 10.100.0.13 - Session has non-empty message queues - InQ: 1, OutQ: 1", - "AFI: 
link-state Peer: 10.100.0.14 - Session state is not established - State: Idle", "AFI: link-state Peer: 10.100.0.14 - Session has non-empty message queues - InQ: 3, OutQ: 2", ], }, @@ -823,7 +811,6 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo "peerAddress": "10.100.0.12", "state": "Idle", "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, - "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, } ] }, @@ -833,7 +820,6 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo "peerAddress": "10.100.0.14", "state": "Idle", "neighborCapabilities": {"multiprotocolCaps": {"ipv4Unicast": {"advertised": True, "received": True, "enabled": True}}}, - "peerTcpInfo": {"state": "ESTABLISHED", "outputQueueLength": 0, "inputQueueLength": 0}, }, ] }, @@ -2829,8 +2815,8 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo "evpnRoutePaths": [ { "routeType": { - "active": True, - "valid": True, + "active": False, + "valid": False, }, }, ] @@ -3053,7 +3039,7 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo }, }, { - "name": "failure-multiple-routes-multiple-paths-not-active", + "name": "success-multiple-path-and-have-one-active/valid", "test": VerifyEVPNType2Route, "eos_data": [ { @@ -3098,10 +3084,7 @@ def test_check_bgp_neighbor_capability(input_dict: dict[str, bool], expected: bo ], "inputs": {"vxlan_endpoints": [{"address": "192.168.20.102", "vni": 10020}]}, "expected": { - "result": "failure", - "messages": [ - "The following EVPN Type-2 routes do not have at least one valid and active path: ['RD: 10.1.0.6:500 mac-ip 10020 aac1.ab4e.bec2 192.168.20.102']" - ], + "result": "success", }, }, { diff --git a/tests/units/anta_tests/test_avt.py b/tests/units/anta_tests/test_avt.py index 80fbce036..d9cdaa1fa 100644 --- a/tests/units/anta_tests/test_avt.py +++ b/tests/units/anta_tests/test_avt.py @@ -361,48 +361,63 @@ "avts": { "DEFAULT-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - } - } - } - } - } - }, - { - "vrfs": { - "data": { - "avts": { - "DATA-AVT-POLICY-CONTROL-PLANE": { - "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:8": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, + "direct:10": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, + "direct:9": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, + "multihop:1": 
{ + "flags": {"directPath": False, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, + "multihop:3": { + "flags": {"directPath": False, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, } } - } - } - } - }, - { - "vrfs": { + }, + }, "data": { "avts": { "DATA-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:8": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, + "direct:10": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.1", + }, + "direct:9": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.1", + }, + "direct:8": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.2", + "destination": "10.101.255.1", + }, + "multihop:1": { + "flags": {"directPath": False, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.2", + "destination": "10.101.255.1", + }, + "multihop:3": { + "flags": {"directPath": False, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.2", + "destination": "10.101.255.1", + }, } - } + }, } - } + }, } }, ], @@ -420,36 +435,85 @@ "test": VerifyAVTSpecificPath, "eos_data": [ {"vrfs": {}}, + ], + "inputs": { + "avt_paths": [ + {"avt_name": "MGMT-AVT-POLICY-DEFAULT", "vrf": "default", "destination": "10.101.255.2", "next_hop": "10.101.255.1", "path_type": "multihop"}, + {"avt_name": "DATA-AVT-POLICY-CONTROL-PLANE", "vrf": "data", "destination": "10.101.255.1", "next_hop": "10.101.255.2", "path_type": "multihop"}, + ] + }, + "expected": { + "result": "failure", + "messages": ["AVT MGMT-AVT-POLICY-DEFAULT VRF: default (Destination: 10.101.255.2, Next-hop: 10.101.255.1) - No AVT path configured"], + }, + }, + { + "name": "failure-path_type_check_true", + "test": VerifyAVTSpecificPath, + "eos_data": [ { "vrfs": { + "default": { + "avts": { + "DEFAULT-AVT-POLICY-CONTROL-PLANE": { + "avtPaths": { + "direct:10": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, + "direct:9": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, + } + } + }, + }, "data": { "avts": { "DATA-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, + "direct:10": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.3", + }, + "direct:9": { + 
"flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.3", + }, } - } + }, } - } + }, } }, ], "inputs": { "avt_paths": [ - {"avt_name": "MGMT-AVT-POLICY-DEFAULT", "vrf": "default", "destination": "10.101.255.2", "next_hop": "10.101.255.1", "path_type": "multihop"}, - {"avt_name": "DATA-AVT-POLICY-CONTROL-PLANE", "vrf": "data", "destination": "10.101.255.1", "next_hop": "10.101.255.2", "path_type": "multihop"}, + { + "avt_name": "DEFAULT-AVT-POLICY-CONTROL-PLANE", + "vrf": "default", + "destination": "10.101.255.2", + "next_hop": "10.101.255.11", + "path_type": "multihop", + }, + {"avt_name": "DATA-AVT-POLICY-CONTROL-PLANE", "vrf": "data", "destination": "10.101.255.1", "next_hop": "10.101.255.21", "path_type": "direct"}, ] }, "expected": { "result": "failure", - "messages": ["AVT configuration for peer '10.101.255.2' under topology 'MGMT-AVT-POLICY-DEFAULT' in VRF 'default' is not found."], + "messages": [ + "AVT DEFAULT-AVT-POLICY-CONTROL-PLANE VRF: default (Destination: 10.101.255.2, Next-hop: 10.101.255.11) Path Type: multihop - Path not found", + "AVT DATA-AVT-POLICY-CONTROL-PLANE VRF: data (Destination: 10.101.255.1, Next-hop: 10.101.255.21) Path Type: direct - Path not found", + ], }, }, { - "name": "failure-no-path-with-correct-next-hop", + "name": "failure-path_type_check_false", "test": VerifyAVTSpecificPath, "eos_data": [ { @@ -458,30 +522,38 @@ "avts": { "DEFAULT-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, + "direct:10": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, + "direct:9": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, } } - } - } - } - }, - { - "vrfs": { + }, + }, "data": { "avts": { "DATA-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, + "direct:10": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.3", + }, + "direct:9": { + "flags": {"directPath": True, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.3", + }, } - } + }, } - } + }, } }, ], @@ -492,18 +564,15 @@ "vrf": "default", "destination": "10.101.255.2", "next_hop": "10.101.255.11", - "path_type": "multihop", }, - {"avt_name": "DATA-AVT-POLICY-CONTROL-PLANE", "vrf": "data", "destination": "10.101.255.1", "next_hop": "10.101.255.21", "path_type": "direct"}, + {"avt_name": "DATA-AVT-POLICY-CONTROL-PLANE", "vrf": "data", "destination": "10.101.255.1", "next_hop": "10.101.255.21"}, ] }, 
"expected": { "result": "failure", "messages": [ - "No 'multihop' path found with next-hop address '10.101.255.11' for AVT peer '10.101.255.2' under " - "topology 'DEFAULT-AVT-POLICY-CONTROL-PLANE' in VRF 'default'.", - "No 'direct' path found with next-hop address '10.101.255.21' for AVT peer '10.101.255.1' under " - "topology 'DATA-AVT-POLICY-CONTROL-PLANE' in VRF 'data'.", + "AVT DEFAULT-AVT-POLICY-CONTROL-PLANE VRF: default (Destination: 10.101.255.2, Next-hop: 10.101.255.11) - Path not found", + "AVT DATA-AVT-POLICY-CONTROL-PLANE VRF: data (Destination: 10.101.255.1, Next-hop: 10.101.255.21) - Path not found", ], }, }, @@ -517,30 +586,48 @@ "avts": { "DEFAULT-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "direct:9": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:1": {"flags": {"directPath": True, "valid": False, "active": False}, "nexthopAddr": "10.101.255.1"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": False}, "nexthopAddr": "10.101.255.1"}, + "multihop:3": { + "flags": {"directPath": False, "valid": False, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.2", + }, } } - } - } - } - }, - { - "vrfs": { + }, + }, "data": { "avts": { "DATA-AVT-POLICY-CONTROL-PLANE": { "avtPaths": { - "direct:10": {"flags": {"directPath": True, "valid": True, "active": True}, "nexthopAddr": "10.101.255.1"}, - "direct:9": {"flags": {"directPath": True, "valid": False, "active": True}, "nexthopAddr": "10.101.255.1"}, - "multihop:1": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, - "multihop:3": {"flags": {"directPath": False, "valid": True, "active": True}, "nexthopAddr": "10.101.255.2"}, + "direct:10": { + "flags": {"directPath": True, "valid": False, "active": True}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.1", + }, + "direct:9": { + "flags": {"directPath": True, "valid": True, "active": False}, + "nexthopAddr": "10.101.255.1", + "destination": "10.101.255.1", + }, + "direct:8": { + "flags": {"directPath": True, "valid": False, "active": False}, + "nexthopAddr": "10.101.255.2", + "destination": "10.101.255.1", + }, + "multihop:1": { + "flags": {"directPath": False, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.2", + "destination": "10.101.255.1", + }, + "multihop:3": { + "flags": {"directPath": False, "valid": True, "active": True}, + "nexthopAddr": "10.101.255.2", + "destination": "10.101.255.1", + }, } - } + }, } - } + }, } }, ], @@ -559,8 +646,12 @@ "expected": { "result": "failure", "messages": [ - "AVT path 'multihop:3' for topology 'DEFAULT-AVT-POLICY-CONTROL-PLANE' in VRF 'default' is inactive.", - "AVT path 'direct:9' for topology 'DATA-AVT-POLICY-CONTROL-PLANE' in VRF 'data' is invalid.", + "AVT DEFAULT-AVT-POLICY-CONTROL-PLANE VRF: default (Destination: 10.101.255.2, Next-hop: 10.101.255.1) - " + "Incorrect path multihop:3 - Valid: False, Active: True", + "AVT DATA-AVT-POLICY-CONTROL-PLANE VRF: data (Destination: 10.101.255.1, Next-hop: 10.101.255.1) - " + "Incorrect path direct:10 - Valid: False, Active: True", + "AVT DATA-AVT-POLICY-CONTROL-PLANE VRF: data (Destination: 10.101.255.1, Next-hop: 10.101.255.1) - " + "Incorrect path direct:9 - Valid: True, Active: False", ], }, }, diff --git a/tests/units/anta_tests/test_cvx.py b/tests/units/anta_tests/test_cvx.py index 
0d4cec4ea..46d83b02a 100644 --- a/tests/units/anta_tests/test_cvx.py +++ b/tests/units/anta_tests/test_cvx.py @@ -7,7 +7,7 @@ from typing import Any -from anta.tests.cvx import VerifyManagementCVX, VerifyMcsClientMounts +from anta.tests.cvx import VerifyActiveCVXConnections, VerifyCVXClusterStatus, VerifyManagementCVX, VerifyMcsClientMounts, VerifyMcsServerMounts from tests.units.anta_tests import test DATA: list[dict[str, Any]] = [ @@ -140,10 +140,386 @@ "expected": {"result": "success"}, }, { - "name": "failure", + "name": "failure - no enabled state", "test": VerifyManagementCVX, "eos_data": [{"clusterStatus": {}}], "inputs": {"enabled": False}, "expected": {"result": "failure", "messages": ["Management CVX status is not valid: None"]}, }, + { + "name": "failure - no clusterStatus", + "test": VerifyManagementCVX, + "eos_data": [{}], + "inputs": {"enabled": False}, + "expected": {"result": "failure", "messages": ["Management CVX status is not valid: None"]}, + }, + { + "name": "success", + "test": VerifyMcsServerMounts, + "eos_data": [ + { + "connections": [ + { + "hostname": "media-leaf-1", + "mounts": [ + { + "service": "Mcs", + "mountStates": [ + { + "pathStates": [ + {"path": "mcs/v1/apiCfgRedStatus", "type": "Mcs::ApiConfigRedundancyStatus", "state": "mountStateMountComplete"}, + {"path": "mcs/v1/activeflows", "type": "Mcs::ActiveFlows", "state": "mountStateMountComplete"}, + {"path": "mcs/switch/status", "type": "Mcs::Client::Status", "state": "mountStateMountComplete"}, + ] + } + ], + } + ], + } + ] + } + ], + "inputs": {"connections_count": 1}, + "expected": {"result": "success"}, + }, + { + "name": "failure-no-mounts", + "test": VerifyMcsServerMounts, + "eos_data": [{"connections": [{"hostname": "media-leaf-1", "mounts": []}]}], + "inputs": {"connections_count": 1}, + "expected": { + "result": "failure", + "messages": ["No mount status for media-leaf-1", "Incorrect CVX successful connections count. 
Expected: 1, Actual : 0"], + }, + }, + { + "name": "failure-unexpected-number-paths", + "test": VerifyMcsServerMounts, + "eos_data": [ + { + "connections": [ + { + "hostname": "media-leaf-1", + "mounts": [ + { + "service": "Mcs", + "mountStates": [ + { + "pathStates": [ + {"path": "mcs/v1/apiCfgRedStatus", "type": "Mcs::ApiStatus", "state": "mountStateMountComplete"}, + {"path": "mcs/v1/activeflows", "type": "Mcs::ActiveFlows", "state": "mountStateMountComplete"}, + ] + } + ], + } + ], + } + ] + } + ], + "inputs": {"connections_count": 1}, + "expected": { + "result": "failure", + "messages": [ + "Incorrect number of mount path states for media-leaf-1 - Expected: 3, Actual: 2", + "Unexpected MCS path type for media-leaf-1: 'Mcs::ApiStatus'.", + ], + }, + }, + { + "name": "failure-unexpected-path-type", + "test": VerifyMcsServerMounts, + "eos_data": [ + { + "connections": [ + { + "hostname": "media-leaf-1", + "mounts": [ + { + "service": "Mcs", + "mountStates": [ + { + "pathStates": [ + {"path": "mcs/v1/apiCfgRedStatus", "type": "Mcs::ApiStatus", "state": "mountStateMountComplete"}, + {"path": "mcs/v1/activeflows", "type": "Mcs::ActiveFlows", "state": "mountStateMountComplete"}, + {"path": "mcs/switch/status", "type": "Mcs::Client::Status", "state": "mountStateMountComplete"}, + ] + } + ], + } + ], + } + ] + } + ], + "inputs": {"connections_count": 1}, + "expected": {"result": "failure", "messages": ["Unexpected MCS path type for media-leaf-1: 'Mcs::ApiStatus'"]}, + }, + { + "name": "failure-invalid-mount-state", + "test": VerifyMcsServerMounts, + "eos_data": [ + { + "connections": [ + { + "hostname": "media-leaf-1", + "mounts": [ + { + "service": "Mcs", + "mountStates": [ + { + "pathStates": [ + {"path": "mcs/v1/apiCfgRedStatus", "type": "Mcs::ApiConfigRedundancyStatus", "state": "mountStateMountFailed"}, + {"path": "mcs/v1/activeflows", "type": "Mcs::ActiveFlows", "state": "mountStateMountComplete"}, + {"path": "mcs/switch/status", "type": "Mcs::Client::Status", "state": "mountStateMountComplete"}, + ] + } + ], + } + ], + } + ] + } + ], + "inputs": {"connections_count": 1}, + "expected": { + "result": "failure", + "messages": ["MCS server mount state for path 'Mcs::ApiConfigRedundancyStatus' is not valid is for media-leaf-1: 'mountStateMountFailed'"], + }, + }, + { + "name": "failure-no-mcs-mount", + "test": VerifyMcsServerMounts, + "eos_data": [ + { + "connections": [ + { + "hostname": "media-leaf-1", + "mounts": [ + { + "service": "blah-blah", + "mountStates": [{"pathStates": [{"path": "blah-blah-path", "type": "blah-blah-type", "state": "blah-blah-state"}]}], + } + ], + } + ] + } + ], + "inputs": {"connections_count": 1}, + "expected": {"result": "failure", "messages": ["MCS mount state not detected", "Incorrect CVX successful connections count. 
Expected: 1, Actual : 0"]}, + }, + { + "name": "failure-connections", + "test": VerifyMcsServerMounts, + "eos_data": [{}], + "inputs": {"connections_count": 1}, + "expected": {"result": "failure", "messages": ["CVX connections are not available."]}, + }, + { + "name": "success", + "test": VerifyActiveCVXConnections, + "eos_data": [ + { + "connections": [ + { + "switchId": "fc:bd:67:c3:16:55", + "hostname": "lyv563", + "oobConnectionActive": True, + }, + { + "switchId": "00:1c:73:3c:e3:9e", + "hostname": "tg264", + "oobConnectionActive": True, + }, + ] + } + ], + "inputs": {"connections_count": 2}, + "expected": {"result": "success"}, + }, + { + "name": "failure", + "test": VerifyActiveCVXConnections, + "eos_data": [ + { + "connections": [ + { + "switchId": "fc:bd:67:c3:16:55", + "hostname": "lyv563", + "oobConnectionActive": False, + }, + { + "switchId": "00:1c:73:3c:e3:9e", + "hostname": "tg264", + "oobConnectionActive": True, + }, + ] + } + ], + "inputs": {"connections_count": 2}, + "expected": {"result": "failure", "messages": ["CVX active connections count. Expected: 2, Actual : 1"]}, + }, + { + "name": "failure-no-connections", + "test": VerifyActiveCVXConnections, + "eos_data": [{}], + "inputs": {"connections_count": 2}, + "expected": {"result": "failure", "messages": ["CVX connections are not available"]}, + }, + { + "name": "success-all", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": True, + "clusterMode": True, + "clusterStatus": { + "role": "Master", + "peerStatus": { + "cvx-red-2": {"peerName": "cvx-red-2", "registrationState": "Registration complete"}, + "cvx-red-3": {"peerName": "cvx-red-3", "registrationState": "Registration complete"}, + }, + }, + } + ], + "inputs": { + "role": "Master", + "peer_status": [ + {"peer_name": "cvx-red-2", "registrationState": "Registration complete"}, + {"peer_name": "cvx-red-3", "registrationState": "Registration complete"}, + ], + }, + "expected": {"result": "success"}, + }, + { + "name": "failure-invalid-role", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": True, + "clusterMode": True, + "clusterStatus": { + "role": "Standby", + "peerStatus": { + "cvx-red-2": {"peerName": "cvx-red-2", "registrationState": "Registration complete"}, + "cvx-red-3": {"peerName": "cvx-red-3", "registrationState": "Registration complete"}, + }, + }, + } + ], + "inputs": { + "role": "Master", + "peer_status": [ + {"peer_name": "cvx-red-2", "registrationState": "Registration complete"}, + {"peer_name": "cvx-red-3", "registrationState": "Registration complete"}, + ], + }, + "expected": {"result": "failure", "messages": ["CVX Role is not valid: Standby"]}, + }, + { + "name": "failure-cvx-enabled", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": False, + "clusterMode": True, + "clusterStatus": { + "role": "Master", + "peerStatus": {}, + }, + } + ], + "inputs": { + "role": "Master", + "peer_status": [], + }, + "expected": {"result": "failure", "messages": ["CVX Server status is not enabled"]}, + }, + { + "name": "failure-cluster-enabled", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": True, + "clusterMode": False, + "clusterStatus": {}, + } + ], + "inputs": { + "role": "Master", + "peer_status": [], + }, + "expected": {"result": "failure", "messages": ["CVX Server is not a cluster"]}, + }, + { + "name": "failure-missing-peers", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": True, + "clusterMode": True, + "clusterStatus": { + "role": "Master", + "peerStatus": { + 
"cvx-red-2": {"peerName": "cvx-red-2", "registrationState": "Registration complete"}, + }, + }, + } + ], + "inputs": { + "role": "Master", + "peer_status": [ + {"peer_name": "cvx-red-2", "registrationState": "Registration complete"}, + {"peer_name": "cvx-red-3", "registrationState": "Registration complete"}, + ], + }, + "expected": {"result": "failure", "messages": ["Unexpected number of peers 1 vs 2", "cvx-red-3 is not present"]}, + }, + { + "name": "failure-invalid-peers", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": True, + "clusterMode": True, + "clusterStatus": { + "role": "Master", + "peerStatus": {}, + }, + } + ], + "inputs": { + "role": "Master", + "peer_status": [ + {"peer_name": "cvx-red-2", "registrationState": "Registration complete"}, + {"peer_name": "cvx-red-3", "registrationState": "Registration complete"}, + ], + }, + "expected": {"result": "failure", "messages": ["Unexpected number of peers 0 vs 2", "cvx-red-2 is not present", "cvx-red-3 is not present"]}, + }, + { + "name": "failure-registration-error", + "test": VerifyCVXClusterStatus, + "eos_data": [ + { + "enabled": True, + "clusterMode": True, + "clusterStatus": { + "role": "Master", + "peerStatus": { + "cvx-red-2": {"peerName": "cvx-red-2", "registrationState": "Registration error"}, + "cvx-red-3": {"peerName": "cvx-red-3", "registrationState": "Registration complete"}, + }, + }, + } + ], + "inputs": { + "role": "Master", + "peer_status": [ + {"peer_name": "cvx-red-2", "registrationState": "Registration complete"}, + {"peer_name": "cvx-red-3", "registrationState": "Registration complete"}, + ], + }, + "expected": {"result": "failure", "messages": ["cvx-red-2 registration state is not complete: Registration error"]}, + }, ] diff --git a/tests/units/anta_tests/test_interfaces.py b/tests/units/anta_tests/test_interfaces.py index ac0530881..271683b0c 100644 --- a/tests/units/anta_tests/test_interfaces.py +++ b/tests/units/anta_tests/test_interfaces.py @@ -2510,6 +2510,43 @@ "inputs": {"interfaces": [{"name": "Ethernet5", "portchannel": "Port-Channel5"}]}, "expected": {"result": "success"}, }, + { + "name": "success-short-timeout", + "test": VerifyLACPInterfacesStatus, + "eos_data": [ + { + "portChannels": { + "Port-Channel5": { + "interfaces": { + "Ethernet5": { + "actorPortStatus": "bundled", + "partnerPortState": { + "activity": True, + "timeout": True, + "aggregation": True, + "synchronization": True, + "collecting": True, + "distributing": True, + }, + "actorPortState": { + "activity": True, + "timeout": True, + "aggregation": True, + "synchronization": True, + "collecting": True, + "distributing": True, + }, + } + } + } + }, + "interface": "Ethernet5", + "orphanPorts": {}, + } + ], + "inputs": {"interfaces": [{"name": "Ethernet5", "portchannel": "Port-Channel5", "lacp_rate_fast": True}]}, + "expected": {"result": "success"}, + }, { "name": "failure-not-bundled", "test": VerifyLACPInterfacesStatus, @@ -2531,7 +2568,7 @@ "inputs": {"interfaces": [{"name": "Ethernet5", "portchannel": "Po5"}]}, "expected": { "result": "failure", - "messages": ["For Interface Ethernet5:\nExpected `bundled` as the local port status, but found `No Aggregate` instead.\n"], + "messages": ["Interface: Ethernet5 Port-Channel: Port-Channel5 - Not bundled - Port Status: No Aggregate"], }, }, { @@ -2545,7 +2582,7 @@ "inputs": {"interfaces": [{"name": "Ethernet5", "portchannel": "Po 5"}]}, "expected": { "result": "failure", - "messages": ["Interface 'Ethernet5' is not configured to be a member of LACP 'Port-Channel5'."], + 
"messages": ["Interface: Ethernet5 Port-Channel: Port-Channel5 - Not configured"], }, }, { @@ -2586,13 +2623,55 @@ "expected": { "result": "failure", "messages": [ - "For Interface Ethernet5:\n" - "Actor port details:\nExpected `True` as the activity, but found `False` instead." - "\nExpected `True` as the aggregation, but found `False` instead." - "\nExpected `True` as the synchronization, but found `False` instead." - "\nPartner port details:\nExpected `True` as the activity, but found `False` instead.\n" - "Expected `True` as the aggregation, but found `False` instead.\n" - "Expected `True` as the synchronization, but found `False` instead.\n" + "Interface: Ethernet5 Port-Channel: Port-Channel5 - Actor port details mismatch - Activity: False, Aggregation: False, " + "Synchronization: False, Collecting: True, Distributing: True, Timeout: False", + "Interface: Ethernet5 Port-Channel: Port-Channel5 - Partner port details mismatch - Activity: False, Aggregation: False, " + "Synchronization: False, Collecting: True, Distributing: True, Timeout: False", + ], + }, + }, + { + "name": "failure-short-timeout", + "test": VerifyLACPInterfacesStatus, + "eos_data": [ + { + "portChannels": { + "Port-Channel5": { + "interfaces": { + "Ethernet5": { + "actorPortStatus": "bundled", + "partnerPortState": { + "activity": True, + "timeout": False, + "aggregation": True, + "synchronization": True, + "collecting": True, + "distributing": True, + }, + "actorPortState": { + "activity": True, + "timeout": False, + "aggregation": True, + "synchronization": True, + "collecting": True, + "distributing": True, + }, + } + } + } + }, + "interface": "Ethernet5", + "orphanPorts": {}, + } + ], + "inputs": {"interfaces": [{"name": "Ethernet5", "portchannel": "port-channel 5", "lacp_rate_fast": True}]}, + "expected": { + "result": "failure", + "messages": [ + "Interface: Ethernet5 Port-Channel: Port-Channel5 - Actor port details mismatch - Activity: True, Aggregation: True, " + "Synchronization: True, Collecting: True, Distributing: True, Timeout: False", + "Interface: Ethernet5 Port-Channel: Port-Channel5 - Partner port details mismatch - Activity: True, Aggregation: True, " + "Synchronization: True, Collecting: True, Distributing: True, Timeout: False", ], }, }, diff --git a/tests/units/anta_tests/test_security.py b/tests/units/anta_tests/test_security.py index 0d4a478b0..472eb7e18 100644 --- a/tests/units/anta_tests/test_security.py +++ b/tests/units/anta_tests/test_security.py @@ -1079,7 +1079,7 @@ }, ] }, - "expected": {"result": "failure", "messages": ["No IPv4 security connection configured for peer `10.255.0.1`."]}, + "expected": {"result": "failure", "messages": ["Peer: 10.255.0.1 VRF: default - Not configured"]}, }, { "name": "failure-not-established", @@ -1127,14 +1127,10 @@ "expected": { "result": "failure", "messages": [ - "Expected state of IPv4 security connection `source:172.18.3.2 destination:172.18.2.2 vrf:default` for peer `10.255.0.1` is `Established` " - "but found `Idle` instead.", - "Expected state of IPv4 security connection `source:100.64.2.2 destination:100.64.1.2 vrf:default` for peer `10.255.0.1` is `Established` " - "but found `Idle` instead.", - "Expected state of IPv4 security connection `source:100.64.2.2 destination:100.64.1.2 vrf:MGMT` for peer `10.255.0.2` is `Established` " - "but found `Idle` instead.", - "Expected state of IPv4 security connection `source:172.18.2.2 destination:172.18.1.2 vrf:MGMT` for peer `10.255.0.2` is `Established` " - "but found `Idle` instead.", + "Peer: 
10.255.0.1 VRF: default Source: 172.18.3.2 Destination: 172.18.2.2 - Connection down - Expected: Established, Actual: Idle", + "Peer: 10.255.0.1 VRF: default Source: 100.64.2.2 Destination: 100.64.1.2 - Connection down - Expected: Established, Actual: Idle", + "Peer: 10.255.0.2 VRF: MGMT Source: 100.64.2.2 Destination: 100.64.1.2 - Connection down - Expected: Established, Actual: Idle", + "Peer: 10.255.0.2 VRF: MGMT Source: 172.18.2.2 Destination: 172.18.1.2 - Connection down - Expected: Established, Actual: Idle", ], }, }, @@ -1194,12 +1190,10 @@ "expected": { "result": "failure", "messages": [ - "Expected state of IPv4 security connection `source:172.18.3.2 destination:172.18.2.2 vrf:default` for peer `10.255.0.1` is `Established` " - "but found `Idle` instead.", - "Expected state of IPv4 security connection `source:100.64.3.2 destination:100.64.2.2 vrf:default` for peer `10.255.0.1` is `Established` " - "but found `Idle` instead.", - "IPv4 security connection `source:100.64.4.2 destination:100.64.1.2 vrf:default` for peer `10.255.0.2` is not found.", - "IPv4 security connection `source:172.18.4.2 destination:172.18.1.2 vrf:default` for peer `10.255.0.2` is not found.", + "Peer: 10.255.0.1 VRF: default Source: 172.18.3.2 Destination: 172.18.2.2 - Connection down - Expected: Established, Actual: Idle", + "Peer: 10.255.0.1 VRF: default Source: 100.64.3.2 Destination: 100.64.2.2 - Connection down - Expected: Established, Actual: Idle", + "Peer: 10.255.0.2 VRF: default Source: 100.64.4.2 Destination: 100.64.1.2 - Connection not found.", + "Peer: 10.255.0.2 VRF: default Source: 172.18.4.2 Destination: 172.18.1.2 - Connection not found.", ], }, }, diff --git a/tests/units/anta_tests/test_stun.py b/tests/units/anta_tests/test_stun.py index 005ae35f8..23834831a 100644 --- a/tests/units/anta_tests/test_stun.py +++ b/tests/units/anta_tests/test_stun.py @@ -7,13 +7,13 @@ from typing import Any -from anta.tests.stun import VerifyStunClient, VerifyStunServer +from anta.tests.stun import VerifyStunClientTranslation, VerifyStunServer from tests.units.anta_tests import test DATA: list[dict[str, Any]] = [ { "name": "success", - "test": VerifyStunClient, + "test": VerifyStunClientTranslation, "eos_data": [ { "bindings": { @@ -60,7 +60,7 @@ }, { "name": "failure-incorrect-public-ip", - "test": VerifyStunClient, + "test": VerifyStunClientTranslation, "eos_data": [ { "bindings": { @@ -88,14 +88,14 @@ "expected": { "result": "failure", "messages": [ - "For STUN source `100.64.3.2:4500`:\nExpected `192.164.3.2` as the public ip, but found `192.64.3.2` instead.", - "For STUN source `172.18.3.2:4500`:\nExpected `192.118.3.2` as the public ip, but found `192.18.3.2` instead.", + "Client 100.64.3.2 Port: 4500 - Incorrect public-facing address - Expected: 192.164.3.2 Actual: 192.64.3.2", + "Client 172.18.3.2 Port: 4500 - Incorrect public-facing address - Expected: 192.118.3.2 Actual: 192.18.3.2", ], }, }, { "name": "failure-no-client", - "test": VerifyStunClient, + "test": VerifyStunClientTranslation, "eos_data": [ {"bindings": {}}, {"bindings": {}}, @@ -108,12 +108,12 @@ }, "expected": { "result": "failure", - "messages": ["STUN client transaction for source `100.64.3.2:4500` is not found.", "STUN client transaction for source `172.18.3.2:4500` is not found."], + "messages": ["Client 100.64.3.2 Port: 4500 - STUN client translation not found.", "Client 172.18.3.2 Port: 4500 - STUN client translation not found."], }, }, { "name": "failure-incorrect-public-port", - "test": VerifyStunClient, + "test": 
VerifyStunClientTranslation, "eos_data": [ {"bindings": {}}, { @@ -134,16 +134,15 @@ "expected": { "result": "failure", "messages": [ - "STUN client transaction for source `100.64.3.2:4500` is not found.", - "For STUN source `172.18.3.2:4500`:\n" - "Expected `192.118.3.2` as the public ip, but found `192.18.3.2` instead.\n" - "Expected `6006` as the public port, but found `4800` instead.", + "Client 100.64.3.2 Port: 4500 - STUN client translation not found.", + "Client 172.18.3.2 Port: 4500 - Incorrect public-facing address - Expected: 192.118.3.2 Actual: 192.18.3.2", + "Client 172.18.3.2 Port: 4500 - Incorrect public-facing port - Expected: 6006 Actual: 4800", ], }, }, { "name": "failure-all-type", - "test": VerifyStunClient, + "test": VerifyStunClientTranslation, "eos_data": [ {"bindings": {}}, { @@ -164,12 +163,9 @@ "expected": { "result": "failure", "messages": [ - "STUN client transaction for source `100.64.3.2:4500` is not found.", - "For STUN source `172.18.4.2:4800`:\n" - "Expected `172.18.4.2` as the source ip, but found `172.18.3.2` instead.\n" - "Expected `4800` as the source port, but found `4500` instead.\n" - "Expected `192.118.3.2` as the public ip, but found `192.18.3.2` instead.\n" - "Expected `6006` as the public port, but found `4800` instead.", + "Client 100.64.3.2 Port: 4500 - STUN client translation not found.", + "Client 172.18.4.2 Port: 4800 - Incorrect public-facing address - Expected: 192.118.3.2 Actual: 192.18.3.2", + "Client 172.18.4.2 Port: 4800 - Incorrect public-facing port - Expected: 6006 Actual: 4800", ], }, }, diff --git a/tests/units/cli/get/test_commands.py b/tests/units/cli/get/test_commands.py index 6a842bf04..8edfa73b5 100644 --- a/tests/units/cli/get/test_commands.py +++ b/tests/units/cli/get/test_commands.py @@ -383,7 +383,7 @@ def test_from_ansible_overwrite( None, False, True, - "There are 2 tests available in 'anta.tests.stun'", + "There are 3 tests available in 'anta.tests.stun'", ExitCode.OK, id="Get multiple test count", ), diff --git a/tests/units/cli/nrfu/test_commands.py b/tests/units/cli/nrfu/test_commands.py index 817ab7830..372c86a8f 100644 --- a/tests/units/cli/nrfu/test_commands.py +++ b/tests/units/cli/nrfu/test_commands.py @@ -17,7 +17,7 @@ if TYPE_CHECKING: from click.testing import CliRunner -DATA_DIR: Path = Path(__file__).parent.parent.parent.parent.resolve() / "data" +DATA_DIR: Path = Path(__file__).parents[3].resolve() / "data" def test_anta_nrfu_table_help(click_runner: CliRunner) -> None: @@ -79,7 +79,7 @@ def test_anta_nrfu_text(click_runner: CliRunner) -> None: def test_anta_nrfu_text_multiple_failures(click_runner: CliRunner) -> None: """Test anta nrfu text with multiple failures, catalog is given via env.""" result = click_runner.invoke(anta, ["nrfu", "text"], env={"ANTA_CATALOG": str(DATA_DIR / "test_catalog_double_failure.yml")}) - assert result.exit_code == ExitCode.OK + assert result.exit_code == ExitCode.TESTS_FAILED assert ( """spine1 :: VerifyInterfacesSpeed :: FAILURE Interface `Ethernet2` is not found. diff --git a/tests/units/input_models/test_interfaces.py b/tests/units/input_models/test_interfaces.py new file mode 100644 index 000000000..87d742d53 --- /dev/null +++ b/tests/units/input_models/test_interfaces.py @@ -0,0 +1,33 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. 
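The `Path(__file__).parents[3]` rewrite of DATA_DIR above is the indexed form of chaining `.parent`: `parents[n]` is the (n+1)-th ancestor. A quick check, using an illustrative layout resembling this repository:

from pathlib import Path

p = Path("/repo/tests/units/cli/nrfu/test_commands.py")
assert p.parents[3] == p.parent.parent.parent.parent == Path("/repo/tests")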
+"""Tests for anta.input_models.interfaces.py.""" + +# pylint: disable=C0302 +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest + +from anta.input_models.interfaces import InterfaceState + +if TYPE_CHECKING: + from anta.custom_types import Interface, PortChannelInterface + + +class TestInterfaceState: + """Test anta.input_models.interfaces.InterfaceState.""" + + # pylint: disable=too-few-public-methods + + @pytest.mark.parametrize( + ("name", "portchannel", "expected"), + [ + pytest.param("Ethernet1", "Port-Channel42", "Interface: Ethernet1 Port-Channel: Port-Channel42", id="with port-channel"), + pytest.param("Ethernet1", None, "Interface: Ethernet1", id="no port-channel"), + ], + ) + def test_valid__str__(self, name: Interface, portchannel: PortChannelInterface | None, expected: str) -> None: + """Test InterfaceState __str__.""" + assert str(InterfaceState(name=name, portchannel=portchannel)) == expected diff --git a/tests/units/reporter/test__init__.py b/tests/units/reporter/test__init__.py index 6f35ff681..aef442190 100644 --- a/tests/units/reporter/test__init__.py +++ b/tests/units/reporter/test__init__.py @@ -190,5 +190,5 @@ class TestReportJinja: def test_fail__init__file_not_found(self) -> None: """Test __init__ failure if file is not found.""" - with pytest.raises(FileNotFoundError, match="template file is not found: /gnu/terry/pratchett"): + with pytest.raises(FileNotFoundError, match=r"template file is not found: [/|\\]gnu[/|\\]terry[/|\\]pratchett"): ReportJinja(Path("/gnu/terry/pratchett")) diff --git a/tests/units/reporter/test_csv.py b/tests/units/reporter/test_csv.py index 1d59daef5..d88098e13 100644 --- a/tests/units/reporter/test_csv.py +++ b/tests/units/reporter/test_csv.py @@ -8,6 +8,7 @@ import csv import pathlib from typing import Any, Callable +from unittest.mock import patch import pytest @@ -49,8 +50,8 @@ def test_report_csv_generate( # Generate the CSV report ReportCsv.generate(result_manager, csv_filename) - # Read the generated CSV file - with pathlib.Path.open(csv_filename, encoding="utf-8") as csvfile: + # Read the generated CSV file - newline required on Windows.. 
+ with pathlib.Path.open(csv_filename, encoding="utf-8", newline="") as csvfile: reader = csv.reader(csvfile, delimiter=",") rows = list(reader) @@ -82,11 +83,9 @@ def test_report_csv_generate_os_error( max_test_entries = 10 result_manager = result_manager_factory(max_test_entries) - # Create a temporary CSV file path and make tmp_path read_only - tmp_path.chmod(0o400) csv_filename = tmp_path / "read_only.csv" - with pytest.raises(OSError, match="Permission denied"): + with patch("pathlib.Path.open", side_effect=OSError("Any OSError")), pytest.raises(OSError, match="Any OSError"): # Generate the CSV report ReportCsv.generate(result_manager, csv_filename) diff --git a/tests/units/result_manager/test__init__.py b/tests/units/result_manager/test__init__.py index fa39e56c4..49d4fc567 100644 --- a/tests/units/result_manager/test__init__.py +++ b/tests/units/result_manager/test__init__.py @@ -6,6 +6,7 @@ from __future__ import annotations import json +import logging import re from contextlib import AbstractContextManager, nullcontext from typing import TYPE_CHECKING, Callable @@ -377,3 +378,103 @@ def test_get_devices(self, test_result_factory: Callable[[], TestResult], list_r assert len(result_manager.get_devices()) == 2 assert all(t in result_manager.get_devices() for t in ["Device1", "Device2"]) + + def test_stats_computation_methods(self, test_result_factory: Callable[[], TestResult], caplog: pytest.LogCaptureFixture) -> None: + """Test ResultManager internal stats computation methods.""" + result_manager = ResultManager() + + # Initially stats should be unsynced + assert result_manager._stats_in_sync is False + + # Test _reset_stats + result_manager._reset_stats() + assert result_manager._stats_in_sync is False + assert len(result_manager._device_stats) == 0 + assert len(result_manager._category_stats) == 0 + assert len(result_manager._test_stats) == 0 + + # Add some test results + test1 = test_result_factory() + test1.name = "device1" + test1.result = AntaTestStatus.SUCCESS + test1.categories = ["system"] + test1.test = "test1" + + test2 = test_result_factory() + test2.name = "device2" + test2.result = AntaTestStatus.FAILURE + test2.categories = ["interfaces"] + test2.test = "test2" + + result_manager.add(test1) + result_manager.add(test2) + + # Stats should still be unsynced after adding results + assert result_manager._stats_in_sync is False + + # Test _compute_stats directly + with caplog.at_level(logging.INFO): + result_manager._compute_stats() + assert "Computing statistics for all results" in caplog.text + assert result_manager._stats_in_sync is True + + # Verify stats content + assert len(result_manager._device_stats) == 2 + assert len(result_manager._category_stats) == 2 + assert len(result_manager._test_stats) == 2 + assert result_manager._device_stats["device1"].tests_success_count == 1 + assert result_manager._device_stats["device2"].tests_failure_count == 1 + assert result_manager._category_stats["system"].tests_success_count == 1 + assert result_manager._category_stats["interfaces"].tests_failure_count == 1 + assert result_manager._test_stats["test1"].devices_success_count == 1 + assert result_manager._test_stats["test2"].devices_failure_count == 1 + + def test_stats_property_computation(self, test_result_factory: Callable[[], TestResult], caplog: pytest.LogCaptureFixture) -> None: + """Test that stats are computed only once when accessed via properties.""" + result_manager = ResultManager() + + # Add some test results + test1 = test_result_factory() + test1.name = "device1" + 
test1.result = AntaTestStatus.SUCCESS + test1.categories = ["system"] + result_manager.add(test1) + + test2 = test_result_factory() + test2.name = "device2" + test2.result = AntaTestStatus.FAILURE + test2.categories = ["interfaces"] + result_manager.add(test2) + + # Stats should be unsynced after adding results + assert result_manager._stats_in_sync is False + assert "Computing statistics" not in caplog.text + + # Access device_stats property - should trigger computation + with caplog.at_level(logging.INFO): + _ = result_manager.device_stats + assert "Computing statistics for all results" in caplog.text + assert result_manager._stats_in_sync is True + + # Clear the log + caplog.clear() + + # Access other stats properties - should not trigger computation again + with caplog.at_level(logging.INFO): + _ = result_manager.category_stats + _ = result_manager.test_stats + _ = result_manager.sorted_category_stats + assert "Computing statistics" not in caplog.text + + # Add another result - should mark stats as unsynced + test3 = test_result_factory() + test3.name = "device3" + test3.result = "error" + result_manager.add(test3) + assert result_manager._stats_in_sync is False + + # Access stats again - should trigger recomputation + with caplog.at_level(logging.INFO): + _ = result_manager.device_stats + assert "Computing statistics for all results" in caplog.text + assert result_manager._stats_in_sync is True diff --git a/tests/units/test_decorators.py b/tests/units/test_decorators.py new file mode 100644 index 000000000..c267df1d1 --- /dev/null +++ b/tests/units/test_decorators.py @@ -0,0 +1,77 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""test anta.decorators.py.""" + +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING, ClassVar + +import pytest + +from anta.decorators import deprecated_test_class, skip_on_platforms +from anta.models import AntaCommand, AntaTemplate, AntaTest + +if TYPE_CHECKING: + from anta.device import AntaDevice + + +class ExampleTest(AntaTest): + """ANTA test that always succeed.""" + + categories: ClassVar[list[str]] = [] + commands: ClassVar[list[AntaCommand | AntaTemplate]] = [] + + @AntaTest.anta_test + def test(self) -> None: + """Test function.""" + self.result.is_success() + + +@pytest.mark.parametrize( + "new_tests", + [ + pytest.param(None, id="No new_tests"), + pytest.param(["NewExampleTest"], id="one new_tests"), + pytest.param(["NewExampleTest1", "NewExampleTest2"], id="multiple new_tests"), + ], +) +def test_deprecated_test_class(caplog: pytest.LogCaptureFixture, device: AntaDevice, new_tests: list[str] | None) -> None: + """Test deprecated_test_class decorator.""" + caplog.set_level(logging.INFO) + + decorated_test_class = deprecated_test_class(new_tests=new_tests)(ExampleTest) + + # Initialize the decorated test + decorated_test_class(device) + + if new_tests is None: + assert "ExampleTest test is deprecated." in caplog.messages + else: + assert f"ExampleTest test is deprecated. Consider using the following new tests: {', '.join(new_tests)}." 
in caplog.messages + + +@pytest.mark.parametrize( + ("platforms", "device_platform", "expected_result"), + [ + pytest.param([], "cEOS-lab", "success", id="empty platforms"), + pytest.param(["cEOS-lab"], "cEOS-lab", "skipped", id="skip on one platform - match"), + pytest.param(["cEOS-lab"], "vEOS", "success", id="skip on one platform - no match"), + pytest.param(["cEOS-lab", "vEOS"], "cEOS-lab", "skipped", id="skip on multiple platforms - match"), + ], +) +async def test_skip_on_platforms(device: AntaDevice, platforms: list[str], device_platform: str, expected_result: str) -> None: + """Test skip_on_platforms decorator. + + Leverage the ExampleTest defined at the top of the module. + """ + # Apply the decorator - ignoring mypy warning - this is for testing + ExampleTest.test = skip_on_platforms(platforms)(ExampleTest.test) # type: ignore[method-assign] + + device.hw_model = device_platform + + test_instance = ExampleTest(device) + await test_instance.test() + + assert test_instance.result.result == expected_result diff --git a/tests/units/test_device.py b/tests/units/test_device.py index faf614481..17669df2a 100644 --- a/tests/units/test_device.py +++ b/tests/units/test_device.py @@ -6,13 +6,15 @@ from __future__ import annotations import asyncio +from contextlib import AbstractContextManager +from contextlib import nullcontext as does_not_raise from pathlib import Path from typing import TYPE_CHECKING, Any from unittest.mock import patch import pytest from asyncssh import SSHClientConnection, SSHClientConnectionOptions -from httpx import ConnectError, HTTPError +from httpx import ConnectError, HTTPError, TimeoutException from rich import print as rprint from anta.device import AntaDevice, AsyncEOSDevice @@ -24,13 +26,37 @@ from _pytest.mark.structures import ParameterSet INIT_PARAMS: list[ParameterSet] = [ - pytest.param({"host": "42.42.42.42", "username": "anta", "password": "anta"}, {"name": "42.42.42.42"}, id="no name, no port"), - pytest.param({"host": "42.42.42.42", "username": "anta", "password": "anta", "port": 666}, {"name": "42.42.42.42:666"}, id="no name, port"), + pytest.param({"host": "42.42.42.42", "username": "anta", "password": "anta"}, {"name": "42.42.42.42"}, does_not_raise(), id="no name, no port"), + pytest.param({"host": "42.42.42.42", "username": "anta", "password": "anta", "port": 666}, {"name": "42.42.42.42:666"}, does_not_raise(), id="no name, port"), pytest.param( - {"host": "42.42.42.42", "username": "anta", "password": "anta", "name": "test.anta.ninja", "disable_cache": True}, {"name": "test.anta.ninja"}, id="name" + {"host": "42.42.42.42", "username": "anta", "password": "anta", "name": "test.anta.ninja", "disable_cache": True}, + {"name": "test.anta.ninja"}, + does_not_raise(), + id="name", ), pytest.param( - {"host": "42.42.42.42", "username": "anta", "password": "anta", "name": "test.anta.ninja", "insecure": True}, {"name": "test.anta.ninja"}, id="insecure" + {"host": "42.42.42.42", "username": "anta", "password": "anta", "name": "test.anta.ninja", "insecure": True}, + {"name": "test.anta.ninja"}, + does_not_raise(), + id="insecure", + ), + pytest.param( + {"host": None, "username": "anta", "password": "anta", "name": "test.anta.ninja"}, + None, + pytest.raises(ValueError, match="'host' is required to create an AsyncEOSDevice"), + id="host is None", + ), + pytest.param( + {"host": "42.42.42.42", "username": None, "password": "anta", "name": "test.anta.ninja"}, + None, + pytest.raises(ValueError, match="'username' is required to instantiate device 
'test.anta.ninja'"), + id="username is None", + ), + pytest.param( + {"host": "42.42.42.42", "username": "anta", "password": None, "name": "test.anta.ninja"}, + None, + pytest.raises(ValueError, match="'password' is required to instantiate device 'test.anta.ninja'"), + id="password is None", ), ] EQUALITY_PARAMS: list[ParameterSet] = [ @@ -48,7 +74,10 @@ id="not-equal-port", ), pytest.param( - {"host": "42.42.42.41", "username": "anta", "password": "anta"}, {"host": "42.42.42.42", "username": "anta", "password": "anta"}, False, id="not-equal-host" + {"host": "42.42.42.41", "username": "anta", "password": "anta"}, + {"host": "42.42.42.42", "username": "anta", "password": "anta"}, + False, + id="not-equal-host", ), ] ASYNCEAPI_COLLECT_PARAMS: list[ParameterSet] = [ @@ -287,7 +316,58 @@ }, }, {"output": None, "errors": ["Authorization denied for command 'show version'"]}, - id="asynceapi.EapiCommandError", + id="asynceapi.EapiCommandError - Authorization denied", + ), + pytest.param( + {}, + { + "command": "show version", + "patch_kwargs": { + "side_effect": EapiCommandError( + passed=[], + failed="show version", + errors=["not supported on this hardware platform"], + errmsg="Invalid command", + not_exec=[], + ) + }, + }, + {"output": None, "errors": ["not supported on this hardware platform"]}, + id="asynceapi.EapiCommandError - not supported", + ), + pytest.param( + {}, + { + "command": "show version", + "patch_kwargs": { + "side_effect": EapiCommandError( + passed=[], + failed="show version", + errors=["BGP inactive"], + errmsg="Invalid command", + not_exec=[], + ) + }, + }, + {"output": None, "errors": ["BGP inactive"]}, + id="asynceapi.EapiCommandError - known EOS error", + ), + pytest.param( + {}, + { + "command": "show version", + "patch_kwargs": { + "side_effect": EapiCommandError( + passed=[], + failed="show version", + errors=["Invalid input (privileged mode required)"], + errmsg="Invalid command", + not_exec=[], + ) + }, + }, + {"output": None, "errors": ["Invalid input (privileged mode required)"]}, + id="asynceapi.EapiCommandError - requires privileges", ), pytest.param( {}, @@ -301,6 +381,12 @@ {"output": None, "errors": ["ConnectError: Cannot open port"]}, id="httpx.ConnectError", ), + pytest.param( + {}, + {"command": "show version", "patch_kwargs": {"side_effect": TimeoutException("Test")}}, + {"output": None, "errors": ["TimeoutException: Test"]}, + id="httpx.TimeoutException", + ), ] ASYNCEAPI_COPY_PARAMS: list[ParameterSet] = [ pytest.param({}, {"sources": [Path("/mnt/flash"), Path("/var/log/agents")], "destination": Path(), "direction": "from"}, id="from"), @@ -531,22 +617,24 @@ def test_cache_statistics(self, device: AntaDevice, expected: dict[str, Any] | N class TestAsyncEOSDevice: """Test for anta.device.AsyncEOSDevice.""" - @pytest.mark.parametrize(("device", "expected"), INIT_PARAMS) - def test__init__(self, device: dict[str, Any], expected: dict[str, Any]) -> None: + @pytest.mark.parametrize(("device", "expected", "expected_raise"), INIT_PARAMS) + def test__init__(self, device: dict[str, Any], expected: dict[str, Any] | None, expected_raise: AbstractContextManager[Exception]) -> None: """Test the AsyncEOSDevice constructor.""" - dev = AsyncEOSDevice(**device) + with expected_raise: + dev = AsyncEOSDevice(**device) - assert dev.name == expected["name"] - if device.get("disable_cache") is True: - assert dev.cache is None - assert dev.cache_locks is None - else: # False or None - assert dev.cache is not None - assert dev.cache_locks is not None - hash(dev) + assert 
expected is not None
+            assert dev.name == expected["name"]
+            if device.get("disable_cache") is True:
+                assert dev.cache is None
+                assert dev.cache_locks is None
+            else:  # False or None
+                assert dev.cache is not None
+                assert dev.cache_locks is not None
+            hash(dev)
 
-        with patch("anta.device.__DEBUG__", new=True):
-            rprint(dev)
+
+            with patch("anta.device.__DEBUG__", new=True):
+                rprint(dev)
 
     @pytest.mark.parametrize(("device1", "device2", "expected"), EQUALITY_PARAMS)
     def test__eq(self, device1: dict[str, Any], device2: dict[str, Any], expected: bool) -> None:
diff --git a/tests/units/test_models.py b/tests/units/test_models.py
index 8b7c50f10..d12d85941 100644
--- a/tests/units/test_models.py
+++ b/tests/units/test_models.py
@@ -64,6 +64,23 @@ def test(self) -> None:
         self.result.is_success()
 
 
+class FakeTestWithKnownEOSError(AntaTest):
+    """ANTA test triggering a known EOS error, which should set the test status to failure."""
+
+    categories: ClassVar[list[str]] = []
+    commands: ClassVar[list[AntaCommand | AntaTemplate]] = [
+        AntaCommand(
+            command="show bgp evpn route-type mac-ip aa:c1:ab:de:50:ad vni 10010",
+            errors=["BGP inactive"],
+        )
+    ]
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        """Test function."""
+        self.result.is_success()
+
+
 class FakeTestWithInput(AntaTest):
     """ANTA test with inputs that always succeed."""
 
@@ -484,6 +501,18 @@ class FakeTestWithMissingTest(AntaTest):
             },
         },
     },
+    {
+        "name": "known EOS error command",
+        "test": FakeTestWithKnownEOSError,
+        "inputs": None,
+        "expected": {
+            "__init__": {"result": "unset"},
+            "test": {
+                "result": "failure",
+                "messages": ["BGP inactive"],
+            },
+        },
+    },
 ]
 
 BLACKLIST_COMMANDS_PARAMS = ["reload", "reload --force", "write", "wr mem"]
@@ -613,7 +642,7 @@ def test_result_overwrite(self, device: AntaDevice) -> None:
         assert test.result.custom_field == "a custom field"
 
 
-class TestAntaComamnd:
+class TestAntaCommand:
     """Test for anta.models.AntaCommand."""
 
     # ruff: noqa: B018
@@ -672,6 +701,32 @@ def test_requires_privileges(self) -> None:
         )
         assert command.requires_privileges is False
         command = AntaCommand(command="show aaa methods accounting")
-        with pytest.raises(RuntimeError) as exec_info:
+        with pytest.raises(
+            RuntimeError, match="Command 'show aaa methods accounting' has not been collected and has not returned an error. Call AntaDevice.collect()."
+        ):
             command.requires_privileges
-        assert exec_info.value.args[0] == "Command 'show aaa methods accounting' has not been collected and has not returned an error. Call AntaDevice.collect()."
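The parametrized cases added below pin down how a command's errors are matched against the known-EOS-error regular expressions. A minimal standalone sketch of that matching logic, assuming re.match semantics against each collected error (the is_known_eos_error helper is hypothetical; ANTA itself exposes this check as the returned_known_eos_error property on AntaCommand):

import re

from anta.constants import KNOWN_EOS_ERRORS


def is_known_eos_error(errors: list[str]) -> bool:
    """Hypothetical helper: return True if any error message matches a known EOS error pattern."""
    # Each pattern is anchored at the start of the message, as with re.match
    return any(re.match(pattern, error) for error in errors for pattern in KNOWN_EOS_ERRORS)


# Matches a known EOS error pattern, so the test should be set to 'failure'
assert is_known_eos_error(["BGP inactive"])
# Does not match any known pattern, so it is treated as a regular command error
assert not is_known_eos_error(["Authorization denied for command 'show version'"])
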
+
+    @pytest.mark.parametrize(
+        ("command_str", "error", "is_known"),
+        [
+            ("show ip interface Ethernet1", "Ethernet1 does not support IP", True),
+            ("ping vrf MGMT 1.1.1.1 source Management0 size 100 df-bit repeat 2", "VRF 'MGMT' is not active", True),
+            ("ping vrf MGMT 1.1.1.1 source Management1 size 100 df-bit repeat 2", "No source interface Management1", True),
+            ("show bgp evpn route-type mac-ip aa:c1:ab:de:50:ad vni 10010", "BGP inactive", True),
+            ("show isis BLAH neighbors", "IS-IS (BLAH) is disabled because: IS-IS Network Entity Title (NET) configuration is not present", True),
+            ("show ip interface Ethernet1", None, False),
+        ],
+    )
+    def test_returned_known_eos_error(self, command_str: str, error: str | None, is_known: bool) -> None:
+        """Test the returned_known_eos_error property."""
+        # Add a fake output when no error is present to mimic that the command has been collected
+        command = AntaCommand(command=command_str, errors=[error] if error else [], output=None if error else "{}")
+        assert command.returned_known_eos_error is is_known
+
+    def test_returned_known_eos_error_failure(self) -> None:
+        """Test the returned_known_eos_error property when the command has not been collected."""
+        command = AntaCommand(command="show ip interface Ethernet1")
+        with pytest.raises(
+            RuntimeError, match="Command 'show ip interface Ethernet1' has not been collected and has not returned an error. Call AntaDevice.collect()."
+        ):
+            command.returned_known_eos_error
diff --git a/tests/units/test_runner.py b/tests/units/test_runner.py
index 8d19a4d1a..23f410216 100644
--- a/tests/units/test_runner.py
+++ b/tests/units/test_runner.py
@@ -6,7 +6,7 @@
 from __future__ import annotations
 
 import logging
-import resource
+import os
 import sys
 from pathlib import Path
 from unittest.mock import patch
@@ -16,10 +16,16 @@
 from anta.catalog import AntaCatalog
 from anta.inventory import AntaInventory
 from anta.result_manager import ResultManager
-from anta.runner import adjust_rlimit_nofile, main, prepare_tests
+from anta.runner import main, prepare_tests
 
 from .test_models import FakeTest, FakeTestWithMissingTest
 
+if os.name == "posix":
+    # The function is not defined on non-POSIX systems
+    import resource
+
+    from anta.runner import adjust_rlimit_nofile
+
 DATA_DIR: Path = Path(__file__).parent.parent.resolve() / "data"
 
 FAKE_CATALOG: AntaCatalog = AntaCatalog.from_list([(FakeTest, None)])
@@ -65,8 +71,10 @@ async def test_no_selected_device(caplog: pytest.LogCaptureFixture, inventory: A
     assert msg in caplog.messages
 
 
+@pytest.mark.skipif(os.name != "posix", reason="Cannot run this test on Windows")
 def test_adjust_rlimit_nofile_valid_env(caplog: pytest.LogCaptureFixture) -> None:
     """Test adjust_rlimit_nofile with valid environment variables."""
+    # pylint: disable=E0606
     with (
         caplog.at_level(logging.DEBUG),
         patch.dict("os.environ", {"ANTA_NOFILE": "20480"}),
@@ -96,6 +104,7 @@ def side_effect_setrlimit(resource_id: int, limits: tuple[int, int]) -> None:
     setrlimit_mock.assert_called_once_with(resource.RLIMIT_NOFILE, (20480, 1048576))
 
 
+@pytest.mark.skipif(os.name != "posix", reason="Cannot run this test on Windows")
 def test_adjust_rlimit_nofile_invalid_env(caplog: pytest.LogCaptureFixture) -> None:
     """Test adjust_rlimit_nofile with invalid environment variables."""
     with (
@@ -129,6 +138,31 @@ def side_effect_setrlimit(resource_id: int, limits: tuple[int, int]) -> None:
     setrlimit_mock.assert_called_once_with(resource.RLIMIT_NOFILE, (16384, 1048576))
 
 
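The conditional import of resource above keeps this test module importable on Windows, where that module does not exist. The same guard in isolation, as a self-contained sketch (the nofile_soft_limit helper and its fallback value are assumptions for illustration, not ANTA code):

import os

if os.name == "posix":
    # The resource module only exists on POSIX systems
    import resource


def nofile_soft_limit() -> int:
    """Hypothetical helper: soft RLIMIT_NOFILE on POSIX, a fixed default elsewhere."""
    if os.name != "posix":
        return 16384  # assumed default; Windows has no RLIMIT_NOFILE to query
    soft, _hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    return soft
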
+@pytest.mark.skipif(os.name == "posix", reason="Run this test on Windows only")
+async def test_check_runner_log_for_windows(caplog: pytest.LogCaptureFixture, inventory: AntaInventory) -> None:
+    """Test log output for Windows host regarding rlimit."""
+    caplog.set_level(logging.INFO)
+    manager = ResultManager()
+    # Using dry-run to shorten the test
+    await main(manager, inventory, FAKE_CATALOG, dry_run=True)
+    assert "Running on a non-POSIX system, cannot adjust the maximum number of file descriptors." in caplog.records[-3].message
+
+
+# We could instead merge multiple coverage reports, but that requires more work than this approach.
+@pytest.mark.skipif(os.name != "posix", reason="Fake non-posix for coverage")
+async def test_check_runner_log_for_windows_fake(caplog: pytest.LogCaptureFixture, inventory: AntaInventory) -> None:
+    """Test log output regarding rlimit when faking a non-POSIX system."""
+    with patch("os.name", new="win32"):
+        del sys.modules["anta.runner"]
+        from anta.runner import main  # pylint: disable=W0621
+
+        caplog.set_level(logging.INFO)
+        manager = ResultManager()
+        # Using dry-run to shorten the test
+        await main(manager, inventory, FAKE_CATALOG, dry_run=True)
+        assert "Running on a non-POSIX system, cannot adjust the maximum number of file descriptors." in caplog.records[-3].message
+
+
 @pytest.mark.parametrize(
     ("inventory", "tags", "tests", "devices_count", "tests_count"),
     [