diff --git a/CHANGELOG.md b/CHANGELOG.md
index e57d3b7..9c6c677 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,10 @@
 # wiremind-kubernetes
 
+## v7.0.1 (2022-12-27)
+### Fix
+- setup.cfg correct mypy and flake8 config
+- mypy errors
+
 ## v7.0.0 (2022-09-27)
 ### BREAKING CHANGE
 - stop_pods: neutralize the HPA as `HPAScaleToZero` may be in use (HPA may scale up the Deployment even if replicas=0), a more straightforward solution will
diff --git a/VERSION b/VERSION
index 4122521..73a86b1 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-7.0.0
\ No newline at end of file
+7.0.1
\ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
index f9bf3fc..051fecd 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,15 +1,35 @@
+[options]
+python_requires = >= 3.7
+
 [flake8]
 max-line-length = 120
-# W503: line break before binary operator. Black causes this error but seems to be right
-# E231 missing whitespace after ','. Black causes this error but seems to be right (see pep8)
-ignore = W503, E231
+# Enable flake8-mutable
 enable-extensions = M511
+# W503 line break before binary operator. Black causes this error
+# E203 whitespace before ':'
+# E231 missing whitespace after ','. Black causes this error but seems to be right (see pep8)
+# E501 line too long
+# Q000 Double quotes found but single quotes preferred
+ignore = W503, E203, E231, E501, Q000
+jobs = 4
 
 [mypy]
 python_version = 3.7
 ignore_missing_imports = True
+check_untyped_defs = True
+disallow_untyped_defs = True
+disallow_incomplete_defs = True
+warn_redundant_casts = True
+warn_unused_ignores = True
+warn_unused_configs = True
 no_implicit_optional = True
-strict_optional = True
+show_error_codes = True
+files = src
 
 [tool:pytest]
-log_cli = True
+log_level=INFO
+# Deterministic ordering for tests; useful for pytest-xdist.
+env =
+    PYTHONHASHSEED=0
+filterwarnings =
+    ignore::pytest.PytestUnknownMarkWarning
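The [mypy] block above is what drives most of this diff: with disallow_untyped_defs and disallow_incomplete_defs enabled, mypy rejects any function definition whose parameters or return type lack annotations, which is why the files below mostly just gain annotations. An illustrative sketch of what each flag catches (not code from this repository):

    # Flagged by disallow_untyped_defs: no annotations at all.
    def scale(name, replicas):
        ...

    # Flagged by disallow_incomplete_defs: parameters annotated, return type missing.
    def scale_partial(name: str, replicas: int):
        ...

    # Accepted: fully annotated, including the explicit None return.
    def scale_full(name: str, replicas: int) -> None:
        ...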
""" + + pass diff --git a/src/wiremind_kubernetes/kube_config.py b/src/wiremind_kubernetes/kube_config.py index a967ee7..306b7fc 100644 --- a/src/wiremind_kubernetes/kube_config.py +++ b/src/wiremind_kubernetes/kube_config.py @@ -1,23 +1,25 @@ import logging import os +from typing import Optional import kubernetes - logger = logging.getLogger(__name__) -def _load_kubeconfig(config_file=None, context=None): +def _load_kubeconfig(config_file: Optional[str] = None, context: Optional[str] = None) -> None: kubernetes.config.load_kube_config(config_file=config_file, context=context) logger.debug("Kubernetes configuration successfully set.") -def _load_incluster_config(): +def _load_incluster_config() -> None: kubernetes.config.load_incluster_config() logger.debug("Kubernetes configuration successfully set.") -def load_kubernetes_config(use_kubeconfig=None, config_file=None, context=None): +def load_kubernetes_config( + use_kubeconfig: Optional[bool] = None, config_file: Optional[str] = None, context: Optional[str] = None +) -> None: """ Load kubernetes configuration in memory, either from incluster method or from kubeconfig. :param use_kubeconfig: diff --git a/src/wiremind_kubernetes/kubernetes_client_additional_arguments.py b/src/wiremind_kubernetes/kubernetes_client_additional_arguments.py index 53734c3..847fd1f 100644 --- a/src/wiremind_kubernetes/kubernetes_client_additional_arguments.py +++ b/src/wiremind_kubernetes/kubernetes_client_additional_arguments.py @@ -10,7 +10,7 @@ class ClientWithArguments: Currently add dry_run support for write functions and pretty to all. """ - def __init__(self, client, dry_run: bool = False): + def __init__(self, client: Any, dry_run: bool = False): self.client = client() # like kubernetes.client.CoreV1Api self.read_additional_arguments: Dict[str, Any] = dict(pretty=True) # Every request, either read or write, will have those arguments added @@ -19,7 +19,7 @@ def __init__(self, client, dry_run: bool = False): # Dry run, in kube API, is not true or false, but either dry_run: All or not defined. 
self.additional_arguments["dry_run"] = "All" - def __getattr__(self, attr): + def __getattr__(self, attr: str) -> Any: original_attr = getattr(self.client, attr) if not callable(original_attr): @@ -31,7 +31,7 @@ def __getattr__(self, attr): is_write_function = True break - def fn(*args, **kwargs): + def fn(*args: Any, **kwargs: Any) -> Any: if is_write_function: kwargs.update(self.additional_arguments) else: # A read function @@ -42,30 +42,30 @@ def fn(*args, **kwargs): class CoreV1ApiWithArguments(ClientWithArguments): - def __init__(self, *args, dry_run: bool = False, **kwargs): + def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None: super().__init__(client=kubernetes.client.CoreV1Api, dry_run=dry_run) class AppV1ApiWithArguments(ClientWithArguments): - def __init__(self, *args, dry_run: bool = False, **kwargs): + def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None: super().__init__(client=kubernetes.client.AppsV1Api, dry_run=dry_run) class BatchV1ApiWithArguments(ClientWithArguments): - def __init__(self, *args, dry_run: bool = False, **kwargs): + def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None: super().__init__(client=kubernetes.client.BatchV1Api, dry_run=dry_run) class AutoscalingV1ApiWithArguments(ClientWithArguments): - def __init__(self, *args, dry_run: bool = False, **kwargs): + def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None: super().__init__(client=kubernetes.client.AutoscalingV1Api, dry_run=dry_run) class CustomObjectsApiWithArguments(ClientWithArguments): - def __init__(self, *args, dry_run: bool = False, **kwargs): + def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None: super().__init__(client=kubernetes.client.CustomObjectsApi, dry_run=dry_run) class RbacAuthorizationV1ApiWithArguments(ClientWithArguments): - def __init__(self, *args, dry_run: bool = False, **kwargs): + def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None: super().__init__(client=kubernetes.client.RbacAuthorizationV1Api, dry_run=dry_run) diff --git a/src/wiremind_kubernetes/kubernetes_helper.py b/src/wiremind_kubernetes/kubernetes_helper.py index c801054..66f9736 100644 --- a/src/wiremind_kubernetes/kubernetes_helper.py +++ b/src/wiremind_kubernetes/kubernetes_helper.py @@ -1,7 +1,7 @@ import logging import pprint import time -from typing import Any, Dict, List, Optional, Union, Generator +from typing import Any, Dict, Generator, List, Optional, Union import kubernetes @@ -105,15 +105,15 @@ def __init__( else: self.namespace = _get_namespace_from_kube() - def get_deployment_scale(self, deployment_name: str): + def get_deployment_scale(self, deployment_name: str) -> kubernetes.client.V1Scale: logger.debug("Getting deployment scale for %s", deployment_name) return self.client_appsv1_api.read_namespaced_deployment_scale(deployment_name, self.namespace) - def get_statefulset_scale(self, statefulset_name: str): + def get_statefulset_scale(self, statefulset_name: str) -> kubernetes.client.V1Scale: logger.debug("Getting statefulset scale for %s", statefulset_name) return self.client_appsv1_api.read_namespaced_stateful_set_scale(statefulset_name, self.namespace) - def scale_down_statefulset(self, statefulset_name: str): + def scale_down_statefulset(self, statefulset_name: str) -> None: body = self.get_statefulset_scale(statefulset_name) logger.debug("Deleting all Pods for %s", statefulset_name) body.spec.replicas = 0 @@ -121,14 +121,14 @@ def 
diff --git a/src/wiremind_kubernetes/kubernetes_client_additional_arguments.py b/src/wiremind_kubernetes/kubernetes_client_additional_arguments.py
index 53734c3..847fd1f 100644
--- a/src/wiremind_kubernetes/kubernetes_client_additional_arguments.py
+++ b/src/wiremind_kubernetes/kubernetes_client_additional_arguments.py
@@ -10,7 +10,7 @@ class ClientWithArguments:
     Currently add dry_run support for write functions and pretty to all.
     """
 
-    def __init__(self, client, dry_run: bool = False):
+    def __init__(self, client: Any, dry_run: bool = False):
         self.client = client()  # like kubernetes.client.CoreV1Api
         self.read_additional_arguments: Dict[str, Any] = dict(pretty=True)
         # Every request, either read or write, will have those arguments added
@@ -19,7 +19,7 @@ def __init__(self, client, dry_run: bool = False):
            # Dry run, in kube API, is not true or false, but either dry_run: All or not defined.
            self.additional_arguments["dry_run"] = "All"
 
-    def __getattr__(self, attr):
+    def __getattr__(self, attr: str) -> Any:
         original_attr = getattr(self.client, attr)
 
         if not callable(original_attr):
@@ -31,7 +31,7 @@ def __getattr__(self, attr):
                 is_write_function = True
                 break
 
-        def fn(*args, **kwargs):
+        def fn(*args: Any, **kwargs: Any) -> Any:
             if is_write_function:
                 kwargs.update(self.additional_arguments)
             else:  # A read function
@@ -42,30 +42,30 @@ class CoreV1ApiWithArguments(ClientWithArguments):
-    def __init__(self, *args, dry_run: bool = False, **kwargs):
+    def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None:
         super().__init__(client=kubernetes.client.CoreV1Api, dry_run=dry_run)
 
 
 class AppV1ApiWithArguments(ClientWithArguments):
-    def __init__(self, *args, dry_run: bool = False, **kwargs):
+    def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None:
         super().__init__(client=kubernetes.client.AppsV1Api, dry_run=dry_run)
 
 
 class BatchV1ApiWithArguments(ClientWithArguments):
-    def __init__(self, *args, dry_run: bool = False, **kwargs):
+    def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None:
         super().__init__(client=kubernetes.client.BatchV1Api, dry_run=dry_run)
 
 
 class AutoscalingV1ApiWithArguments(ClientWithArguments):
-    def __init__(self, *args, dry_run: bool = False, **kwargs):
+    def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None:
         super().__init__(client=kubernetes.client.AutoscalingV1Api, dry_run=dry_run)
 
 
 class CustomObjectsApiWithArguments(ClientWithArguments):
-    def __init__(self, *args, dry_run: bool = False, **kwargs):
+    def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None:
         super().__init__(client=kubernetes.client.CustomObjectsApi, dry_run=dry_run)
 
 
 class RbacAuthorizationV1ApiWithArguments(ClientWithArguments):
-    def __init__(self, *args, dry_run: bool = False, **kwargs):
+    def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None:
         super().__init__(client=kubernetes.client.RbacAuthorizationV1Api, dry_run=dry_run)
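The __getattr__/fn pair above is the core of this proxy: attribute lookups are forwarded to the wrapped kubernetes client, and callables come back wrapped so that read calls gain pretty=True while calls detected as writes (by the method-name prefix list in the unchanged part of this file) also gain dry_run="All" when requested. A sketch of the effect, assuming a reachable cluster and that delete_namespaced_pod matches that write-prefix list:

    from wiremind_kubernetes.kubernetes_client_additional_arguments import CoreV1ApiWithArguments

    client = CoreV1ApiWithArguments(dry_run=True)

    # Read call: resolved through __getattr__, gains pretty=True only.
    pods = client.list_namespaced_pod("default")

    # Write call: additionally gains dry_run="All", so the API server
    # validates the request without persisting anything.
    client.delete_namespaced_pod("some-pod", "default")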
diff --git a/src/wiremind_kubernetes/kubernetes_helper.py b/src/wiremind_kubernetes/kubernetes_helper.py
index c801054..66f9736 100644
--- a/src/wiremind_kubernetes/kubernetes_helper.py
+++ b/src/wiremind_kubernetes/kubernetes_helper.py
@@ -1,7 +1,7 @@
 import logging
 import pprint
 import time
-from typing import Any, Dict, List, Optional, Union, Generator
+from typing import Any, Dict, Generator, List, Optional, Union
 
 import kubernetes
 
@@ -105,15 +105,15 @@ def __init__(
         else:
             self.namespace = _get_namespace_from_kube()
 
-    def get_deployment_scale(self, deployment_name: str):
+    def get_deployment_scale(self, deployment_name: str) -> kubernetes.client.V1Scale:
         logger.debug("Getting deployment scale for %s", deployment_name)
         return self.client_appsv1_api.read_namespaced_deployment_scale(deployment_name, self.namespace)
 
-    def get_statefulset_scale(self, statefulset_name: str):
+    def get_statefulset_scale(self, statefulset_name: str) -> kubernetes.client.V1Scale:
         logger.debug("Getting statefulset scale for %s", statefulset_name)
         return self.client_appsv1_api.read_namespaced_stateful_set_scale(statefulset_name, self.namespace)
 
-    def scale_down_statefulset(self, statefulset_name: str):
+    def scale_down_statefulset(self, statefulset_name: str) -> None:
         body = self.get_statefulset_scale(statefulset_name)
         logger.debug("Deleting all Pods for %s", statefulset_name)
         body.spec.replicas = 0
@@ -121,14 +121,14 @@ def scale_down_statefulset(self, statefulset_name: str):
         logger.debug("Done deleting.")
 
     @retry_kubernetes_request_no_ignore
-    def scale_down_deployment(self, deployment_name: str):
+    def scale_down_deployment(self, deployment_name: str) -> None:
         body = self.get_deployment_scale(deployment_name)
         logger.debug("Deleting all Pods for %s", deployment_name)
         body.spec.replicas = 0
         self.client_appsv1_api.patch_namespaced_deployment_scale(deployment_name, self.namespace, body)
         logger.debug("Done deleting.")
 
-    def scale_up_statefulset(self, statefulset_name: str, pod_amount: int = 1):
+    def scale_up_statefulset(self, statefulset_name: str, pod_amount: int = 1) -> None:
         body = self.get_statefulset_scale(statefulset_name)
         logger.debug("Recreating backend Pods for %s", statefulset_name)
         body.spec.replicas = pod_amount
@@ -136,7 +136,7 @@ def scale_up_statefulset(self, statefulset_name: str, pod_amount: int = 1):
         logger.debug("Done recreating.")
 
     @retry_kubernetes_request_no_ignore
-    def scale_up_deployment(self, deployment_name: str, pod_amount: int):
+    def scale_up_deployment(self, deployment_name: str, pod_amount: int) -> None:
         body = self.get_deployment_scale(deployment_name)
         logger.debug("Recreating backend Pods for %s", deployment_name)
         body.spec.replicas = pod_amount
@@ -183,7 +183,7 @@ def is_deployment_stopped(self, deployment_name: str, statefulset: bool = False)
                 return False
         return True
 
-    def is_deployment_ready(self, deployment_name: str, statefulset: bool = False):
+    def is_deployment_ready(self, deployment_name: str, statefulset: bool = False) -> bool:
         if statefulset:
             status = self.client_appsv1_api.read_namespaced_stateful_set_status(deployment_name, self.namespace)
         else:
@@ -203,7 +203,7 @@ def is_deployment_ready(self, deployment_name: str, statefulset: bool = False):
         return expected_replicas == ready_replicas
 
     @retry_kubernetes_request
-    def getPodNameFromDeployment(self, deployment_name, namespace_name):
+    def getPodNameFromDeployment(self, deployment_name: str, namespace_name: str) -> str:
         """
         From a given deployment, get the first pod name
         """
@@ -225,7 +225,7 @@ def get_deployment_hpa(self, *, deployment_name: str) -> Generator:
             if hpa.spec.scale_target_ref.kind == "Deployment" and hpa.spec.scale_target_ref.name == deployment_name:
                 yield hpa
 
-    def patch_deployment_hpa(self, *, hpa_name: str, body: Any):
+    def patch_deployment_hpa(self, *, hpa_name: str, body: Any) -> None:
         self.client_autoscalingv1_api.patch_namespaced_horizontal_pod_autoscaler(
             name=hpa_name, namespace=self.namespace, body=body
         )
@@ -245,7 +245,7 @@ class KubernetesDeploymentManager(NamespacedKubernetesHelper):
         a.start_pods()
     """
 
-    def __init__(self, release_name: str, **kwargs):
+    def __init__(self, release_name: str, **kwargs: Any):
         self.release_name = release_name
         super().__init__(**kwargs)
 
@@ -300,7 +300,7 @@ def _get_expected_deployment_scale_dict(self) -> Dict[int, Dict[str, int]]:
         logger.debug("Deployments are %s", pprint.pformat(eds_dict))
         return eds_dict
 
-    def start_pods(self):
+    def start_pods(self) -> None:
         """
         Start all Pods that should be started
         """
@@ -335,19 +335,19 @@ def _are_deployments_stopped(self, deployment_dict: Dict[str, int]) -> bool:
         return True
 
     @retry_kubernetes_request
-    def disable_hpa(self, *, deployment_name: str):
+    def disable_hpa(self, *, deployment_name: str) -> None:
         for hpa in self.get_deployment_hpa(deployment_name=deployment_name):
             # Tell the hpa to manage a non-existing Deployment
             hpa.spec.scale_target_ref.name = f"{HPA_ID_PREFIX}-{deployment_name}"
             self.patch_deployment_hpa(hpa_name=hpa.metadata.name, body=hpa)
 
     @retry_kubernetes_request
-    def re_enable_hpa(self, *, deployment_name: str):
+    def re_enable_hpa(self, *, deployment_name: str) -> None:
         for hpa in self.get_deployment_hpa(deployment_name=f"{HPA_ID_PREFIX}-{deployment_name}"):
             hpa.spec.scale_target_ref.name = deployment_name
             self.patch_deployment_hpa(hpa_name=hpa.metadata.name, body=hpa)
 
-    def _stop_deployments(self, deployment_dict: Dict[str, int]):
+    def _stop_deployments(self, deployment_dict: Dict[str, int]) -> None:
         """
         Scale down a dict (deployment_name, expected_scale) of Deployments.
         """
@@ -361,7 +361,7 @@ def _stop_deployments(self, deployment_dict: Dict[str, int]):
             else:
                 raise Exception("Timed out waiting for pods to be deleted: aborting.")
 
-    def stop_pods(self):
+    def stop_pods(self) -> None:
         """
         Scale to 0 all deployments for which an ExpectedDeploymentScale links to.
         stop all deployments, then wait for actual stop, by priority (descending order):
@@ -446,14 +446,14 @@ def create_job(self, job_body: kubernetes.client.V1Job) -> kubernetes.client.V1J
         except kubernetes.client.rest.ApiException as e:
             print("Exception when calling BatchV1Api->create_namespaced_job: %s\n" % e)
 
-    def get_job(self, job_name) -> kubernetes.client.V1Job:
+    def get_job(self, job_name: str) -> kubernetes.client.V1Job:
         """
         Get a job, concatenating release_name and job_name as job name.
         """
         job_name = f"{self.release_name}-{job_name}"
         return self.client_batchv1_api.read_namespaced_job(job_name, self.namespace)
 
-    def delete_job(self, job_name):
+    def delete_job(self, job_name: str) -> kubernetes.client.V1Status:
        """
         Get a job, concatenating release_name and job_name as job name.
         """
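The class docstring visible in the hunk above already shows the intended use; condensed into a sketch (release and namespace names are placeholders):

    from wiremind_kubernetes import KubernetesDeploymentManager

    dm = KubernetesDeploymentManager(release_name="my-release", use_kubeconfig=True, namespace="default")

    dm.stop_pods()   # scale EDS-tracked Deployments to 0 by descending priority, neutralizing their HPAs
    dm.start_pods()  # restore the expected scales

    # Job helpers prefix release_name, so this reads the Job named "my-release-migrate".
    job = dm.get_job("migrate")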
diff --git a/src/wiremind_kubernetes/tests/e2e_tests/conftest.py b/src/wiremind_kubernetes/tests/e2e_tests/conftest.py
index 9abe097..3b27304 100644
--- a/src/wiremind_kubernetes/tests/e2e_tests/conftest.py
+++ b/src/wiremind_kubernetes/tests/e2e_tests/conftest.py
@@ -1,13 +1,15 @@
 import logging
 import os
 import time
+from typing import Generator
 
 import kubernetes
 import pytest
+from pytest_mock import MockerFixture
 
 import wiremind_kubernetes
-from wiremind_kubernetes.utils import run_command
 from wiremind_kubernetes.tests.e2e_tests.helpers import check_not_using_wiremind_cluster
+from wiremind_kubernetes.utils import run_command
 
 E2E_CLUSTER_MANIFESTS = "tests/e2e_tests/manifests"
 absolute_path = os.path.dirname(os.path.join(os.path.abspath(wiremind_kubernetes.__file__)))
@@ -19,23 +21,23 @@
 
 
 @pytest.fixture(scope="function")
-def k8s_client_request_function(mocker):
+def k8s_client_request_function(mocker: MockerFixture) -> Generator:
     yield mocker.spy(kubernetes.client.api_client.ApiClient, "request")
 
 
 @pytest.fixture(scope="session", autouse=True)
-def setUpE2E():
+def setUpE2E() -> None:
     check_not_using_wiremind_cluster()
 
 
-def delete_namespace():
+def delete_namespace() -> None:
     run_command(
         f"kubectl delete namespace {TEST_NAMESPACE} --wait --grace-period=1",
     )
 
 
 @pytest.fixture
-def populate_cluster():
+def populate_cluster() -> Generator[None, None, None]:
     run_command(
         f"kubectl apply -f {absolute_path}/../../CustomResourceDefinition-expecteddeploymentscales.yaml",
     )
@@ -87,7 +89,7 @@ def populate_cluster():
 
 
 @pytest.fixture
-def create_namespace():
+def create_namespace() -> Generator[None, None, None]:
     run_command(
         f"kubectl create namespace {TEST_NAMESPACE}",
     )
@@ -105,7 +107,7 @@
 
 
 @pytest.fixture
-def concerned_dm():
+def concerned_dm() -> wiremind_kubernetes.KubernetesDeploymentManager:
     return wiremind_kubernetes.KubernetesDeploymentManager(
         use_kubeconfig=True,
         namespace=TEST_NAMESPACE,
@@ -114,7 +116,7 @@
 
 
 @pytest.fixture
-def unconcerned_dm():
+def unconcerned_dm() -> wiremind_kubernetes.KubernetesDeploymentManager:
     return wiremind_kubernetes.KubernetesDeploymentManager(
         use_kubeconfig=True,
         namespace=TEST_NAMESPACE,
diff --git a/src/wiremind_kubernetes/tests/e2e_tests/create_job_test.py b/src/wiremind_kubernetes/tests/e2e_tests/create_job_test.py
index 62f4181..4d16f9f 100644
--- a/src/wiremind_kubernetes/tests/e2e_tests/create_job_test.py
+++ b/src/wiremind_kubernetes/tests/e2e_tests/create_job_test.py
@@ -3,13 +3,16 @@
 import time
 
 import kubernetes
+from pytest_mock import MockerFixture
+
+from wiremind_kubernetes import KubernetesDeploymentManager
 
 from .conftest import TEST_NAMESPACE
 
 logger = logging.getLogger(__name__)
 
 
-def test_create_job(concerned_dm, create_namespace):
+def test_create_job(concerned_dm: KubernetesDeploymentManager, create_namespace: MockerFixture) -> None:
     """
     Test that default create job and delete job work as expected
     """
@@ -59,7 +62,7 @@ def test_create_job(concerned_dm, create_namespace):
     assert not pod_list
 
 
-def test_create_job_argument(concerned_dm, create_namespace):
+def test_create_job_argument(concerned_dm: KubernetesDeploymentManager, create_namespace: MockerFixture) -> None:
     """
     Test that create job with command / args works and finishes as expected
     """
diff --git a/src/wiremind_kubernetes/tests/e2e_tests/helpers.py b/src/wiremind_kubernetes/tests/e2e_tests/helpers.py
index e0176e8..acc7c80 100644
--- a/src/wiremind_kubernetes/tests/e2e_tests/helpers.py
+++ b/src/wiremind_kubernetes/tests/e2e_tests/helpers.py
@@ -10,7 +10,7 @@
 logger = logging.getLogger(__name__)
 
 
-def check_not_using_wiremind_cluster():
+def check_not_using_wiremind_cluster() -> None:
     """
     Will sys.exit(1) if kubectl current context api server is not a test cluster (like kind, minikube, etc)
     """
@@ -32,7 +32,7 @@ def check_not_using_wiremind_cluster():
         sys.exit(1)
 
 
-def get_k8s_username():
+def get_k8s_username() -> str:
     """
     Return the Kind cluster's username.
     """
diff --git a/src/wiremind_kubernetes/tests/e2e_tests/start_stop_test.py b/src/wiremind_kubernetes/tests/e2e_tests/start_stop_test.py
index 3aeaf84..c5d5583 100644
--- a/src/wiremind_kubernetes/tests/e2e_tests/start_stop_test.py
+++ b/src/wiremind_kubernetes/tests/e2e_tests/start_stop_test.py
@@ -1,6 +1,8 @@
 import logging
 import time
 
+from pytest_mock import MockerFixture
+
 import wiremind_kubernetes
 from wiremind_kubernetes import KubernetesDeploymentManager
 from wiremind_kubernetes.kubernetes_helper import HPA_ID_PREFIX
@@ -14,7 +16,7 @@
 KubernetesDeploymentManager.SCALE_DOWN_MAX_WAIT_TIME = 30
 
 
-def assert_hpa_scale_target_ref_name(*, hpa_name, scale_target_ref_name: str):
+def assert_hpa_scale_target_ref_name(*, hpa_name: str, scale_target_ref_name: str) -> None:
     assert (
         kubectl_get_json(resource="hpa", namespace=TEST_NAMESPACE, name=hpa_name)["spec"]["scaleTargetRef"]["name"]
         == scale_target_ref_name
@@ -32,7 +34,9 @@ def are_deployments_ready(
     )
 
 
-def wait_for_deployments_ready(concerned_dm: KubernetesDeploymentManager, unconcerned_dm: KubernetesDeploymentManager):
+def wait_for_deployments_ready(
+    concerned_dm: KubernetesDeploymentManager, unconcerned_dm: KubernetesDeploymentManager
+) -> None:
     for _ in range(1, 10):
         logger.info("Waiting for deployments to be started...")
         if not are_deployments_ready(concerned_dm, unconcerned_dm):
@@ -45,7 +49,12 @@ def wait_for_deployments_ready(concerned_dm: KubernetesDeploymentManager, unconc
     assert are_deployments_ready(concerned_dm, unconcerned_dm)  # Last chance
 
 
-def test_stop_start_all(concerned_dm, unconcerned_dm, populate_cluster, mocker):
+def test_stop_start_all(
+    concerned_dm: KubernetesDeploymentManager,
+    unconcerned_dm: KubernetesDeploymentManager,
+    populate_cluster: MockerFixture,
+    mocker: MockerFixture,
+) -> None:
     """
     Test that we stop/start all deployments that have an EDS in the namespace default
     and only them.
diff --git a/src/wiremind_kubernetes/tests/unit_tests/kube_config_test.py b/src/wiremind_kubernetes/tests/unit_tests/kube_config_test.py
index f563688..9f37bdd 100644
--- a/src/wiremind_kubernetes/tests/unit_tests/kube_config_test.py
+++ b/src/wiremind_kubernetes/tests/unit_tests/kube_config_test.py
@@ -1,21 +1,24 @@
 import os
-from typing import Dict, Optional
+from typing import Dict, Generator, Optional
 
 import kubernetes
 import pytest
+from pytest_mock import MockerFixture
+
 import wiremind_kubernetes
 from wiremind_kubernetes.kube_config import load_kubernetes_config
 
 
 @pytest.fixture(scope="module", autouse=True)
-def clean_os_environ():
+def clean_os_environ() -> Generator:
     """
     Get rid of the env var "CLASSIC_K8S_CONFIG", this will be set if needed using tests parameterization
     """
     old_os_environ = os.environ.copy()
     os.environ.pop("CLASSIC_K8S_CONFIG", None)
     yield
-    os.environ = old_os_environ
+    os.environ.clear()
+    os.environ.update(old_os_environ)
 
 
 @pytest.mark.parametrize(
@@ -110,8 +113,8 @@ def test_load_kubernetes_config_1(
     extra_env_vars: Dict[str, str],
     service_token_present: bool,
     should_call: Optional[str],
-    mocker,
-):
+    mocker: MockerFixture,
+) -> None:
     """
     Test that load_kubernetes_config calls the right kube loading function, when needed, with the right parameters.
     Only relevant cases are tested.
diff --git a/src/wiremind_kubernetes/tests/unit_tests/kubernetes_client_additional_arguments_test.py b/src/wiremind_kubernetes/tests/unit_tests/kubernetes_client_additional_arguments_test.py
index 6309c46..e0ac4da 100644
--- a/src/wiremind_kubernetes/tests/unit_tests/kubernetes_client_additional_arguments_test.py
+++ b/src/wiremind_kubernetes/tests/unit_tests/kubernetes_client_additional_arguments_test.py
@@ -1,7 +1,9 @@
+from pytest_mock import MockerFixture
+
 import wiremind_kubernetes.kubernetes_helper
 
 
-def test_kubernetes_client_additional_arguments_core_v1_api(mocker):
+def test_kubernetes_client_additional_arguments_core_v1_api(mocker: MockerFixture) -> None:
     """
     Test that we add mandatory args to each function call of kubernetes client
     """
diff --git a/src/wiremind_kubernetes/tests/unit_tests/kubernetes_deployment_manager_test.py b/src/wiremind_kubernetes/tests/unit_tests/kubernetes_deployment_manager_test.py
index 3311c32..696c3e7 100644
--- a/src/wiremind_kubernetes/tests/unit_tests/kubernetes_deployment_manager_test.py
+++ b/src/wiremind_kubernetes/tests/unit_tests/kubernetes_deployment_manager_test.py
@@ -1,9 +1,11 @@
 import unittest
 
+from pytest_mock import MockerFixture
+
 import wiremind_kubernetes
 
 
-def test_stop_pods_priority(mocker):
+def test_stop_pods_priority(mocker: MockerFixture) -> None:
     """
     Test that we honor priorities when stopping workloads.
 
@@ -44,7 +46,7 @@ def test_stop_pods_priority(mocker):
     assert mocked_stop_deployments.mock_calls == expected_calls
 
 
-def test_stop_deployments_correctly_wait(mocker):
+def test_stop_deployments_correctly_wait(mocker: MockerFixture) -> None:
     """
     Test that we wait for deployments to be stopped
     """
@@ -53,7 +55,7 @@ def test_stop_deployments_correctly_wait(mocker):
     mocker.patch("kubernetes.client.BatchV1Api")
     mocker.patch("kubernetes.client.CustomObjectsApi")
 
-    deployment_dict = {0: {"my-pod": 42, "my-other-pod": 113}}
+    deployment_dict = {"my-pod": 42, "my-other-pod": 113}
     mocked_are_deployments_stopped = mocker.patch(
         "wiremind_kubernetes.KubernetesDeploymentManager._are_deployments_stopped", side_effect=[False, False, True]
     )
diff --git a/src/wiremind_kubernetes/tests/unit_tests/namespaced_kubernetes_helper_test.py b/src/wiremind_kubernetes/tests/unit_tests/namespaced_kubernetes_helper_test.py
index b6086b0..02a24fd 100644
--- a/src/wiremind_kubernetes/tests/unit_tests/namespaced_kubernetes_helper_test.py
+++ b/src/wiremind_kubernetes/tests/unit_tests/namespaced_kubernetes_helper_test.py
@@ -1,7 +1,9 @@
+from pytest_mock import MockerFixture
+
 import wiremind_kubernetes
 
 
-def test_is_deployment_stopped_ignores_failed(mocker):
+def test_is_deployment_stopped_ignores_failed(mocker: MockerFixture) -> None:
     """
     Test that we don't consider failed (like evicted) Pods as living Pods
     """
diff --git a/src/wiremind_kubernetes/tests/unit_tests/utils_test/retry_kubernetes_request_test.py b/src/wiremind_kubernetes/tests/unit_tests/utils_test/retry_kubernetes_request_test.py
index eef9c80..35ed835 100644
--- a/src/wiremind_kubernetes/tests/unit_tests/utils_test/retry_kubernetes_request_test.py
+++ b/src/wiremind_kubernetes/tests/unit_tests/utils_test/retry_kubernetes_request_test.py
@@ -4,11 +4,11 @@
 from wiremind_kubernetes.utils import retry_kubernetes_request, retry_kubernetes_request_no_ignore
 
 
-def test_no_retry_required():
+def test_no_retry_required() -> None:
     counter = 0
 
     @retry_kubernetes_request
-    def succeeds():
+    def succeeds() -> str:
         nonlocal counter
         counter += 1
         return "success"
@@ -19,11 +19,11 @@ def succeeds():
     assert counter == 1
 
 
-def test_retries_once():
+def test_retries_once() -> None:
     counter = 0
 
     @retry_kubernetes_request
-    def fails_once():
+    def fails_once() -> str:
         nonlocal counter
         counter += 1
         if counter < 2:
@@ -36,11 +36,11 @@ def fails_once():
     assert counter == 2
 
 
-def test_limit_is_reached():
+def test_limit_is_reached() -> None:
     counter = 0
 
     @retry_kubernetes_request
-    def always_fails():
+    def always_fails() -> None:
         nonlocal counter
         counter += 1
         raise kubernetes.client.rest.ApiException("failed")
@@ -50,11 +50,11 @@ def always_fails():
     assert counter == 2
 
 
-def test_404_correctly_handled():
+def test_404_correctly_handled() -> None:
     counter = 0
 
     @retry_kubernetes_request
-    def notfound():
+    def notfound() -> None:
         nonlocal counter
         counter += 1
         raise kubernetes.client.rest.ApiException(status=404)
@@ -65,14 +65,14 @@
     assert counter == 1
 
 
-def test_404_correctly_ignored():
+def test_404_correctly_ignored() -> None:
     """
     test that retry_kubernetes_request_no_ignore raises if 404 and does not retry
     """
     counter = 0
 
     @retry_kubernetes_request_no_ignore
-    def notfound():
+    def notfound() -> None:
         nonlocal counter
         counter += 1
         raise kubernetes.client.rest.ApiException(status=404)
diff --git a/src/wiremind_kubernetes/tests/unit_tests/utils_test/run_command_test.py b/src/wiremind_kubernetes/tests/unit_tests/utils_test/run_command_test.py
index 32de363..00ada0b 100644
--- a/src/wiremind_kubernetes/tests/unit_tests/utils_test/run_command_test.py
+++ b/src/wiremind_kubernetes/tests/unit_tests/utils_test/run_command_test.py
@@ -1,11 +1,12 @@
 import subprocess
 
 import pytest
+from pytest_mock import MockerFixture
 
-from wiremind_kubernetes.utils import run_command, logger
+from wiremind_kubernetes.utils import logger, run_command
 
 
-def test_run_command_succeeded(mocker):
+def test_run_command_succeeded(mocker: MockerFixture) -> None:
     """
     Test that running a working command works as expected.
     """
@@ -23,7 +24,7 @@
     log_spy.assert_called_with("lol")
 
 
-def test_run_command_with_array_succeeded(mocker):
+def test_run_command_with_array_succeeded(mocker: MockerFixture) -> None:
     """
     Test that running a working command given through array works as expected.
     """
@@ -41,7 +42,7 @@
     log_spy.assert_called_with("lol")
 
 
-def test_run_command_succeeded_return_result(mocker):
+def test_run_command_succeeded_return_result(mocker: MockerFixture) -> None:
     """
     Test that running a working command using return_result=True works as expected.
     """
@@ -61,13 +62,13 @@
     assert returncode == 0
 
 
-def test_run_command_succeeded_line_callback(mocker):
+def test_run_command_succeeded_line_callback(mocker: MockerFixture) -> None:
     """
     Test that running a working command using a custom line_callback works as expected.
     """
     result = []
 
-    def line_callback(line):
+    def line_callback(line: str) -> None:
         nonlocal result
         result.append(line)
 
@@ -85,7 +86,7 @@ def line_callback(line):
     assert result == ["lol"]
 
 
-def test_run_command_failed(mocker):
+def test_run_command_failed(mocker: MockerFixture) -> None:
     """
     Test that running a failing command works as expected.
     """
@@ -100,7 +101,7 @@
     )
 
 
-def test_run_command_failed_still_show_output(mocker):
+def test_run_command_failed_still_show_output(mocker: MockerFixture) -> None:
     """
     Test that running a failing command still returns its output.
     """
@@ -112,7 +113,7 @@
     log_spy.assert_called_with("lol")
 
 
-def test_run_command_failed_return_result(mocker):
+def test_run_command_failed_return_result(mocker: MockerFixture) -> None:
     """
     Test that running a failing command using return_result=True works as expected.
     """
@@ -123,13 +124,13 @@
     assert returncode == 1
 
 
-def test_run_command_failed_line_callback(mocker):
+def test_run_command_failed_line_callback(mocker: MockerFixture) -> None:
     """
     Test that running a failing command using custom line_callback works as expected.
     """
     result = []
 
-    def line_callback(line):
+    def line_callback(line: str) -> None:
         nonlocal result
         result.append(line)
 
@@ -139,7 +140,7 @@ def line_callback(line):
     assert result == ["lol"]
 
 
-def test_run_command_honors_args(mocker):
+def test_run_command_honors_args(mocker: MockerFixture) -> None:
     """
     Test that run_command honors kwargs.
     """
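One change above is a genuine test fix rather than annotation churn: _stop_deployments takes Dict[str, int] (deployment name to expected scale), while the priority-keyed Dict[int, Dict[str, int]] shape belongs to _get_expected_deployment_scale_dict and is consumed by stop_pods, which evidently walks the priorities and hands each inner mapping down. The old test passed the outer shape to the inner function. Schematically, with the values from the test:

    # Shape consumed by stop_pods(): priority -> {deployment name: expected scale}.
    by_priority = {0: {"my-pod": 42, "my-other-pod": 113}}

    # Shape consumed by _stop_deployments(): one inner mapping, matching Dict[str, int].
    flat = by_priority[0]
    assert flat == {"my-pod": 42, "my-other-pod": 113}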
""" @@ -112,7 +113,7 @@ def test_run_command_failed_still_show_output(mocker): log_spy.assert_called_with("lol") -def test_run_command_failed_return_result(mocker): +def test_run_command_failed_return_result(mocker: MockerFixture) -> None: """ Test that running a failing command using return_result=True works as expected. """ @@ -123,13 +124,13 @@ def test_run_command_failed_return_result(mocker): assert returncode == 1 -def test_run_command_failed_line_callback(mocker): +def test_run_command_failed_line_callback(mocker: MockerFixture) -> None: """ Test that running a failing command using custom line_callback works as expected. """ result = [] - def line_callback(line): + def line_callback(line: str) -> None: nonlocal result result.append(line) @@ -139,7 +140,7 @@ def line_callback(line): assert result == ["lol"] -def test_run_command_honors_args(mocker): +def test_run_command_honors_args(mocker: MockerFixture) -> None: """ Test that run_command honors kwargs. """ diff --git a/src/wiremind_kubernetes/utils.py b/src/wiremind_kubernetes/utils.py index 901535b..4490b83 100755 --- a/src/wiremind_kubernetes/utils.py +++ b/src/wiremind_kubernetes/utils.py @@ -3,17 +3,18 @@ import shlex import subprocess import time -from typing import Callable, List, Union +from typing import Any, Callable, List, Optional, Tuple, Union import kubernetes + from wiremind_kubernetes.exceptions import ExecError logger = logging.getLogger(__name__) def run_command( - command: Union[List, str], return_result: bool = False, line_callback: Union[Callable, None] = None, **kw_args -): + command: Union[List, str], return_result: bool = False, line_callback: Union[Callable, None] = None, **kw_args: Any +) -> Tuple[str, str, int]: """ Run command, print stdout/stderr, check that command exited correctly, return stdout/err """ @@ -30,7 +31,7 @@ def run_command( else: interpreted_command = command - process = subprocess.Popen( # type: ignore + process = subprocess.Popen( interpreted_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, **kw_args ) @@ -46,14 +47,16 @@ def run_command( if process.returncode: raise subprocess.CalledProcessError(process.returncode, command) + return "", "", 0 + -def retry_kubernetes_request(function): +def retry_kubernetes_request(function: Callable) -> Callable: """ Decorator that retries a failed Kubernetes API request if needed and ignores 404 """ @functools.wraps(function) - def wrapper(*args, **kwargs): + def wrapper(*args: Any, **kwargs: Any) -> Any: try: return function(*args, **kwargs) except kubernetes.client.rest.ApiException as e: @@ -70,13 +73,13 @@ def wrapper(*args, **kwargs): return wrapper -def retry_kubernetes_request_no_ignore(function): +def retry_kubernetes_request_no_ignore(function: Callable) -> Callable: """ Decorator that retries a failed Kubernetes API request if needed and do NOT ignore 404 (raise if 404) """ @functools.wraps(function) - def wrapper(*args, **kwargs): + def wrapper(*args: Any, **kwargs: Any) -> Any: try: return function(*args, **kwargs) except kubernetes.client.rest.ApiException as e: @@ -92,7 +95,9 @@ def wrapper(*args, **kwargs): return wrapper -def kubernetes_exec(commands, api, pod_name, namespace_name, container_name=None): +def kubernetes_exec( + commands: List[str], api: Any, pod_name: str, namespace_name: str, container_name: Optional[str] = None +) -> None: logger.info('Connecting to "%s" pod from "%s" namespace', pod_name, namespace_name) resp = kubernetes.stream.stream( api.connect_get_namespaced_pod_exec,