Skip to content

Commit

Permalink
fix(mypy): errors and setup.cfg misconfiguration (#19)
Browse files Browse the repository at this point in the history
  • Loading branch information
maxime1907 authored Dec 27, 2022
1 parent 3134868 commit 09016a5
Show file tree
Hide file tree
Showing 18 changed files with 156 additions and 95 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,10 @@
# wiremind-kubernetes

## v7.0.1 (2022-12-27)
### Fix
- setup.cfg correct mypy and flake8 config
- mypy errors

## v7.0.0 (2022-09-27)
### BREAKING CHANGE
- stop_pods: neutralize the HPA as `HPAScaleToZero` may be in use (HPA may scale up the Deployment even if replicas=0), a more straightforward solution will
Expand Down
2 changes: 1 addition & 1 deletion VERSION
Original file line number Diff line number Diff line change
@@ -1 +1 @@
7.0.0
7.0.1
30 changes: 25 additions & 5 deletions setup.cfg
Original file line number Diff line number Diff line change
@@ -1,15 +1,35 @@
[options]
python_requires = >= 3.7

[flake8]
max-line-length = 120
# W503: line break before binary operator. Black causes this error but seems to be right
# E231 missing whitespace after ','. Black causes this error but seems to be right (see pep8)
ignore = W503, E231
# Enable flake8-mutable
enable-extensions = M511
# W503 line break before binary operator. Black causes this error
# E203 whitespace before ':'
# E231 missing whitespace after ','. Black causes this error but seems to be right (see pep8)
# E501 line too long
# Q000 Double quotes found but single quotes preferred
ignore = W503, E203, E231, E501, Q000
jobs = 4

[mypy]
python_version = 3.7
ignore_missing_imports = True
check_untyped_defs = True
disallow_untyped_defs = True
disallow_incomplete_defs = True
warn_redundant_casts = True
warn_unused_ignores = True
warn_unused_configs = True
no_implicit_optional = True
strict_optional = True
show_error_codes = True
files = src

[tool:pytest]
log_cli = True
log_level=INFO
# Deterministic ordering for tests; useful for pytest-xdist.
env =
PYTHONHASHSEED=0
filterwarnings =
ignore::pytest.PytestUnknownMarkWarning
9 changes: 7 additions & 2 deletions src/wiremind_kubernetes/exceptions.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
from typing import Optional


class WiremindKubernetesException(Exception):
"""
Base wiremind-kubernetes Exception.
"""

def __init__(self, message=None):
def __init__(self, message: Optional[str] = None):
super().__init__()
if message:
self.message = message
Expand All @@ -14,11 +17,13 @@ class ExecError(WiremindKubernetesException):
An error occurred while executing kubernetes command.
"""

def __init__(self):
def __init__(self) -> None:
super().__init__(message="An error occurred while executing kubernetes command.")


class PodNotFound(WiremindKubernetesException):
"""
A required pod was not found.
"""

pass
10 changes: 6 additions & 4 deletions src/wiremind_kubernetes/kube_config.py
Original file line number Diff line number Diff line change
@@ -1,23 +1,25 @@
import logging
import os
from typing import Optional

import kubernetes


logger = logging.getLogger(__name__)


def _load_kubeconfig(config_file=None, context=None):
def _load_kubeconfig(config_file: Optional[str] = None, context: Optional[str] = None) -> None:
kubernetes.config.load_kube_config(config_file=config_file, context=context)
logger.debug("Kubernetes configuration successfully set.")


def _load_incluster_config():
def _load_incluster_config() -> None:
kubernetes.config.load_incluster_config()
logger.debug("Kubernetes configuration successfully set.")


def load_kubernetes_config(use_kubeconfig=None, config_file=None, context=None):
def load_kubernetes_config(
use_kubeconfig: Optional[bool] = None, config_file: Optional[str] = None, context: Optional[str] = None
) -> None:
"""
Load kubernetes configuration in memory, either from incluster method or from kubeconfig.
:param use_kubeconfig:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ class ClientWithArguments:
Currently add dry_run support for write functions and pretty to all.
"""

def __init__(self, client, dry_run: bool = False):
def __init__(self, client: Any, dry_run: bool = False):
self.client = client() # like kubernetes.client.CoreV1Api
self.read_additional_arguments: Dict[str, Any] = dict(pretty=True)
# Every request, either read or write, will have those arguments added
Expand All @@ -19,7 +19,7 @@ def __init__(self, client, dry_run: bool = False):
# Dry run, in kube API, is not true or false, but either dry_run: All or not defined.
self.additional_arguments["dry_run"] = "All"

def __getattr__(self, attr):
def __getattr__(self, attr: str) -> Any:
original_attr = getattr(self.client, attr)

if not callable(original_attr):
Expand All @@ -31,7 +31,7 @@ def __getattr__(self, attr):
is_write_function = True
break

def fn(*args, **kwargs):
def fn(*args: Any, **kwargs: Any) -> Any:
if is_write_function:
kwargs.update(self.additional_arguments)
else: # A read function
Expand All @@ -42,30 +42,30 @@ def fn(*args, **kwargs):


class CoreV1ApiWithArguments(ClientWithArguments):
def __init__(self, *args, dry_run: bool = False, **kwargs):
def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None:
super().__init__(client=kubernetes.client.CoreV1Api, dry_run=dry_run)


class AppV1ApiWithArguments(ClientWithArguments):
def __init__(self, *args, dry_run: bool = False, **kwargs):
def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None:
super().__init__(client=kubernetes.client.AppsV1Api, dry_run=dry_run)


class BatchV1ApiWithArguments(ClientWithArguments):
def __init__(self, *args, dry_run: bool = False, **kwargs):
def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None:
super().__init__(client=kubernetes.client.BatchV1Api, dry_run=dry_run)


class AutoscalingV1ApiWithArguments(ClientWithArguments):
def __init__(self, *args, dry_run: bool = False, **kwargs):
def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None:
super().__init__(client=kubernetes.client.AutoscalingV1Api, dry_run=dry_run)


class CustomObjectsApiWithArguments(ClientWithArguments):
def __init__(self, *args, dry_run: bool = False, **kwargs):
def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None:
super().__init__(client=kubernetes.client.CustomObjectsApi, dry_run=dry_run)


class RbacAuthorizationV1ApiWithArguments(ClientWithArguments):
def __init__(self, *args, dry_run: bool = False, **kwargs):
def __init__(self, *args: Any, dry_run: bool = False, **kwargs: Any) -> None:
super().__init__(client=kubernetes.client.RbacAuthorizationV1Api, dry_run=dry_run)
36 changes: 18 additions & 18 deletions src/wiremind_kubernetes/kubernetes_helper.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import logging
import pprint
import time
from typing import Any, Dict, List, Optional, Union, Generator
from typing import Any, Dict, Generator, List, Optional, Union

import kubernetes

Expand Down Expand Up @@ -105,38 +105,38 @@ def __init__(
else:
self.namespace = _get_namespace_from_kube()

def get_deployment_scale(self, deployment_name: str):
def get_deployment_scale(self, deployment_name: str) -> kubernetes.client.V1Scale:
logger.debug("Getting deployment scale for %s", deployment_name)
return self.client_appsv1_api.read_namespaced_deployment_scale(deployment_name, self.namespace)

def get_statefulset_scale(self, statefulset_name: str):
def get_statefulset_scale(self, statefulset_name: str) -> kubernetes.client.V1Scale:
logger.debug("Getting statefulset scale for %s", statefulset_name)
return self.client_appsv1_api.read_namespaced_stateful_set_scale(statefulset_name, self.namespace)

def scale_down_statefulset(self, statefulset_name: str):
def scale_down_statefulset(self, statefulset_name: str) -> None:
body = self.get_statefulset_scale(statefulset_name)
logger.debug("Deleting all Pods for %s", statefulset_name)
body.spec.replicas = 0
self.client_appsv1_api.patch_namespaced_stateful_set_scale(statefulset_name, self.namespace, body)
logger.debug("Done deleting.")

@retry_kubernetes_request_no_ignore
def scale_down_deployment(self, deployment_name: str):
def scale_down_deployment(self, deployment_name: str) -> None:
body = self.get_deployment_scale(deployment_name)
logger.debug("Deleting all Pods for %s", deployment_name)
body.spec.replicas = 0
self.client_appsv1_api.patch_namespaced_deployment_scale(deployment_name, self.namespace, body)
logger.debug("Done deleting.")

def scale_up_statefulset(self, statefulset_name: str, pod_amount: int = 1):
def scale_up_statefulset(self, statefulset_name: str, pod_amount: int = 1) -> None:
body = self.get_statefulset_scale(statefulset_name)
logger.debug("Recreating backend Pods for %s", statefulset_name)
body.spec.replicas = pod_amount
self.client_appsv1_api.patch_namespaced_stateful_set_scale(statefulset_name, self.namespace, body)
logger.debug("Done recreating.")

@retry_kubernetes_request_no_ignore
def scale_up_deployment(self, deployment_name: str, pod_amount: int):
def scale_up_deployment(self, deployment_name: str, pod_amount: int) -> None:
body = self.get_deployment_scale(deployment_name)
logger.debug("Recreating backend Pods for %s", deployment_name)
body.spec.replicas = pod_amount
Expand Down Expand Up @@ -183,7 +183,7 @@ def is_deployment_stopped(self, deployment_name: str, statefulset: bool = False)
return False
return True

def is_deployment_ready(self, deployment_name: str, statefulset: bool = False):
def is_deployment_ready(self, deployment_name: str, statefulset: bool = False) -> bool:
if statefulset:
status = self.client_appsv1_api.read_namespaced_stateful_set_status(deployment_name, self.namespace)
else:
Expand All @@ -203,7 +203,7 @@ def is_deployment_ready(self, deployment_name: str, statefulset: bool = False):
return expected_replicas == ready_replicas

@retry_kubernetes_request
def getPodNameFromDeployment(self, deployment_name, namespace_name):
def getPodNameFromDeployment(self, deployment_name: str, namespace_name: str) -> str:
"""
From a given deployment, get the first pod name
"""
Expand All @@ -225,7 +225,7 @@ def get_deployment_hpa(self, *, deployment_name: str) -> Generator:
if hpa.spec.scale_target_ref.kind == "Deployment" and hpa.spec.scale_target_ref.name == deployment_name:
yield hpa

def patch_deployment_hpa(self, *, hpa_name: str, body: Any):
def patch_deployment_hpa(self, *, hpa_name: str, body: Any) -> None:
self.client_autoscalingv1_api.patch_namespaced_horizontal_pod_autoscaler(
name=hpa_name, namespace=self.namespace, body=body
)
Expand All @@ -245,7 +245,7 @@ class KubernetesDeploymentManager(NamespacedKubernetesHelper):
a.start_pods()
"""

def __init__(self, release_name: str, **kwargs):
def __init__(self, release_name: str, **kwargs: Any):
self.release_name = release_name
super().__init__(**kwargs)

Expand Down Expand Up @@ -300,7 +300,7 @@ def _get_expected_deployment_scale_dict(self) -> Dict[int, Dict[str, int]]:
logger.debug("Deployments are %s", pprint.pformat(eds_dict))
return eds_dict

def start_pods(self):
def start_pods(self) -> None:
"""
Start all Pods that should be started
"""
Expand Down Expand Up @@ -335,19 +335,19 @@ def _are_deployments_stopped(self, deployment_dict: Dict[str, int]) -> bool:
return True

@retry_kubernetes_request
def disable_hpa(self, *, deployment_name: str):
def disable_hpa(self, *, deployment_name: str) -> None:
for hpa in self.get_deployment_hpa(deployment_name=deployment_name):
# Tell the hpa to manage a non-existing Deployment
hpa.spec.scale_target_ref.name = f"{HPA_ID_PREFIX}-{deployment_name}"
self.patch_deployment_hpa(hpa_name=hpa.metadata.name, body=hpa)

@retry_kubernetes_request
def re_enable_hpa(self, *, deployment_name: str):
def re_enable_hpa(self, *, deployment_name: str) -> None:
for hpa in self.get_deployment_hpa(deployment_name=f"{HPA_ID_PREFIX}-{deployment_name}"):
hpa.spec.scale_target_ref.name = deployment_name
self.patch_deployment_hpa(hpa_name=hpa.metadata.name, body=hpa)

def _stop_deployments(self, deployment_dict: Dict[str, int]):
def _stop_deployments(self, deployment_dict: Dict[str, int]) -> None:
"""
Scale down a dict (deployment_name, expected_scale) of Deployments.
"""
Expand All @@ -361,7 +361,7 @@ def _stop_deployments(self, deployment_dict: Dict[str, int]):
else:
raise Exception("Timed out waiting for pods to be deleted: aborting.")

def stop_pods(self):
def stop_pods(self) -> None:
"""
Scale to 0 all deployments for which an ExpectedDeploymentScale links to.
stop all deployments, then wait for actual stop, by priority (descending order):
Expand Down Expand Up @@ -446,14 +446,14 @@ def create_job(self, job_body: kubernetes.client.V1Job) -> kubernetes.client.V1J
except kubernetes.client.rest.ApiException as e:
print("Exception when calling BatchV1Api->create_namespaced_job: %s\n" % e)

def get_job(self, job_name) -> kubernetes.client.V1Job:
def get_job(self, job_name: str) -> kubernetes.client.V1Job:
"""
Get a job, concatenating release_name and job_name as job name.
"""
job_name = f"{self.release_name}-{job_name}"
return self.client_batchv1_api.read_namespaced_job(job_name, self.namespace)

def delete_job(self, job_name):
def delete_job(self, job_name: str) -> kubernetes.client.V1Status:
"""
Get a job, concatenating release_name and job_name as job name.
"""
Expand Down
18 changes: 10 additions & 8 deletions src/wiremind_kubernetes/tests/e2e_tests/conftest.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,15 @@
import logging
import os
import time
from typing import Generator

import kubernetes
import pytest
from pytest_mock import MockerFixture

import wiremind_kubernetes
from wiremind_kubernetes.utils import run_command
from wiremind_kubernetes.tests.e2e_tests.helpers import check_not_using_wiremind_cluster
from wiremind_kubernetes.utils import run_command

E2E_CLUSTER_MANIFESTS = "tests/e2e_tests/manifests"
absolute_path = os.path.dirname(os.path.join(os.path.abspath(wiremind_kubernetes.__file__)))
Expand All @@ -19,23 +21,23 @@


@pytest.fixture(scope="function")
def k8s_client_request_function(mocker):
def k8s_client_request_function(mocker: MockerFixture) -> Generator:
yield mocker.spy(kubernetes.client.api_client.ApiClient, "request")


@pytest.fixture(scope="session", autouse=True)
def setUpE2E():
def setUpE2E() -> None:
check_not_using_wiremind_cluster()


def delete_namespace():
def delete_namespace() -> None:
run_command(
f"kubectl delete namespace {TEST_NAMESPACE} --wait --grace-period=1",
)


@pytest.fixture
def populate_cluster():
def populate_cluster() -> Generator[None, None, None]:
run_command(
f"kubectl apply -f {absolute_path}/../../CustomResourceDefinition-expecteddeploymentscales.yaml",
)
Expand Down Expand Up @@ -87,7 +89,7 @@ def populate_cluster():


@pytest.fixture
def create_namespace():
def create_namespace() -> Generator[None, None, None]:
run_command(
f"kubectl create namespace {TEST_NAMESPACE}",
)
Expand All @@ -105,7 +107,7 @@ def create_namespace():


@pytest.fixture
def concerned_dm():
def concerned_dm() -> wiremind_kubernetes.KubernetesDeploymentManager:
return wiremind_kubernetes.KubernetesDeploymentManager(
use_kubeconfig=True,
namespace=TEST_NAMESPACE,
Expand All @@ -114,7 +116,7 @@ def concerned_dm():


@pytest.fixture
def unconcerned_dm():
def unconcerned_dm() -> wiremind_kubernetes.KubernetesDeploymentManager:
return wiremind_kubernetes.KubernetesDeploymentManager(
use_kubeconfig=True,
namespace=TEST_NAMESPACE,
Expand Down
Loading

0 comments on commit 09016a5

Please sign in to comment.