diff --git a/.codespellignore b/.codespellignore
new file mode 100644
index 00000000..ee3a8c8d
--- /dev/null
+++ b/.codespellignore
@@ -0,0 +1,2 @@
+juju
+charm
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..91ccd086
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,47 @@
+---
+name: Bug report
+about: File a bug report
+labels: bug
+
+---
+
+
+
+## Steps to reproduce
+
+1.
+
+## Expected behavior
+
+
+## Actual behavior
+
+
+
+## Versions
+
+
+Operating system:
+
+
+Juju CLI:
+
+
+Juju agent:
+
+
+Charm revision:
+
+
+LXD:
+
+## Log output
+
+
+Juju debug log:
+
+
+
+
+## Additional context
+
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 00000000..7201d64f
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,38 @@
+
+
+
+## 🏷 Type of changes
+
+- [ ] Bug fix (non-breaking change which fixes an issue)
+- [ ] New feature (non-breaking change which adds functionality)
+- [ ] Breaking change (fix or feature that would cause existing functionality to change)
+- [ ] Documentation update
+- [ ] Tooling and CI changes
+- [ ] Dependencies upgrade or change
+
+## 📝 Description
+
+
+## 📑 Motivation and Context
+
+
+## 🧪 How Has This Been Tested?
+
+
+
+## ✅ Checklist
+
+
+- [ ] My code follows the code style of this project.
+- [ ] My change requires a change to the documentation.
+- [ ] I have updated the documentation accordingly.
+- [ ] I have read the [**CONTRIBUTING**](../blob/main/CONTRIBUTING.md) document.
+- [ ] I have added tests to cover my changes.
+- [ ] All new and existing tests passed.
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
new file mode 100644
index 00000000..eddd1329
--- /dev/null
+++ b/.github/workflows/ci.yaml
@@ -0,0 +1,59 @@
+name: Run tests
+
+on:
+  pull_request:
+  push:
+    branches:
+      - main
+      - feat/**
+  schedule:
+    - cron: "53 0 * * *"  # Daily at 00:53 UTC
+  workflow_dispatch:
+
+jobs:
+  pre-commit:
+    name: Run pre-commits
+    runs-on: ubuntu-latest
+    timeout-minutes: 5
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Install tox & poetry
+        run: |
+          pipx install tox
+          pipx install poetry
+      - name: Set up python environment
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.10"
+          cache: 'poetry'
+      - name: Install pre-commit
+        run: |
+          poetry install
+          poetry run pre-commit install
+      - name: Run pre-commit hooks
+        run: |
+          poetry run pre-commit run --all-files
+
+  lint:
+    name: Lint
+    uses: canonical/data-platform-workflows/.github/workflows/lint.yaml@v21.0.0
+
+  unit-test:
+    name: Unit test charm
+    runs-on: ubuntu-latest
+    timeout-minutes: 10
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Install tox & poetry
+        run: |
+          pipx install tox
+          pipx install poetry
+      - name: Set up python environment
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.10"
+          cache: 'poetry'
+      - name: Run tests
+        run: tox run -e unit
diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml
new file mode 100644
index 00000000..405125e8
--- /dev/null
+++ b/.github/workflows/publish.yaml
@@ -0,0 +1,61 @@
+name: Test & Publish to PyPI
+
+on:
+  workflow_dispatch:
+
+jobs:
+  build:
+    name: Build package
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Install poetry
+        run: pipx install poetry
+      - name: Setup python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.10'
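+          # Editorial note: `cache: 'poetry'` makes actions/setup-python cache
+          # dependencies keyed on poetry.lock; it requires the `poetry` binary
+          # to already be on PATH, hence the pipx install step above.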
+          cache: 'poetry'
+      - name: Build package
+        run: poetry build
+      - name: Store the distribution packages
+        uses: actions/upload-artifact@v4
+        with:
+          name: python-package-distributions
+          path: dist/
+
+  unit-test:
+    name: Test package
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Install poetry and tox
+        run: |
+          pipx install tox
+          pipx install poetry
+      - name: Setup python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.10'
+          cache: 'poetry'
+      - name: Run unit tests
+        run: tox run -e unit
+
+  publish:
+    name: Publish to PyPI
+    needs:
+      - build
+    runs-on: ubuntu-latest
+    environment: production
+    permissions:
+      id-token: write  # Needed for trusted publishing (https://packaging.python.org/en/latest/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/)
+    steps:
+      - name: Download all the dists
+        uses: actions/download-artifact@v4
+        with:
+          name: python-package-distributions
+          path: dist/
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
diff --git a/.gitignore b/.gitignore
index 82f92755..e2caa5a6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,7 @@
+# Charm specific
+*.charm
+*.snap
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -15,6 +19,7 @@ downloads/
 eggs/
 .eggs/
 lib/
+!single_kernel_mongo/lib/
 lib64/
 parts/
 sdist/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..900233c4
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,57 @@
+default_install_hook_types:
+  - pre-commit
+  - commit-msg
+  - pre-push
+  - post-checkout
+  - post-merge
+
+default_stages:
+  - pre-commit
+
+repos:
+  - repo: meta
+    hooks:
+      - id: check-hooks-apply
+
+  - repo: https://github.com/python-poetry/poetry
+    rev: 1.8.4
+    hooks:
+      - id: poetry-check
+      - id: poetry-lock
+
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v5.0.0
+    hooks:
+      - id: trailing-whitespace
+        args: [--markdown-linebreak-ext=md]
+      - id: check-yaml
+      - id: check-added-large-files
+      - id: debug-statements
+      - id: check-toml
+      - id: detect-private-key
+        exclude: "tests/unit/data/key.pem"
+      - id: check-merge-conflict
+      - id: end-of-file-fixer
+      - id: mixed-line-ending
+
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.7.2
+    hooks:
+      - id: ruff
+        args: [--fix, --exit-non-zero-on-fix]
+      - id: ruff-format
+
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.13.0
+    hooks:
+      - id: mypy
+        language: system
+        args: ["--config-file=pyproject.toml", "."]
+        pass_filenames: false
+
+  - repo: https://github.com/codespell-project/codespell
+    rev: v2.2.6
+    hooks:
+      - id: codespell
+        additional_dependencies: [tomli]
+        args: ["--write-changes"]
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..629f04f6
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,59 @@
+# Contributing
+
+## Overview
+
+This document explains the processes and practices recommended for contributing enhancements to
+this operator.
+
+- Generally, before developing enhancements to this charm, you should consider
+  [opening an issue](https://github.com/canonical/mongo-single-kernel-library/issues) explaining
+  your use case.
+- If you would like to chat with us about your use cases or proposed
+  implementation, you can reach us at the [Canonical Mattermost public channel](https://chat.charmhub.io/charmhub/channels/charm-dev) or on [Discourse](https://discourse.charmhub.io/).
+- Familiarising yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) will help you a lot when working
+  on new features or bug fixes.
+- All enhancements require review before being merged. Code review typically
+  examines
+  - code quality
+  - test coverage
+  - user experience for Juju administrators of this charm.
+- Please help us keep branches easy to review by rebasing your pull
+  request branch onto the `main` branch. This also avoids merge commits and
+  creates a linear Git commit history.
+
+## Developing
+
+Install `tox` and `poetry` with [pipx](https://pipx.pypa.io/stable/installation/):
+
+```shell
+pipx install tox
+pipx install poetry
+```
+
+You can create an environment for development:
+
+```shell
+poetry install
+```
+
+### Testing
+
+```shell
+tox run -e format       # update your code according to linting rules
+tox run -e lint         # code style
+tox run -e unit         # unit tests
+tox run -e integration  # integration tests
+tox                     # runs 'lint' and 'unit' environments
+```
+
+### `pre-commit` hooks
+
+This repository comes with a sensible [pre-commit](https://github.com/pre-commit/pre-commit) hook configuration.
+Please install the hooks with `pre-commit install`; the same checks run in the CI anyway.
+
+## Canonical Contributor Agreement
+
+Canonical welcomes contributions to the Mongo Single Kernel Library. Please
+check out our [contributor agreement](https://ubuntu.com/legal/contributors) if you're interested in contributing to the solution.
diff --git a/README.md b/README.md
index 67f24ad4..79ef57c4 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,39 @@
-# mongo-single-kernel-library
-Library containing shared code for MongoDB operators (mongodb, mongos, VM and k8s)
+# Mongo Operators Single Kernel Library
+
+Library containing shared code for MongoDB operators (mongodb, mongos, VM and k8s).
+
+The goal of this library is to provide reusable and shared code for the four
+mongo charms:
+
+* [MongoDB VM](https://github.com/canonical/mongodb-operator/)
+* [MongoDB Kubernetes](https://github.com/canonical/mongodb-k8s-operator/)
+* [Mongos VM](https://github.com/canonical/mongos-operator/)
+* [Mongos Kubernetes](https://github.com/canonical/mongos-k8s-operator/)
+
+## Code layout
+
+The source code can be found in [./single_kernel_mongo/](./single_kernel_mongo/).
+The layout is organised as follows:
+
+* [configurations](./single_kernel_mongo/config)
+* [core services](./single_kernel_mongo/core/)
+* [event handlers](./single_kernel_mongo/events/)
+* [event managers](./single_kernel_mongo/managers/)
+* [charm state](./single_kernel_mongo/state/)
+* [charm workloads](./single_kernel_mongo/workload/)
+* [utils and helpers](./single_kernel_mongo/utils/)
+* [abstract charm skeleton](./single_kernel_mongo/abstract_charm.py)
+
+## Project and community
+
+The Mongo Single Kernel Library is an open source project that warmly welcomes community contributions, suggestions, fixes, and constructive feedback.
+
+* Check our [Code of Conduct](https://ubuntu.com/community/ethos/code-of-conduct)
+* Raise software issues or feature requests on [GitHub](https://github.com/canonical/mongo-single-kernel-library/issues)
+* Report security issues through [Launchpad](https://wiki.ubuntu.com/DebuggingSecurity#How%20to%20File)
+* Meet the community and chat with us on [Matrix](https://matrix.to/#/#charmhub-data-platform:ubuntu.com)
+* [Contribute](https://github.com/canonical/mongo-single-kernel-library/blob/main/CONTRIBUTING.md) to the code
+
+## License
+
+The Mongo Single Kernel Library is free software, distributed under the Apache Software License, version 2.0. See [LICENSE](https://github.com/canonical/mongo-single-kernel-library/blob/main/LICENSE) for more information.
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 00000000..45446242
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,1542 @@
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+
+[[package]]
+name = "annotated-types"
+version = "0.7.0"
+description = "Reusable constraint types to use with typing.Annotated"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
+    {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
+]
+
+[[package]]
+name = "anyio"
+version = "4.6.2.post1"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+optional = false
+python-versions = ">=3.9"
+files = [
+    {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"},
+    {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"},
+]
+
+[package.dependencies]
+exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
+idna = ">=2.8"
+sniffio = ">=1.1"
+typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
+
+[package.extras]
+doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
+trio = ["trio (>=0.26.1)"]
+
+[[package]]
+name = "attrs"
+version = "24.2.0"
+description = "Classes Without Boilerplate"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"},
+    {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"},
+]
+
+[package.extras]
+benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
+tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
+
+[[package]]
+name = "certifi"
+version = "2024.8.30"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, +] + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = 
"cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = 
"sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "cfgv" +version = "3.4.0" +description = "Validate configuration and produce human readable error messages." +optional = false +python-versions = ">=3.8" +files = [ + {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, + {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, +] + +[[package]] +name = "codespell" +version = "2.3.0" +description = "Codespell" +optional = false +python-versions = ">=3.8" +files = [ + {file = "codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1"}, + {file = "codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f"}, +] + +[package.extras] +dev = ["Pygments", "build", "chardet", "pre-commit", "pytest", "pytest-cov", "pytest-dependency", "ruff", "tomli", "twine"] +hard-encoding-detection = ["chardet"] +toml = ["tomli"] +types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency"] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "coverage" +version = "7.6.7" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "coverage-7.6.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:108bb458827765d538abcbf8288599fee07d2743357bdd9b9dad456c287e121e"}, + {file = "coverage-7.6.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c973b2fe4dc445cb865ab369df7521df9c27bf40715c837a113edaa2aa9faf45"}, + {file = "coverage-7.6.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c6b24007c4bcd0b19fac25763a7cac5035c735ae017e9a349b927cfc88f31c1"}, + {file = "coverage-7.6.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:acbb8af78f8f91b3b51f58f288c0994ba63c646bc1a8a22ad072e4e7e0a49f1c"}, + {file = "coverage-7.6.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad32a981bcdedb8d2ace03b05e4fd8dace8901eec64a532b00b15217d3677dd2"}, + {file = "coverage-7.6.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:34d23e28ccb26236718a3a78ba72744212aa383141961dd6825f6595005c8b06"}, + {file = "coverage-7.6.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e25bacb53a8c7325e34d45dddd2f2fbae0dbc230d0e2642e264a64e17322a777"}, + {file = "coverage-7.6.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af05bbba896c4472a29408455fe31b3797b4d8648ed0a2ccac03e074a77e2314"}, + {file = "coverage-7.6.7-cp310-cp310-win32.whl", hash = 
"sha256:796c9b107d11d2d69e1849b2dfe41730134b526a49d3acb98ca02f4985eeff7a"}, + {file = "coverage-7.6.7-cp310-cp310-win_amd64.whl", hash = "sha256:987a8e3da7da4eed10a20491cf790589a8e5e07656b6dc22d3814c4d88faf163"}, + {file = "coverage-7.6.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7e61b0e77ff4dddebb35a0e8bb5a68bf0f8b872407d8d9f0c726b65dfabe2469"}, + {file = "coverage-7.6.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1a5407a75ca4abc20d6252efeb238377a71ce7bda849c26c7a9bece8680a5d99"}, + {file = "coverage-7.6.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df002e59f2d29e889c37abd0b9ee0d0e6e38c24f5f55d71ff0e09e3412a340ec"}, + {file = "coverage-7.6.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:673184b3156cba06154825f25af33baa2671ddae6343f23175764e65a8c4c30b"}, + {file = "coverage-7.6.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e69ad502f1a2243f739f5bd60565d14a278be58be4c137d90799f2c263e7049a"}, + {file = "coverage-7.6.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:60dcf7605c50ea72a14490d0756daffef77a5be15ed1b9fea468b1c7bda1bc3b"}, + {file = "coverage-7.6.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9c2eb378bebb2c8f65befcb5147877fc1c9fbc640fc0aad3add759b5df79d55d"}, + {file = "coverage-7.6.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3c0317288f032221d35fa4cbc35d9f4923ff0dfd176c79c9b356e8ef8ef2dff4"}, + {file = "coverage-7.6.7-cp311-cp311-win32.whl", hash = "sha256:951aade8297358f3618a6e0660dc74f6b52233c42089d28525749fc8267dccd2"}, + {file = "coverage-7.6.7-cp311-cp311-win_amd64.whl", hash = "sha256:5e444b8e88339a2a67ce07d41faabb1d60d1004820cee5a2c2b54e2d8e429a0f"}, + {file = "coverage-7.6.7-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f07ff574986bc3edb80e2c36391678a271d555f91fd1d332a1e0f4b5ea4b6ea9"}, + {file = "coverage-7.6.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:49ed5ee4109258973630c1f9d099c7e72c5c36605029f3a91fe9982c6076c82b"}, + {file = "coverage-7.6.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3e8796434a8106b3ac025fd15417315d7a58ee3e600ad4dbcfddc3f4b14342c"}, + {file = "coverage-7.6.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3b925300484a3294d1c70f6b2b810d6526f2929de954e5b6be2bf8caa1f12c1"}, + {file = "coverage-7.6.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c42ec2c522e3ddd683dec5cdce8e62817afb648caedad9da725001fa530d354"}, + {file = "coverage-7.6.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0266b62cbea568bd5e93a4da364d05de422110cbed5056d69339bd5af5685433"}, + {file = "coverage-7.6.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e5f2a0f161d126ccc7038f1f3029184dbdf8f018230af17ef6fd6a707a5b881f"}, + {file = "coverage-7.6.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c132b5a22821f9b143f87446805e13580b67c670a548b96da945a8f6b4f2efbb"}, + {file = "coverage-7.6.7-cp312-cp312-win32.whl", hash = "sha256:7c07de0d2a110f02af30883cd7dddbe704887617d5c27cf373362667445a4c76"}, + {file = "coverage-7.6.7-cp312-cp312-win_amd64.whl", hash = "sha256:fd49c01e5057a451c30c9b892948976f5d38f2cbd04dc556a82743ba8e27ed8c"}, + {file = "coverage-7.6.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:46f21663e358beae6b368429ffadf14ed0a329996248a847a4322fb2e35d64d3"}, + {file = 
"coverage-7.6.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:40cca284c7c310d622a1677f105e8507441d1bb7c226f41978ba7c86979609ab"}, + {file = "coverage-7.6.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77256ad2345c29fe59ae861aa11cfc74579c88d4e8dbf121cbe46b8e32aec808"}, + {file = "coverage-7.6.7-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87ea64b9fa52bf395272e54020537990a28078478167ade6c61da7ac04dc14bc"}, + {file = "coverage-7.6.7-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d608a7808793e3615e54e9267519351c3ae204a6d85764d8337bd95993581a8"}, + {file = "coverage-7.6.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdd94501d65adc5c24f8a1a0eda110452ba62b3f4aeaba01e021c1ed9cb8f34a"}, + {file = "coverage-7.6.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:82c809a62e953867cf57e0548c2b8464207f5f3a6ff0e1e961683e79b89f2c55"}, + {file = "coverage-7.6.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bb684694e99d0b791a43e9fc0fa58efc15ec357ac48d25b619f207c41f2fd384"}, + {file = "coverage-7.6.7-cp313-cp313-win32.whl", hash = "sha256:963e4a08cbb0af6623e61492c0ec4c0ec5c5cf74db5f6564f98248d27ee57d30"}, + {file = "coverage-7.6.7-cp313-cp313-win_amd64.whl", hash = "sha256:14045b8bfd5909196a90da145a37f9d335a5d988a83db34e80f41e965fb7cb42"}, + {file = "coverage-7.6.7-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:f2c7a045eef561e9544359a0bf5784b44e55cefc7261a20e730baa9220c83413"}, + {file = "coverage-7.6.7-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dd4e4a49d9c72a38d18d641135d2fb0bdf7b726ca60a103836b3d00a1182acd"}, + {file = "coverage-7.6.7-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c95e0fa3d1547cb6f021ab72f5c23402da2358beec0a8e6d19a368bd7b0fb37"}, + {file = "coverage-7.6.7-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f63e21ed474edd23f7501f89b53280014436e383a14b9bd77a648366c81dce7b"}, + {file = "coverage-7.6.7-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ead9b9605c54d15be228687552916c89c9683c215370c4a44f1f217d2adcc34d"}, + {file = "coverage-7.6.7-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:0573f5cbf39114270842d01872952d301027d2d6e2d84013f30966313cadb529"}, + {file = "coverage-7.6.7-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:e2c8e3384c12dfa19fa9a52f23eb091a8fad93b5b81a41b14c17c78e23dd1d8b"}, + {file = "coverage-7.6.7-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:70a56a2ec1869e6e9fa69ef6b76b1a8a7ef709972b9cc473f9ce9d26b5997ce3"}, + {file = "coverage-7.6.7-cp313-cp313t-win32.whl", hash = "sha256:dbba8210f5067398b2c4d96b4e64d8fb943644d5eb70be0d989067c8ca40c0f8"}, + {file = "coverage-7.6.7-cp313-cp313t-win_amd64.whl", hash = "sha256:dfd14bcae0c94004baba5184d1c935ae0d1231b8409eb6c103a5fd75e8ecdc56"}, + {file = "coverage-7.6.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:37a15573f988b67f7348916077c6d8ad43adb75e478d0910957394df397d2874"}, + {file = "coverage-7.6.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b6cce5c76985f81da3769c52203ee94722cd5d5889731cd70d31fee939b74bf0"}, + {file = "coverage-7.6.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ab9763d291a17b527ac6fd11d1a9a9c358280adb320e9c2672a97af346ac2c"}, + {file = 
"coverage-7.6.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6cf96ceaa275f071f1bea3067f8fd43bec184a25a962c754024c973af871e1b7"}, + {file = "coverage-7.6.7-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aee9cf6b0134d6f932d219ce253ef0e624f4fa588ee64830fcba193269e4daa3"}, + {file = "coverage-7.6.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2bc3e45c16564cc72de09e37413262b9f99167803e5e48c6156bccdfb22c8327"}, + {file = "coverage-7.6.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:623e6965dcf4e28a3debaa6fcf4b99ee06d27218f46d43befe4db1c70841551c"}, + {file = "coverage-7.6.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:850cfd2d6fc26f8346f422920ac204e1d28814e32e3a58c19c91980fa74d8289"}, + {file = "coverage-7.6.7-cp39-cp39-win32.whl", hash = "sha256:c296263093f099da4f51b3dff1eff5d4959b527d4f2f419e16508c5da9e15e8c"}, + {file = "coverage-7.6.7-cp39-cp39-win_amd64.whl", hash = "sha256:90746521206c88bdb305a4bf3342b1b7316ab80f804d40c536fc7d329301ee13"}, + {file = "coverage-7.6.7-pp39.pp310-none-any.whl", hash = "sha256:0ddcb70b3a3a57581b450571b31cb774f23eb9519c2aaa6176d3a84c9fc57671"}, + {file = "coverage-7.6.7.tar.gz", hash = "sha256:d79d4826e41441c9a118ff045e4bccb9fdbdcb1d02413e7ea6eb5c87b5439d24"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli"] + +[[package]] +name = "cryptography" +version = "43.0.3" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +optional = false +python-versions = ">=3.7" +files = [ + {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, + {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, + {file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, + {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, + {file = 
"cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, + {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, + {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, + {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "dacite" +version = "1.8.1" +description = "Simple creation of data classes from dictionaries." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "dacite-1.8.1-py3-none-any.whl", hash = "sha256:cc31ad6fdea1f49962ea42db9421772afe01ac5442380d9a99fcf3d188c61afe"}, +] + +[package.extras] +dev = ["black", "coveralls", "mypy", "pre-commit", "pylint", "pytest (>=5)", "pytest-benchmark", "pytest-cov"] + +[[package]] +name = "distlib" +version = "0.3.9" +description = "Distribution utilities" +optional = false +python-versions = "*" +files = [ + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, +] + +[[package]] +name = "dnspython" +version = "2.7.0" +description = "DNS toolkit" +optional = false +python-versions = ">=3.9" +files = [ + {file = "dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86"}, + {file = "dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1"}, +] + +[package.extras] +dev = ["black (>=23.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "hypercorn (>=0.16.0)", "mypy (>=1.8)", "pylint (>=3)", "pytest (>=7.4)", "pytest-cov (>=4.1.0)", "quart-trio (>=0.11.0)", "sphinx (>=7.2.0)", "sphinx-rtd-theme (>=2.0.0)", "twine (>=4.0.0)", "wheel (>=0.42.0)"] +dnssec = ["cryptography (>=43)"] +doh = ["h2 (>=4.1.0)", "httpcore (>=1.0.0)", "httpx (>=0.26.0)"] +doq = ["aioquic (>=1.0.0)"] +idna = ["idna (>=3.7)"] +trio = ["trio (>=0.23)"] +wmi = ["wmi (>=1.5.1)"] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "factory-boy" +version = "3.3.1" +description = "A versatile test fixtures replacement based on thoughtbot's factory_bot for Ruby." +optional = false +python-versions = ">=3.8" +files = [ + {file = "factory_boy-3.3.1-py2.py3-none-any.whl", hash = "sha256:7b1113c49736e1e9995bc2a18f4dbf2c52cf0f841103517010b1d825712ce3ca"}, + {file = "factory_boy-3.3.1.tar.gz", hash = "sha256:8317aa5289cdfc45f9cae570feb07a6177316c82e34d14df3c2e1f22f26abef0"}, +] + +[package.dependencies] +Faker = ">=0.7.0" + +[package.extras] +dev = ["Django", "Pillow", "SQLAlchemy", "coverage", "flake8", "isort", "mongoengine", "mongomock", "mypy", "tox", "wheel (>=0.32.0)", "zest.releaser[recommended]"] +doc = ["Sphinx", "sphinx-rtd-theme", "sphinxcontrib-spelling"] + +[[package]] +name = "faker" +version = "33.0.0" +description = "Faker is a Python package that generates fake data for you." +optional = false +python-versions = ">=3.8" +files = [ + {file = "Faker-33.0.0-py3-none-any.whl", hash = "sha256:68e5580cb6b4226710886e595eabc13127149d6e71e9d1db65506a7fbe2c7fce"}, + {file = "faker-33.0.0.tar.gz", hash = "sha256:9b01019c1ddaf2253ca2308c0472116e993f4ad8fc9905f82fa965e0c6f932e9"}, +] + +[package.dependencies] +python-dateutil = ">=2.4" +typing-extensions = "*" + +[[package]] +name = "filelock" +version = "3.16.1" +description = "A platform independent file lock." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2)"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = "1.0.7" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] + +[[package]] +name = "httpx" +version = "0.27.2" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "identify" +version = "2.6.2" +description = "File identification library for Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "identify-2.6.2-py2.py3-none-any.whl", hash = "sha256:c097384259f49e372f4ea00a19719d95ae27dd5ff0fd77ad630aa891306b82f3"}, + {file = "identify-2.6.2.tar.gz", hash = "sha256:fab5c716c24d7a789775228823797296a2994b075fb6080ac83a102772a98cbd"}, +] + +[package.extras] +license = ["ukkonen"] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "jinja2" +version = "3.1.4" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jsonschema" +version = "4.23.0" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, + {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"] + +[[package]] +name = "jsonschema-specifications" +version = "2024.10.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.9" +files = [ + {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"}, + {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"}, +] + +[package.dependencies] +referencing = ">=0.31.0" + +[[package]] +name = "lightkube" +version = "0.15.5" +description = "Lightweight kubernetes client library" +optional = false +python-versions = "*" +files = [ + {file = "lightkube-0.15.5-py3-none-any.whl", hash = "sha256:0d93be743cbeae022d18a1d3fbb45d1df58f9a603ea0061237842658f68d93fd"}, + {file = "lightkube-0.15.5.tar.gz", hash = "sha256:5edbfd1aee83398374179f41f4897519e8f89dc9754c866d40bbdc68c49c033f"}, +] + +[package.dependencies] +httpx = ">=0.24.0" +lightkube-models = ">=1.15.12.0" +PyYAML = "*" + +[package.extras] +dev = ["pytest", "pytest-asyncio (<0.17.0)", "respx"] + +[[package]] +name = "lightkube-models" +version = "1.31.1.8" +description = "Models and Resources for lightkube module" +optional = false +python-versions = "*" +files = [ + {file = "lightkube-models-1.31.1.8.tar.gz", hash = "sha256:14fbfa990b4d3393fa4ac3e9e46d67514c4d659508e296b30f1a5d254eecc097"}, + {file = "lightkube_models-1.31.1.8-py3-none-any.whl", hash = "sha256:50c0e2dd2c125cd9b50e93269e2d212bcbec19f7b00de91aa66a5ec320772fae"}, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.9" +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = 
"MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = 
"MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "mongomock" +version = "4.3.0" +description = "Fake pymongo stub for testing simple MongoDB-dependent code" +optional = false +python-versions = "*" +files = [ + {file = "mongomock-4.3.0-py2.py3-none-any.whl", hash = "sha256:5ef86bd12fc8806c6e7af32f21266c61b6c4ba96096f85129852d1c4fec1327e"}, + {file = "mongomock-4.3.0.tar.gz", hash = "sha256:32667b79066fabc12d4f17f16a8fd7361b5f4435208b3ba32c226e52212a8c30"}, +] + +[package.dependencies] +packaging = "*" +pytz = "*" +sentinels = "*" + +[package.extras] +pyexecjs = ["pyexecjs"] +pymongo = ["pymongo"] + +[[package]] +name = "mypy" +version = "1.13.0" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, + {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, + {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, + {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, + {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, + {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, + {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, + {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, + {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, + {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, + {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, + {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, + {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, + {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, + {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, + {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, + 
{file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, + {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, + {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, + {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, + {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, + {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, +] + +[package.dependencies] +mypy-extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = ">=4.6.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +description = "Node.js virtual environment builder" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + +[[package]] +name = "ops" +version = "2.15.0" +description = "The Python library behind great charms" +optional = false +python-versions = ">=3.8" +files = [ + {file = "ops-2.15.0-py3-none-any.whl", hash = "sha256:8e47ab8a814301776b0ff42b32544ebdece7f1639168d2c86dc7a25930d2e493"}, + {file = "ops-2.15.0.tar.gz", hash = "sha256:f3bad7417e98e8f390523fad097702eed16e99b38a25e9fe856aad226474b057"}, +] + +[package.dependencies] +PyYAML = "==6.*" +websocket-client = "==1.*" + +[package.extras] +docs = ["canonical-sphinx-extensions", "furo", "linkify-it-py", "myst-parser", "pyspelling", "sphinx (==6.2.1)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-design", "sphinx-notfound-page", "sphinx-tabs", "sphinxcontrib-jquery", "sphinxext-opengraph"] + +[[package]] +name = "overrides" +version = "7.7.0" +description = "A decorator to automatically detect mismatch when overriding a method." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49"}, + {file = "overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a"}, +] + +[[package]] +name = "packaging" +version = "24.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, +] + +[[package]] +name = "parameterized" +version = "0.9.0" +description = "Parameterized testing with any Python test framework" +optional = false +python-versions = ">=3.7" +files = [ + {file = "parameterized-0.9.0-py2.py3-none-any.whl", hash = "sha256:4e0758e3d41bea3bbd05ec14fc2c24736723f243b28d702081aef438c9372b1b"}, + {file = "parameterized-0.9.0.tar.gz", hash = "sha256:7fc905272cefa4f364c1a3429cbbe9c0f98b793988efb5bf90aac80f08db09b1"}, +] + +[package.extras] +dev = ["jinja2"] + +[[package]] +name = "platformdirs" +version = "4.3.6" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "poetry-core" +version = "1.9.1" +description = "Poetry PEP 517 Build Backend" +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "poetry_core-1.9.1-py3-none-any.whl", hash = "sha256:6f45dd3598e0de8d9b0367360253d4c5d4d0110c8f5c71120a14f0e0f116c1a0"}, + {file = "poetry_core-1.9.1.tar.gz", hash = "sha256:7a2d49214bf58b4f17f99d6891d947a9836c9899a67a5069f52d7b67217f61b8"}, +] + +[[package]] +name = "pre-commit" +version = "4.0.1" +description = "A framework for managing and maintaining multi-language pre-commit hooks." 
+optional = false +python-versions = ">=3.9" +files = [ + {file = "pre_commit-4.0.1-py2.py3-none-any.whl", hash = "sha256:efde913840816312445dc98787724647c65473daefe420785f885e8ed9a06878"}, + {file = "pre_commit-4.0.1.tar.gz", hash = "sha256:80905ac375958c0444c65e9cebebd948b3cdb518f335a091a670a89d652139d2"}, +] + +[package.dependencies] +cfgv = ">=2.0.0" +identify = ">=1.0.0" +nodeenv = ">=0.11.1" +pyyaml = ">=5.1" +virtualenv = ">=20.10.0" + +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + +[[package]] +name = "pydantic" +version = "2.9.2" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, + {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.23.4" +typing-extensions = [ + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, +] + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] + +[[package]] +name = "pydantic-core" +version = "2.23.4" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, + {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = 
"sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, + {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, + {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, + {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, + {file = 
"pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, + {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, + {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, + {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, + {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, + {file = 
"pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, + {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, + {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, + {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, + {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, + {file = 
"pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, + {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pydantic-settings" +version = "2.6.1" +description = "Settings management using Pydantic" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_settings-2.6.1-py3-none-any.whl", hash = "sha256:7fb0637c786a558d3103436278a7c4f1cfd29ba8973238a50c5bb9a55387da87"}, + {file = "pydantic_settings-2.6.1.tar.gz", hash = "sha256:e0f92546d8a9923cb8941689abf85d6601a8c19a23e97a34b2964a2e3f813ca0"}, +] + +[package.dependencies] +pydantic = ">=2.7.0" +python-dotenv = ">=0.21.0" + +[package.extras] +azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] +toml = ["tomli (>=2.0.1)"] +yaml = ["pyyaml (>=6.0.1)"] + +[[package]] +name = "pymongo" +version = "4.10.1" +description = "Python driver for MongoDB " +optional = false +python-versions = ">=3.8" +files = [ + {file = "pymongo-4.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e699aa68c4a7dea2ab5a27067f7d3e08555f8d2c0dc6a0c8c60cfd9ff2e6a4b1"}, + {file = "pymongo-4.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:70645abc714f06b4ad6b72d5bf73792eaad14e3a2cfe29c62a9c81ada69d9e4b"}, + {file = "pymongo-4.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae2fd94c9fe048c94838badcc6e992d033cb9473eb31e5710b3707cba5e8aee2"}, + {file = "pymongo-4.10.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:5ded27a4a5374dae03a92e084a60cdbcecd595306555bda553b833baf3fc4868"}, + {file = "pymongo-4.10.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1ecc2455e3974a6c429687b395a0bc59636f2d6aedf5785098cf4e1f180f1c71"}, + {file = "pymongo-4.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a920fee41f7d0259f5f72c1f1eb331bc26ffbdc952846f9bd8c3b119013bb52c"}, + {file = "pymongo-4.10.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0a15665b2d6cf364f4cd114d62452ce01d71abfbd9c564ba8c74dcd7bbd6822"}, + {file = "pymongo-4.10.1-cp310-cp310-win32.whl", hash = "sha256:29e1c323c28a4584b7095378ff046815e39ff82cdb8dc4cc6dfe3acf6f9ad1f8"}, + {file = "pymongo-4.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:88dc4aa45f8744ccfb45164aedb9a4179c93567bbd98a33109d7dc400b00eb08"}, + {file = "pymongo-4.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:57ee6becae534e6d47848c97f6a6dff69e3cce7c70648d6049bd586764febe59"}, + {file = "pymongo-4.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6f437a612f4d4f7aca1812311b1e84477145e950fdafe3285b687ab8c52541f3"}, + {file = "pymongo-4.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a970fd3117ab40a4001c3dad333bbf3c43687d90f35287a6237149b5ccae61d"}, + {file = "pymongo-4.10.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c4d0e7cd08ef9f8fbf2d15ba281ed55604368a32752e476250724c3ce36c72e"}, + {file = "pymongo-4.10.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca6f700cff6833de4872a4e738f43123db34400173558b558ae079b5535857a4"}, + {file = "pymongo-4.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cec237c305fcbeef75c0bcbe9d223d1e22a6e3ba1b53b2f0b79d3d29c742b45b"}, + {file = "pymongo-4.10.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3337804ea0394a06e916add4e5fac1c89902f1b6f33936074a12505cab4ff05"}, + {file = "pymongo-4.10.1-cp311-cp311-win32.whl", hash = "sha256:778ac646ce6ac1e469664062dfe9ae1f5c9961f7790682809f5ec3b8fda29d65"}, + {file = "pymongo-4.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:9df4ab5594fdd208dcba81be815fa8a8a5d8dedaf3b346cbf8b61c7296246a7a"}, + {file = "pymongo-4.10.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fbedc4617faa0edf423621bb0b3b8707836687161210d470e69a4184be9ca011"}, + {file = "pymongo-4.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7bd26b2aec8ceeb95a5d948d5cc0f62b0eb6d66f3f4230705c1e3d3d2c04ec76"}, + {file = "pymongo-4.10.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb104c3c2a78d9d85571c8ac90ec4f95bca9b297c6eee5ada71fabf1129e1674"}, + {file = "pymongo-4.10.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4924355245a9c79f77b5cda2db36e0f75ece5faf9f84d16014c0a297f6d66786"}, + {file = "pymongo-4.10.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:11280809e5dacaef4971113f0b4ff4696ee94cfdb720019ff4fa4f9635138252"}, + {file = "pymongo-4.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5d55f2a82e5eb23795f724991cac2bffbb1c0f219c0ba3bf73a835f97f1bb2e"}, + {file = "pymongo-4.10.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e974ab16a60be71a8dfad4e5afccf8dd05d41c758060f5d5bda9a758605d9a5d"}, + {file = "pymongo-4.10.1-cp312-cp312-win32.whl", 
hash = "sha256:544890085d9641f271d4f7a47684450ed4a7344d6b72d5968bfae32203b1bb7c"}, + {file = "pymongo-4.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:dcc07b1277e8b4bf4d7382ca133850e323b7ab048b8353af496d050671c7ac52"}, + {file = "pymongo-4.10.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:90bc6912948dfc8c363f4ead54d54a02a15a7fee6cfafb36dc450fc8962d2cb7"}, + {file = "pymongo-4.10.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:594dd721b81f301f33e843453638e02d92f63c198358e5a0fa8b8d0b1218dabc"}, + {file = "pymongo-4.10.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0783e0c8e95397c84e9cf8ab092ab1e5dd7c769aec0ef3a5838ae7173b98dea0"}, + {file = "pymongo-4.10.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6fb6a72e88df46d1c1040fd32cd2d2c5e58722e5d3e31060a0393f04ad3283de"}, + {file = "pymongo-4.10.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e3a593333e20c87415420a4fb76c00b7aae49b6361d2e2205b6fece0563bf40"}, + {file = "pymongo-4.10.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72e2ace7456167c71cfeca7dcb47bd5dceda7db2231265b80fc625c5e8073186"}, + {file = "pymongo-4.10.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ad05eb9c97e4f589ed9e74a00fcaac0d443ccd14f38d1258eb4c39a35dd722b"}, + {file = "pymongo-4.10.1-cp313-cp313-win32.whl", hash = "sha256:ee4c86d8e6872a61f7888fc96577b0ea165eb3bdb0d841962b444fa36001e2bb"}, + {file = "pymongo-4.10.1-cp313-cp313-win_amd64.whl", hash = "sha256:45ee87a4e12337353242bc758accc7fb47a2f2d9ecc0382a61e64c8f01e86708"}, + {file = "pymongo-4.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:442ca247f53ad24870a01e80a71cd81b3f2318655fd9d66748ee2bd1b1569d9e"}, + {file = "pymongo-4.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:23e1d62df5592518204943b507be7b457fb8a4ad95a349440406fd42db5d0923"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6131bc6568b26e7495a9f3ef2b1700566b76bbecd919f4472bfe90038a61f425"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdeba88c540c9ed0338c0b2062d9f81af42b18d6646b3e6dda05cf6edd46ada9"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15a624d752dd3c89d10deb0ef6431559b6d074703cab90a70bb849ece02adc6b"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba164e73fdade9b4614a2497321c5b7512ddf749ed508950bdecc28d8d76a2d9"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9235fa319993405ae5505bf1333366388add2e06848db7b3deee8f990b69808e"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e4a65567bd17d19f03157c7ec992c6530eafd8191a4e5ede25566792c4fe3fa2"}, + {file = "pymongo-4.10.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:f1945d48fb9b8a87d515da07f37e5b2c35b364a435f534c122e92747881f4a7c"}, + {file = "pymongo-4.10.1-cp38-cp38-win32.whl", hash = "sha256:345f8d340802ebce509f49d5833cc913da40c82f2e0daf9f60149cacc9ca680f"}, + {file = "pymongo-4.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:3a70d5efdc0387ac8cd50f9a5f379648ecfc322d14ec9e1ba8ec957e5d08c372"}, + {file = "pymongo-4.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:15b1492cc5c7cd260229590be7218261e81684b8da6d6de2660cf743445500ce"}, + {file = "pymongo-4.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:95207503c41b97e7ecc7e596d84a61f441b4935f11aa8332828a754e7ada8c82"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb99f003c720c6d83be02c8f1a7787c22384a8ca9a4181e406174db47a048619"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f2bc1ee4b1ca2c4e7e6b7a5e892126335ec8d9215bcd3ac2fe075870fefc3358"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:93a0833c10a967effcd823b4e7445ec491f0bf6da5de0ca33629c0528f42b748"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f56707497323150bd2ed5d63067f4ffce940d0549d4ea2dfae180deec7f9363"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:409ab7d6c4223e5c85881697f365239dd3ed1b58f28e4124b846d9d488c86880"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dac78a650dc0637d610905fd06b5fa6419ae9028cf4d04d6a2657bc18a66bbce"}, + {file = "pymongo-4.10.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1ec3fa88b541e0481aff3c35194c9fac96e4d57ec5d1c122376000eb28c01431"}, + {file = "pymongo-4.10.1-cp39-cp39-win32.whl", hash = "sha256:e0e961923a7b8a1c801c43552dcb8153e45afa41749d9efbd3a6d33f45489f7a"}, + {file = "pymongo-4.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:dabe8bf1ad644e6b93f3acf90ff18536d94538ca4d27e583c6db49889e98e48f"}, + {file = "pymongo-4.10.1.tar.gz", hash = "sha256:a9de02be53b6bb98efe0b9eda84ffa1ec027fcb23a2de62c4f941d9a2f2f3330"}, +] + +[package.dependencies] +dnspython = ">=1.16.0,<3.0.0" + +[package.extras] +aws = ["pymongo-auth-aws (>=1.1.0,<2.0.0)"] +docs = ["furo (==2023.9.10)", "readthedocs-sphinx-search (>=0.3,<1.0)", "sphinx (>=5.3,<8)", "sphinx-autobuild (>=2020.9.1)", "sphinx-rtd-theme (>=2,<3)", "sphinxcontrib-shellcheck (>=1,<2)"] +encryption = ["certifi", "pymongo-auth-aws (>=1.1.0,<2.0.0)", "pymongocrypt (>=1.10.0,<2.0.0)"] +gssapi = ["pykerberos", "winkerberos (>=0.5.0)"] +ocsp = ["certifi", "cryptography (>=2.5)", "pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)"] +snappy = ["python-snappy"] +test = ["pytest (>=8.2)", "pytest-asyncio (>=0.24.0)"] +zstd = ["zstandard"] + +[[package]] +name = "pyopenssl" +version = "24.2.1" +description = "Python wrapper module around the OpenSSL library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyOpenSSL-24.2.1-py3-none-any.whl", hash = "sha256:967d5719b12b243588573f39b0c677637145c7a1ffedcd495a487e58177fbb8d"}, + {file = "pyopenssl-24.2.1.tar.gz", hash = "sha256:4247f0dbe3748d560dcbb2ff3ea01af0f9a1a001ef5f7c4c647956ed8cbf0e95"}, +] + +[package.dependencies] +cryptography = ">=41.0.5,<44" + +[package.extras] +docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx-rtd-theme"] +test = ["pretend", "pytest (>=3.0.1)", "pytest-rerunfailures"] + +[[package]] +name = "pytest" +version = "8.3.3" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, + {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, +] + 
+[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-mock" +version = "3.14.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, + {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "1.0.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "pytz" +version = "2024.2" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, + {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = 
"PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + 
+[[package]] +name = "referencing" +version = "0.35.1" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, + {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + +[[package]] +name = "rpds-py" +version = "0.21.0" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.9" +files = [ + {file = "rpds_py-0.21.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a017f813f24b9df929674d0332a374d40d7f0162b326562daae8066b502d0590"}, + {file = "rpds_py-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:20cc1ed0bcc86d8e1a7e968cce15be45178fd16e2ff656a243145e0b439bd250"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad116dda078d0bc4886cb7840e19811562acdc7a8e296ea6ec37e70326c1b41c"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:808f1ac7cf3b44f81c9475475ceb221f982ef548e44e024ad5f9e7060649540e"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de552f4a1916e520f2703ec474d2b4d3f86d41f353e7680b597512ffe7eac5d0"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:efec946f331349dfc4ae9d0e034c263ddde19414fe5128580f512619abed05f1"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b80b4690bbff51a034bfde9c9f6bf9357f0a8c61f548942b80f7b66356508bf5"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:085ed25baac88953d4283e5b5bd094b155075bb40d07c29c4f073e10623f9f2e"}, + {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:daa8efac2a1273eed2354397a51216ae1e198ecbce9036fba4e7610b308b6153"}, + {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:95a5bad1ac8a5c77b4e658671642e4af3707f095d2b78a1fdd08af0dfb647624"}, + {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3e53861b29a13d5b70116ea4230b5f0f3547b2c222c5daa090eb7c9c82d7f664"}, + {file = "rpds_py-0.21.0-cp310-none-win32.whl", hash = "sha256:ea3a6ac4d74820c98fcc9da4a57847ad2cc36475a8bd9683f32ab6d47a2bd682"}, + {file = "rpds_py-0.21.0-cp310-none-win_amd64.whl", hash = "sha256:b8f107395f2f1d151181880b69a2869c69e87ec079c49c0016ab96860b6acbe5"}, + {file = "rpds_py-0.21.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5555db3e618a77034954b9dc547eae94166391a98eb867905ec8fcbce1308d95"}, + {file = "rpds_py-0.21.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:97ef67d9bbc3e15584c2f3c74bcf064af36336c10d2e21a2131e123ce0f924c9"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ab2c2a26d2f69cdf833174f4d9d86118edc781ad9a8fa13970b527bf8236027"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4e8921a259f54bfbc755c5bbd60c82bb2339ae0324163f32868f63f0ebb873d9"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a7ff941004d74d55a47f916afc38494bd1cfd4b53c482b77c03147c91ac0ac3"}, + {file = 
"rpds_py-0.21.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5145282a7cd2ac16ea0dc46b82167754d5e103a05614b724457cffe614f25bd8"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de609a6f1b682f70bb7163da745ee815d8f230d97276db049ab447767466a09d"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40c91c6e34cf016fa8e6b59d75e3dbe354830777fcfd74c58b279dceb7975b75"}, + {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d2132377f9deef0c4db89e65e8bb28644ff75a18df5293e132a8d67748397b9f"}, + {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0a9e0759e7be10109645a9fddaaad0619d58c9bf30a3f248a2ea57a7c417173a"}, + {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9e20da3957bdf7824afdd4b6eeb29510e83e026473e04952dca565170cd1ecc8"}, + {file = "rpds_py-0.21.0-cp311-none-win32.whl", hash = "sha256:f71009b0d5e94c0e86533c0b27ed7cacc1239cb51c178fd239c3cfefefb0400a"}, + {file = "rpds_py-0.21.0-cp311-none-win_amd64.whl", hash = "sha256:e168afe6bf6ab7ab46c8c375606298784ecbe3ba31c0980b7dcbb9631dcba97e"}, + {file = "rpds_py-0.21.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:30b912c965b2aa76ba5168fd610087bad7fcde47f0a8367ee8f1876086ee6d1d"}, + {file = "rpds_py-0.21.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ca9989d5d9b1b300bc18e1801c67b9f6d2c66b8fd9621b36072ed1df2c977f72"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f54e7106f0001244a5f4cf810ba8d3f9c542e2730821b16e969d6887b664266"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fed5dfefdf384d6fe975cc026886aece4f292feaf69d0eeb716cfd3c5a4dd8be"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:590ef88db231c9c1eece44dcfefd7515d8bf0d986d64d0caf06a81998a9e8cab"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f983e4c2f603c95dde63df633eec42955508eefd8d0f0e6d236d31a044c882d7"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b229ce052ddf1a01c67d68166c19cb004fb3612424921b81c46e7ea7ccf7c3bf"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ebf64e281a06c904a7636781d2e973d1f0926a5b8b480ac658dc0f556e7779f4"}, + {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:998a8080c4495e4f72132f3d66ff91f5997d799e86cec6ee05342f8f3cda7dca"}, + {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:98486337f7b4f3c324ab402e83453e25bb844f44418c066623db88e4c56b7c7b"}, + {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a78d8b634c9df7f8d175451cfeac3810a702ccb85f98ec95797fa98b942cea11"}, + {file = "rpds_py-0.21.0-cp312-none-win32.whl", hash = "sha256:a58ce66847711c4aa2ecfcfaff04cb0327f907fead8945ffc47d9407f41ff952"}, + {file = "rpds_py-0.21.0-cp312-none-win_amd64.whl", hash = "sha256:e860f065cc4ea6f256d6f411aba4b1251255366e48e972f8a347cf88077b24fd"}, + {file = "rpds_py-0.21.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ee4eafd77cc98d355a0d02f263efc0d3ae3ce4a7c24740010a8b4012bbb24937"}, + {file = "rpds_py-0.21.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:688c93b77e468d72579351a84b95f976bd7b3e84aa6686be6497045ba84be560"}, + {file = 
"rpds_py-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c38dbf31c57032667dd5a2f0568ccde66e868e8f78d5a0d27dcc56d70f3fcd3b"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2d6129137f43f7fa02d41542ffff4871d4aefa724a5fe38e2c31a4e0fd343fb0"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:520ed8b99b0bf86a176271f6fe23024323862ac674b1ce5b02a72bfeff3fff44"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaeb25ccfb9b9014a10eaf70904ebf3f79faaa8e60e99e19eef9f478651b9b74"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af04ac89c738e0f0f1b913918024c3eab6e3ace989518ea838807177d38a2e94"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b9b76e2afd585803c53c5b29e992ecd183f68285b62fe2668383a18e74abe7a3"}, + {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5afb5efde74c54724e1a01118c6e5c15e54e642c42a1ba588ab1f03544ac8c7a"}, + {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:52c041802a6efa625ea18027a0723676a778869481d16803481ef6cc02ea8cb3"}, + {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ee1e4fc267b437bb89990b2f2abf6c25765b89b72dd4a11e21934df449e0c976"}, + {file = "rpds_py-0.21.0-cp313-none-win32.whl", hash = "sha256:0c025820b78817db6a76413fff6866790786c38f95ea3f3d3c93dbb73b632202"}, + {file = "rpds_py-0.21.0-cp313-none-win_amd64.whl", hash = "sha256:320c808df533695326610a1b6a0a6e98f033e49de55d7dc36a13c8a30cfa756e"}, + {file = "rpds_py-0.21.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:2c51d99c30091f72a3c5d126fad26236c3f75716b8b5e5cf8effb18889ced928"}, + {file = "rpds_py-0.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cbd7504a10b0955ea287114f003b7ad62330c9e65ba012c6223dba646f6ffd05"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6dcc4949be728ede49e6244eabd04064336012b37f5c2200e8ec8eb2988b209c"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f414da5c51bf350e4b7960644617c130140423882305f7574b6cf65a3081cecb"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9afe42102b40007f588666bc7de82451e10c6788f6f70984629db193849dced1"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b929c2bb6e29ab31f12a1117c39f7e6d6450419ab7464a4ea9b0b417174f044"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8404b3717da03cbf773a1d275d01fec84ea007754ed380f63dfc24fb76ce4592"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e12bb09678f38b7597b8346983d2323a6482dcd59e423d9448108c1be37cac9d"}, + {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:58a0e345be4b18e6b8501d3b0aa540dad90caeed814c515e5206bb2ec26736fd"}, + {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:c3761f62fcfccf0864cc4665b6e7c3f0c626f0380b41b8bd1ce322103fa3ef87"}, + {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c2b2f71c6ad6c2e4fc9ed9401080badd1469fa9889657ec3abea42a3d6b2e1ed"}, + {file = "rpds_py-0.21.0-cp39-none-win32.whl", hash = 
"sha256:b21747f79f360e790525e6f6438c7569ddbfb1b3197b9e65043f25c3c9b489d8"}, + {file = "rpds_py-0.21.0-cp39-none-win_amd64.whl", hash = "sha256:0626238a43152918f9e72ede9a3b6ccc9e299adc8ade0d67c5e142d564c9a83d"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6b4ef7725386dc0762857097f6b7266a6cdd62bfd209664da6712cb26acef035"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6bc0e697d4d79ab1aacbf20ee5f0df80359ecf55db33ff41481cf3e24f206919"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da52d62a96e61c1c444f3998c434e8b263c384f6d68aca8274d2e08d1906325c"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:98e4fe5db40db87ce1c65031463a760ec7906ab230ad2249b4572c2fc3ef1f9f"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30bdc973f10d28e0337f71d202ff29345320f8bc49a31c90e6c257e1ccef4333"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:faa5e8496c530f9c71f2b4e1c49758b06e5f4055e17144906245c99fa6d45356"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32eb88c30b6a4f0605508023b7141d043a79b14acb3b969aa0b4f99b25bc7d4a"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a89a8ce9e4e75aeb7fa5d8ad0f3fecdee813802592f4f46a15754dcb2fd6b061"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:241e6c125568493f553c3d0fdbb38c74babf54b45cef86439d4cd97ff8feb34d"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:3b766a9f57663396e4f34f5140b3595b233a7b146e94777b97a8413a1da1be18"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:af4a644bf890f56e41e74be7d34e9511e4954894d544ec6b8efe1e21a1a8da6c"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3e30a69a706e8ea20444b98a49f386c17b26f860aa9245329bab0851ed100677"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:031819f906bb146561af051c7cef4ba2003d28cff07efacef59da973ff7969ba"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b876f2bc27ab5954e2fd88890c071bd0ed18b9c50f6ec3de3c50a5ece612f7a6"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc5695c321e518d9f03b7ea6abb5ea3af4567766f9852ad1560f501b17588c7b"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b4de1da871b5c0fd5537b26a6fc6814c3cc05cabe0c941db6e9044ffbb12f04a"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:878f6fea96621fda5303a2867887686d7a198d9e0f8a40be100a63f5d60c88c9"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8eeec67590e94189f434c6d11c426892e396ae59e4801d17a93ac96b8c02a6c"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ff2eba7f6c0cb523d7e9cff0903f2fe1feff8f0b2ceb6bd71c0e20a4dcee271"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a429b99337062877d7875e4ff1a51fe788424d522bd64a8c0a20ef3021fdb6ed"}, + {file = 
"rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:d167e4dbbdac48bd58893c7e446684ad5d425b407f9336e04ab52e8b9194e2ed"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:4eb2de8a147ffe0626bfdc275fc6563aa7bf4b6db59cf0d44f0ccd6ca625a24e"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e78868e98f34f34a88e23ee9ccaeeec460e4eaf6db16d51d7a9b883e5e785a5e"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4991ca61656e3160cdaca4851151fd3f4a92e9eba5c7a530ab030d6aee96ec89"}, + {file = "rpds_py-0.21.0.tar.gz", hash = "sha256:ed6378c9d66d0de903763e7706383d60c33829581f0adff47b6535f1802fa6db"}, +] + +[[package]] +name = "ruff" +version = "0.7.4" +description = "An extremely fast Python linter and code formatter, written in Rust." +optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.7.4-py3-none-linux_armv6l.whl", hash = "sha256:a4919925e7684a3f18e18243cd6bea7cfb8e968a6eaa8437971f681b7ec51478"}, + {file = "ruff-0.7.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:cfb365c135b830778dda8c04fb7d4280ed0b984e1aec27f574445231e20d6c63"}, + {file = "ruff-0.7.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:63a569b36bc66fbadec5beaa539dd81e0527cb258b94e29e0531ce41bacc1f20"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d06218747d361d06fd2fdac734e7fa92df36df93035db3dc2ad7aa9852cb109"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0cea28d0944f74ebc33e9f934238f15c758841f9f5edd180b5315c203293452"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80094ecd4793c68b2571b128f91754d60f692d64bc0d7272ec9197fdd09bf9ea"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:997512325c6620d1c4c2b15db49ef59543ef9cd0f4aa8065ec2ae5103cedc7e7"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00b4cf3a6b5fad6d1a66e7574d78956bbd09abfd6c8a997798f01f5da3d46a05"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7dbdc7d8274e1422722933d1edddfdc65b4336abf0b16dfcb9dedd6e6a517d06"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e92dfb5f00eaedb1501b2f906ccabfd67b2355bdf117fea9719fc99ac2145bc"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3bd726099f277d735dc38900b6a8d6cf070f80828877941983a57bca1cd92172"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2e32829c429dd081ee5ba39aef436603e5b22335c3d3fff013cd585806a6486a"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:662a63b4971807623f6f90c1fb664613f67cc182dc4d991471c23c541fee62dd"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:876f5e09eaae3eb76814c1d3b68879891d6fde4824c015d48e7a7da4cf066a3a"}, + {file = "ruff-0.7.4-py3-none-win32.whl", hash = "sha256:75c53f54904be42dd52a548728a5b572344b50d9b2873d13a3f8c5e3b91f5cac"}, + {file = "ruff-0.7.4-py3-none-win_amd64.whl", hash = "sha256:745775c7b39f914238ed1f1b0bebed0b9155a17cd8bc0b08d3c87e4703b990d6"}, + {file = "ruff-0.7.4-py3-none-win_arm64.whl", hash = "sha256:11bff065102c3ae9d3ea4dc9ecdfe5a5171349cdd0787c1fc64761212fc9cf1f"}, + {file = "ruff-0.7.4.tar.gz", hash = "sha256:cd12e35031f5af6b9b93715d8c4f40360070b2041f81273d0527683d5708fce2"}, +] + +[[package]] +name = "sentinels" 
+version = "1.0.0" +description = "Various objects to denote special meanings in python" +optional = false +python-versions = "*" +files = [ + {file = "sentinels-1.0.0.tar.gz", hash = "sha256:7be0704d7fe1925e397e92d18669ace2f619c92b5d4eb21a89f31e026f9ff4b1"}, +] + +[[package]] +name = "shellcheck-py" +version = "0.10.0.1" +description = "Python wrapper around invoking shellcheck (https://www.shellcheck.net/)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "shellcheck_py-0.10.0.1-py2.py3-none-macosx_11_0_x86_64.whl", hash = "sha256:48f08965cafbb3363b265c4ef40628ffced19cb6fc7c4bb5ce72d32cbcfb4bb9"}, + {file = "shellcheck_py-0.10.0.1-py2.py3-none-macosx_14_0_arm64.whl", hash = "sha256:8f3bf12ee6d0845dd5ac1a7bac8c4b1fec0379e115950986883c9488af40ada7"}, + {file = "shellcheck_py-0.10.0.1-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1c266f7f54cd286057c592ead3095f93d123acdcabf048879a7d8900c3aac7b"}, + {file = "shellcheck_py-0.10.0.1-py2.py3-none-win_amd64.whl", hash = "sha256:be73a16931c05f79643ff74b6519d1e1203b394583ab8c68a48a8e7f257d1090"}, + {file = "shellcheck_py-0.10.0.1.tar.gz", hash = "sha256:390826b340b8c19173922b0da5ef7b66ef34d4d087dc48aad3e01f7e77e164d9"}, +] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "tenacity" +version = "8.5.0" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"}, + {file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"}, +] + +[package.extras] +doc = ["reno", "sphinx"] +test = ["pytest", "tornado (>=4.5)", "typeguard"] + +[[package]] +name = "tomli" +version = "2.1.0" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"}, + {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"}, +] + +[[package]] +name = "types-pyyaml" +version = "6.0.12.20240917" +description = "Typing stubs for PyYAML" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-PyYAML-6.0.12.20240917.tar.gz", hash = "sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587"}, + {file = "types_PyYAML-6.0.12.20240917-py3-none-any.whl", hash = "sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570"}, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and 
Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "virtualenv" +version = "20.27.1" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.8" +files = [ + {file = "virtualenv-20.27.1-py3-none-any.whl", hash = "sha256:f11f1b8a29525562925f745563bfd48b189450f61fb34c4f9cc79dd5aa32a1f4"}, + {file = "virtualenv-20.27.1.tar.gz", hash = "sha256:142c6be10212543b32c6c45d3d3893dff89112cc588b7d0879ae5a1ec03a47ba"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] + +[[package]] +name = "websocket-client" +version = "1.8.0" +description = "WebSocket client for Python with low level API options" +optional = false +python-versions = ">=3.8" +files = [ + {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, + {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, +] + +[package.extras] +docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"] +optional = ["python-socks", "wsaccel"] +test = ["websockets"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.10" +content-hash = "1b84a77c4cac5063b4fdb8c9d7a4faf009597457f6bd424c1af50030d6a44e6a" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..25050a58 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,167 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
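+# Packaging metadata plus tool configuration (ruff, mypy, coverage, pytest,
+# codespell) for the shared Mongo single-kernel library.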
+
+[tool.poetry]
+name = "mongo-single-kernel-library"
+version = "0.0.1"
+description = "Shared and reusable code for Mongo-related charms"
+authors = ["Neha Oudin "]
+readme = "README.md"
+license = "Apache-2.0"
+package-mode = true
+packages = [
+    {include = "single_kernel_mongo"},
+]
+include = ["single_kernel_mongo/templates/"]
+homepage = "https://github.com/canonical/mongo-single-kernel-library"
+repository = "https://github.com/canonical/mongo-single-kernel-library"
+classifiers = [
+    "Development Status :: 2 - Pre-Alpha",
+    "Intended Audience :: Developers",
+    "Intended Audience :: System Administrators",
+    "Operating System :: POSIX :: Linux",
+]
+
+[tool.poetry.urls]
+"Bug Tracker" = "https://github.com/canonical/mongo-single-kernel-library/issues"
+"Matrix" = "https://matrix.to/#/#charmhub-data-platform:ubuntu.com"
+"Contribute" = "https://github.com/canonical/mongo-single-kernel-library/blob/main/CONTRIBUTING.md"
+
+[tool.poetry.dependencies]
+python = "^3.10"
+poetry-core = "^1.9.0"
+ops = "~2.15.0"
+overrides = "^7.7.0"
+pydantic = "~2.9.0"
+pydantic-settings = "*"
+pyOpenSSL = "^24.2.1"
+pyyaml = "^6.0.1"
+tenacity = "^8.2.3"
+pymongo = "*"
+jinja2 = "*"
+lightkube = "*"
+dacite = "^1.8.0"
+
+
+[build-system]
+requires = ["poetry-core>=1.9.0"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.poetry.group.charm-libs.dependencies]
+cryptography = "^43.0.1" # tls_certificates lib v3
+jsonschema = "^4.22.0" # tls_certificates lib v3
+ops = "~2.15.0"
+pydantic = "~2.9.0"
+pydantic-settings = "*"
+pyOpenSSL = "^24.2.1"
+pymongo = "*"
+
+[tool.poetry.group.dev.dependencies]
+pre-commit = "^4.0.1"
+ruff = "^0.7.2"
+pydantic = "~2.9.0"
+mypy = "*"
+types-PyYAML = "*"
+
+[tool.poetry.group.format]
+optional = true
+
+[tool.poetry.group.format.dependencies]
+ruff = "^0.7.2"
+pydantic = "~2.9.0"
+mypy = "*"
+types-PyYAML = "*"
+
+[tool.poetry.group.lint]
+optional = true
+
+[tool.poetry.group.lint.dependencies]
+ruff = "^0.7.2"
+tomli = "*"
+codespell = "^2.2.6"
+shellcheck-py = "^0.10.0.1"
+pydantic = "~2.9.0"
+mypy = "*"
+types-PyYAML = "*"
+
+[tool.poetry.group.unit.dependencies]
+coverage = {extras = ["toml"], version = "^7.5.0"}
+pytest = "^8.1.1"
+parameterized = "^0.9.0"
+factory_boy = "*"
+mongomock = "^4.2.0.post1"
+pytest-mock = "*"
+
+[tool.ruff]
+target-version = "py310"
+line-length = 100
+exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv", "single_kernel_mongo/lib"]
+
+[tool.ruff.lint]
+select = ["F", "E", "W", "C", "N", "R", "D", "I001", "UP"]
+# E501: line length is enforced by the formatter instead
+# D107: missing docstring in __init__; D417: missing argument descriptions
+ignore = ["E501", "D107", "D417"]
+fixable = ["ALL"]
+
+[tool.ruff.lint.pycodestyle]
+max-doc-length = 99
+
+[tool.ruff.lint.pydocstyle]
+convention = "google"
+
+[tool.ruff.lint.mccabe]
+max-complexity = 10
+
+[tool.ruff.lint.flake8-copyright]
+author = "Canonical Ltd."
+notice-rgx = "Copyright\\s\\d{4}([-,]\\d{4})*\\s+%(author)s"
+
+
+[tool.ruff.format]
+quote-style = "double"
+indent-style = "space"
+skip-magic-trailing-comma = false
+line-ending = "auto"
+
+[tool.ruff.lint.per-file-ignores]
+# D100-D104: Ignore missing docstrings in tests
+"tests/*" = ["D100","D101","D102","D103","D104"]
+
+[tool.coverage.run]
+branch = true
+omit = [
+    "single_kernel_mongo/lib/charms/data_platform_libs/*",
+    "single_kernel_mongo/lib/charms/operator_libs_linux/*",
+    "single_kernel_mongo/state/abstract_state.py",
+    "single_kernel_mongo/config/audit_config.py",
+    "*__init__*"
+]
+
+[tool.coverage.report]
+show_missing = true
+
+[tool.pytest.ini_options]
+minversion = "6.0"
+log_cli_level = "INFO"
+markers = ["unstable"]
+filterwarnings = [
+    "error::RuntimeWarning"
+]
+
+[tool.mypy]
+exclude = [
+    "./build/",
+    "./dist/",
+    "./site/",
+    "./venv",
+    "./__pypackages__/",
+    "^single_kernel_mongo/lib/.*"
+]
+# https://mypy.readthedocs.io/en/stable/command_line.html#cmdoption-mypy-ignore-missing-imports
+ignore_missing_imports = true
+plugins = ["pydantic.mypy"]
+
+[tool.codespell]
+ignore-words=".codespellignore"
+skip="*/single_kernel_mongo/lib/*"
diff --git a/single_kernel_mongo/__init__.py b/single_kernel_mongo/__init__.py
new file mode 100644
index 00000000..5b103c9d
--- /dev/null
+++ b/single_kernel_mongo/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""Mongo Operators Single Kernel.
+
+Reusable code for the four Mongo-related charms:
+* [configurations](./config)
+* [core services](./core/)
+* [event handlers](./events/)
+* [event managers](./managers/)
+* [charm state](./state/)
+* [charm workloads](./workload/)
+* [utils and helpers](./utils/)
+* [abstract charm skeleton](./abstract_charm.py)
+"""
diff --git a/single_kernel_mongo/abstract_charm.py b/single_kernel_mongo/abstract_charm.py
new file mode 100644
index 00000000..630ee4e4
--- /dev/null
+++ b/single_kernel_mongo/abstract_charm.py
@@ -0,0 +1,50 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""Skeleton for the abstract charm."""
+
+import logging
+from typing import ClassVar, TypeVar
+
+from single_kernel_mongo.config.literals import Substrates
+from single_kernel_mongo.core.structured_config import MongoConfigModel
+from single_kernel_mongo.core.typed_charm import TypedCharmBase
+from single_kernel_mongo.events.lifecycle import LifecycleEventsHandler
+from single_kernel_mongo.managers.mongodb_operator import MongoDBOperator
+from single_kernel_mongo.status import StatusManager
+
+T = TypeVar("T", bound=MongoConfigModel)
+
+logger = logging.getLogger(__name__)
+
+
+class AbstractMongoCharm(TypedCharmBase[T]):
+    """An abstract mongo charm."""
+
+    config_type: type[T]
+    substrate: ClassVar[Substrates]
+    peer_rel_name: ClassVar[str]
+    name: ClassVar[str]
+
+    def __init__(self, *args):
+        super().__init__(*args)
+        self.status_manager = StatusManager(self)
+        self.operator = MongoDBOperator(self)
+        self.workload = self.operator.workload
+
+        self.framework.observe(getattr(self.on, "install"), self.on_install)
+        self.framework.observe(getattr(self.on, "leader_elected"), self.on_leader_elected)
+
+        # Register the role events handler after the global ones so that the global handlers keep priority.
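+        # (ops dispatches observers of the same event in registration order,
+        # so the two global observers above always run first.)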
+ self.lifecycle = LifecycleEventsHandler(self.operator, self.peer_rel_name) + + def on_install(self, _): + """First install event handler.""" + if self.substrate == "vm": + self.status_manager.to_maintenance("installing MongoDB") + if not self.workload.install(): + self.status_manager.to_blocked("couldn't install MongoDB") + return + + def on_leader_elected(self, _): + """Set the role in the databag.""" + self.operator.state.app_peer_data.role = self.parsed_config.role diff --git a/single_kernel_mongo/config/__init__.py b/single_kernel_mongo/config/__init__.py new file mode 100644 index 00000000..9274fd5b --- /dev/null +++ b/single_kernel_mongo/config/__init__.py @@ -0,0 +1,3 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. +"""The configuration objects for all combinations.""" diff --git a/single_kernel_mongo/config/audit_config.py b/single_kernel_mongo/config/audit_config.py new file mode 100644 index 00000000..b565e25c --- /dev/null +++ b/single_kernel_mongo/config/audit_config.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""The audit log configuration values.""" + +from dataclasses import dataclass + + +@dataclass(frozen=True) +class AuditLog: + """Audit log related configuration.""" + + format: str = "JSON" + destination: str = "file" diff --git a/single_kernel_mongo/config/literals.py b/single_kernel_mongo/config/literals.py new file mode 100644 index 00000000..f667c8b1 --- /dev/null +++ b/single_kernel_mongo/config/literals.py @@ -0,0 +1,92 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. +"""Literal string for the different charms.""" + +from dataclasses import dataclass +from enum import Enum +from pathlib import Path +from typing import Generic, Literal, TypeVar + +Substrates = Literal["vm", "k8s"] + +LOCALHOST = "127.0.0.1" + +CONTAINER = "mongod" + + +class CharmRole(str, Enum): + """Charm Role Name.""" + + MONGODB = "mongodb" + MONGOS = "mongos" + + +class Scope(str, Enum): + """Peer relations scope.""" + + APP = "app" + UNIT = "unit" + + +class MongoPorts(int, Enum): + """The default Mongo ports.""" + + MONGODB_PORT = 27017 + MONGOS_PORT = 27018 + + +class InternalUsers(str, Enum): + """The three allowed internal users.""" + + OPERATOR = "operator" + BACKUP = "backup" + MONITOR = "monitor" + + +SECRETS_APP = [f"{user}-password" for user in InternalUsers] + ["keyfile"] + + +@dataclass(frozen=True) +class Snap: + """The Snap related information.""" + + name: str = "charmed-mongodb" + channel: str = "6/edge" + revision: int = 123 + + +SNAP = Snap(channel="6/edge/use-snap-config-for-services", revision=124) + +T = TypeVar("T", bound=str | int) + + +@dataclass(frozen=True) +class WorkloadUser(Generic[T]): + """The system users for a workload.""" + + user: T + group: T + + +@dataclass(frozen=True) +class KubernetesUser(WorkloadUser[str]): + """The system user for kubernetes pods.""" + + user: str = "mongodb" + group: str = "mongodb" + + +@dataclass(frozen=True) +class VmUser(WorkloadUser[int]): + """The system users for vm workloads.""" + + user: int = 584788 + group: int = 0 + + +CRON_FILE = Path("/etc/cron.d/mongodb") +ENVIRONMENT_FILE = Path("/etc/environment") + +SECRETS_UNIT: list[str] = [] + +MAX_PASSWORD_LENGTH = 4096 diff --git a/single_kernel_mongo/config/logrotate_config.py b/single_kernel_mongo/config/logrotate_config.py new file mode 100644 index 00000000..4633896f --- /dev/null +++ b/single_kernel_mongo/config/logrotate_config.py @@ 
-0,0 +1,24 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""Logrotate parameters."""
+
+from dataclasses import dataclass
+from importlib import resources as impresources
+from importlib.abc import Traversable
+from pathlib import Path
+
+from single_kernel_mongo import templates
+
+TEMPLATE_DIRECTORY = impresources.files(templates)
+
+
+@dataclass(frozen=True)
+class LogRotateConfig:
+    """The logrotate parameters and useful static configuration."""
+
+    max_log_size: str = "50M"
+    max_rotations_to_keep: int = 10
+    log_rotate_template: Traversable = TEMPLATE_DIRECTORY / "logrotate.j2"
+    rendered_template: Path = Path("/etc/logrotate.d/mongodb")
+    log_status_dir: Path = Path("/var/lib/logrotate")
diff --git a/single_kernel_mongo/config/mongo_paths.py b/single_kernel_mongo/config/mongo_paths.py
new file mode 100644
index 00000000..effbe8ac
--- /dev/null
+++ b/single_kernel_mongo/config/mongo_paths.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Paths for Mongo charms."""
+
+SNAP_NAME = "charmed-mongodb"
+
+VM_PATH = {
+    "mongod": {
+        "ENVIRONMENT": "/etc/environment",
+        "CONF": f"/var/snap/{SNAP_NAME}/current/etc/mongod",
+        "DATA": f"/var/snap/{SNAP_NAME}/common/var/lib/mongodb",
+        "LOGS": f"/var/snap/{SNAP_NAME}/common/var/log/mongodb",
+        "ETC": f"/var/snap/{SNAP_NAME}/current/etc",
+        "VAR": f"/var/snap/{SNAP_NAME}/common/var",
+        "BIN": "/snap/bin",
+        "SHELL": "/snap/bin/charmed-mongodb.mongosh",
+        "LICENSES": f"/snap/{SNAP_NAME}/current/licenses",
+    }
+}
+K8S_PATH = {
+    "mongod": {
+        "ENVIRONMENT": "/etc/environment",
+        "CONF": "/etc/mongod",
+        "DATA": "/var/lib/mongodb",
+        "LOGS": "/var/log/mongodb",
+        "ETC": "/etc",
+        "VAR": "/var",
+        "BIN": "/usr/bin",
+        "SHELL": "/usr/bin/mongosh",
+        "LICENSES": "/licenses",
+    }
+}
diff --git a/single_kernel_mongo/config/relations.py b/single_kernel_mongo/config/relations.py
new file mode 100644
index 00000000..dd515669
--- /dev/null
+++ b/single_kernel_mongo/config/relations.py
@@ -0,0 +1,39 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Relation names and scopes for the Mongo charms."""
+
+from enum import Enum
+
+
+class RelationNames(str, Enum):
+    """The different relations."""
+
+    DATABASE = "database"
+    PEERS = "database-peers"
+    ROUTER_PEERS = "router-peers"
+    SHARDING = "sharding"
+    CONFIG_SERVER = "config-server"
+    CLUSTER = "cluster"
+    MONGOS_PROXY = "mongos_proxy"
+    UPGRADE_VERSION = "upgrade-version-a"
+
+
+class Scopes(str, Enum):
+    """The two scopes."""
+
+    APP_SCOPE = "app"
+    UNIT_SCOPE = "unit"
+
+
+class ExternalRequirerRelations(str, Enum):
+    """The relations we require externally."""
+
+    TLS = "certificates"
+    S3_CREDENTIALS = "s3-credentials"
+
+
+class ExternalProviderRelations(str, Enum):
+    """The relations we provide to non-Mongo charms."""
+
+    COS_AGENT = "cos-agent"
diff --git a/single_kernel_mongo/config/roles.py b/single_kernel_mongo/config/roles.py
new file mode 100644
index 00000000..66ae7c8e
--- /dev/null
+++ b/single_kernel_mongo/config/roles.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
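+# (Illustrative: ROLES["vm"].paths["CONF"] resolves to the snap's mongod
+# configuration directory; see mongo_paths.py for the full path tables.)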
+"""The different roles.""" + +from dataclasses import dataclass + +from single_kernel_mongo.config.literals import Substrates +from single_kernel_mongo.config.mongo_paths import K8S_PATH, VM_PATH + + +@dataclass +class Role: + """Defines a role for the charm.""" + + substrate: Substrates + paths: dict[str, str] + + +VM_MONGO = Role(substrate="vm", paths=VM_PATH["mongod"]) +K8S_MONGO = Role(substrate="k8s", paths=K8S_PATH["mongod"]) + +ROLES = {"vm": VM_MONGO, "k8s": K8S_MONGO} diff --git a/single_kernel_mongo/core/__init__.py b/single_kernel_mongo/core/__init__.py new file mode 100644 index 00000000..485fb0c5 --- /dev/null +++ b/single_kernel_mongo/core/__init__.py @@ -0,0 +1,3 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. +"""The core objects and code for mongo charms.""" diff --git a/single_kernel_mongo/core/exceptions.py b/single_kernel_mongo/core/exceptions.py new file mode 100644 index 00000000..7f35c1b4 --- /dev/null +++ b/single_kernel_mongo/core/exceptions.py @@ -0,0 +1,7 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. +"""All exceptions definitions.""" + + +class AmbiguousConfigError(Exception): + """Raised when the config could correspond to a mongod config or mongos config.""" diff --git a/single_kernel_mongo/core/k8s_worload.py b/single_kernel_mongo/core/k8s_worload.py new file mode 100644 index 00000000..ebfee41a --- /dev/null +++ b/single_kernel_mongo/core/k8s_worload.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Kubernetes workload definition.""" + +from itertools import chain +from logging import getLogger +from pathlib import Path + +from ops import Container +from ops.pebble import ChangeError, ExecError +from typing_extensions import override + +from single_kernel_mongo.config.literals import KubernetesUser +from single_kernel_mongo.core.workload import WorkloadBase +from single_kernel_mongo.exceptions import WorkloadExecError, WorkloadServiceError + +logger = getLogger(__name__) + + +class KubernetesWorkload(WorkloadBase): + """Wrapper for performing common operations specific to the Kafka container.""" + + substrate = "k8s" + container: Container # We always have a container in a Kubernetes Workload + users = KubernetesUser() + + def __init__(self, container: Container | None) -> None: + if not container: + raise AttributeError("Container is required.") + + self.container = container + + @property + @override + def container_can_connect(self) -> bool: + return self.container.can_connect() + + @property + @override + def snap_present(self) -> bool: + return True + + @override + def install(self) -> bool: + return True + + @override + def start(self) -> None: + try: + self.container.add_layer(self.layer_name, self.layer, combine=True) + self.container.restart(self.service) + except ChangeError as e: + logger.exception(str(e)) + raise WorkloadServiceError(e.err) from e + + @override + def stop(self) -> None: + try: + self.container.stop(self.service) + except ChangeError as e: + logger.exception(str(e)) + raise WorkloadServiceError(e.err) from e + + @override + def restart(self) -> None: + try: + self.start() + except ChangeError as e: + logger.exception(str(e)) + raise WorkloadServiceError(e.err) from e + + @override + def mkdir(self, path: Path, make_parents: bool = False) -> None: + self.container.make_dir(path, make_parents=make_parents) + + @override + def read(self, path: Path) -> list[str]: + if not 
self.container.exists(path): + return [] + with self.container.pull(path) as f: + return f.read().split("\n") + + @override + def write(self, path: Path, content: str, mode: str = "w") -> None: + self.container.push( + path, + content, + make_dirs=True, + permissions=0o400, + user=self.users.user, + group=self.users.group, + ) + + @override + def delete(self, path: Path): + self.container.remove_path(path) + + @override + def copy_to_unit(self, src: Path, destination: Path): + license_file = self.container.pull(path=src) + destination.write_text(license_file.read()) + + @override + def get_env(self) -> dict[str, str]: + return ( + self.container.get_plan() + .to_dict() + .get("services", {}) + .get(self.service, {}) + .get("environment", {}) + ) + + @override + def update_env(self, parameters: chain[str]): + content = " ".join(parameters) + self._env = content + + @override + def exec( + self, + command: list[str], # type: ignore[override] + env: dict[str, str] | None = None, + working_dir: str | None = None, + ) -> str: + try: + process = self.container.exec( + command=command, + environment=env, + working_dir=working_dir, + combine_stderr=True, + ) + output, _ = process.wait_output() + return output + except ExecError as e: + logger.debug(e) + raise WorkloadExecError( + e.command, + e.exit_code, + e.stdout, + e.stderr, + ) from e + + @override + def run_bin_command( + self, + bin_keyword: str, + bin_args: list[str] = [], + environment: dict[str, str] = {}, + ) -> str: + command = [f"{self.paths.binaries_path}/{self.bin_cmd}", bin_keyword, *bin_args] + return self.exec(command=command, env=environment or None) + + @override + def active(self) -> bool: + if not self.container.can_connect(): + return False + + if self.service not in self.container.get_services(): + return False + + return self.container.get_service(self.service).is_running() + + @override + def setup_cron(self, lines: list[str]) -> None: + raise NotImplementedError("VM Specific.") diff --git a/single_kernel_mongo/core/operator.py b/single_kernel_mongo/core/operator.py new file mode 100644 index 00000000..bc66a0c1 --- /dev/null +++ b/single_kernel_mongo/core/operator.py @@ -0,0 +1,72 @@ +#!/usr/bin/python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Abstract Operator for Mongo Related Charms.""" + +from __future__ import annotations + +from abc import ABC +from typing import TYPE_CHECKING, ClassVar + +from ops.framework import Object +from ops.model import Unit + +from single_kernel_mongo.config.literals import CharmRole + +if TYPE_CHECKING: + from single_kernel_mongo.abstract_charm import AbstractMongoCharm + + +class OperatorProtocol(ABC, Object): + """Protocol for a charm operator.""" + + charm: AbstractMongoCharm + name: ClassVar[CharmRole] + + def on_install(self) -> None: + """Handles the install event.""" + ... + + def on_start(self) -> None: + """Handles the start event.""" + ... + + def on_secret_changed(self, secret_label: str, secret_id: str) -> None: + """Handles the secret changed events.""" + + def on_config_changed(self) -> None: + """Handles the config changed events.""" + ... + + def on_storage_attached(self) -> None: + """Handles the storage attached events.""" + ... + + def on_storage_detaching(self) -> None: + """Handles the storage attached events.""" + ... + + def on_leader_elected(self) -> None: + """Handles the leader elected events.""" + ... + + def on_update_status(self) -> None: + """Handle the status update events.""" + ... 
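+
+    # The remaining relation/stop handlers below are no-op defaults; concrete
+    # operators override only the hooks they need.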
+
+    def on_relation_joined(self) -> None:
+        """Handles the relation joined events."""
+        ...
+
+    def on_relation_changed(self) -> None:
+        """Handles the relation changed events."""
+        ...
+
+    def on_relation_departed(self, departing_unit: Unit | None) -> None:
+        """Handles the relation departed events."""
+        ...
+
+    def on_stop(self) -> None:
+        """Handles the stop event."""
+        ...
diff --git a/single_kernel_mongo/core/secrets.py b/single_kernel_mongo/core/secrets.py
new file mode 100644
index 00000000..bd4a654a
--- /dev/null
+++ b/single_kernel_mongo/core/secrets.py
@@ -0,0 +1,183 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""Secrets related helper classes/functions."""
+
+from __future__ import annotations
+
+import logging
+from typing import TYPE_CHECKING
+
+from ops import Secret, SecretInfo
+from ops.charm import CharmBase
+from ops.model import ModelError, SecretNotFoundError
+
+from single_kernel_mongo.config.literals import Scope
+from single_kernel_mongo.exceptions import SecretAlreadyExistsError
+
+if TYPE_CHECKING:
+    from single_kernel_mongo.abstract_charm import AbstractMongoCharm
+
+SECRET_DELETED_LABEL = "None"
+
+logger = logging.getLogger(__name__)
+
+
+def generate_secret_label(app_name: str, scope: Scope) -> str:
+    """Generate a unique label for secrets within a relation context.
+
+    Defined as a standalone function, as the choice on secret label definition belongs to the
+    application logic. To be kept separate from the classes below, which simply provide a
+    (smart) abstraction layer above Juju Secrets.
+    """
+    members = [app_name, scope.value]
+    return f"{'.'.join(members)}"
+
+
+# Secret cache
+
+
+class CachedSecret:
+    """Abstraction layer above direct Juju access with caching.
+
+    The data structure deliberately mirrors Juju Secrets behavior, while making sure not to
+    fetch a secret multiple times within the same event scope.
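+
+    A minimal usage sketch (labels and keys here are illustrative only):
+
+        secret = CachedSecret(charm, label="myapp.app")
+        secret.add_secret({"operator-password": "..."}, Scope.APP)
+        content = secret.get_content()  # fetched once, then served from cache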
+    """
+
+    def __init__(self, charm: CharmBase, label: str, secret_uri: str | None = None):
+        self._secret_meta: Secret | None = None
+        self._secret_content: dict = {}
+        self._secret_uri = secret_uri
+        self.label = label
+        self.charm = charm
+
+    def add_secret(self, content: dict[str, str], scope: Scope) -> Secret:
+        """Create a new secret."""
+        if self._secret_uri:
+            raise SecretAlreadyExistsError(
+                f"Secret is already defined with uri {self._secret_uri}"
+            )
+
+        if scope == Scope.APP:
+            secret = self.charm.app.add_secret(content, label=self.label)
+        else:
+            secret = self.charm.unit.add_secret(content, label=self.label)
+        self._secret_uri = secret.id
+        self._secret_meta = secret
+        return self._secret_meta
+
+    @property
+    def meta(self) -> Secret | None:
+        """Get the cached secret meta-information."""
+        if self._secret_meta:
+            return self._secret_meta
+
+        if not (self._secret_uri or self.label):
+            return None
+
+        try:
+            self._secret_meta = self.charm.model.get_secret(label=self.label)
+        except SecretNotFoundError:
+            if self._secret_uri:
+                self._secret_meta = self.charm.model.get_secret(
+                    id=self._secret_uri, label=self.label
+                )
+        return self._secret_meta
+
+    def get_content(self) -> dict[str, str]:
+        """Get the cached secret content."""
+        if not self._secret_content:
+            if self.meta:
+                try:
+                    self._secret_content = self.meta.get_content(refresh=True)
+                except (ValueError, ModelError) as err:
+                    # https://bugs.launchpad.net/juju/+bug/2042596
+                    # Only triggered when 'refresh' is set
+                    known_model_errors = [
+                        "ERROR either URI or label should be used for getting an owned secret but not both",
+                        "ERROR secret owner cannot use --refresh",
+                    ]
+                    if isinstance(err, ModelError) and not any(
+                        msg in str(err) for msg in known_model_errors
+                    ):
+                        raise
+                    # Due to: ValueError: Secret owner cannot use refresh=True
+                    self._secret_content = self.meta.get_content()
+        return self._secret_content
+
+    def set_content(self, content: dict[str, str]) -> None:
+        """Set the cached secret content."""
+        if self.meta:
+            self.meta.set_content(content)
+            self._secret_content = content
+
+    def get_info(self) -> SecretInfo | None:
+        """Wrapper to get the corresponding call on the Secret object, if any."""
+        if self.meta:
+            return self.meta.get_info()
+        return None
+
+
+class SecretCache:
+    """A data structure storing CachedSecret objects."""
+
+    def __init__(self, charm: AbstractMongoCharm):
+        self.charm = charm
+        self._secrets: dict[str, CachedSecret] = {}
+
+    def get(self, scope: Scope, uri: str | None = None) -> CachedSecret | None:
+        """Get a secret from the Juju Secret store or cache."""
+        label = generate_secret_label(self.charm.app.name, scope)
+        if not self._secrets.get(label):
+            secret = CachedSecret(self.charm, label, uri)
+            if secret.meta:
+                self._secrets[label] = secret
+        return self._secrets.get(label)
+
+    def get_for_key(self, scope: Scope, key: str, uri: str | None = None) -> str | None:
+        """Get this key in the secret."""
+        secret = self.get(scope, uri)
+        if not secret:
+            return None
+        value = secret.get_content().get(key)
+        if value != SECRET_DELETED_LABEL:
+            return value
+        return None
+
+    def add(self, content: dict[str, str], scope: Scope) -> CachedSecret:
+        """Add a secret to Juju Secrets."""
+        label = generate_secret_label(self.charm.app.name, scope)
+        if self._secrets.get(label):
+            raise SecretAlreadyExistsError(f"Secret {label} already exists")
+
+        secret = CachedSecret(self.charm, label)
+        secret.add_secret(content, scope)
+        self._secrets[label] = secret
+        return self._secrets[label]
+
+    def set(self, key: str, content: str, scope: Scope) -> CachedSecret:
+        """Set or add a secret."""
+        secret = self.get(scope)
+        if not secret:
+            return self.add({key: content}, scope)
+        secret_content = secret.get_content()
+        secret_content.update({key: content})
+        secret.set_content(secret_content)
+        return secret
+
+    def remove(self, scope: Scope, key: str) -> None:
+        """Remove a secret key."""
+        secret = self.get(scope)
+
+        if not secret:
+            return
+
+        content = secret.get_content()
+
+        if not content.get(key) or content[key] == SECRET_DELETED_LABEL:
+            logger.error(f"Attempted to remove non-existent secret {scope}:{key}.")
+            return
+
+        content[key] = SECRET_DELETED_LABEL
+        secret.set_content(content)
+
+
+# END: Secret cache
diff --git a/single_kernel_mongo/core/structured_config.py b/single_kernel_mongo/core/structured_config.py
new file mode 100644
index 00000000..78b6b281
--- /dev/null
+++ b/single_kernel_mongo/core/structured_config.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Structured configuration for the Mongo charms."""
+
+from enum import Enum
+from typing import Annotated, TypeVar
+
+from pydantic import BaseModel, ConfigDict, Field, PlainSerializer
+
+# Generic TypeVar for serializers
+T = TypeVar("T")
+
+# Serialize enums as their str
+SerializeLiteralAsStr = Annotated[
+    T,
+    PlainSerializer(func=lambda v: str(v), return_type=str, when_used="always"),
+]
+
+
+class BaseConfigModel(BaseModel):
+    """Class to be used for defining the structured configuration options."""
+
+    def __getitem__(self, x):
+        """Return the item using the notation instance[key]."""
+        return getattr(self, x.replace("-", "_"))
+
+
+# Useful enums
+class MongoDBRoles(str, Enum):
+    """The different accepted roles for a charm."""
+
+    UNKNOWN = ""
+    REPLICATION = "replication"
+    CONFIG_SERVER = "config-server"
+    SHARD = "shard"
+    MONGOS = "mongos"
+
+
+class ExposeExternalEnum(str, Enum):
+    """The possible values for the expose-external config value."""
+
+    NODEPORT = "nodeport"
+    NONE = "none"
+
+
+# Base config model shared by the concrete charm configs below (typing only)
+class MongoConfigModel(BaseConfigModel):
+    """Default class for typing."""
+
+    expose_external: ExposeExternalEnum = ExposeExternalEnum.NONE
+    role: SerializeLiteralAsStr[MongoDBRoles]
+    auto_delete: bool = Field(default=False, alias="auto-delete")
+
+
+# The config for MongoDB charms
+class MongoDBCharmConfig(MongoConfigModel):
+    """The structured configuration of a MongoDB charm."""
+
+    model_config = ConfigDict(use_enum_values=True, extra="allow")
+
+    role: SerializeLiteralAsStr[MongoDBRoles] = Field(default=MongoDBRoles.REPLICATION)
+
+    expose_external: ExposeExternalEnum = ExposeExternalEnum.NONE
+
+
+# The config for mongos charms (unused in the case of mongos VM)
+class MongosCharmConfig(MongoConfigModel):
+    """The structured configuration of a Mongos charm."""
+
+    model_config = ConfigDict(use_enum_values=True, extra="allow")
+
+    role: SerializeLiteralAsStr[MongoDBRoles] = MongoDBRoles.MONGOS
+    expose_external: SerializeLiteralAsStr[ExposeExternalEnum] = Field(
+        default=ExposeExternalEnum.NONE, alias="expose-external"
+    )
diff --git a/single_kernel_mongo/core/typed_charm.py b/single_kernel_mongo/core/typed_charm.py
new file mode 100644
index 00000000..53b17d2f
--- /dev/null
+++ b/single_kernel_mongo/core/typed_charm.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
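+# (Illustrative: a charm declares `config_type = MyConfig` on a
+# TypedCharmBase[MyConfig] subclass, and `self.parsed_config` then validates
+# the Juju config into that pydantic model.)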
+
+"""Pydantic Typed charm."""
+
+from typing import Generic, TypeVar
+
+from ops.charm import CharmBase
+from pydantic import BaseModel
+
+T = TypeVar("T", bound=BaseModel)
+
+
+class TypedCharmBase(CharmBase, Generic[T]):
+    """Base class for charms with a pydantic-typed config."""
+
+    config_type: type[T]
+
+    @property
+    def parsed_config(self) -> T:
+        """Return the config parsed as a pydantic model."""
+        return self.config_type.model_validate(self.model.config)
diff --git a/single_kernel_mongo/core/vm_workload.py b/single_kernel_mongo/core/vm_workload.py
new file mode 100644
index 00000000..21e88580
--- /dev/null
+++ b/single_kernel_mongo/core/vm_workload.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""VM workload definition."""
+
+import subprocess
+from collections.abc import Mapping
+from itertools import chain
+from logging import getLogger
+from pathlib import Path
+from shutil import copyfile
+
+from ops import Container
+from tenacity import retry, retry_if_result, stop_after_attempt, wait_fixed
+from typing_extensions import override
+
+from single_kernel_mongo.config.literals import (
+    CRON_FILE,
+    SNAP,
+    VmUser,
+)
+from single_kernel_mongo.core.workload import WorkloadBase
+from single_kernel_mongo.exceptions import WorkloadExecError, WorkloadServiceError
+from single_kernel_mongo.lib.charms.operator_libs_linux.v1 import snap
+
+logger = getLogger(__name__)
+
+
+class VMWorkload(WorkloadBase):
+    """Wrapper for performing common operations specific to the MongoDB snap."""
+
+    substrate = "vm"
+    container = None
+    users = VmUser()
+
+    def __init__(self, container: Container | None) -> None:
+        self.snap = SNAP
+        self.mongod = snap.SnapCache()[self.snap.name]
+
+    @property
+    @override
+    def container_can_connect(self) -> bool:
+        return True  # Always True on VM
+
+    @property
+    @override
+    def snap_present(self) -> bool:
+        return self.mongod.present
+
+    @override
+    def start(self) -> None:
+        try:
+            self.mongod.start(services=[self.service])
+        except snap.SnapError as e:
+            logger.exception(str(e))
+            raise WorkloadServiceError(str(e)) from e
+
+    @override
+    def get_env(self) -> dict[str, str]:
+        return {self.env_var: self.mongod.get(self.snap_param)}
+
+    @override
+    def update_env(self, parameters: chain[str]):
+        content = " ".join(parameters)
+        self.mongod.set({self.snap_param: content})
+
+    @override
+    def stop(self) -> None:
+        try:
+            self.mongod.stop(services=[self.service])
+        except snap.SnapError as e:
+            logger.exception(str(e))
+            raise WorkloadServiceError(str(e)) from e
+
+    @override
+    def restart(self) -> None:
+        try:
+            self.mongod.restart(services=[self.service])
+        except snap.SnapError as e:
+            logger.exception(str(e))
+            raise WorkloadServiceError(str(e)) from e
+
+    @override
+    def mkdir(self, path: Path, make_parents: bool = False) -> None:
+        path.mkdir(exist_ok=True, parents=make_parents)
+
+    @override
+    def read(self, path: Path) -> list[str]:
+        if not path.is_file():
+            return []
+        return path.read_text().splitlines()
+
+    @override
+    def write(self, path: Path, content: str, mode: str = "w") -> None:  # pragma: nocover
+        path.parent.mkdir(parents=True, exist_ok=True)
+        with open(path, mode) as f:
+            f.write(content)
+
+        if path == self.paths.keyfile:
+            path.chmod(0o400)
+        else:
+            path.chmod(0o440)
+
+        self.exec(["chown", "-R", f"{self.users.user}:{self.users.group}", f"{path}"])
+
+    @override
+    def delete(self, path: Path) -> None:
+        if not path.exists() or not path.is_file():
+            return
+        path.unlink()
+
+    @override
+    def copy_to_unit(self, src: Path, destination: Path) -> None:
+        copyfile(src, destination)
+
+    @override
+    def exec(
+        self,
+        command: list[str] | str,
+        env: Mapping[str, str] | None = None,
+        working_dir: str | None = None,
+    ) -> str:
+        try:
+            output = subprocess.check_output(
+                command,
+                stderr=subprocess.PIPE,
+                universal_newlines=True,
+                shell=isinstance(command, str),
+                env=env,
+                cwd=working_dir,
+            )
+            logger.debug(f"{output=}")
+            return output
+        except subprocess.CalledProcessError as e:
+            logger.error(f"cmd failed - cmd={e.cmd}, stdout={e.stdout}, stderr={e.stderr}")
+            raise WorkloadExecError(
+                e.cmd,
+                e.returncode,
+                e.stdout,
+                e.stderr,
+            ) from e
+
+    @override
+    def run_bin_command(
+        self,
+        bin_keyword: str,
+        bin_args: list[str] = [],
+        environment: dict[str, str] = {},
+    ) -> str:
+        command = [
+            f"{self.paths.binaries_path}/charmed-mongodb.{self.bin_cmd}",
+            bin_keyword,
+            *bin_args,
+        ]
+        return self.exec(command=command, env=environment)
+
+    @override
+    @retry(
+        wait=wait_fixed(1),
+        stop=stop_after_attempt(5),
+        retry=retry_if_result(lambda result: result is False),
+        retry_error_callback=lambda _: False,
+    )
+    def active(self) -> bool:
+        try:
+            return self.mongod.services[self.service]["active"]
+        except KeyError:
+            return False
+
+    @override
+    def install(self) -> bool:
+        """Installs the charmed-mongodb snap.
+
+        Returns:
+            True if successfully installed. False otherwise.
+        """
+        try:
+            self.mongod.ensure(
+                snap.SnapState.Latest,
+                channel=self.snap.channel,
+                revision=self.snap.revision,
+            )
+            self.mongod.hold()
+
+            return True
+        except snap.SnapError as err:
+            logger.error(f"Failed to install {self.snap.name}. Reason: {err}.")
+            return False
+
+    @override
+    def setup_cron(self, lines: list[str]) -> None:  # pragma: nocover
+        CRON_FILE.write_text("\n".join(lines))
diff --git a/single_kernel_mongo/core/workload.py b/single_kernel_mongo/core/workload.py
new file mode 100644
index 00000000..79aacf6f
--- /dev/null
+++ b/single_kernel_mongo/core/workload.py
@@ -0,0 +1,295 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
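+# (MongoPaths below derives file locations from a Role's path table, and
+# WorkloadProtocol is the contract that both the VM and Kubernetes workloads
+# implement.)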
+
+"""Abstract workload definition for Mongo charms."""
+
+import secrets
+import string
+from abc import abstractmethod
+from itertools import chain
+from pathlib import Path
+from typing import ClassVar, Protocol
+
+from ops import Container
+from ops.pebble import Layer
+
+from single_kernel_mongo.config.literals import WorkloadUser
+from single_kernel_mongo.config.roles import Role
+
+
+class MongoPaths:
+    """Object to store the common paths for a mongodb instance."""
+
+    def __init__(self, role: Role):
+        self.conf_path = role.paths["CONF"]
+        self.data_path = role.paths["DATA"]
+        self.binaries_path = role.paths["BIN"]
+        self.var_path: str = role.paths["VAR"]
+        self.etc_path: str = role.paths["ETC"]
+        self.logs_path = role.paths["LOGS"]
+        self.shell_path = role.paths["SHELL"]
+        self.licenses_path = role.paths["LICENSES"]
+
+    def __eq__(self, other: object) -> bool:  # noqa: D105
+        if not isinstance(other, MongoPaths):
+            return NotImplemented  # pragma: nocover
+        return self.conf_path == other.conf_path
+
+    @property
+    def common_path(self) -> Path:
+        """The common path."""
+        return Path(self.var_path).parent
+
+    @property
+    def config_file(self) -> Path:
+        """The main mongod config file."""
+        return Path(f"{self.conf_path}/mongod.conf")
+
+    @property
+    def socket_path(self) -> Path:
+        """The socket path for internal connectivity."""
+        return Path(f"{self.var_path}/mongodb-27018.sock")
+
+    @property
+    def keyfile(self) -> Path:
+        """The keyfile of mongod instance."""
+        return Path(f"{self.conf_path}/keyFile")
+
+    @property
+    def log_file(self) -> Path:
+        """The main mongodb log file."""
+        return Path(f"{self.logs_path}/mongodb.log")
+
+    @property
+    def audit_file(self) -> Path:
+        """The mongod audit log file."""
+        return Path(f"{self.logs_path}/audit.log")
+
+    @property
+    def ext_pem_file(self) -> Path:
+        """External connectivity PEM file."""
+        return Path(f"{self.conf_path}/external-cert.pem")
+
+    @property
+    def ext_ca_file(self) -> Path:
+        """External connectivity CA file."""
+        return Path(f"{self.conf_path}/external-ca.crt")
+
+    @property
+    def int_pem_file(self) -> Path:
+        """Internal connectivity PEM file."""
+        return Path(f"{self.conf_path}/internal-cert.pem")
+
+    @property
+    def int_ca_file(self) -> Path:
+        """Internal connectivity CA file."""
+        return Path(f"{self.conf_path}/internal-ca.crt")
+
+    @property
+    def tls_files(self) -> tuple[Path, Path, Path, Path]:
+        """Tuple of all TLS files."""
+        return (
+            self.ext_pem_file,
+            self.ext_ca_file,
+            self.int_pem_file,
+            self.int_ca_file,
+        )
+
+
+class WorkloadProtocol(Protocol):  # pragma: nocover
+    """The protocol for workloads."""
+
+    substrate: ClassVar[str]
+    paths: MongoPaths
+    service: ClassVar[str]
+    layer_name: ClassVar[str]
+    container: Container | None
+    users: ClassVar[WorkloadUser]
+    bin_cmd: ClassVar[str]
+    env_var: ClassVar[str]
+    snap_param: ClassVar[str]
+    _env: str = ""
+
+    @abstractmethod
+    def install(self) -> bool:
+        """Installs the workload snap.
+
+        VM-only: on k8s, just returns True.
+        """
+
+    @property
+    @abstractmethod
+    def snap_present(self) -> bool:
+        """Checks if the snap is present or not.
+
+        VM-only: on k8s, just returns True.
+        """
+
+    @abstractmethod
+    def start(self) -> None:
+        """Starts the workload service."""
+        ...
+
+    @abstractmethod
+    def stop(self) -> None:
+        """Stops the workload service."""
+        ...
+
+    @abstractmethod
+    def restart(self) -> None:
+        """Restarts the workload service."""
+        ...
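+
+    # File-system primitives: the VM workload implements these with pathlib,
+    # the Kubernetes workload with Pebble's file API.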
+
+    @abstractmethod
+    def mkdir(self, path: Path, make_parents: bool = False) -> None:
+        """Creates a directory on the filesystem."""
+        ...
+
+    @abstractmethod
+    def read(self, path: Path) -> list[str]:
+        """Reads a file from the workload.
+
+        Args:
+            path: the full filepath to read from
+
+        Returns:
+            List of string lines from the specified path
+        """
+        ...
+
+    @abstractmethod
+    def write(self, path: Path, content: str, mode: str = "w") -> None:
+        """Writes content to a workload file.
+
+        Args:
+            content: string of content to write
+            path: the full filepath to write to
+            mode: the write mode. Usually "w" for write, or "a" for append. Default "w"
+        """
+        ...
+
+    @abstractmethod
+    def delete(self, path: Path) -> None:
+        """Deletes the file from the unit.
+
+        Args:
+            path: the full filepath of the file to delete.
+        """
+        ...
+
+    @abstractmethod
+    def copy_to_unit(self, src: Path, destination: Path) -> None:
+        """Copy a file from the workload to the unit running the charm.
+
+        In the VM case, copies from the filesystem to itself.
+        In the container case, pulls the file and writes it locally.
+
+        Args:
+            src: The source path on the workload.
+            destination: The destination path on the local filesystem.
+        """
+        ...
+
+    @abstractmethod
+    def exec(
+        self,
+        command: list[str] | str,
+        env: dict[str, str] | None = None,
+        working_dir: str | None = None,
+    ) -> str:
+        """Runs a command on the workload substrate."""
+        ...
+
+    @abstractmethod
+    def run_bin_command(
+        self,
+        bin_keyword: str,
+        bin_args: list[str] = [],
+        environment: dict[str, str] = {},
+    ) -> str:
+        """Runs the workload bin command with the desired args.
+
+        Args:
+            bin_keyword: the first argument passed to the workload binary
+            bin_args: the remaining command-line arguments
+            environment: A dictionary of environment variables
+
+        Returns:
+            String of service bin command output
+        """
+        ...
+
+    @abstractmethod
+    def active(self) -> bool:
+        """Checks that the workload is active."""
+        ...
+
+    @abstractmethod
+    def get_env(self) -> dict[str, str]:
+        """Returns the environment as defined by /etc/environment."""
+        ...
+
+    @abstractmethod
+    def update_env(self, parameters: chain[str]):
+        """Updates the environment with the new values."""
+        ...
+
+    def get_version(self) -> str:
+        """Get the workload version.
+
+        Returns:
+            String of mongo version
+        """
+        if not self.active():
+            return ""
+
+        try:
+            version = Path("workload_version").read_text().strip()
+        except:  # noqa: E722
+            version = ""
+        return version
+
+    @property
+    @abstractmethod
+    def layer(self) -> Layer:
+        """Gets the Pebble Layer definition for the current workload."""
+        ...
+
+    @property
+    @abstractmethod
+    def container_can_connect(self) -> bool:
+        """Flag to check if the workload container can connect."""
+        ...
+
+    @abstractmethod
+    def setup_cron(self, lines: list[str]) -> None:
+        """[VM Specific] Set up a cron."""
+        ...
+
+    @staticmethod
+    def generate_password() -> str:
+        """Creates a randomized string for use as app passwords.
+
+        Returns:
+            String of 32 randomized letter+digit characters
+        """
+        return "".join([secrets.choice(string.ascii_letters + string.digits) for _ in range(32)])
+
+    @staticmethod
+    def generate_keyfile() -> str:
+        """Key file used for authentication between replica set peers.
+
+        Returns:
+            A 1024-character random string, the maximum length MongoDB allows.
+        """
+        choices = string.ascii_letters + string.digits
+        return "".join([secrets.choice(choices) for _ in range(1024)])
+
+
+class WorkloadBase(WorkloadProtocol):  # pragma: nocover
+    """Base interface for common workload operations."""
+
+    def __init__(self, container: Container | None):
+        self.container = container
diff --git a/single_kernel_mongo/events/__init__.py b/single_kernel_mongo/events/__init__.py
new file mode 100644
index 00000000..f5ea8842
--- /dev/null
+++ b/single_kernel_mongo/events/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""The event handlers and code for mongo charms."""
diff --git a/single_kernel_mongo/events/backups.py b/single_kernel_mongo/events/backups.py
new file mode 100644
index 00000000..bdf6b92e
--- /dev/null
+++ b/single_kernel_mongo/events/backups.py
@@ -0,0 +1,247 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Event handlers for backups and S3 integration."""
+
+from __future__ import annotations
+
+import logging
+from typing import TYPE_CHECKING
+
+from ops import MaintenanceStatus
+from ops.charm import ActionEvent, RelationJoinedEvent
+from ops.framework import Object
+
+from single_kernel_mongo.config.relations import ExternalRequirerRelations
+from single_kernel_mongo.exceptions import (
+    ListBackupError,
+    PBMBusyError,
+    RestoreError,
+    ResyncError,
+    SetPBMConfigError,
+    WorkloadExecError,
+    WorkloadServiceError,
+)
+from single_kernel_mongo.lib.charms.data_platform_libs.v0.s3 import (
+    CredentialsChangedEvent,
+    S3Requirer,
+)
+from single_kernel_mongo.utils.event_helpers import (
+    defer_event_with_info_log,
+    fail_action_with_error_log,
+    success_action_with_info_log,
+)
+
+if TYPE_CHECKING:
+    from single_kernel_mongo.abstract_charm import AbstractMongoCharm
+    from single_kernel_mongo.managers.mongodb_operator import MongoDBOperator
+
+
+logger = logging.getLogger(__name__)
+
+INVALID_S3_INTEGRATION_STATUS = (
+    "Relation to s3-integrator is not supported, config role must be config-server."
+)
+
+
+class BackupEventsHandler(Object):
+    """Event Handler for managing backups and S3 integration."""
+
+    def __init__(self, dependent: MongoDBOperator):
+        super().__init__(parent=dependent, key="backup")
+        self.dependent = dependent
+        self.manager = self.dependent.backup_manager
+        self.charm: AbstractMongoCharm = dependent.charm
+        self.relation_name = ExternalRequirerRelations.S3_CREDENTIALS
+        self.s3_client = S3Requirer(self.charm, self.relation_name)
+
+        self.framework.observe(
+            self.charm.on[self.relation_name].relation_joined,
+            self._on_s3_relation_joined,
+        )
+        self.framework.observe(
+            self.s3_client.on.credentials_changed, self._on_s3_credential_changed
+        )
+        self.framework.observe(self.charm.on.create_backup_action, self._on_create_backup_action)
+        self.framework.observe(self.charm.on.list_backups_action, self._on_list_backups_action)
+        self.framework.observe(self.charm.on.restore_action, self._on_restore_action)
+
+    def _on_s3_relation_joined(self, event: RelationJoinedEvent) -> None:
+        """Checks that the s3-integrator relation is valid for this deployment."""
+        if self.dependent.state.upgrade_in_progress:
+            logger.warning(
+                "Adding s3-relations is not supported during an upgrade. The charm may be in a broken, unrecoverable state."
+            )
+            event.defer()
+            return
+        if not self.manager.is_valid_s3_integration():
+            logger.info(
+                "Shard does not support S3 relations. Please relate s3-integrator to config-server only."
+            )
+            self.charm.status_manager.to_blocked(INVALID_S3_INTEGRATION_STATUS)
+
+    def _on_s3_credential_changed(self, event: CredentialsChangedEvent):
+        action = "configure-pbm"
+        if self.dependent.state.upgrade_in_progress:
+            logger.warning(
+                "Changing s3-credentials is not supported during an upgrade. The charm may be in a broken, unrecoverable state."
+            )
+            event.defer()
+            return
+        if not self.manager.is_valid_s3_integration():
+            logger.debug(
+                "Shard does not support s3 relations, please relate s3-integrator to config-server only."
+            )
+            self.charm.status_manager.to_blocked(INVALID_S3_INTEGRATION_STATUS)
+            return
+        if not self.manager.workload.active():
+            defer_event_with_info_log(
+                logger, event, action, "Cannot set PBM configurations, pbm-agent service not found."
+            )
+            return
+
+        # Get the credentials from S3 connection
+        credentials = self.s3_client.get_s3_connection_info()
+
+        try:
+            self.manager.set_config_options(credentials=credentials)
+            self.manager.resync_config_options()
+        except SetPBMConfigError:
+            self.charm.status_manager.to_blocked("couldn't configure s3 backup options.")
+            return
+        except WorkloadServiceError as e:
+            self.charm.status_manager.to_blocked("couldn't start pbm")
+            logger.error("An exception occurred when starting pbm agent, error: %s.", str(e))
+            return
+        except ResyncError:
+            self.charm.status_manager.to_waiting("waiting to sync s3 configurations.")
+            defer_event_with_info_log(
+                logger, event, action, "Syncing configurations needs more time."
+            )
+            return
+        except PBMBusyError:
+            self.charm.status_manager.to_waiting("waiting to sync s3 configurations.")
+            defer_event_with_info_log(
+                logger,
+                event,
+                action,
+                "Cannot update configs while PBM is running, must wait for PBM action to finish.",
+            )
+            return
+        except WorkloadExecError as e:
+            self.charm.status_manager.to_blocked(self.manager.process_pbm_error(e.stdout))
+            return
+
+        self.charm.status_manager.set_and_share_status(self.manager.get_status())
+
+    def _on_create_backup_action(self, event: ActionEvent):
+        action = "backup"
+        check, reason = self.pass_sanity_checks()
+        if not check:
+            fail_action_with_error_log(logger, event, action, reason)
+            return
+        if not self.charm.unit.is_leader():
+            fail_action_with_error_log(
+                logger, event, action, "The action can be run only on the leader unit."
+            )
+            return
+
+        check, reason = self.manager.can_backup()
+        if not check:
+            fail_action_with_error_log(logger, event, action, reason)
+            return
+
+        try:
+            backup_id = self.manager.create_backup_action()
+            self.charm.status_manager.set_and_share_status(
+                MaintenanceStatus(f"backup started/running, backup id:'{backup_id}'")
+            )
+            success_action_with_info_log(
+                logger,
+                event,
+                action,
+                {"backup-status": f"backup started. backup id: {backup_id}"},
+            )
+        except Exception as e:
+            fail_action_with_error_log(
+                logger,
+                event,
+                action,
+                str(e),
+            )
+
+    def _on_list_backups_action(self, event: ActionEvent):
+        action = "list-backups"
+        check, reason = self.pass_sanity_checks()
+        if not check:
+            fail_action_with_error_log(logger, event, action, reason)
+            return
+
+        check, reason = self.manager.can_list_backup()
+        if not check:
+            fail_action_with_error_log(logger, event, action, reason)
+            return
+
+        try:
+            formatted_list = self.manager.list_backup_action()
+            success_action_with_info_log(logger, event, action, {"backups": formatted_list})
+        except ListBackupError as e:
+            fail_action_with_error_log(logger, event, action, str(e))
+            return
+
+    def _on_restore_action(self, event: ActionEvent):
+        action = "restore"
+
+        backup_id = str(event.params.get("backup-id", ""))
+        remapping_pattern = str(event.params.get("remap-pattern", ""))
+
+        if self.dependent.state.upgrade_in_progress:
+            fail_action_with_error_log(
+                logger, event, action, "Restoring a backup is not supported during an upgrade."
+            )
+            return
+        check, message = self.pass_sanity_checks()
+        if not check:
+            fail_action_with_error_log(logger, event, action, message)
+            return
+        if not self.charm.unit.is_leader():
+            fail_action_with_error_log(
+                logger, event, action, "The action can be run only on the leader unit."
+            )
+            return
+
+        check, reason = self.manager.can_restore(
+            backup_id,
+            remapping_pattern,
+        )
+        if not check:
+            fail_action_with_error_log(logger, event, action, reason)
+            return
+
+        try:
+            self.manager.restore_backup(backup_id=backup_id, remapping_pattern=remapping_pattern)
+            self.charm.status_manager.set_and_share_status(
+                MaintenanceStatus(f"restore started/running, backup id:'{backup_id}'")
+            )
+            success_action_with_info_log(
+                logger, event, action, {"restore-status": "restore started"}
+            )
+        except ResyncError:
+            raise
+        except RestoreError as restore_error:
+            fail_action_with_error_log(logger, event, action, str(restore_error))
+
+    def pass_sanity_checks(self) -> tuple[bool, str]:
+        """Return True if basic pre-conditions for running backup actions are met.
+
+        No matter what backup-action is being run, these requirements must be met.
+        """
+        if self.manager.state.s3_relation is None:
+            return False, "Relation with s3-integrator charm missing, cannot run backup actions."
+        if not self.manager.is_valid_s3_integration():
+            return (
+                False,
+                "Shards do not support backup operations, please run action on config-server.",
+            )
+
+        return True, ""
diff --git a/single_kernel_mongo/events/lifecycle.py b/single_kernel_mongo/events/lifecycle.py
new file mode 100644
index 00000000..b6a5a8d0
--- /dev/null
+++ b/single_kernel_mongo/events/lifecycle.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""Events handler for lifecycle events.
+
+In charge of handling the lifecycle events such as install, start, pebble ready, etc.
+""" + +import logging + +from ops.charm import ( + ConfigChangedEvent, + InstallEvent, + LeaderElectedEvent, + RelationChangedEvent, + RelationDepartedEvent, + RelationJoinedEvent, + SecretChangedEvent, + StartEvent, + StopEvent, + StorageAttachedEvent, + StorageDetachingEvent, + UpdateStatusEvent, +) +from ops.framework import Object + +from single_kernel_mongo.core.operator import OperatorProtocol +from single_kernel_mongo.exceptions import ( + ContainerNotReadyError, + UpgradeInProgressError, + WorkloadServiceError, +) + +logger = logging.getLogger(__name__) + + +class LifecycleEventsHandler(Object): + """Events handler for lifecycle events. + + In charge of handling the lifecycle events such as install, start, pebble ready, etc. + """ + + def __init__(self, dependent: OperatorProtocol, rel_name: str): + super().__init__(parent=dependent, key=dependent.name) + self.dependent = dependent + self.charm = dependent.charm + self.relation_name = rel_name + + self.framework.observe(getattr(self.charm.on, "install"), self.on_install) + self.framework.observe(getattr(self.charm.on, "start"), self.on_start) + self.framework.observe(getattr(self.charm.on, "stop"), self.on_stop) + self.framework.observe(getattr(self.charm.on, "leader_elected"), self.on_leader_elected) + + if self.charm.substrate == "k8s": + self.framework.observe(getattr(self.charm.on, "mongod_pebble_ready"), self.on_start) + + self.framework.observe(getattr(self.charm.on, "config_changed"), self.on_config_changed) + self.framework.observe(getattr(self.charm.on, "update_status"), self.on_update_status) + self.framework.observe(getattr(self.charm.on, "secret_changed"), self.on_secret_changed) + + self.framework.observe(self.charm.on[rel_name].relation_joined, self.on_relation_joined) + self.framework.observe(self.charm.on[rel_name].relation_changed, self.on_relation_changed) + self.framework.observe(self.charm.on[rel_name].relation_departed, self.on_relation_departed) + + self.framework.observe( + getattr(self.charm.on, "mongodb_storage_attached"), self.on_storage_attached + ) + self.framework.observe( + getattr(self.charm.on, "mongodb_storage_detaching"), self.on_storage_detaching + ) + + def on_start(self, event: StartEvent): + """Start event.""" + try: + self.dependent.on_start() + except Exception as e: + logger.error(f"Deferring because of {e}") + event.defer() + return + + def on_stop(self, event: StopEvent): + """Stop event.""" + ... 
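+    # The handlers below follow a single pattern: delegate to the dependent
+    # operator, deferring the event when a known transient error (container
+    # not ready, workload service error, upgrade in progress) indicates the
+    # hook should be retried later.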
+ + def on_install(self, event: InstallEvent): + """Install event.""" + try: + self.dependent.on_install() + except (ContainerNotReadyError, WorkloadServiceError): + event.defer() + return + + def on_leader_elected(self, event: LeaderElectedEvent): + """Leader elected event.""" + self.dependent.on_leader_elected() + + def on_config_changed(self, event: ConfigChangedEvent): + """Config Changed Event.""" + try: + self.dependent.on_config_changed() + except UpgradeInProgressError: + event.defer() + return + + def on_update_status(self, event: UpdateStatusEvent): + """Update Status Event.""" + try: + self.dependent.on_update_status() + except Exception: + return + + def on_secret_changed(self, event: SecretChangedEvent): + """Secret changed event.""" + self.dependent.on_secret_changed( + secret_label=event.secret.label or "", + secret_id=event.secret.id or "", + ) + + def on_relation_joined(self, event: RelationJoinedEvent): + """Relation joined event.""" + try: + self.dependent.on_relation_joined() + except UpgradeInProgressError: + event.defer() + return + + def on_relation_changed(self, event: RelationChangedEvent): + """Relation changed event.""" + try: + self.dependent.on_relation_changed() + except UpgradeInProgressError: + event.defer() + return + + def on_relation_departed(self, event: RelationDepartedEvent): + """Relation departed event.""" + self.dependent.on_relation_departed(departing_unit=event.departing_unit) + + def on_storage_attached(self, event: StorageAttachedEvent): + """Storage Attached Event.""" + self.dependent.on_storage_attached() + + def on_storage_detaching(self, event: StorageDetachingEvent): + """Storage Detaching Event.""" + self.dependent.on_storage_detaching() diff --git a/single_kernel_mongo/events/passwords.py b/single_kernel_mongo/events/passwords.py new file mode 100644 index 00000000..9f6de8fe --- /dev/null +++ b/single_kernel_mongo/events/passwords.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Event handlers for password-related Juju Actions.""" + +import logging +from enum import Enum +from typing import TYPE_CHECKING + +from ops.charm import ActionEvent +from ops.framework import Object +from ops.model import MaintenanceStatus + +from single_kernel_mongo.core.structured_config import MongoDBRoles +from single_kernel_mongo.exceptions import SetPasswordError +from single_kernel_mongo.utils.event_helpers import fail_action_with_error_log +from single_kernel_mongo.utils.mongodb_users import CharmUsers, OperatorUser + +if TYPE_CHECKING: + from single_kernel_mongo.abstract_charm import AbstractMongoCharm + from single_kernel_mongo.managers.mongodb_operator import MongoDBOperator + + +logger = logging.getLogger(__name__) + + +class PasswordActionParameter(str, Enum): + """Actions related config for MongoDB Charm.""" + + PASSWORD = "password" + USERNAME = "username" + + +class PasswordActionEvents(Object): + """Event handlers for password-related Juju Actions.""" + + def __init__(self, dependent: "MongoDBOperator"): + super().__init__(dependent, key="password_events") + self.dependent = dependent + self.charm: AbstractMongoCharm = dependent.charm + self.framework.observe( + getattr(self.charm.on, "set_password_action"), self._set_password_action + ) + self.framework.observe( + getattr(self.charm.on, "get_password_action"), + self._get_password_action, + ) + + def _set_password_action(self, event: ActionEvent) -> None: + """Handler for set-password action. 
+
+        Set the password for a specific user; if no password is passed, one is generated.
+        """
+        action = "set-password"
+        if not self.model.unit.is_leader():
+            fail_action_with_error_log(
+                logger, event, action, "Password rotation must be called on leader unit"
+            )
+            return
+        if self.dependent.state.upgrade_in_progress:
+            fail_action_with_error_log(
+                logger,
+                event,
+                action,
+                "Cannot set passwords while an upgrade is in progress.",
+            )
+            return
+        if self.dependent.state.is_role(MongoDBRoles.SHARD):
+            fail_action_with_error_log(
+                logger,
+                event,
+                action,
+                "Cannot set password on shard, please set password on config-server.",
+            )
+            return
+        if isinstance(self.dependent.backup_manager.get_status(), MaintenanceStatus):
+            fail_action_with_error_log(
+                logger,
+                event,
+                action,
+                "Cannot change password while a backup/restore is in progress.",
+            )
+            return
+        username = event.params.get(PasswordActionParameter.USERNAME, OperatorUser.username)
+        password = event.params.get(PasswordActionParameter.PASSWORD)
+        if username not in CharmUsers:
+            fail_action_with_error_log(
+                logger,
+                event,
+                action,
+                f"The action can be run only for users used by the charm: {', '.join(CharmUsers)} not {username}",
+            )
+            return
+        try:
+            passwd, secret_id = self.dependent.on_set_password_action(username, password)
+        except SetPasswordError as e:
+            fail_action_with_error_log(logger, event, action, str(e))
+            return
+
+        event.set_results({PasswordActionParameter.PASSWORD: passwd, "secret-id": secret_id})
+        return
+
+    def _get_password_action(self, event: ActionEvent) -> None:
+        action = "get-password"
+        username = event.params.get(PasswordActionParameter.USERNAME, OperatorUser.username)
+        if username not in CharmUsers:
+            fail_action_with_error_log(
+                logger,
+                event,
+                action,
+                f"The action can be run only for users used by the charm: {', '.join(CharmUsers)} not {username}",
+            )
+            return
+
+        password = self.dependent.on_get_password_action(username)
+        event.set_results({PasswordActionParameter.PASSWORD: password})
diff --git a/single_kernel_mongo/exceptions.py b/single_kernel_mongo/exceptions.py
new file mode 100644
index 00000000..89a5bd8f
--- /dev/null
+++ b/single_kernel_mongo/exceptions.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""All general exceptions."""
+
+
+class WorkloadExecError(Exception):
+    """Raised when a workload fails to exec a command."""
+
+    def __init__(
+        self,
+        cmd: str | list[str],
+        return_code: int,
+        stdout: str | None,
+        stderr: str | None,
+    ):
+        super().__init__(self)
+        self.cmd = cmd
+        self.return_code = return_code
+        self.stdout = stdout or ""
+        self.stderr = stderr or ""
+
+    def __str__(self) -> str:
+        """String representation of the error."""
+        return f"cmd failed ({self.return_code}) - cmd={self.cmd}, stdout={self.stdout}, stderr={self.stderr}"
+
+
+class WorkloadServiceError(Exception):
+    """Raised when a service fails to start/stop/restart."""
+
+
+class WorkloadNotReadyError(Exception):
+    """Raised when a service is not ready yet."""
+
+
+class ResyncError(Exception):
+    """Raised when pbm is resyncing configurations and is not ready to be used."""
+
+
+class SetPBMConfigError(Exception):
+    """Raised when pbm cannot configure a given option."""
+
+
+class PBMBusyError(Exception):
+    """Raised when PBM is busy and cannot run another operation."""
+
+
+class RestoreError(Exception):
+    """Raised when a restore backup operation fails."""
+
+
+class BackupError(Exception):
+    """Raised when a create backup operation fails."""
+
+
+class ListBackupError(Exception):
+    """Raised when a list backup operation fails."""
+
+
+class FailedToFindNodePortError(Exception):
+    """Raised when a NodePort cannot be found, but is expected to be present."""
+
+
+class FailedToFindServiceError(Exception):
+    """Raised when a service cannot be found, but is expected to be present."""
+
+
+class FailedToGetHostsError(Exception):
+    """Raised when we fail to get the hosts."""
+
+
+class SecretAlreadyExistsError(Exception):
+    """Raised when we try to push a secret that already exists."""
+
+
+class SetPasswordError(Exception):
+    """Raised when setting the password fails for some reason."""
+
+
+class ShardingMigrationError(Exception):
+    """Raised when there is an attempt to change the role of a sharding component."""
+
+
+class ContainerNotReadyError(Exception):
+    """Raised when the container is not ready."""
+
+
+class UpgradeInProgressError(Exception):
+    """Raised when an upgrade is in progress."""
+
+
+class OpenPortFailedError(Exception):
+    """Raised when we fail to open ports."""
diff --git a/single_kernel_mongo/lib/charms/data_platform_libs/v0/data_interfaces.py b/single_kernel_mongo/lib/charms/data_platform_libs/v0/data_interfaces.py
new file mode 100644
index 00000000..aaed2e52
--- /dev/null
+++ b/single_kernel_mongo/lib/charms/data_platform_libs/v0/data_interfaces.py
@@ -0,0 +1,3739 @@
+# Copyright 2023 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+r"""Library to manage the relation for the data-platform products.
+
+This library contains the Requires and Provides classes for handling the relation
+between an application and multiple managed applications supported by the data team:
+MySQL, Postgresql, MongoDB, Redis, and Kafka.
+ +### Database (MySQL, Postgresql, MongoDB, and Redis) + +#### Requires Charm +This library is a uniform interface to a selection of common database +metadata, with added custom events that add convenience to database management, +and methods to consume the application related data. + + +Following an example of using the DatabaseCreatedEvent, in the context of the +application charm code: + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Charm events defined in the database requires charm library. + self.database = DatabaseRequires(self, relation_name="database", database_name="database") + self.framework.observe(self.database.on.database_created, self._on_database_created) + + def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + + # Start application with rendered configuration + self._start_application(config_file) + + # Set active status + self.unit.status = ActiveStatus("received database credentials") +``` + +As shown above, the library provides some custom events to handle specific situations, +which are listed below: + +- database_created: event emitted when the requested database is created. +- endpoints_changed: event emitted when the read/write endpoints of the database have changed. +- read_only_endpoints_changed: event emitted when the read-only endpoints of the database + have changed. Event is not triggered if read/write endpoints changed too. + +If it is needed to connect multiple database clusters to the same relation endpoint +the application charm can implement the same code as if it would connect to only +one database cluster (like the above code example). + +To differentiate multiple clusters connected to the same relation endpoint +the application charm can use the name of the remote application: + +```python + +def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Get the remote app name of the cluster that triggered this event + cluster = event.relation.app.name +``` + +It is also possible to provide an alias for each different database cluster/relation. + +So, it is possible to differentiate the clusters in two ways. +The first is to use the remote application name, i.e., `event.relation.app.name`, as above. + +The second way is to use different event handlers to handle each cluster events. +The implementation would be something like the following code: + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Define the cluster aliases and one handler for each cluster database created event. 
+ self.database = DatabaseRequires( + self, + relation_name="database", + database_name="database", + relations_aliases = ["cluster1", "cluster2"], + ) + self.framework.observe( + self.database.on.cluster1_database_created, self._on_cluster1_database_created + ) + self.framework.observe( + self.database.on.cluster2_database_created, self._on_cluster2_database_created + ) + + def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster1 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + + def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster2 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + +``` + +When it's needed to check whether a plugin (extension) is enabled on the PostgreSQL +charm, you can use the is_postgresql_plugin_enabled method. To use that, you need to +add the following dependency to your charmcraft.yaml file: + +```yaml + +parts: + charm: + charm-binary-python-packages: + - psycopg[binary] + +``` + +### Provider Charm + +Following an example of using the DatabaseRequestedEvent, in the context of the +database charm code: + +```python +from charms.data_platform_libs.v0.data_interfaces import DatabaseProvides + +class SampleCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + # Charm events defined in the database provides charm library. + self.provided_database = DatabaseProvides(self, relation_name="database") + self.framework.observe(self.provided_database.on.database_requested, + self._on_database_requested) + # Database generic helper + self.database = DatabaseHelper() + + def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: + # Handle the event triggered by a new database requested in the relation + # Retrieve the database name using the charm library. + db_name = event.database + # generate a new user credential + username = self.database.generate_user() + password = self.database.generate_password() + # set the credentials for the relation + self.provided_database.set_credentials(event.relation.id, username, password) + # set other variables for the relation event.set_tls("False") +``` +As shown above, the library provides a custom event (database_requested) to handle +the situation when an application charm requests a new database to be created. +It's preferred to subscribe to this event instead of relation changed event to avoid +creating a new database when other information other than a database name is +exchanged in the relation databag. + +### Kafka + +This library is the interface to use and interact with the Kafka charm. This library contains +custom events that add convenience to manage Kafka, and provides methods to consume the +application related data. 
+
+#### Requirer Charm
+
+```python
+
+from charms.data_platform_libs.v0.data_interfaces import (
+    BootstrapServerChangedEvent,
+    KafkaRequires,
+    TopicCreatedEvent,
+)
+
+class ApplicationCharm(CharmBase):
+
+    def __init__(self, *args):
+        super().__init__(*args)
+        self.kafka = KafkaRequires(self, "kafka_client", "test-topic")
+        self.framework.observe(
+            self.kafka.on.bootstrap_server_changed, self._on_kafka_bootstrap_server_changed
+        )
+        self.framework.observe(
+            self.kafka.on.topic_created, self._on_kafka_topic_created
+        )
+
+    def _on_kafka_bootstrap_server_changed(self, event: BootstrapServerChangedEvent):
+        # Event triggered when a bootstrap server was changed for this application
+
+        new_bootstrap_server = event.bootstrap_server
+        ...
+
+    def _on_kafka_topic_created(self, event: TopicCreatedEvent):
+        # Event triggered when a topic was created for this application
+        username = event.username
+        password = event.password
+        tls = event.tls
+        tls_ca = event.tls_ca
+        bootstrap_server = event.bootstrap_server
+        consumer_group_prefix = event.consumer_group_prefix
+        zookeeper_uris = event.zookeeper_uris
+        ...
+
+```
+
+As shown above, the library provides some custom events to handle specific situations,
+which are listed below:
+
+- topic_created: event emitted when the requested topic is created.
+- bootstrap_server_changed: event emitted when the bootstrap server has changed.
+- credential_changed: event emitted when the credentials of Kafka changed.
+
+#### Provider Charm
+
+Following the previous example, this is an example of the provider charm.
+
+```python
+from charms.data_platform_libs.v0.data_interfaces import (
+    KafkaProvides,
+    TopicRequestedEvent,
+)
+
+class SampleCharm(CharmBase):
+
+    def __init__(self, *args):
+        super().__init__(*args)
+
+        # Default charm events.
+        self.framework.observe(self.on.start, self._on_start)
+
+        # Charm events defined in the Kafka Provides charm library.
+        self.kafka_provider = KafkaProvides(self, relation_name="kafka_client")
+        self.framework.observe(self.kafka_provider.on.topic_requested, self._on_topic_requested)
+        # Kafka generic helper
+        self.kafka = KafkaHelper()
+
+    def _on_topic_requested(self, event: TopicRequestedEvent):
+        # Handle the on_topic_requested event.
+
+        topic = event.topic
+        relation_id = event.relation.id
+        # set connection info in the databag relation
+        self.kafka_provider.set_bootstrap_server(relation_id, self.kafka.get_bootstrap_server())
+        self.kafka_provider.set_credentials(relation_id, username=username, password=password)
+        self.kafka_provider.set_consumer_group_prefix(relation_id, ...)
+        self.kafka_provider.set_tls(relation_id, "False")
+        self.kafka_provider.set_zookeeper_uris(relation_id, ...)
+
+```
+As shown above, the library provides a custom event (topic_requested) to handle
+the situation when an application charm requests a new topic to be created.
+It is preferred to subscribe to this event instead of the relation-changed event, to avoid
+creating a new topic when information other than the topic name is
+exchanged in the relation databag.
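+
+### Reading relation data outside event callbacks
+
+Both sides of a relation also expose `fetch_relation_data`, which can be used to
+read the remote databag from regular charm code, outside of an event callback
+(it cannot be used in `*-relation-broken` events). An illustrative sketch,
+reusing the `self.database` requirer from the examples above:
+
+```python
+# Dict of databag contents indexed by relation ID, e.g. {0: {"username": ...}}
+all_data = self.database.fetch_relation_data()
+```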
+""" + +import copy +import json +import logging +from abc import ABC, abstractmethod +from collections import UserDict, namedtuple +from datetime import datetime +from enum import Enum +from typing import ( + Callable, + Dict, + ItemsView, + KeysView, + List, + Optional, + Set, + Tuple, + Union, + ValuesView, +) + +from ops import JujuVersion, Model, Secret, SecretInfo, SecretNotFoundError +from ops.charm import ( + CharmBase, + CharmEvents, + RelationChangedEvent, + RelationCreatedEvent, + RelationEvent, + SecretChangedEvent, +) +from ops.framework import EventSource, Object +from ops.model import Application, ModelError, Relation, Unit + +# The unique Charmhub library identifier, never change it +LIBID = "6c3e6b6680d64e9c89e611d1a15f65be" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 39 + +PYDEPS = ["ops>=2.0.0"] + +# Starting from what LIBPATCH number to apply legacy solutions +# v0.17 was the last version without secrets +LEGACY_SUPPORT_FROM = 17 + +logger = logging.getLogger(__name__) + +Diff = namedtuple("Diff", "added changed deleted") +Diff.__doc__ = """ +A tuple for storing the diff between two data mappings. + +added - keys that were added +changed - keys that still exist but have new values +deleted - key that were deleted""" + + +PROV_SECRET_PREFIX = "secret-" +REQ_SECRET_FIELDS = "requested-secrets" +GROUP_MAPPING_FIELD = "secret_group_mapping" +GROUP_SEPARATOR = "@" + +MODEL_ERRORS = { + "not_leader": "this unit is not the leader", + "no_label_and_uri": "ERROR either URI or label should be used for getting an owned secret but not both", + "owner_no_refresh": "ERROR secret owner cannot use --refresh", +} + + +############################################################################## +# Exceptions +############################################################################## + + +class DataInterfacesError(Exception): + """Common ancestor for DataInterfaces related exceptions.""" + + +class SecretError(DataInterfacesError): + """Common ancestor for Secrets related exceptions.""" + + +class SecretAlreadyExistsError(SecretError): + """A secret that was to be added already exists.""" + + +class SecretsUnavailableError(SecretError): + """Secrets aren't yet available for Juju version used.""" + + +class SecretsIllegalUpdateError(SecretError): + """Secrets aren't yet available for Juju version used.""" + + +class IllegalOperationError(DataInterfacesError): + """To be used when an operation is not allowed to be performed.""" + + +############################################################################## +# Global helpers / utilities +############################################################################## + +############################################################################## +# Databag handling and comparison methods +############################################################################## + + +def get_encoded_dict( + relation: Relation, member: Union[Unit, Application], field: str +) -> Optional[Dict[str, str]]: + """Retrieve and decode an encoded field from relation data.""" + data = json.loads(relation.data[member].get(field, "{}")) + if isinstance(data, dict): + return data + logger.error("Unexpected datatype for %s instead of dict.", str(data)) + + +def get_encoded_list( + relation: Relation, member: Union[Unit, Application], field: str +) -> Optional[List[str]]: + 
"""Retrieve and decode an encoded field from relation data.""" + data = json.loads(relation.data[member].get(field, "[]")) + if isinstance(data, list): + return data + logger.error("Unexpected datatype for %s instead of list.", str(data)) + + +def set_encoded_field( + relation: Relation, + member: Union[Unit, Application], + field: str, + value: Union[str, list, Dict[str, str]], +) -> None: + """Set an encoded field from relation data.""" + relation.data[member].update({field: json.dumps(value)}) + + +def diff(event: RelationChangedEvent, bucket: Optional[Union[Unit, Application]]) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + bucket: bucket of the databag (app or unit) + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + # Retrieve the old data from the data key in the application relation databag. + if not bucket: + return Diff([], [], []) + + old_data = get_encoded_dict(event.relation, bucket, "data") + + if not old_data: + old_data = {} + + # Retrieve the new data from the event relation databag. + new_data = ( + {key: value for key, value in event.relation.data[event.app].items() if key != "data"} + if event.app + else {} + ) + + # These are the keys that were added to the databag and triggered this event. + added = new_data.keys() - old_data.keys() # pyright: ignore [reportAssignmentType] + # These are the keys that were removed from the databag and triggered this event. + deleted = old_data.keys() - new_data.keys() # pyright: ignore [reportAssignmentType] + # These are the keys that already existed in the databag, + # but had their values changed. + changed = { + key + for key in old_data.keys() & new_data.keys() # pyright: ignore [reportAssignmentType] + if old_data[key] != new_data[key] # pyright: ignore [reportAssignmentType] + } + # Convert the new_data to a serializable format and save it for a next diff check. + set_encoded_field(event.relation, bucket, "data", new_data) + + # Return the diff with all possible changes. + return Diff(added, changed, deleted) + + +############################################################################## +# Module decorators +############################################################################## + + +def leader_only(f): + """Decorator to ensure that only leader can perform given operation.""" + + def wrapper(self, *args, **kwargs): + if self.component == self.local_app and not self.local_unit.is_leader(): + logger.error( + "This operation (%s()) can only be performed by the leader unit", f.__name__ + ) + return + return f(self, *args, **kwargs) + + wrapper.leader_only = True + return wrapper + + +def juju_secrets_only(f): + """Decorator to ensure that certain operations would be only executed on Juju3.""" + + def wrapper(self, *args, **kwargs): + if not self.secrets_enabled: + raise SecretsUnavailableError("Secrets unavailable on current Juju version") + return f(self, *args, **kwargs) + + return wrapper + + +def dynamic_secrets_only(f): + """Decorator to ensure that certain operations would be only executed when NO static secrets are defined.""" + + def wrapper(self, *args, **kwargs): + if self.static_secret_fields: + raise IllegalOperationError( + "Unsafe usage of statically and dynamically defined secrets, aborting." 
+ ) + return f(self, *args, **kwargs) + + return wrapper + + +def either_static_or_dynamic_secrets(f): + """Decorator to ensure that static and dynamic secrets won't be used in parallel.""" + + def wrapper(self, *args, **kwargs): + if self.static_secret_fields and set(self.current_secret_fields) - set( + self.static_secret_fields + ): + raise IllegalOperationError( + "Unsafe usage of statically and dynamically defined secrets, aborting." + ) + return f(self, *args, **kwargs) + + return wrapper + + +def legacy_apply_from_version(version: int) -> Callable: + """Decorator to decide whether to apply a legacy function or not. + + Based on LEGACY_SUPPORT_FROM module variable value, the importer charm may only want + to apply legacy solutions starting from a specific LIBPATCH. + + NOTE: All 'legacy' functions have to be defined and called in a way that they return `None`. + This results in cleaner and more secure execution flows in case the function may be disabled. + This requirement implicitly means that legacy functions change the internal state strictly, + don't return information. + """ + + def decorator(f: Callable[..., None]): + """Signature is ensuring None return value.""" + f.legacy_version = version + + def wrapper(self, *args, **kwargs) -> None: + if version >= LEGACY_SUPPORT_FROM: + return f(self, *args, **kwargs) + + return wrapper + + return decorator + + +############################################################################## +# Helper classes +############################################################################## + + +class Scope(Enum): + """Peer relations scope.""" + + APP = "app" + UNIT = "unit" + + +class SecretGroup(str): + """Secret groups specific type.""" + + +class SecretGroupsAggregate(str): + """Secret groups with option to extend with additional constants.""" + + def __init__(self): + self.USER = SecretGroup("user") + self.TLS = SecretGroup("tls") + self.EXTRA = SecretGroup("extra") + + def __setattr__(self, name, value): + """Setting internal constants.""" + if name in self.__dict__: + raise RuntimeError("Can't set constant!") + else: + super().__setattr__(name, SecretGroup(value)) + + def groups(self) -> list: + """Return the list of stored SecretGroups.""" + return list(self.__dict__.values()) + + def get_group(self, group: str) -> Optional[SecretGroup]: + """If the input str translates to a group name, return that.""" + return SecretGroup(group) if group in self.groups() else None + + +SECRET_GROUPS = SecretGroupsAggregate() + + +class CachedSecret: + """Locally cache a secret. 
+ + The data structure is precisely re-using/simulating as in the actual Secret Storage + """ + + KNOWN_MODEL_ERRORS = [MODEL_ERRORS["no_label_and_uri"], MODEL_ERRORS["owner_no_refresh"]] + + def __init__( + self, + model: Model, + component: Union[Application, Unit], + label: str, + secret_uri: Optional[str] = None, + legacy_labels: List[str] = [], + ): + self._secret_meta = None + self._secret_content = {} + self._secret_uri = secret_uri + self.label = label + self._model = model + self.component = component + self.legacy_labels = legacy_labels + self.current_label = None + + @property + def meta(self) -> Optional[Secret]: + """Getting cached secret meta-information.""" + if not self._secret_meta: + if not (self._secret_uri or self.label): + return + + try: + self._secret_meta = self._model.get_secret(label=self.label) + except SecretNotFoundError: + # Falling back to seeking for potential legacy labels + self._legacy_compat_find_secret_by_old_label() + + # If still not found, to be checked by URI, to be labelled with the proposed label + if not self._secret_meta and self._secret_uri: + self._secret_meta = self._model.get_secret(id=self._secret_uri, label=self.label) + return self._secret_meta + + ########################################################################## + # Backwards compatibility / Upgrades + ########################################################################## + # These functions are used to keep backwards compatibility on rolling upgrades + # Policy: + # All data is kept intact until the first write operation. (This allows a minimal + # grace period during which rollbacks are fully safe. For more info see the spec.) + # All data involves: + # - databag contents + # - secrets content + # - secret labels (!!!) + # Legacy functions must return None, and leave an equally consistent state whether + # they are executed or skipped (as a high enough versioned execution environment may + # not require so) + + # Compatibility + + @legacy_apply_from_version(34) + def _legacy_compat_find_secret_by_old_label(self) -> None: + """Compatibility function, allowing to find a secret by a legacy label. + + This functionality is typically needed when secret labels changed over an upgrade. + Until the first write operation, we need to maintain data as it was, including keeping + the old secret label. In order to keep track of the old label currently used to access + the secret, and additional 'current_label' field is being defined. + """ + for label in self.legacy_labels: + try: + self._secret_meta = self._model.get_secret(label=label) + except SecretNotFoundError: + pass + else: + if label != self.label: + self.current_label = label + return + + # Migrations + + @legacy_apply_from_version(34) + def _legacy_migration_to_new_label_if_needed(self) -> None: + """Helper function to re-create the secret with a different label. + + Juju does not provide a way to change secret labels. + Thus whenever moving from secrets version that involves secret label changes, + we "re-create" the existing secret, and attach the new label to the new + secret, to be used from then on. + + Note: we replace the old secret with a new one "in place", as we can't + easily switch the containing SecretCache structure to point to a new secret. + Instead we are changing the 'self' (CachedSecret) object to point to the + new instance. 
+ """ + if not self.current_label or not (self.meta and self._secret_meta): + return + + # Create a new secret with the new label + content = self._secret_meta.get_content() + self._secret_uri = None + + # It will be nice to have the possibility to check if we are the owners of the secret... + try: + self._secret_meta = self.add_secret(content, label=self.label) + except ModelError as err: + if MODEL_ERRORS["not_leader"] not in str(err): + raise + self.current_label = None + + ########################################################################## + # Public functions + ########################################################################## + + def add_secret( + self, + content: Dict[str, str], + relation: Optional[Relation] = None, + label: Optional[str] = None, + ) -> Secret: + """Create a new secret.""" + if self._secret_uri: + raise SecretAlreadyExistsError( + "Secret is already defined with uri %s", self._secret_uri + ) + + label = self.label if not label else label + + secret = self.component.add_secret(content, label=label) + if relation and relation.app != self._model.app: + # If it's not a peer relation, grant is to be applied + secret.grant(relation) + self._secret_uri = secret.id + self._secret_meta = secret + return self._secret_meta + + def get_content(self) -> Dict[str, str]: + """Getting cached secret content.""" + if not self._secret_content: + if self.meta: + try: + self._secret_content = self.meta.get_content(refresh=True) + except (ValueError, ModelError) as err: + # https://bugs.launchpad.net/juju/+bug/2042596 + # Only triggered when 'refresh' is set + if isinstance(err, ModelError) and not any( + msg in str(err) for msg in self.KNOWN_MODEL_ERRORS + ): + raise + # Due to: ValueError: Secret owner cannot use refresh=True + self._secret_content = self.meta.get_content() + return self._secret_content + + def set_content(self, content: Dict[str, str]) -> None: + """Setting cached secret content.""" + if not self.meta: + return + + # DPE-4182: do not create new revision if the content stay the same + if content == self.get_content(): + return + + if content: + self._legacy_migration_to_new_label_if_needed() + self.meta.set_content(content) + self._secret_content = content + else: + self.meta.remove_all_revisions() + + def get_info(self) -> Optional[SecretInfo]: + """Wrapper function to apply the corresponding call on the Secret object within CachedSecret if any.""" + if self.meta: + return self.meta.get_info() + + def remove(self) -> None: + """Remove secret.""" + if not self.meta: + raise SecretsUnavailableError("Non-existent secret was attempted to be removed.") + try: + self.meta.remove_all_revisions() + except SecretNotFoundError: + pass + self._secret_content = {} + self._secret_meta = None + self._secret_uri = None + + +class SecretCache: + """A data structure storing CachedSecret objects.""" + + def __init__(self, model: Model, component: Union[Application, Unit]): + self._model = model + self.component = component + self._secrets: Dict[str, CachedSecret] = {} + + def get( + self, label: str, uri: Optional[str] = None, legacy_labels: List[str] = [] + ) -> Optional[CachedSecret]: + """Getting a secret from Juju Secret store or cache.""" + if not self._secrets.get(label): + secret = CachedSecret( + self._model, self.component, label, uri, legacy_labels=legacy_labels + ) + if secret.meta: + self._secrets[label] = secret + return self._secrets.get(label) + + def add(self, label: str, content: Dict[str, str], relation: Relation) -> CachedSecret: + """Adding a secret 
to Juju Secret.""" + if self._secrets.get(label): + raise SecretAlreadyExistsError(f"Secret {label} already exists") + + secret = CachedSecret(self._model, self.component, label) + secret.add_secret(content, relation) + self._secrets[label] = secret + return self._secrets[label] + + def remove(self, label: str) -> None: + """Remove a secret from the cache.""" + if secret := self.get(label): + try: + secret.remove() + self._secrets.pop(label) + except (SecretsUnavailableError, KeyError): + pass + else: + return + logging.debug("Non-existing Juju Secret was attempted to be removed %s", label) + + +################################################################################ +# Relation Data base/abstract ancestors (i.e. parent classes) +################################################################################ + + +# Base Data + + +class DataDict(UserDict): + """Python Standard Library 'dict' - like representation of Relation Data.""" + + def __init__(self, relation_data: "Data", relation_id: int): + self.relation_data = relation_data + self.relation_id = relation_id + + @property + def data(self) -> Dict[str, str]: + """Return the full content of the Abstract Relation Data dictionary.""" + result = self.relation_data.fetch_my_relation_data([self.relation_id]) + try: + result_remote = self.relation_data.fetch_relation_data([self.relation_id]) + except NotImplementedError: + result_remote = {self.relation_id: {}} + if result: + result_remote[self.relation_id].update(result[self.relation_id]) + return result_remote.get(self.relation_id, {}) + + def __setitem__(self, key: str, item: str) -> None: + """Set an item of the Abstract Relation Data dictionary.""" + self.relation_data.update_relation_data(self.relation_id, {key: item}) + + def __getitem__(self, key: str) -> str: + """Get an item of the Abstract Relation Data dictionary.""" + result = None + + # Avoiding "leader_only" error when cross-charm non-leader unit, not to report useless error + if ( + not hasattr(self.relation_data.fetch_my_relation_field, "leader_only") + or self.relation_data.component != self.relation_data.local_app + or self.relation_data.local_unit.is_leader() + ): + result = self.relation_data.fetch_my_relation_field(self.relation_id, key) + + if not result: + try: + result = self.relation_data.fetch_relation_field(self.relation_id, key) + except NotImplementedError: + pass + + if not result: + raise KeyError + return result + + def __eq__(self, d: dict) -> bool: + """Equality.""" + return self.data == d + + def __repr__(self) -> str: + """String representation Abstract Relation Data dictionary.""" + return repr(self.data) + + def __len__(self) -> int: + """Length of the Abstract Relation Data dictionary.""" + return len(self.data) + + def __delitem__(self, key: str) -> None: + """Delete an item of the Abstract Relation Data dictionary.""" + self.relation_data.delete_relation_data(self.relation_id, [key]) + + def has_key(self, key: str) -> bool: + """Does the key exist in the Abstract Relation Data dictionary?""" + return key in self.data + + def update(self, items: Dict[str, str]): + """Update the Abstract Relation Data dictionary.""" + self.relation_data.update_relation_data(self.relation_id, items) + + def keys(self) -> KeysView[str]: + """Keys of the Abstract Relation Data dictionary.""" + return self.data.keys() + + def values(self) -> ValuesView[str]: + """Values of the Abstract Relation Data dictionary.""" + return self.data.values() + + def items(self) -> ItemsView[str, str]: + """Items of the Abstract 
Relation Data dictionary."""
+        return self.data.items()
+
+    def pop(self, item: str) -> str:
+        """Pop an item of the Abstract Relation Data dictionary."""
+        result = self.relation_data.fetch_my_relation_field(self.relation_id, item)
+        if not result:
+            raise KeyError(f"Item {item} doesn't exist.")
+        self.relation_data.delete_relation_data(self.relation_id, [item])
+        return result
+
+    def __contains__(self, item: str) -> bool:
+        """Does the Abstract Relation Data dictionary contain item?"""
+        return item in self.data.values()
+
+    def __iter__(self):
+        """Iterate through the Abstract Relation Data dictionary."""
+        return iter(self.data)
+
+    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
+        """Safely get an item of the Abstract Relation Data dictionary."""
+        try:
+            if result := self[key]:
+                return result
+        except KeyError:
+            return default
+
+
+class Data(ABC):
+    """Base relation data manipulation (abstract) class."""
+
+    SCOPE = Scope.APP
+
+    # Local map to associate mappings with secrets potentially as a group
+    SECRET_LABEL_MAP = {
+        "username": SECRET_GROUPS.USER,
+        "password": SECRET_GROUPS.USER,
+        "uris": SECRET_GROUPS.USER,
+        "tls": SECRET_GROUPS.TLS,
+        "tls-ca": SECRET_GROUPS.TLS,
+    }
+
+    def __init__(
+        self,
+        model: Model,
+        relation_name: str,
+    ) -> None:
+        self._model = model
+        self.local_app = self._model.app
+        self.local_unit = self._model.unit
+        self.relation_name = relation_name
+        self._jujuversion = None
+        self.component = self.local_app if self.SCOPE == Scope.APP else self.local_unit
+        self.secrets = SecretCache(self._model, self.component)
+        self.data_component = None
+
+    @property
+    def relations(self) -> List[Relation]:
+        """The list of Relation instances associated with this relation_name."""
+        return [
+            relation
+            for relation in self._model.relations[self.relation_name]
+            if self._is_relation_active(relation)
+        ]
+
+    @property
+    def secrets_enabled(self):
+        """Is this Juju version allowing for Secrets usage?"""
+        if not self._jujuversion:
+            self._jujuversion = JujuVersion.from_environ()
+        return self._jujuversion.has_secrets
+
+    @property
+    def secret_label_map(self):
+        """Exposing secret-label map via a property -- could be overridden in descendants!"""
+        return self.SECRET_LABEL_MAP
+
+    # Mandatory overrides for internal/helper methods
+
+    @abstractmethod
+    def _get_relation_secret(
+        self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None
+    ) -> Optional[CachedSecret]:
+        """Retrieve a Juju Secret that's been stored in the relation databag."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def _fetch_specific_relation_data(
+        self, relation: Relation, fields: Optional[List[str]]
+    ) -> Dict[str, str]:
+        """Fetch data available (directly or indirectly -- i.e. secrets) from the relation."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def _fetch_my_specific_relation_data(
+        self, relation: Relation, fields: Optional[List[str]]
+    ) -> Dict[str, str]:
+        """Fetch data available (directly or indirectly -- i.e. secrets) from the relation for owner/this_app."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None:
+        """Update data available (directly or indirectly -- i.e. secrets) from the relation for owner/this_app."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None:
+        """Delete data available (directly or indirectly -- i.e.
secrets) from the relation for owner/this_app.""" + raise NotImplementedError + + # Optional overrides + + def _legacy_apply_on_fetch(self) -> None: + """This function should provide a list of compatibility functions to be applied when fetching (legacy) data.""" + pass + + def _legacy_apply_on_update(self, fields: List[str]) -> None: + """This function should provide a list of compatibility functions to be applied when writing data. + + Since data may be at a legacy version, migration may be mandatory. + """ + pass + + def _legacy_apply_on_delete(self, fields: List[str]) -> None: + """This function should provide a list of compatibility functions to be applied when deleting (legacy) data.""" + pass + + # Internal helper methods + + @staticmethod + def _is_relation_active(relation: Relation): + """Whether the relation is active based on contained data.""" + try: + _ = repr(relation.data) + return True + except (RuntimeError, ModelError): + return False + + @staticmethod + def _is_secret_field(field: str) -> bool: + """Is the field in question a secret reference (URI) field or not?""" + return field.startswith(PROV_SECRET_PREFIX) + + @staticmethod + def _generate_secret_label( + relation_name: str, relation_id: int, group_mapping: SecretGroup + ) -> str: + """Generate unique group_mappings for secrets within a relation context.""" + return f"{relation_name}.{relation_id}.{group_mapping}.secret" + + def _generate_secret_field_name(self, group_mapping: SecretGroup) -> str: + """Generate unique group_mappings for secrets within a relation context.""" + return f"{PROV_SECRET_PREFIX}{group_mapping}" + + def _relation_from_secret_label(self, secret_label: str) -> Optional[Relation]: + """Retrieve the relation that belongs to a secret label.""" + contents = secret_label.split(".") + + if not (contents and len(contents) >= 3): + return + + contents.pop() # ".secret" at the end + contents.pop() # Group mapping + relation_id = contents.pop() + try: + relation_id = int(relation_id) + except ValueError: + return + + # In case '.' character appeared in relation name + relation_name = ".".join(contents) + + try: + return self.get_relation(relation_name, relation_id) + except ModelError: + return + + def _group_secret_fields(self, secret_fields: List[str]) -> Dict[SecretGroup, List[str]]: + """Helper function to arrange secret mappings under their group. + + NOTE: All unrecognized items end up in the 'extra' secret bucket. + Make sure only secret fields are passed! 
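+
+        Example (illustrative, following SECRET_LABEL_MAP above):
+            ["username", "tls-ca", "foo"] is grouped as
+            {USER: ["username"], TLS: ["tls-ca"], EXTRA: ["foo"]}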
+ """ + secret_fieldnames_grouped = {} + for key in secret_fields: + if group := self.secret_label_map.get(key): + secret_fieldnames_grouped.setdefault(group, []).append(key) + else: + secret_fieldnames_grouped.setdefault(SECRET_GROUPS.EXTRA, []).append(key) + return secret_fieldnames_grouped + + def _get_group_secret_contents( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Union[Set[str], List[str]] = [], + ) -> Dict[str, str]: + """Helper function to retrieve collective, requested contents of a secret.""" + if (secret := self._get_relation_secret(relation.id, group)) and ( + secret_data := secret.get_content() + ): + return { + k: v for k, v in secret_data.items() if not secret_fields or k in secret_fields + } + return {} + + def _content_for_secret_group( + self, content: Dict[str, str], secret_fields: Set[str], group_mapping: SecretGroup + ) -> Dict[str, str]: + """Select : pairs from input, that belong to this particular Secret group.""" + if group_mapping == SECRET_GROUPS.EXTRA: + return { + k: v + for k, v in content.items() + if k in secret_fields and k not in self.secret_label_map.keys() + } + + return { + k: v + for k, v in content.items() + if k in secret_fields and self.secret_label_map.get(k) == group_mapping + } + + @juju_secrets_only + def _get_relation_secret_data( + self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[Dict[str, str]]: + """Retrieve contents of a Juju Secret that's been stored in the relation databag.""" + secret = self._get_relation_secret(relation_id, group_mapping, relation_name) + if secret: + return secret.get_content() + + # Core operations on Relation Fields manipulations (regardless whether the field is in the databag or in a secret) + # Internal functions to be called directly from transparent public interface functions (+closely related helpers) + + def _process_secret_fields( + self, + relation: Relation, + req_secret_fields: Optional[List[str]], + impacted_rel_fields: List[str], + operation: Callable, + *args, + **kwargs, + ) -> Tuple[Dict[str, str], Set[str]]: + """Isolate target secret fields of manipulation, and execute requested operation by Secret Group.""" + result = {} + + # If the relation started on a databag, we just stay on the databag + # (Rolling upgrades may result in a relation starting on databag, getting secrets enabled on-the-fly) + # self.local_app is sufficient to check (ignored if Requires, never has secrets -- works if Provider) + fallback_to_databag = ( + req_secret_fields + and (self.local_unit == self._model.unit and self.local_unit.is_leader()) + and set(req_secret_fields) & set(relation.data[self.component]) + ) + + normal_fields = set(impacted_rel_fields) + if req_secret_fields and self.secrets_enabled and not fallback_to_databag: + normal_fields = normal_fields - set(req_secret_fields) + secret_fields = set(impacted_rel_fields) - set(normal_fields) + + secret_fieldnames_grouped = self._group_secret_fields(list(secret_fields)) + + for group in secret_fieldnames_grouped: + # operation() should return nothing when all goes well + if group_result := operation(relation, group, secret_fields, *args, **kwargs): + # If "meaningful" data was returned, we take it. (Some 'operation'-s only return success/failure.) 
+                    if isinstance(group_result, dict):
+                        result.update(group_result)
+                else:
+                    # If it wasn't found as a secret, let's give it a 2nd chance as "normal" field
+                    # Needed when Juju3 Requires meets Juju2 Provider
+                    normal_fields |= set(secret_fieldnames_grouped[group])
+        return (result, normal_fields)
+
+    def _fetch_relation_data_without_secrets(
+        self, component: Union[Application, Unit], relation: Relation, fields: Optional[List[str]]
+    ) -> Dict[str, str]:
+        """Fetching databag contents when no secrets are involved.
+
+        Since the Provider's databag is the only one holding secrets, we can apply
+        a simplified workflow to read the Requirer side's databag.
+        This is typically used when the Provider side wants to read the Requirer side's data,
+        or when the Requirer side may want to read its own data.
+        """
+        if component not in relation.data or not relation.data[component]:
+            return {}
+
+        if fields:
+            return {
+                k: relation.data[component][k] for k in fields if k in relation.data[component]
+            }
+        else:
+            return dict(relation.data[component])
+
+    def _fetch_relation_data_with_secrets(
+        self,
+        component: Union[Application, Unit],
+        req_secret_fields: Optional[List[str]],
+        relation: Relation,
+        fields: Optional[List[str]] = None,
+    ) -> Dict[str, str]:
+        """Fetching databag contents when secrets may be involved.
+
+        This function has internal logic to resolve if a requested field may be "hidden"
+        within a Relation Secret, or directly available as a databag field. Typically
+        used to read the Provider side's databag (either by the Requirer side, or by the
+        Provider side itself).
+        """
+        result = {}
+        normal_fields = []
+
+        if not fields:
+            if component not in relation.data:
+                return {}
+
+            all_fields = list(relation.data[component].keys())
+            normal_fields = [field for field in all_fields if not self._is_secret_field(field)]
+            fields = normal_fields + req_secret_fields if req_secret_fields else normal_fields
+
+        if fields:
+            result, normal_fields = self._process_secret_fields(
+                relation, req_secret_fields, fields, self._get_group_secret_contents
+            )
+
+        # Processing "normal" fields. May include leftover from what we couldn't retrieve as a secret.
+ # (Typically when Juju3 Requires meets Juju2 Provider) + if normal_fields: + result.update( + self._fetch_relation_data_without_secrets(component, relation, list(normal_fields)) + ) + return result + + def _update_relation_data_without_secrets( + self, component: Union[Application, Unit], relation: Relation, data: Dict[str, str] + ) -> None: + """Updating databag contents when no secrets are involved.""" + if component not in relation.data or relation.data[component] is None: + return + + if relation: + relation.data[component].update(data) + + def _delete_relation_data_without_secrets( + self, component: Union[Application, Unit], relation: Relation, fields: List[str] + ) -> None: + """Remove databag fields 'fields' from Relation.""" + if component not in relation.data or relation.data[component] is None: + return + + for field in fields: + try: + relation.data[component].pop(field) + except KeyError: + logger.debug( + "Non-existing field '%s' was attempted to be removed from the databag (relation ID: %s)", + str(field), + str(relation.id), + ) + pass + + # Public interface methods + # Handling Relation Fields seamlessly, regardless if in databag or a Juju Secret + + def as_dict(self, relation_id: int) -> UserDict: + """Dict behavior representation of the Abstract Data.""" + return DataDict(self, relation_id) + + def get_relation(self, relation_name, relation_id) -> Relation: + """Safe way of retrieving a relation.""" + relation = self._model.get_relation(relation_name, relation_id) + + if not relation: + raise DataInterfacesError( + "Relation %s %s couldn't be retrieved", relation_name, relation_id + ) + + return relation + + def get_secret_uri(self, relation: Relation, group: SecretGroup) -> Optional[str]: + """Get the secret URI for the corresponding group.""" + secret_field = self._generate_secret_field_name(group) + return relation.data[self.component].get(secret_field) + + def set_secret_uri(self, relation: Relation, group: SecretGroup, secret_uri: str) -> None: + """Set the secret URI for the corresponding group.""" + secret_field = self._generate_secret_field_name(group) + relation.data[self.component][secret_field] = secret_uri + + def fetch_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Dict[int, Dict[str, str]]: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + Function cannot be used in `*-relation-broken` events and will raise an exception. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation ID). 
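+
+        A hedged usage sketch (relation ID and field names illustrative):
+
+            data = self.fetch_relation_data(relation_ids=[0], fields=["database"])
+            # -> {0: {"database": "mydb"}}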
+ """ + self._legacy_apply_on_fetch() + + if not relation_name: + relation_name = self.relation_name + + relations = [] + if relation_ids: + relations = [ + self.get_relation(relation_name, relation_id) for relation_id in relation_ids + ] + else: + relations = self.relations + + data = {} + for relation in relations: + if not relation_ids or (relation_ids and relation.id in relation_ids): + data[relation.id] = self._fetch_specific_relation_data(relation, fields) + return data + + def fetch_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """Get a single field from the relation data.""" + return ( + self.fetch_relation_data([relation_id], [field], relation_name) + .get(relation_id, {}) + .get(field) + ) + + def fetch_my_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Optional[Dict[int, Dict[str, str]]]: + """Fetch data of the 'owner' (or 'this app') side of the relation. + + NOTE: Since only the leader can read the relation's 'this_app'-side + Application databag, the functionality is limited to leaders + """ + self._legacy_apply_on_fetch() + + if not relation_name: + relation_name = self.relation_name + + relations = [] + if relation_ids: + relations = [ + self.get_relation(relation_name, relation_id) for relation_id in relation_ids + ] + else: + relations = self.relations + + data = {} + for relation in relations: + if not relation_ids or relation.id in relation_ids: + data[relation.id] = self._fetch_my_specific_relation_data(relation, fields) + return data + + def fetch_my_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """Get a single field from the relation data -- owner side. + + NOTE: Since only the leader can read the relation's 'this_app'-side + Application databag, the functionality is limited to leaders + """ + if relation_data := self.fetch_my_relation_data([relation_id], [field], relation_name): + return relation_data.get(relation_id, {}).get(field) + + @leader_only + def update_relation_data(self, relation_id: int, data: dict) -> None: + """Update the data within the relation.""" + self._legacy_apply_on_update(list(data.keys())) + + relation_name = self.relation_name + relation = self.get_relation(relation_name, relation_id) + return self._update_relation_data(relation, data) + + @leader_only + def delete_relation_data(self, relation_id: int, fields: List[str]) -> None: + """Remove field from the relation.""" + self._legacy_apply_on_delete(fields) + + relation_name = self.relation_name + relation = self.get_relation(relation_name, relation_id) + return self._delete_relation_data(relation, fields) + + +class EventHandlers(Object): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: Data, unique_key: str = ""): + """Manager of base client relations.""" + if not unique_key: + unique_key = relation_data.relation_name + super().__init__(charm, unique_key) + + self.charm = charm + self.relation_data = relation_data + + self.framework.observe( + charm.on[self.relation_data.relation_name].relation_changed, + self._on_relation_changed_event, + ) + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. 
+ + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + return diff(event, self.relation_data.data_component) + + @abstractmethod + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation data has changed.""" + raise NotImplementedError + + +# Base ProviderData and RequiresData + + +class ProviderData(Data): + """Base provides-side of the data products relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + ) -> None: + super().__init__(model, relation_name) + self.data_component = self.local_app + + # Private methods handling secrets + + @juju_secrets_only + def _add_relation_secret( + self, + relation: Relation, + group_mapping: SecretGroup, + secret_fields: Set[str], + data: Dict[str, str], + uri_to_databag=True, + ) -> bool: + """Add a new Juju Secret that will be registered in the relation databag.""" + if uri_to_databag and self.get_secret_uri(relation, group_mapping): + logging.error("Secret for relation %s already exists, not adding again", relation.id) + return False + + content = self._content_for_secret_group(data, secret_fields, group_mapping) + + label = self._generate_secret_label(self.relation_name, relation.id, group_mapping) + secret = self.secrets.add(label, content, relation) + + # According to lint we may not have a Secret ID + if uri_to_databag and secret.meta and secret.meta.id: + self.set_secret_uri(relation, group_mapping, secret.meta.id) + + # Return the content that was added + return True + + @juju_secrets_only + def _update_relation_secret( + self, + relation: Relation, + group_mapping: SecretGroup, + secret_fields: Set[str], + data: Dict[str, str], + ) -> bool: + """Update the contents of an existing Juju Secret, referred in the relation databag.""" + secret = self._get_relation_secret(relation.id, group_mapping) + + if not secret: + logging.error("Can't update secret for relation %s", relation.id) + return False + + content = self._content_for_secret_group(data, secret_fields, group_mapping) + + old_content = secret.get_content() + full_content = copy.deepcopy(old_content) + full_content.update(content) + secret.set_content(full_content) + + # Return True on success + return True + + def _add_or_update_relation_secrets( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Set[str], + data: Dict[str, str], + uri_to_databag=True, + ) -> bool: + """Update contents for Secret group. 
If the Secret doesn't exist, create it."""
+        if self._get_relation_secret(relation.id, group):
+            return self._update_relation_secret(relation, group, secret_fields, data)
+        else:
+            return self._add_relation_secret(relation, group, secret_fields, data, uri_to_databag)
+
+    @juju_secrets_only
+    def _delete_relation_secret(
+        self, relation: Relation, group: SecretGroup, secret_fields: List[str], fields: List[str]
+    ) -> bool:
+        """Delete fields from an existing Juju Secret, referred in the relation databag."""
+        secret = self._get_relation_secret(relation.id, group)
+
+        if not secret:
+            logging.error("Can't delete secret for relation %s", str(relation.id))
+            return False
+
+        old_content = secret.get_content()
+        new_content = copy.deepcopy(old_content)
+        for field in fields:
+            try:
+                new_content.pop(field)
+            except KeyError:
+                logging.debug(
+                    "Non-existing secret was attempted to be removed %s, %s",
+                    str(relation.id),
+                    str(field),
+                )
+                return False
+
+        # Remove secret from the relation if it's fully gone
+        if not new_content:
+            field = self._generate_secret_field_name(group)
+            try:
+                relation.data[self.component].pop(field)
+            except KeyError:
+                pass
+            label = self._generate_secret_label(self.relation_name, relation.id, group)
+            self.secrets.remove(label)
+        else:
+            secret.set_content(new_content)
+
+        # Return True on success
+        return True
+
+    # Mandatory internal overrides
+
+    @juju_secrets_only
+    def _get_relation_secret(
+        self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None
+    ) -> Optional[CachedSecret]:
+        """Retrieve a Juju Secret that's been stored in the relation databag."""
+        if not relation_name:
+            relation_name = self.relation_name
+
+        label = self._generate_secret_label(relation_name, relation_id, group_mapping)
+        if secret := self.secrets.get(label):
+            return secret
+
+        relation = self._model.get_relation(relation_name, relation_id)
+        if not relation:
+            return
+
+        if secret_uri := self.get_secret_uri(relation, group_mapping):
+            return self.secrets.get(label, secret_uri)
+
+    def _fetch_specific_relation_data(
+        self, relation: Relation, fields: Optional[List[str]]
+    ) -> Dict[str, str]:
+        """Fetching relation data for Provider.
+ + NOTE: Since all secret fields are in the Provider side of the databag, we don't need to worry about that + """ + if not relation.app: + return {} + + return self._fetch_relation_data_without_secrets(relation.app, relation, fields) + + def _fetch_my_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> dict: + """Fetching our own relation data.""" + secret_fields = None + if relation.app: + secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + return self._fetch_relation_data_with_secrets( + self.local_app, + secret_fields, + relation, + fields, + ) + + def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: + """Set values for fields not caring whether it's a secret or not.""" + req_secret_fields = [] + if relation.app: + req_secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + _, normal_fields = self._process_secret_fields( + relation, + req_secret_fields, + list(data), + self._add_or_update_relation_secrets, + data=data, + ) + + normal_content = {k: v for k, v in data.items() if k in normal_fields} + self._update_relation_data_without_secrets(self.local_app, relation, normal_content) + + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Delete fields from the Relation not caring whether it's a secret or not.""" + req_secret_fields = [] + if relation.app: + req_secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + _, normal_fields = self._process_secret_fields( + relation, req_secret_fields, fields, self._delete_relation_secret, fields=fields + ) + self._delete_relation_data_without_secrets(self.local_app, relation, list(normal_fields)) + + # Public methods - "native" + + def set_credentials(self, relation_id: int, username: str, password: str) -> None: + """Set credentials. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + username: user that was created. + password: password of the created user. + """ + self.update_relation_data(relation_id, {"username": username, "password": password}) + + def set_tls(self, relation_id: int, tls: str) -> None: + """Set whether TLS is enabled. + + Args: + relation_id: the identifier for a particular relation. + tls: whether tls is enabled (True or False). + """ + self.update_relation_data(relation_id, {"tls": tls}) + + def set_tls_ca(self, relation_id: int, tls_ca: str) -> None: + """Set the TLS CA in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + tls_ca: TLS certification authority. 
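+
+        A usage sketch (variable names illustrative):
+
+            provider_data.set_tls_ca(relation.id, ca_pem)  # ca_pem: PEM-encoded CA chain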
+ """ + self.update_relation_data(relation_id, {"tls-ca": tls_ca}) + + # Public functions -- inherited + + fetch_my_relation_data = leader_only(Data.fetch_my_relation_data) + fetch_my_relation_field = leader_only(Data.fetch_my_relation_field) + + +class RequirerData(Data): + """Requirer-side of the relation.""" + + SECRET_FIELDS = ["username", "password", "tls", "tls-ca", "uris"] + + def __init__( + self, + model, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ): + """Manager of base client relations.""" + super().__init__(model, relation_name) + self.extra_user_roles = extra_user_roles + self._secret_fields = list(self.SECRET_FIELDS) + if additional_secret_fields: + self._secret_fields += additional_secret_fields + self.data_component = self.local_unit + + @property + def secret_fields(self) -> Optional[List[str]]: + """Local access to secrets field, in case they are being used.""" + if self.secrets_enabled: + return self._secret_fields + + # Internal helper functions + + def _register_secret_to_relation( + self, relation_name: str, relation_id: int, secret_id: str, group: SecretGroup + ): + """Fetch secrets and apply local label on them. + + [MAGIC HERE] + If we fetch a secret using get_secret(id=, label=), + then will be "stuck" on the Secret object, whenever it may + appear (i.e. as an event attribute, or fetched manually) on future occasions. + + This will allow us to uniquely identify the secret on Provider side (typically on + 'secret-changed' events), and map it to the corresponding relation. + """ + label = self._generate_secret_label(relation_name, relation_id, group) + + # Fetching the Secret's meta information ensuring that it's locally getting registered with + CachedSecret(self._model, self.component, label, secret_id).meta + + def _register_secrets_to_relation(self, relation: Relation, params_name_list: List[str]): + """Make sure that secrets of the provided list are locally 'registered' from the databag. + + More on 'locally registered' magic is described in _register_secret_to_relation() method + """ + if not relation.app: + return + + for group in SECRET_GROUPS.groups(): + secret_field = self._generate_secret_field_name(group) + if secret_field in params_name_list and ( + secret_uri := self.get_secret_uri(relation, group) + ): + self._register_secret_to_relation(relation.name, relation.id, secret_uri, group) + + def _is_resource_created_for_relation(self, relation: Relation) -> bool: + if not relation.app: + return False + + data = self.fetch_relation_data([relation.id], ["username", "password"]).get( + relation.id, {} + ) + return bool(data.get("username")) and bool(data.get("password")) + + # Public functions + + def get_secret_uri(self, relation: Relation, group: SecretGroup) -> Optional[str]: + """Getting relation secret URI for the corresponding Secret Group.""" + secret_field = self._generate_secret_field_name(group) + return relation.data[relation.app].get(secret_field) + + def set_secret_uri(self, relation: Relation, group: SecretGroup, uri: str) -> None: + """Setting relation secret URI is not possible for a Requirer.""" + raise NotImplementedError("Requirer can not change the relation secret URI.") + + def is_resource_created(self, relation_id: Optional[int] = None) -> bool: + """Check if the resource has been created. + + This function can be used to check if the Provider answered with data in the charm code + when outside an event callback. 
+ + Args: + relation_id (int, optional): When provided the check is done only for the relation id + provided, otherwise the check is done for all relations + + Returns: + True or False + + Raises: + IndexError: If relation_id is provided but that relation does not exist + """ + if relation_id is not None: + try: + relation = [relation for relation in self.relations if relation.id == relation_id][ + 0 + ] + return self._is_resource_created_for_relation(relation) + except IndexError: + raise IndexError(f"relation id {relation_id} cannot be accessed") + else: + return ( + all( + self._is_resource_created_for_relation(relation) for relation in self.relations + ) + if self.relations + else False + ) + + # Mandatory internal overrides + + @juju_secrets_only + def _get_relation_secret( + self, relation_id: int, group: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret that's been stored in the relation databag.""" + if not relation_name: + relation_name = self.relation_name + + label = self._generate_secret_label(relation_name, relation_id, group) + return self.secrets.get(label) + + def _fetch_specific_relation_data( + self, relation, fields: Optional[List[str]] = None + ) -> Dict[str, str]: + """Fetching Requirer data -- that may include secrets.""" + if not relation.app: + return {} + return self._fetch_relation_data_with_secrets( + relation.app, self.secret_fields, relation, fields + ) + + def _fetch_my_specific_relation_data(self, relation, fields: Optional[List[str]]) -> dict: + """Fetching our own relation data.""" + return self._fetch_relation_data_without_secrets(self.local_app, relation, fields) + + def _update_relation_data(self, relation: Relation, data: dict) -> None: + """Updates a set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation: the particular relation. + data: dict containing the key-value pairs + that should be updated in the relation. + """ + return self._update_relation_data_without_secrets(self.local_app, relation, data) + + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Deletes a set of fields from the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation: the particular relation. + fields: list containing the field names that should be removed from the relation. 
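+
+        Example (illustrative): self._delete_relation_data(relation, ["username"])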
+ """ + return self._delete_relation_data_without_secrets(self.local_app, relation, fields) + + # Public functions -- inherited + + fetch_my_relation_data = leader_only(Data.fetch_my_relation_data) + fetch_my_relation_field = leader_only(Data.fetch_my_relation_field) + + +class RequirerEventHandlers(EventHandlers): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: RequirerData, unique_key: str = ""): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + + self.framework.observe( + self.charm.on[relation_data.relation_name].relation_created, + self._on_relation_created_event, + ) + self.framework.observe( + charm.on.secret_changed, + self._on_secret_changed_event, + ) + + # Event handlers + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the relation is created.""" + if not self.relation_data.local_unit.is_leader(): + return + + if self.relation_data.secret_fields: # pyright: ignore [reportAttributeAccessIssue] + set_encoded_field( + event.relation, + self.relation_data.component, + REQ_SECRET_FIELDS, + self.relation_data.secret_fields, # pyright: ignore [reportAttributeAccessIssue] + ) + + @abstractmethod + def _on_secret_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation data has changed.""" + raise NotImplementedError + + +################################################################################ +# Peer Relation Data +################################################################################ + + +class DataPeerData(RequirerData, ProviderData): + """Represents peer relations data.""" + + SECRET_FIELDS = [] + SECRET_FIELD_NAME = "internal_secret" + SECRET_LABEL_MAP = {} + + def __init__( + self, + model, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + ): + RequirerData.__init__( + self, + model, + relation_name, + extra_user_roles, + additional_secret_fields, + ) + self.secret_field_name = secret_field_name if secret_field_name else self.SECRET_FIELD_NAME + self.deleted_label = deleted_label + self._secret_label_map = {} + + # Legacy information holders + self._legacy_labels = [] + self._legacy_secret_uri = None + + # Secrets that are being dynamically added within the scope of this event handler run + self._new_secrets = [] + self._additional_secret_group_mapping = additional_secret_group_mapping + + for group, fields in additional_secret_group_mapping.items(): + if group not in SECRET_GROUPS.groups(): + setattr(SECRET_GROUPS, group, group) + for field in fields: + secret_group = SECRET_GROUPS.get_group(group) + internal_field = self._field_to_internal_name(field, secret_group) + self._secret_label_map.setdefault(group, []).append(internal_field) + self._secret_fields.append(internal_field) + + @property + def scope(self) -> Optional[Scope]: + """Turn component information into Scope.""" + if isinstance(self.component, Application): + return Scope.APP + if isinstance(self.component, Unit): + return Scope.UNIT + + @property + def secret_label_map(self) -> Dict[str, str]: + """Property storing secret mappings.""" + return self._secret_label_map + + @property + def static_secret_fields(self) -> List[str]: + """Re-definition of the property in a way that dynamically extended list is 
retrieved.""" + return self._secret_fields + + @property + def secret_fields(self) -> List[str]: + """Re-definition of the property in a way that dynamically extended list is retrieved.""" + return ( + self.static_secret_fields if self.static_secret_fields else self.current_secret_fields + ) + + @property + def current_secret_fields(self) -> List[str]: + """Helper method to get all currently existing secret fields (added statically or dynamically).""" + if not self.secrets_enabled: + return [] + + if len(self._model.relations[self.relation_name]) > 1: + raise ValueError(f"More than one peer relation on {self.relation_name}") + + relation = self._model.relations[self.relation_name][0] + fields = [] + + ignores = [SECRET_GROUPS.get_group("user"), SECRET_GROUPS.get_group("tls")] + for group in SECRET_GROUPS.groups(): + if group in ignores: + continue + if content := self._get_group_secret_contents(relation, group): + fields += list(content.keys()) + return list(set(fields) | set(self._new_secrets)) + + @dynamic_secrets_only + def set_secret( + self, + relation_id: int, + field: str, + value: str, + group_mapping: Optional[SecretGroup] = None, + ) -> None: + """Public interface method to add a Relation Data field specifically as a Juju Secret. + + Args: + relation_id: ID of the relation + field: The secret field that is to be added + value: The string value of the secret + group_mapping: The name of the "secret group", in case the field is to be added to an existing secret + """ + self._legacy_apply_on_update([field]) + + full_field = self._field_to_internal_name(field, group_mapping) + if self.secrets_enabled and full_field not in self.current_secret_fields: + self._new_secrets.append(full_field) + if self.valid_field_pattern(field, full_field): + self.update_relation_data(relation_id, {full_field: value}) + + # Unlike for set_secret(), there's no harm using this operation with static secrets + # The restricion is only added to keep the concept clear + @dynamic_secrets_only + def get_secret( + self, + relation_id: int, + field: str, + group_mapping: Optional[SecretGroup] = None, + ) -> Optional[str]: + """Public interface method to fetch secrets only.""" + self._legacy_apply_on_fetch() + + full_field = self._field_to_internal_name(field, group_mapping) + if ( + self.secrets_enabled + and full_field not in self.current_secret_fields + and field not in self.current_secret_fields + ): + return + if self.valid_field_pattern(field, full_field): + return self.fetch_my_relation_field(relation_id, full_field) + + @dynamic_secrets_only + def delete_secret( + self, + relation_id: int, + field: str, + group_mapping: Optional[SecretGroup] = None, + ) -> Optional[str]: + """Public interface method to delete secrets only.""" + self._legacy_apply_on_delete([field]) + + full_field = self._field_to_internal_name(field, group_mapping) + if self.secrets_enabled and full_field not in self.current_secret_fields: + logger.warning(f"Secret {field} from group {group_mapping} was not found") + return + + if self.valid_field_pattern(field, full_field): + self.delete_relation_data(relation_id, [full_field]) + + ########################################################################## + # Helpers + ########################################################################## + + @staticmethod + def _field_to_internal_name(field: str, group: Optional[SecretGroup]) -> str: + if not group or group == SECRET_GROUPS.EXTRA: + return field + return f"{field}{GROUP_SEPARATOR}{group}" + + @staticmethod + def 
_internal_name_to_field(name: str) -> Tuple[str, SecretGroup]: + parts = name.split(GROUP_SEPARATOR) + if not len(parts) > 1: + return (parts[0], SECRET_GROUPS.EXTRA) + secret_group = SECRET_GROUPS.get_group(parts[1]) + if not secret_group: + raise ValueError(f"Invalid secret field {name}") + return (parts[0], secret_group) + + def _group_secret_fields(self, secret_fields: List[str]) -> Dict[SecretGroup, List[str]]: + """Helper function to arrange secret mappings under their group. + + NOTE: All unrecognized items end up in the 'extra' secret bucket. + Make sure only secret fields are passed! + """ + secret_fieldnames_grouped = {} + for key in secret_fields: + field, group = self._internal_name_to_field(key) + secret_fieldnames_grouped.setdefault(group, []).append(field) + return secret_fieldnames_grouped + + def _content_for_secret_group( + self, content: Dict[str, str], secret_fields: Set[str], group_mapping: SecretGroup + ) -> Dict[str, str]: + """Select : pairs from input, that belong to this particular Secret group.""" + if group_mapping == SECRET_GROUPS.EXTRA: + return {k: v for k, v in content.items() if k in self.secret_fields} + return { + self._internal_name_to_field(k)[0]: v + for k, v in content.items() + if k in self.secret_fields + } + + def valid_field_pattern(self, field: str, full_field: str) -> bool: + """Check that no secret group is attempted to be used together without secrets being enabled. + + Secrets groups are impossible to use with versions that are not yet supporting secrets. + """ + if not self.secrets_enabled and full_field != field: + logger.error( + f"Can't access {full_field}: no secrets available (i.e. no secret groups either)." + ) + return False + return True + + ########################################################################## + # Backwards compatibility / Upgrades + ########################################################################## + # These functions are used to keep backwards compatibility on upgrades + # Policy: + # All data is kept intact until the first write operation. (This allows a minimal + # grace period during which rollbacks are fully safe. For more info see spec.) + # All data involves: + # - databag + # - secrets content + # - secret labels (!!!) 
+    # Legacy functions must return None, and leave an equally consistent state whether
+    # they are executed or skipped (as a high enough versioned execution environment may
+    # not require so)
+
+    # Full legacy stack for each operation
+
+    def _legacy_apply_on_fetch(self) -> None:
+        """All legacy functions to be applied on fetch."""
+        relation = self._model.relations[self.relation_name][0]
+        self._legacy_compat_generate_prev_labels()
+        self._legacy_compat_secret_uri_from_databag(relation)
+
+    def _legacy_apply_on_update(self, fields) -> None:
+        """All legacy functions to be applied on update."""
+        relation = self._model.relations[self.relation_name][0]
+        self._legacy_compat_generate_prev_labels()
+        self._legacy_compat_secret_uri_from_databag(relation)
+        self._legacy_migration_remove_secret_from_databag(relation, fields)
+        self._legacy_migration_remove_secret_field_name_from_databag(relation)
+
+    def _legacy_apply_on_delete(self, fields) -> None:
+        """All legacy functions to be applied on delete."""
+        relation = self._model.relations[self.relation_name][0]
+        self._legacy_compat_generate_prev_labels()
+        self._legacy_compat_secret_uri_from_databag(relation)
+        self._legacy_compat_check_deleted_label(relation, fields)
+
+    # Compatibility
+
+    @legacy_apply_from_version(18)
+    def _legacy_compat_check_deleted_label(self, relation, fields) -> None:
+        """Helper function for legacy behavior.
+
+        As long as https://bugs.launchpad.net/juju/+bug/2028094 wasn't fixed,
+        we did not delete fields but rather kept them in the secret with a string value
+        expressing invalidity. This function is maintaining that behavior when needed.
+        """
+        if not self.deleted_label:
+            return
+
+        current_data = self.fetch_my_relation_data([relation.id], fields)
+        if current_data is not None:
+            # Check if the secret we want to delete actually exists
+            # Given the "deleted label", here we can't rely on the default mechanism (i.e. 'key not found')
+            if non_existent := (set(fields) & set(self.secret_fields)) - set(
+                current_data.get(relation.id, [])
+            ):
+                logger.debug(
+                    "Non-existing secret %s was attempted to be removed.",
+                    ", ".join(non_existent),
+                )
+
+    @legacy_apply_from_version(18)
+    def _legacy_compat_secret_uri_from_databag(self, relation) -> None:
+        """Fetching the secret URI from the databag, in case it is stored there."""
+        self._legacy_secret_uri = relation.data[self.component].get(
+            self._generate_secret_field_name(), None
+        )
+
+    @legacy_apply_from_version(34)
+    def _legacy_compat_generate_prev_labels(self) -> None:
+        """Generator for legacy secret label names, for backwards compatibility.
+
+        Secret label is part of the data that MUST be maintained across rolling upgrades.
+        In case there may be a change on a secret label, the old label must be recognized
+        after upgrades, and left intact until the first write operation -- when we roll over
+        to the new label.
+
+        This function keeps "memory" of previously used secret labels.
+        NOTE: Return value takes decorator into account -- all 'legacy' functions may return `None`
+
+        v0.34 (rev69): Fixing issue https://github.com/canonical/data-platform-libs/issues/155
+        meant moving from labels '<app_name>.<scope>' (i.e. 'mysql.app', 'mysql.unit')
+        to labels '<relation_name>.<app_name>.<scope>' (like 'peer.mysql.app')
+        """
+        if self._legacy_labels:
+            return
+
+        result = []
+        members = [self._model.app.name]
+        if self.scope:
+            members.append(self.scope.value)
+        result.append(f"{'.'.join(members)}")
+        self._legacy_labels = result
+
+    # Migration
+
+    @legacy_apply_from_version(18)
+    def _legacy_migration_remove_secret_from_databag(self, relation, fields: List[str]) -> None:
+        """For Rolling Upgrades -- when moving from databag to secrets usage.
+
+        Practically what happens here is to remove stuff from the databag that is
+        to be stored in secrets.
+        """
+        if not self.secret_fields:
+            return
+
+        secret_fields_passed = set(self.secret_fields) & set(fields)
+        for field in secret_fields_passed:
+            if self._fetch_relation_data_without_secrets(self.component, relation, [field]):
+                self._delete_relation_data_without_secrets(self.component, relation, [field])
+
+    @legacy_apply_from_version(18)
+    def _legacy_migration_remove_secret_field_name_from_databag(self, relation) -> None:
+        """Making sure that the old databag URI is gone.
+
+        This action should not be executed more than once.
+
+        There was a phase (before moving secrets usage to libs) when charms saved the peer
+        secret URI to the databag, and used this URI from then on to retrieve their secret.
+        When upgrading to charm versions using this library, we need to add a label to the
+        secret and access it via the label from then on, and remove the old traces from the databag.
+        """
+        # Nothing to do if 'internal-secret' is not in the databag
+        if not (relation.data[self.component].get(self._generate_secret_field_name())):
+            return
+
+        # Making sure that the secret receives its label
+        # (This should have happened by the time we get here; this is rather an extra security measure.)
+        secret = self._get_relation_secret(relation.id)
+
+        # Either app scope secret with leader executing, or unit scope secret
+        leader_or_unit_scope = self.component != self.local_app or self.local_unit.is_leader()
+        if secret and leader_or_unit_scope:
+            # Databag reference to the secret URI can be removed, now that it's labelled
+            relation.data[self.component].pop(self._generate_secret_field_name(), None)
+
+    ##########################################################################
+    # Event handlers
+    ##########################################################################
+
+    def _on_relation_changed_event(self, event: RelationChangedEvent) -> None:
+        """Event emitted when the relation has changed."""
+        pass
+
+    def _on_secret_changed_event(self, event: SecretChangedEvent) -> None:
+        """Event emitted when the secret has changed."""
+        pass
+
+    ##########################################################################
+    # Overrides of Relation Data handling functions
+    ##########################################################################
+
+    def _generate_secret_label(
+        self, relation_name: str, relation_id: int, group_mapping: SecretGroup
+    ) -> str:
+        members = [relation_name, self._model.app.name]
+        if self.scope:
+            members.append(self.scope.value)
+        if group_mapping != SECRET_GROUPS.EXTRA:
+            members.append(group_mapping)
+        return f"{'.'.join(members)}"
+
+    def _generate_secret_field_name(self, group_mapping: SecretGroup = SECRET_GROUPS.EXTRA) -> str:
+        """Generate unique group_mappings for secrets within a relation context."""
+        return f"{self.secret_field_name}"
+
+    @juju_secrets_only
+    def _get_relation_secret(
+        self,
+        relation_id: int,
+        group_mapping: SecretGroup = SECRET_GROUPS.EXTRA,
+        relation_name: Optional[str] = None,
+    ) -> Optional[CachedSecret]:
"""Retrieve a Juju Secret specifically for peer relations. + + In case this code may be executed within a rolling upgrade, and we may need to + migrate secrets from the databag to labels, we make sure to stick the correct + label on the secret, and clean up the local databag. + """ + if not relation_name: + relation_name = self.relation_name + + relation = self._model.get_relation(relation_name, relation_id) + if not relation: + return + + label = self._generate_secret_label(relation_name, relation_id, group_mapping) + + # URI or legacy label is only to applied when moving single legacy secret to a (new) label + if group_mapping == SECRET_GROUPS.EXTRA: + # Fetching the secret with fallback to URI (in case label is not yet known) + # Label would we "stuck" on the secret in case it is found + return self.secrets.get( + label, self._legacy_secret_uri, legacy_labels=self._legacy_labels + ) + return self.secrets.get(label) + + def _get_group_secret_contents( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Union[Set[str], List[str]] = [], + ) -> Dict[str, str]: + """Helper function to retrieve collective, requested contents of a secret.""" + secret_fields = [self._internal_name_to_field(k)[0] for k in secret_fields] + result = super()._get_group_secret_contents(relation, group, secret_fields) + if self.deleted_label: + result = {key: result[key] for key in result if result[key] != self.deleted_label} + if self._additional_secret_group_mapping: + return {self._field_to_internal_name(key, group): result[key] for key in result} + return result + + @either_static_or_dynamic_secrets + def _fetch_my_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetch data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + return self._fetch_relation_data_with_secrets( + self.component, self.secret_fields, relation, fields + ) + + @either_static_or_dynamic_secrets + def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: + """Update data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + _, normal_fields = self._process_secret_fields( + relation, + self.secret_fields, + list(data), + self._add_or_update_relation_secrets, + data=data, + uri_to_databag=False, + ) + + normal_content = {k: v for k, v in data.items() if k in normal_fields} + self._update_relation_data_without_secrets(self.component, relation, normal_content) + + @either_static_or_dynamic_secrets + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Delete data available (directily or indirectly -- i.e. 
secrets) from the relation for owner/this_app.""" + if self.secret_fields and self.deleted_label: + + _, normal_fields = self._process_secret_fields( + relation, + self.secret_fields, + fields, + self._update_relation_secret, + data={field: self.deleted_label for field in fields}, + ) + else: + _, normal_fields = self._process_secret_fields( + relation, self.secret_fields, fields, self._delete_relation_secret, fields=fields + ) + self._delete_relation_data_without_secrets(self.component, relation, list(normal_fields)) + + def fetch_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Dict[int, Dict[str, str]]: + """This method makes no sense for a Peer Relation.""" + raise NotImplementedError( + "Peer Relation only supports 'self-side' fetch methods: " + "fetch_my_relation_data() and fetch_my_relation_field()" + ) + + def fetch_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """This method makes no sense for a Peer Relation.""" + raise NotImplementedError( + "Peer Relation only supports 'self-side' fetch methods: " + "fetch_my_relation_data() and fetch_my_relation_field()" + ) + + ########################################################################## + # Public functions -- inherited + ########################################################################## + + fetch_my_relation_data = Data.fetch_my_relation_data + fetch_my_relation_field = Data.fetch_my_relation_field + + +class DataPeerEventHandlers(RequirerEventHandlers): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: RequirerData, unique_key: str = ""): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + pass + + def _on_secret_changed_event(self, event: SecretChangedEvent) -> None: + """Event emitted when the secret has changed.""" + pass + + +class DataPeer(DataPeerData, DataPeerEventHandlers): + """Represents peer relations.""" + + def __init__( + self, + charm, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + unique_key: str = "", + ): + DataPeerData.__init__( + self, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + secret_field_name, + deleted_label, + ) + DataPeerEventHandlers.__init__(self, charm, self, unique_key) + + +class DataPeerUnitData(DataPeerData): + """Unit data abstraction representation.""" + + SCOPE = Scope.UNIT + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + +class DataPeerUnit(DataPeerUnitData, DataPeerEventHandlers): + """Unit databag representation.""" + + def __init__( + self, + charm, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + unique_key: str = "", + ): + DataPeerData.__init__( + self, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + 
secret_field_name,
+            deleted_label,
+        )
+        DataPeerEventHandlers.__init__(self, charm, self, unique_key)
+
+
+class DataPeerOtherUnitData(DataPeerUnitData):
+    """Unit data abstraction representation."""
+
+    def __init__(self, unit: Unit, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.local_unit = unit
+        self.component = unit
+
+    def update_relation_data(self, relation_id: int, data: dict) -> None:
+        """This method makes no sense for an Other-Unit Peer Relation."""
+        raise NotImplementedError("It's not possible to update data of another unit.")
+
+    def delete_relation_data(self, relation_id: int, fields: List[str]) -> None:
+        """This method makes no sense for an Other-Unit Peer Relation."""
+        raise NotImplementedError("It's not possible to delete data of another unit.")
+
+
+class DataPeerOtherUnitEventHandlers(DataPeerEventHandlers):
+    """Requires-side of the relation."""
+
+    def __init__(self, charm: CharmBase, relation_data: DataPeerUnitData):
+        """Manager of base client relations."""
+        unique_key = f"{relation_data.relation_name}-{relation_data.local_unit.name}"
+        super().__init__(charm, relation_data, unique_key=unique_key)
+
+
+class DataPeerOtherUnit(DataPeerOtherUnitData, DataPeerOtherUnitEventHandlers):
+    """Unit databag representation for another unit than the executor."""
+
+    def __init__(
+        self,
+        unit: Unit,
+        charm: CharmBase,
+        relation_name: str,
+        extra_user_roles: Optional[str] = None,
+        additional_secret_fields: Optional[List[str]] = [],
+        additional_secret_group_mapping: Dict[str, str] = {},
+        secret_field_name: Optional[str] = None,
+        deleted_label: Optional[str] = None,
+    ):
+        DataPeerOtherUnitData.__init__(
+            self,
+            unit,
+            charm.model,
+            relation_name,
+            extra_user_roles,
+            additional_secret_fields,
+            additional_secret_group_mapping,
+            secret_field_name,
+            deleted_label,
+        )
+        DataPeerOtherUnitEventHandlers.__init__(self, charm, self)
+
+
+################################################################################
+# Cross-charm Relations Data Handling and Events
+################################################################################
+
+# Generic events
+
+
+class ExtraRoleEvent(RelationEvent):
+    """Base class for data events."""
+
+    @property
+    def extra_user_roles(self) -> Optional[str]:
+        """Returns the extra user roles that were requested."""
+        if not self.relation.app:
+            return None
+
+        return self.relation.data[self.relation.app].get("extra-user-roles")
+
+
+class RelationEventWithSecret(RelationEvent):
+    """Base class for Relation Events that need to handle secrets."""
+
+    @property
+    def _secrets(self) -> dict:
+        """Caching secrets to avoid fetching them each time a field is referred.
+
+        DON'T USE the encapsulated helper variable outside of this function
+        """
+        if not hasattr(self, "_cached_secrets"):
+            self._cached_secrets = {}
+        return self._cached_secrets
+
+    def _get_secret(self, group) -> Optional[Dict[str, str]]:
+        """Retrieving secrets."""
+        if not self.app:
+            return
+        if not self._secrets.get(group):
+            self._secrets[group] = None
+            secret_field = f"{PROV_SECRET_PREFIX}{group}"
+            if secret_uri := self.relation.data[self.app].get(secret_field):
+                secret = self.framework.model.get_secret(id=secret_uri)
+                self._secrets[group] = secret.get_content()
+        return self._secrets[group]
+
+    @property
+    def secrets_enabled(self):
+        """Is this Juju version allowing for Secrets usage?"""
+        return JujuVersion.from_environ().has_secrets
+
+
+class AuthenticationEvent(RelationEventWithSecret):
+    """Base class for authentication fields for events.
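+
+        (Hedged note: each property below first tries the matching Juju Secret --
+        the 'user' or 'tls' group -- and falls back to the plain databag field of
+        the same name when secrets are not in use.)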
+ + The amount of logic added here is not ideal -- but this was the only way to preserve + the interface when moving to Juju Secrets + """ + + @property + def username(self) -> Optional[str]: + """Returns the created username.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("username") + + return self.relation.data[self.relation.app].get("username") + + @property + def password(self) -> Optional[str]: + """Returns the password for the created user.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("password") + + return self.relation.data[self.relation.app].get("password") + + @property + def tls(self) -> Optional[str]: + """Returns whether TLS is configured.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("tls") + if secret: + return secret.get("tls") + + return self.relation.data[self.relation.app].get("tls") + + @property + def tls_ca(self) -> Optional[str]: + """Returns TLS CA.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("tls") + if secret: + return secret.get("tls-ca") + + return self.relation.data[self.relation.app].get("tls-ca") + + +# Database related events and fields + + +class DatabaseProvidesEvent(RelationEvent): + """Base class for database events.""" + + @property + def database(self) -> Optional[str]: + """Returns the database that was requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("database") + + +class DatabaseRequestedEvent(DatabaseProvidesEvent, ExtraRoleEvent): + """Event emitted when a new database is requested for use on this relation.""" + + @property + def external_node_connectivity(self) -> bool: + """Returns the requested external_node_connectivity field.""" + if not self.relation.app: + return False + + return ( + self.relation.data[self.relation.app].get("external-node-connectivity", "false") + == "true" + ) + + +class DatabaseProvidesEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. + """ + + database_requested = EventSource(DatabaseRequestedEvent) + + +class DatabaseRequiresEvent(RelationEventWithSecret): + """Base class for database events.""" + + @property + def database(self) -> Optional[str]: + """Returns the database name.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("database") + + @property + def endpoints(self) -> Optional[str]: + """Returns a comma separated list of read/write endpoints. + + In VM charms, this is the primary's address. + In kubernetes charms, this is the service to the primary pod. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("endpoints") + + @property + def read_only_endpoints(self) -> Optional[str]: + """Returns a comma separated list of read only endpoints. + + In VM charms, this is the address of all the secondary instances. + In kubernetes charms, this is the service to all replica pod instances. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("read-only-endpoints") + + @property + def replset(self) -> Optional[str]: + """Returns the replicaset name. + + MongoDB only. 
+ """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("replset") + + @property + def uris(self) -> Optional[str]: + """Returns the connection URIs. + + MongoDB, Redis, OpenSearch. + """ + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("uris") + + return self.relation.data[self.relation.app].get("uris") + + @property + def version(self) -> Optional[str]: + """Returns the version of the database. + + Version as informed by the database daemon. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("version") + + +class DatabaseCreatedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when a new database is created for use on this relation.""" + + +class DatabaseEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when the read/write endpoints are changed.""" + + +class DatabaseReadOnlyEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when the read only endpoints are changed.""" + + +class DatabaseRequiresEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. + """ + + database_created = EventSource(DatabaseCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent) + + +# Database Provider and Requires + + +class DatabaseProviderData(ProviderData): + """Provider-side data of the database relations.""" + + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) + + def set_database(self, relation_id: int, database_name: str) -> None: + """Set database name. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + database_name: database name. + """ + self.update_relation_data(relation_id, {"database": database_name}) + + def set_endpoints(self, relation_id: int, connection_strings: str) -> None: + """Set database primary connections. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + In VM charms, only the primary's address should be passed as an endpoint. + In kubernetes charms, the service endpoint to the primary pod should be + passed as an endpoint. + + Args: + relation_id: the identifier for a particular relation. + connection_strings: database hosts and ports comma separated list. + """ + self.update_relation_data(relation_id, {"endpoints": connection_strings}) + + def set_read_only_endpoints(self, relation_id: int, connection_strings: str) -> None: + """Set database replicas connection strings. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + connection_strings: database hosts and ports comma separated list. + """ + self.update_relation_data(relation_id, {"read-only-endpoints": connection_strings}) + + def set_replset(self, relation_id: int, replset: str) -> None: + """Set replica set name in the application relation databag. + + MongoDB only. + + Args: + relation_id: the identifier for a particular relation. + replset: replica set name. 
+ """ + self.update_relation_data(relation_id, {"replset": replset}) + + def set_uris(self, relation_id: int, uris: str) -> None: + """Set the database connection URIs in the application relation databag. + + MongoDB, Redis, and OpenSearch only. + + Args: + relation_id: the identifier for a particular relation. + uris: connection URIs. + """ + self.update_relation_data(relation_id, {"uris": uris}) + + def set_version(self, relation_id: int, version: str) -> None: + """Set the database version in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + version: database version. + """ + self.update_relation_data(relation_id, {"version": version}) + + def set_subordinated(self, relation_id: int) -> None: + """Raises the subordinated flag in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + """ + self.update_relation_data(relation_id, {"subordinated": "true"}) + + +class DatabaseProviderEventHandlers(EventHandlers): + """Provider-side of the database relation handlers.""" + + on = DatabaseProvidesEvents() # pyright: ignore [reportAssignmentType] + + def __init__( + self, charm: CharmBase, relation_data: DatabaseProviderData, unique_key: str = "" + ): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + # Just to calm down pyright, it can't parse that the same type is being used in the super() call above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit a database requested event if the setup key (database name and optional + # extra user roles) was added to the relation databag by the application. + if "database" in diff.added: + getattr(self.on, "database_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class DatabaseProvides(DatabaseProviderData, DatabaseProviderEventHandlers): + """Provider-side of the database relations.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + DatabaseProviderData.__init__(self, charm.model, relation_name) + DatabaseProviderEventHandlers.__init__(self, charm, self) + + +class DatabaseRequirerData(RequirerData): + """Requirer-side of the database relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + database_name: str, + extra_user_roles: Optional[str] = None, + relations_aliases: Optional[List[str]] = None, + additional_secret_fields: Optional[List[str]] = [], + external_node_connectivity: bool = False, + ): + """Manager of database client relations.""" + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) + self.database = database_name + self.relations_aliases = relations_aliases + self.external_node_connectivity = external_node_connectivity + + def is_postgresql_plugin_enabled(self, plugin: str, relation_index: int = 0) -> bool: + """Returns whether a plugin is enabled in the database. + + Args: + plugin: name of the plugin to check. + relation_index: optional relation index to check the database + (default: 0 - first relation). + + PostgreSQL only. + """ + # Psycopg 3 is imported locally to avoid the need of its package installation + # when relating to a database charm other than PostgreSQL. 
+ import psycopg + + # Return False if no relation is established. + if len(self.relations) == 0: + return False + + relation_id = self.relations[relation_index].id + host = self.fetch_relation_field(relation_id, "endpoints") + + # Return False if there is no endpoint available. + if host is None: + return False + + host = host.split(":")[0] + + content = self.fetch_relation_data([relation_id], ["username", "password"]).get( + relation_id, {} + ) + user = content.get("username") + password = content.get("password") + + connection_string = ( + f"host='{host}' dbname='{self.database}' user='{user}' password='{password}'" + ) + try: + with psycopg.connect(connection_string) as connection: + with connection.cursor() as cursor: + cursor.execute( + "SELECT TRUE FROM pg_extension WHERE extname=%s::text;", (plugin,) + ) + return cursor.fetchone() is not None + except psycopg.Error as e: + logger.exception( + f"failed to check whether {plugin} plugin is enabled in the database: %s", str(e) + ) + return False + + +class DatabaseRequirerEventHandlers(RequirerEventHandlers): + """Requires-side of the relation.""" + + on = DatabaseRequiresEvents() # pyright: ignore [reportAssignmentType] + + def __init__( + self, charm: CharmBase, relation_data: DatabaseRequirerData, unique_key: str = "" + ): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + # Define custom event names for each alias. + if self.relation_data.relations_aliases: + # Ensure the number of aliases does not exceed the maximum + # of connections allowed in the specific relation. + relation_connection_limit = self.charm.meta.requires[ + self.relation_data.relation_name + ].limit + if len(self.relation_data.relations_aliases) != relation_connection_limit: + raise ValueError( + f"The number of aliases must match the maximum number of connections allowed in the relation. " + f"Expected {relation_connection_limit}, got {len(self.relation_data.relations_aliases)}" + ) + + if self.relation_data.relations_aliases: + for relation_alias in self.relation_data.relations_aliases: + self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent) + self.on.define_event( + f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent + ) + self.on.define_event( + f"{relation_alias}_read_only_endpoints_changed", + DatabaseReadOnlyEndpointsChangedEvent, + ) + + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + pass + + def _assign_relation_alias(self, relation_id: int) -> None: + """Assigns an alias to a relation. + + This function writes in the unit data bag. + + Args: + relation_id: the identifier for a particular relation. + """ + # If no aliases were provided, return immediately. + if not self.relation_data.relations_aliases: + return + + # Return if an alias was already assigned to this relation + # (like when there are more than one unit joining the relation). + relation = self.charm.model.get_relation(self.relation_data.relation_name, relation_id) + if relation and relation.data[self.relation_data.local_unit].get("alias"): + return + + # Retrieve the available aliases (the ones that weren't assigned to any relation). 
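+        # For example, with relations_aliases=["db1", "db2"] and "db1" already taken
+        # by an earlier relation, only "db2" remains in the pool (illustrative aliases).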
+ available_aliases = self.relation_data.relations_aliases[:] + for relation in self.charm.model.relations[self.relation_data.relation_name]: + alias = relation.data[self.relation_data.local_unit].get("alias") + if alias: + logger.debug("Alias %s was already assigned to relation %d", alias, relation.id) + available_aliases.remove(alias) + + # Set the alias in the unit relation databag of the specific relation. + relation = self.charm.model.get_relation(self.relation_data.relation_name, relation_id) + if relation: + relation.data[self.relation_data.local_unit].update({"alias": available_aliases[0]}) + + # We need to set relation alias also on the application level so, + # it will be accessible in show-unit juju command, executed for a consumer application unit + if self.relation_data.local_unit.is_leader(): + self.relation_data.update_relation_data(relation_id, {"alias": available_aliases[0]}) + + def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None: + """Emit an aliased event to a particular relation if it has an alias. + + Args: + event: the relation changed event that was received. + event_name: the name of the event to emit. + """ + alias = self._get_relation_alias(event.relation.id) + if alias: + getattr(self.on, f"{alias}_{event_name}").emit( + event.relation, app=event.app, unit=event.unit + ) + + def _get_relation_alias(self, relation_id: int) -> Optional[str]: + """Returns the relation alias. + + Args: + relation_id: the identifier for a particular relation. + + Returns: + the relation alias or None if the relation was not found. + """ + for relation in self.charm.model.relations[self.relation_data.relation_name]: + if relation.id == relation_id: + return relation.data[self.relation_data.local_unit].get("alias") + return None + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the database relation is created.""" + super()._on_relation_created_event(event) + + # If relations aliases were provided, assign one to the relation. + self._assign_relation_alias(event.relation.id) + + # Sets both database and extra user roles in the relation + # if the roles are provided. Otherwise, sets only the database. + if not self.relation_data.local_unit.is_leader(): + return + + event_data = {"database": self.relation_data.database} + + if self.relation_data.extra_user_roles: + event_data["extra-user-roles"] = self.relation_data.extra_user_roles + + # set external-node-connectivity field + if self.relation_data.external_node_connectivity: + event_data["external-node-connectivity"] = "true" + + self.relation_data.update_relation_data(event.relation.id, event_data) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the database relation has changed.""" + is_subordinate = False + remote_unit_data = None + for key in event.relation.data.keys(): + if isinstance(key, Unit) and not key.name.startswith(self.charm.app.name): + remote_unit_data = event.relation.data[key] + elif isinstance(key, Application) and key.name != self.charm.app.name: + is_subordinate = event.relation.data[key].get("subordinated") == "true" + + if is_subordinate: + if not remote_unit_data: + return + + if remote_unit_data.get("state") != "ready": + return + + # Check which data has changed to emit customs events. 
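+        # The diff below is computed against the snapshot this library stores under
+        # the "data" key of the databag, so each remote change is reported only once.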
+        diff = self._diff(event)
+
+        # Register all new secrets with their labels
+        if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)):
+            self.relation_data._register_secrets_to_relation(event.relation, diff.added)
+
+        # Check if the database is created
+        # (the database charm shared the credentials).
+        secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER)
+        if (
+            "username" in diff.added and "password" in diff.added
+        ) or secret_field_user in diff.added:
+            # Emit the default event (the one without an alias).
+            logger.info("database created at %s", datetime.now())
+            getattr(self.on, "database_created").emit(
+                event.relation, app=event.app, unit=event.unit
+            )
+
+            # Emit the aliased event (if any).
+            self._emit_aliased_event(event, "database_created")
+
+            # To avoid unnecessary application restarts, do not trigger the
+            # "endpoints_changed" event if "database_created" is triggered.
+            return
+
+        # Emit an endpoints changed event if the database
+        # added or changed this info in the relation databag.
+        if "endpoints" in diff.added or "endpoints" in diff.changed:
+            # Emit the default event (the one without an alias).
+            logger.info("endpoints changed on %s", datetime.now())
+            getattr(self.on, "endpoints_changed").emit(
+                event.relation, app=event.app, unit=event.unit
+            )
+
+            # Emit the aliased event (if any).
+            self._emit_aliased_event(event, "endpoints_changed")
+
+            # To avoid unnecessary application restarts, do not trigger the
+            # "read_only_endpoints_changed" event if "endpoints_changed" is triggered.
+            return
+
+        # Emit a read-only endpoints changed event if the database
+        # added or changed this info in the relation databag.
+        if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed:
+            # Emit the default event (the one without an alias).
+            logger.info("read-only-endpoints changed on %s", datetime.now())
+            getattr(self.on, "read_only_endpoints_changed").emit(
+                event.relation, app=event.app, unit=event.unit
+            )
+
+            # Emit the aliased event (if any).
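+            # e.g. an alias of "db1" results in a "db1_read_only_endpoints_changed"
+            # event (illustrative alias; see _emit_aliased_event above).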
+            self._emit_aliased_event(event, "read_only_endpoints_changed")
+
+
+class DatabaseRequires(DatabaseRequirerData, DatabaseRequirerEventHandlers):
+    """Requirer-side of the database relations."""
+
+    def __init__(
+        self,
+        charm: CharmBase,
+        relation_name: str,
+        database_name: str,
+        extra_user_roles: Optional[str] = None,
+        relations_aliases: Optional[List[str]] = None,
+        additional_secret_fields: Optional[List[str]] = [],
+        external_node_connectivity: bool = False,
+    ):
+        DatabaseRequirerData.__init__(
+            self,
+            charm.model,
+            relation_name,
+            database_name,
+            extra_user_roles,
+            relations_aliases,
+            additional_secret_fields,
+            external_node_connectivity,
+        )
+        DatabaseRequirerEventHandlers.__init__(self, charm, self)
+
+
+################################################################################
+# Charm-specific Relations Data and Events
+################################################################################
+
+# Kafka Events
+
+
+class KafkaProvidesEvent(RelationEvent):
+    """Base class for Kafka events."""
+
+    @property
+    def topic(self) -> Optional[str]:
+        """Returns the topic that was requested."""
+        if not self.relation.app:
+            return None
+
+        return self.relation.data[self.relation.app].get("topic")
+
+    @property
+    def consumer_group_prefix(self) -> Optional[str]:
+        """Returns the consumer-group-prefix that was requested."""
+        if not self.relation.app:
+            return None
+
+        return self.relation.data[self.relation.app].get("consumer-group-prefix")
+
+
+class TopicRequestedEvent(KafkaProvidesEvent, ExtraRoleEvent):
+    """Event emitted when a new topic is requested for use on this relation."""
+
+
+class KafkaProvidesEvents(CharmEvents):
+    """Kafka events.
+
+    This class defines the events that the Kafka provider can emit.
+    """
+
+    topic_requested = EventSource(TopicRequestedEvent)
+
+
+class KafkaRequiresEvent(RelationEvent):
+    """Base class for Kafka events."""
+
+    @property
+    def topic(self) -> Optional[str]:
+        """Returns the topic."""
+        if not self.relation.app:
+            return None
+
+        return self.relation.data[self.relation.app].get("topic")
+
+    @property
+    def bootstrap_server(self) -> Optional[str]:
+        """Returns a comma-separated list of broker URIs."""
+        if not self.relation.app:
+            return None
+
+        return self.relation.data[self.relation.app].get("endpoints")
+
+    @property
+    def consumer_group_prefix(self) -> Optional[str]:
+        """Returns the consumer-group-prefix."""
+        if not self.relation.app:
+            return None
+
+        return self.relation.data[self.relation.app].get("consumer-group-prefix")
+
+    @property
+    def zookeeper_uris(self) -> Optional[str]:
+        """Returns a comma-separated list of ZooKeeper URIs."""
+        if not self.relation.app:
+            return None
+
+        return self.relation.data[self.relation.app].get("zookeeper-uris")
+
+
+class TopicCreatedEvent(AuthenticationEvent, KafkaRequiresEvent):
+    """Event emitted when a new topic is created for use on this relation."""
+
+
+class BootstrapServerChangedEvent(AuthenticationEvent, KafkaRequiresEvent):
+    """Event emitted when the bootstrap server is changed."""
+
+
+class KafkaRequiresEvents(CharmEvents):
+    """Kafka events.
+
+    This class defines the events that the Kafka requirer can emit.
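+
+    Example (illustrative wiring in a charm):
+        self.framework.observe(self.kafka.on.topic_created, self._on_topic_created)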
+ """ + + topic_created = EventSource(TopicCreatedEvent) + bootstrap_server_changed = EventSource(BootstrapServerChangedEvent) + + +# Kafka Provides and Requires + + +class KafkaProviderData(ProviderData): + """Provider-side of the Kafka relation.""" + + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) + + def set_topic(self, relation_id: int, topic: str) -> None: + """Set topic name in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + topic: the topic name. + """ + self.update_relation_data(relation_id, {"topic": topic}) + + def set_bootstrap_server(self, relation_id: int, bootstrap_server: str) -> None: + """Set the bootstrap server in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + bootstrap_server: the bootstrap server address. + """ + self.update_relation_data(relation_id, {"endpoints": bootstrap_server}) + + def set_consumer_group_prefix(self, relation_id: int, consumer_group_prefix: str) -> None: + """Set the consumer group prefix in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + consumer_group_prefix: the consumer group prefix string. + """ + self.update_relation_data(relation_id, {"consumer-group-prefix": consumer_group_prefix}) + + def set_zookeeper_uris(self, relation_id: int, zookeeper_uris: str) -> None: + """Set the zookeeper uris in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + zookeeper_uris: comma-separated list of ZooKeeper server uris. + """ + self.update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris}) + + +class KafkaProviderEventHandlers(EventHandlers): + """Provider-side of the Kafka relation.""" + + on = KafkaProvidesEvents() # pyright: ignore [reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: KafkaProviderData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit a topic requested event if the setup key (topic name and optional + # extra user roles) was added to the relation databag by the application. 
+ if "topic" in diff.added: + getattr(self.on, "topic_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class KafkaProvides(KafkaProviderData, KafkaProviderEventHandlers): + """Provider-side of the Kafka relation.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + KafkaProviderData.__init__(self, charm.model, relation_name) + KafkaProviderEventHandlers.__init__(self, charm, self) + + +class KafkaRequirerData(RequirerData): + """Requirer-side of the Kafka relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + topic: str, + extra_user_roles: Optional[str] = None, + consumer_group_prefix: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ): + """Manager of Kafka client relations.""" + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) + self.topic = topic + self.consumer_group_prefix = consumer_group_prefix or "" + + @property + def topic(self): + """Topic to use in Kafka.""" + return self._topic + + @topic.setter + def topic(self, value): + # Avoid wildcards + if value == "*": + raise ValueError(f"Error on topic '{value}', cannot be a wildcard.") + self._topic = value + + +class KafkaRequirerEventHandlers(RequirerEventHandlers): + """Requires-side of the Kafka relation.""" + + on = KafkaRequiresEvents() # pyright: ignore [reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: KafkaRequirerData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the Kafka relation is created.""" + super()._on_relation_created_event(event) + + if not self.relation_data.local_unit.is_leader(): + return + + # Sets topic, extra user roles, and "consumer-group-prefix" in the relation + relation_data = {"topic": self.relation_data.topic} + + if self.relation_data.extra_user_roles: + relation_data["extra-user-roles"] = self.relation_data.extra_user_roles + + if self.relation_data.consumer_group_prefix: + relation_data["consumer-group-prefix"] = self.relation_data.consumer_group_prefix + + self.relation_data.update_relation_data(event.relation.id, relation_data) + + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + pass + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the Kafka relation has changed.""" + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Check if the topic is created + # (the Kafka charm shared the credentials). + + # Register all new secrets with their labels + if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)): + self.relation_data._register_secrets_to_relation(event.relation, diff.added) + + secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER) + if ( + "username" in diff.added and "password" in diff.added + ) or secret_field_user in diff.added: + # Emit the default event (the one without an alias). + logger.info("topic created at %s", datetime.now()) + getattr(self.on, "topic_created").emit(event.relation, app=event.app, unit=event.unit) + + # To avoid unnecessary application restarts do not trigger + # โ€œendpoints_changedโ€œ event if โ€œtopic_createdโ€œ is triggered. 
+            return
+
+        # Emit an endpoints (bootstrap-server) changed event if the Kafka endpoints
+        # added or changed this info in the relation databag.
+        if "endpoints" in diff.added or "endpoints" in diff.changed:
+            # Emit the default event (the one without an alias).
+            logger.info("endpoints changed on %s", datetime.now())
+            getattr(self.on, "bootstrap_server_changed").emit(
+                event.relation, app=event.app, unit=event.unit
+            )
+            return
+
+
+class KafkaRequires(KafkaRequirerData, KafkaRequirerEventHandlers):
+    """Requirer-side of the Kafka relation."""
+
+    def __init__(
+        self,
+        charm: CharmBase,
+        relation_name: str,
+        topic: str,
+        extra_user_roles: Optional[str] = None,
+        consumer_group_prefix: Optional[str] = None,
+        additional_secret_fields: Optional[List[str]] = [],
+    ) -> None:
+        KafkaRequirerData.__init__(
+            self,
+            charm.model,
+            relation_name,
+            topic,
+            extra_user_roles,
+            consumer_group_prefix,
+            additional_secret_fields,
+        )
+        KafkaRequirerEventHandlers.__init__(self, charm, self)
+
+
+# OpenSearch-related events
+
+
+class OpenSearchProvidesEvent(RelationEvent):
+    """Base class for OpenSearch events."""
+
+    @property
+    def index(self) -> Optional[str]:
+        """Returns the index that was requested."""
+        if not self.relation.app:
+            return None
+
+        return self.relation.data[self.relation.app].get("index")
+
+
+class IndexRequestedEvent(OpenSearchProvidesEvent, ExtraRoleEvent):
+    """Event emitted when a new index is requested for use on this relation."""
+
+
+class OpenSearchProvidesEvents(CharmEvents):
+    """OpenSearch events.
+
+    This class defines the events that OpenSearch can emit.
+    """
+
+    index_requested = EventSource(IndexRequestedEvent)
+
+
+class OpenSearchRequiresEvent(DatabaseRequiresEvent):
+    """Base class for OpenSearch requirer events."""
+
+
+class IndexCreatedEvent(AuthenticationEvent, OpenSearchRequiresEvent):
+    """Event emitted when a new index is created for use on this relation."""
+
+
+class OpenSearchRequiresEvents(CharmEvents):
+    """OpenSearch events.
+
+    This class defines the events that the OpenSearch requirer can emit.
+    """
+
+    index_created = EventSource(IndexCreatedEvent)
+    endpoints_changed = EventSource(DatabaseEndpointsChangedEvent)
+    authentication_updated = EventSource(AuthenticationEvent)
+
+
+# OpenSearch Provides and Requires Objects
+
+
+class OpenSearchProvidesData(ProviderData):
+    """Provider-side of the OpenSearch relation."""
+
+    def __init__(self, model: Model, relation_name: str) -> None:
+        super().__init__(model, relation_name)
+
+    def set_index(self, relation_id: int, index: str) -> None:
+        """Set the index in the application relation databag.
+
+        Args:
+            relation_id: the identifier for a particular relation.
+            index: the index as it is _created_ on the provider charm. This needn't match the
+                requested index, and can be used to present a different index name if, for example,
+                the requested index is invalid.
+        """
+        self.update_relation_data(relation_id, {"index": index})
+
+    def set_endpoints(self, relation_id: int, endpoints: str) -> None:
+        """Set the endpoints in the application relation databag.
+
+        Args:
+            relation_id: the identifier for a particular relation.
+            endpoints: the endpoint addresses for OpenSearch nodes.
+        """
+        self.update_relation_data(relation_id, {"endpoints": endpoints})
+
+    def set_version(self, relation_id: int, version: str) -> None:
+        """Set the OpenSearch version in the application relation databag.
+
+        Args:
+            relation_id: the identifier for a particular relation.
+ version: database version. + """ + self.update_relation_data(relation_id, {"version": version}) + + +class OpenSearchProvidesEventHandlers(EventHandlers): + """Provider-side of the OpenSearch relation.""" + + on = OpenSearchProvidesEvents() # pyright: ignore[reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: OpenSearchProvidesData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit an index requested event if the setup key (index name and optional extra user roles) + # have been added to the relation databag by the application. + if "index" in diff.added: + getattr(self.on, "index_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class OpenSearchProvides(OpenSearchProvidesData, OpenSearchProvidesEventHandlers): + """Provider-side of the OpenSearch relation.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + OpenSearchProvidesData.__init__(self, charm.model, relation_name) + OpenSearchProvidesEventHandlers.__init__(self, charm, self) + + +class OpenSearchRequiresData(RequirerData): + """Requires data side of the OpenSearch relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + index: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ): + """Manager of OpenSearch client relations.""" + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) + self.index = index + + +class OpenSearchRequiresEventHandlers(RequirerEventHandlers): + """Requires events side of the OpenSearch relation.""" + + on = OpenSearchRequiresEvents() # pyright: ignore[reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: OpenSearchRequiresData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the OpenSearch relation is created.""" + super()._on_relation_created_event(event) + + if not self.relation_data.local_unit.is_leader(): + return + + # Sets both index and extra user roles in the relation if the roles are provided. + # Otherwise, sets only the index. 
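+        # e.g. {"index": "my-index", "extra-user-roles": "admin"}
+        # (illustrative values, written by the leader into the application databag).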
+ data = {"index": self.relation_data.index} + if self.relation_data.extra_user_roles: + data["extra-user-roles"] = self.relation_data.extra_user_roles + + self.relation_data.update_relation_data(event.relation.id, data) + + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + if not event.secret.label: + return + + relation = self.relation_data._relation_from_secret_label(event.secret.label) + if not relation: + logging.info( + f"Received secret {event.secret.label} but couldn't parse, seems irrelevant" + ) + return + + if relation.app == self.charm.app: + logging.info("Secret changed event ignored for Secret Owner") + + remote_unit = None + for unit in relation.units: + if unit.app != self.charm.app: + remote_unit = unit + + logger.info("authentication updated") + getattr(self.on, "authentication_updated").emit( + relation, app=relation.app, unit=remote_unit + ) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the OpenSearch relation has changed. + + This event triggers individual custom events depending on the changing relation. + """ + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Register all new secrets with their labels + if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)): + self.relation_data._register_secrets_to_relation(event.relation, diff.added) + + secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER) + secret_field_tls = self.relation_data._generate_secret_field_name(SECRET_GROUPS.TLS) + updates = {"username", "password", "tls", "tls-ca", secret_field_user, secret_field_tls} + if len(set(diff._asdict().keys()) - updates) < len(diff): + logger.info("authentication updated at: %s", datetime.now()) + getattr(self.on, "authentication_updated").emit( + event.relation, app=event.app, unit=event.unit + ) + + # Check if the index is created + # (the OpenSearch charm shares the credentials). + if ( + "username" in diff.added and "password" in diff.added + ) or secret_field_user in diff.added: + # Emit the default event (the one without an alias). + logger.info("index created at: %s", datetime.now()) + getattr(self.on, "index_created").emit(event.relation, app=event.app, unit=event.unit) + + # To avoid unnecessary application restarts do not trigger + # โ€œendpoints_changedโ€œ event if โ€œindex_createdโ€œ is triggered. + return + + # Emit a endpoints changed event if the OpenSearch application added or changed this info + # in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). 
+ logger.info("endpoints changed on %s", datetime.now()) + getattr(self.on, "endpoints_changed").emit( + event.relation, app=event.app, unit=event.unit + ) # here check if this is the right design + return + + +class OpenSearchRequires(OpenSearchRequiresData, OpenSearchRequiresEventHandlers): + """Requires-side of the OpenSearch relation.""" + + def __init__( + self, + charm: CharmBase, + relation_name: str, + index: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ) -> None: + OpenSearchRequiresData.__init__( + self, + charm.model, + relation_name, + index, + extra_user_roles, + additional_secret_fields, + ) + OpenSearchRequiresEventHandlers.__init__(self, charm, self) diff --git a/single_kernel_mongo/lib/charms/data_platform_libs/v0/s3.py b/single_kernel_mongo/lib/charms/data_platform_libs/v0/s3.py new file mode 100644 index 00000000..f5614aaf --- /dev/null +++ b/single_kernel_mongo/lib/charms/data_platform_libs/v0/s3.py @@ -0,0 +1,791 @@ +# Copyright 2023 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""A library for communicating with the S3 credentials providers and consumers. + +This library provides the relevant interface code implementing the communication +specification for fetching, retrieving, triggering, and responding to events related to +the S3 provider charm and its consumers. + +### Provider charm + +The provider is implemented in the `s3-provider` charm which is meant to be deployed +alongside one or more consumer charms. The provider charm is serving the s3 credentials and +metadata needed to communicate and work with an S3 compatible backend. + +Example: +```python + +from charms.data_platform_libs.v0.s3 import CredentialRequestedEvent, S3Provider + + +class ExampleProviderCharm(CharmBase): + def __init__(self, *args) -> None: + super().__init__(*args) + self.s3_provider = S3Provider(self, "s3-credentials") + + self.framework.observe(self.s3_provider.on.credentials_requested, + self._on_credential_requested) + + def _on_credential_requested(self, event: CredentialRequestedEvent): + if not self.unit.is_leader(): + return + + # get relation id + relation_id = event.relation.id + + # get bucket name + bucket = event.bucket + + # S3 configuration parameters + desired_configuration = {"access-key": "your-access-key", "secret-key": + "your-secret-key", "bucket": "your-bucket"} + + # update the configuration + self.s3_provider.update_connection_info(relation_id, desired_configuration) + + # or it is possible to set each field independently + + self.s3_provider.set_secret_key(relation_id, "your-secret-key") + + +if __name__ == "__main__": + main(ExampleProviderCharm) + + +### Requirer charm + +The requirer charm is the charm requiring the S3 credentials. 
+An example of requirer charm is the following: + +Example: +```python + +from charms.data_platform_libs.v0.s3 import ( + CredentialsChangedEvent, + CredentialsGoneEvent, + S3Requirer +) + +class ExampleRequirerCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + + bucket_name = "test-bucket" + # if bucket name is not provided the bucket name will be generated + # e.g., ('relation-{relation.id}') + + self.s3_client = S3Requirer(self, "s3-credentials", bucket_name) + + self.framework.observe(self.s3_client.on.credentials_changed, self._on_credential_changed) + self.framework.observe(self.s3_client.on.credentials_gone, self._on_credential_gone) + + def _on_credential_changed(self, event: CredentialsChangedEvent): + + # access single parameter credential + secret_key = event.secret_key + access_key = event.access_key + + # or as alternative all credentials can be collected as a dictionary + credentials = self.s3_client.get_s3_credentials() + + def _on_credential_gone(self, event: CredentialsGoneEvent): + # credentials are removed + pass + + if __name__ == "__main__": + main(ExampleRequirerCharm) +``` + +""" +import json +import logging +from collections import namedtuple +from typing import Dict, List, Optional, Union + +import ops.charm +import ops.framework +import ops.model +from ops.charm import ( + CharmBase, + CharmEvents, + RelationBrokenEvent, + RelationChangedEvent, + RelationEvent, + RelationJoinedEvent, +) +from ops.framework import EventSource, Object, ObjectEvents +from ops.model import Application, Relation, RelationDataContent, Unit + +# The unique Charmhub library identifier, never change it +LIBID = "fca396f6254246c9bfa565b1f85ab528" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 5 + +logger = logging.getLogger(__name__) + +Diff = namedtuple("Diff", "added changed deleted") +Diff.__doc__ = """ +A tuple for storing the diff between two data mappings. + +added - keys that were added +changed - keys that still exist but have new values +deleted - key that were deleted""" + + +def diff(event: RelationChangedEvent, bucket: Union[Unit, Application]) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + bucket: bucket of the databag (app or unit) + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + # Retrieve the old data from the data key in the application relation databag. + old_data = json.loads(event.relation.data[bucket].get("data", "{}")) + # Retrieve the new data from the event relation databag. + new_data = ( + {key: value for key, value in event.relation.data[event.app].items() if key != "data"} + if event.app + else {} + ) + + # These are the keys that were added to the databag and triggered this event. + added = new_data.keys() - old_data.keys() + # These are the keys that were removed from the databag and triggered this event. + deleted = old_data.keys() - new_data.keys() + # These are the keys that already existed in the databag, + # but had their values changed. + changed = {key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key]} + + # TODO: evaluate the possibility of losing the diff if some error + # happens in the charm before the diff is completely checked (DPE-412). 
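+    # For example (illustrative values): old_data={"bucket": "b1"} and
+    # new_data={"bucket": "b2", "path": "/p"} yield
+    # Diff(added={"path"}, changed={"bucket"}, deleted=set()).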
+ # Convert the new_data to a serializable format and save it for a next diff check. + event.relation.data[bucket].update({"data": json.dumps(new_data)}) + + # Return the diff with all possible changes. + return Diff(added, changed, deleted) + + +class BucketEvent(RelationEvent): + """Base class for bucket events.""" + + @property + def bucket(self) -> Optional[str]: + """Returns the bucket was requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("bucket", "") + + +class CredentialRequestedEvent(BucketEvent): + """Event emitted when a set of credential is requested for use on this relation.""" + + +class S3CredentialEvents(CharmEvents): + """Event descriptor for events raised by S3Provider.""" + + credentials_requested = EventSource(CredentialRequestedEvent) + + +class S3Provider(Object): + """A provider handler for communicating S3 credentials to consumers.""" + + on = S3CredentialEvents() # pyright: ignore [reportAssignmentType] + + def __init__( + self, + charm: CharmBase, + relation_name: str, + ): + super().__init__(charm, relation_name) + self.charm = charm + self.local_app = self.charm.model.app + self.local_unit = self.charm.unit + self.relation_name = relation_name + + # monitor relation changed event for changes in the credentials + self.framework.observe(charm.on[relation_name].relation_changed, self._on_relation_changed) + + def _on_relation_changed(self, event: RelationChangedEvent) -> None: + """React to the relation changed event by consuming data.""" + if not self.charm.unit.is_leader(): + return + diff = self._diff(event) + # emit on credential requested if bucket is provided by the requirer application + if "bucket" in diff.added: + getattr(self.on, "credentials_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + def _load_relation_data(self, raw_relation_data: dict) -> dict: + """Loads relation data from the relation data bag. + + Args: + raw_relation_data: Relation data from the databag + Returns: + dict: Relation data in dict format. + """ + connection_data = {} + for key in raw_relation_data: + try: + connection_data[key] = json.loads(raw_relation_data[key]) + except (json.decoder.JSONDecodeError, TypeError): + connection_data[key] = raw_relation_data[key] + return connection_data + + # def _diff(self, event: RelationChangedEvent) -> Diff: + # """Retrieves the diff of the data in the relation changed databag. + + # Args: + # event: relation changed event. + + # Returns: + # a Diff instance containing the added, deleted and changed + # keys from the event relation databag. + # """ + # # Retrieve the old data from the data key in the application relation databag. + # old_data = json.loads(event.relation.data[self.local_app].get("data", "{}")) + # # Retrieve the new data from the event relation databag. + # new_data = { + # key: value for key, value in event.relation.data[event.app].items() if key != "data" + # } + + # # These are the keys that were added to the databag and triggered this event. + # added = new_data.keys() - old_data.keys() + # # These are the keys that were removed from the databag and triggered this event. + # deleted = old_data.keys() - new_data.keys() + # # These are the keys that already existed in the databag, + # # but had their values changed. 
+ # changed = { + # key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key] + # } + + # # TODO: evaluate the possibility of losing the diff if some error + # # happens in the charm before the diff is completely checked (DPE-412). + # # Convert the new_data to a serializable format and save it for a next diff check. + # event.relation.data[self.local_app].update({"data": json.dumps(new_data)}) + + # # Return the diff with all possible changes. + # return Diff(added, changed, deleted) + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + return diff(event, self.local_app) + + def fetch_relation_data(self) -> dict: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation id). + """ + data = {} + for relation in self.relations: + data[relation.id] = ( + {key: value for key, value in relation.data[relation.app].items() if key != "data"} + if relation.app + else {} + ) + return data + + def update_connection_info(self, relation_id: int, connection_data: dict) -> None: + """Updates the credential data as set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + connection_data: dict containing the key-value pairs + that should be updated. + """ + # check and write changes only if you are the leader + if not self.local_unit.is_leader(): + return + + relation = self.charm.model.get_relation(self.relation_name, relation_id) + + if not relation: + return + + # configuration options that are list + s3_list_options = ["attributes", "tls-ca-chain"] + + # update the databag, if connection data did not change with respect to before + # the relation changed event is not triggered + updated_connection_data = {} + for configuration_option, configuration_value in connection_data.items(): + if configuration_option in s3_list_options: + updated_connection_data[configuration_option] = json.dumps(configuration_value) + else: + updated_connection_data[configuration_option] = configuration_value + + relation.data[self.local_app].update(updated_connection_data) + logger.debug(f"Updated S3 connection info: {updated_connection_data}") + + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return list(self.charm.model.relations[self.relation_name]) + + def set_bucket(self, relation_id: int, bucket: str) -> None: + """Sets bucket name in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + bucket: the bucket name. + """ + self.update_connection_info(relation_id, {"bucket": bucket}) + + def set_access_key(self, relation_id: int, access_key: str) -> None: + """Sets access-key value in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. 
+ access_key: the access-key value. + """ + self.update_connection_info(relation_id, {"access-key": access_key}) + + def set_secret_key(self, relation_id: int, secret_key: str) -> None: + """Sets the secret key value in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + secret_key: the value of the secret key. + """ + self.update_connection_info(relation_id, {"secret-key": secret_key}) + + def set_path(self, relation_id: int, path: str) -> None: + """Sets the path value in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + path: the path value. + """ + self.update_connection_info(relation_id, {"path": path}) + + def set_endpoint(self, relation_id: int, endpoint: str) -> None: + """Sets the endpoint address in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + endpoint: the endpoint address. + """ + self.update_connection_info(relation_id, {"endpoint": endpoint}) + + def set_region(self, relation_id: int, region: str) -> None: + """Sets the region location in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + region: the region address. + """ + self.update_connection_info(relation_id, {"region": region}) + + def set_s3_uri_style(self, relation_id: int, s3_uri_style: str) -> None: + """Sets the S3 URI style in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + s3_uri_style: the s3 URI style. + """ + self.update_connection_info(relation_id, {"s3-uri-style": s3_uri_style}) + + def set_storage_class(self, relation_id: int, storage_class: str) -> None: + """Sets the storage class in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + storage_class: the storage class. + """ + self.update_connection_info(relation_id, {"storage-class": storage_class}) + + def set_tls_ca_chain(self, relation_id: int, tls_ca_chain: List[str]) -> None: + """Sets the tls_ca_chain value in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + tls_ca_chain: the TLS Chain value. + """ + self.update_connection_info(relation_id, {"tls-ca-chain": tls_ca_chain}) + + def set_s3_api_version(self, relation_id: int, s3_api_version: str) -> None: + """Sets the S3 API version in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + s3_api_version: the S3 version value. + """ + self.update_connection_info(relation_id, {"s3-api-version": s3_api_version}) + + def set_delete_older_than_days(self, relation_id: int, days: int) -> None: + """Sets the retention days for full backups in application databag. 
+ + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + days: the value. + """ + self.update_connection_info(relation_id, {"delete-older-than-days": str(days)}) + + def set_attributes(self, relation_id: int, attributes: List[str]) -> None: + """Sets the connection attributes in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + attributes: the attributes value. + """ + self.update_connection_info(relation_id, {"attributes": attributes}) + + +class S3Event(RelationEvent): + """Base class for S3 storage events.""" + + @property + def bucket(self) -> Optional[str]: + """Returns the bucket name.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("bucket") + + @property + def access_key(self) -> Optional[str]: + """Returns the access key.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("access-key") + + @property + def secret_key(self) -> Optional[str]: + """Returns the secret key.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("secret-key") + + @property + def path(self) -> Optional[str]: + """Returns the path where data can be stored.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("path") + + @property + def endpoint(self) -> Optional[str]: + """Returns the endpoint address.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("endpoint") + + @property + def region(self) -> Optional[str]: + """Returns the region.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("region") + + @property + def s3_uri_style(self) -> Optional[str]: + """Returns the s3 uri style.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("s3-uri-style") + + @property + def storage_class(self) -> Optional[str]: + """Returns the storage class name.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("storage-class") + + @property + def tls_ca_chain(self) -> Optional[List[str]]: + """Returns the TLS CA chain.""" + if not self.relation.app: + return None + + tls_ca_chain = self.relation.data[self.relation.app].get("tls-ca-chain") + if tls_ca_chain is not None: + return json.loads(tls_ca_chain) + return None + + @property + def s3_api_version(self) -> Optional[str]: + """Returns the S3 API version.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("s3-api-version") + + @property + def delete_older_than_days(self) -> Optional[int]: + """Returns the retention days for full backups.""" + if not self.relation.app: + return None + + days = self.relation.data[self.relation.app].get("delete-older-than-days") + if days is None: + return None + return int(days) + + @property + def attributes(self) -> Optional[List[str]]: + """Returns the attributes.""" + if not self.relation.app: + return None + + attributes = self.relation.data[self.relation.app].get("attributes") + if attributes is not None: + return json.loads(attributes) + return None + + +class CredentialsChangedEvent(S3Event): + """Event emitted when S3 credential are changed on this relation.""" + + 
+class CredentialsGoneEvent(RelationEvent): + """Event emitted when S3 credential are removed from this relation.""" + + +class S3CredentialRequiresEvents(ObjectEvents): + """Event descriptor for events raised by the S3Provider.""" + + credentials_changed = EventSource(CredentialsChangedEvent) + credentials_gone = EventSource(CredentialsGoneEvent) + + +S3_REQUIRED_OPTIONS = ["access-key", "secret-key"] + + +class S3Requirer(Object): + """Requires-side of the s3 relation.""" + + on = S3CredentialRequiresEvents() # pyright: ignore[reportAssignmentType] + + def __init__( + self, charm: ops.charm.CharmBase, relation_name: str, bucket_name: Optional[str] = None + ): + """Manager of the s3 client relations.""" + super().__init__(charm, relation_name) + + self.relation_name = relation_name + self.charm = charm + self.local_app = self.charm.model.app + self.local_unit = self.charm.unit + self.bucket = bucket_name + + self.framework.observe( + self.charm.on[self.relation_name].relation_changed, self._on_relation_changed + ) + + self.framework.observe( + self.charm.on[self.relation_name].relation_joined, self._on_relation_joined + ) + + self.framework.observe( + self.charm.on[self.relation_name].relation_broken, + self._on_relation_broken, + ) + + def _generate_bucket_name(self, event: RelationJoinedEvent): + """Returns the bucket name generated from relation id.""" + return f"relation-{event.relation.id}" + + def _on_relation_joined(self, event: RelationJoinedEvent) -> None: + """Event emitted when the application joins the s3 relation.""" + if self.bucket is None: + self.bucket = self._generate_bucket_name(event) + self.update_connection_info(event.relation.id, {"bucket": self.bucket}) + + def fetch_relation_data(self) -> dict: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation id). + """ + data = {} + + for relation in self.relations: + data[relation.id] = self._load_relation_data(relation.data[self.charm.app]) + return data + + def update_connection_info(self, relation_id: int, connection_data: dict) -> None: + """Updates the credential data as set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + connection_data: dict containing the key-value pairs + that should be updated. 
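+
+        Example (illustrative):
+            self.update_connection_info(relation.id, {"bucket": "my-bucket"})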
+ """ + # check and write changes only if you are the leader + if not self.local_unit.is_leader(): + return + + relation = self.charm.model.get_relation(self.relation_name, relation_id) + + if not relation: + return + + # update the databag, if connection data did not change with respect to before + # the relation changed event is not triggered + # configuration options that are list + s3_list_options = ["attributes", "tls-ca-chain"] + updated_connection_data = {} + for configuration_option, configuration_value in connection_data.items(): + if configuration_option in s3_list_options: + updated_connection_data[configuration_option] = json.dumps(configuration_value) + else: + updated_connection_data[configuration_option] = configuration_value + + relation.data[self.local_app].update(updated_connection_data) + logger.debug(f"Updated S3 credentials: {updated_connection_data}") + + def _load_relation_data(self, raw_relation_data: RelationDataContent) -> Dict[str, str]: + """Loads relation data from the relation data bag. + + Args: + raw_relation_data: Relation data from the databag + Returns: + dict: Relation data in dict format. + """ + connection_data = {} + for key in raw_relation_data: + try: + connection_data[key] = json.loads(raw_relation_data[key]) + except (json.decoder.JSONDecodeError, TypeError): + connection_data[key] = raw_relation_data[key] + return connection_data + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + return diff(event, self.local_unit) + + def _on_relation_changed(self, event: RelationChangedEvent) -> None: + """Notify the charm about the presence of S3 credentials.""" + # check if the mandatory options are in the relation data + contains_required_options = True + # get current credentials data + credentials = self.get_s3_connection_info() + # records missing options + missing_options = [] + for configuration_option in S3_REQUIRED_OPTIONS: + if configuration_option not in credentials: + contains_required_options = False + missing_options.append(configuration_option) + # emit credential change event only if all mandatory fields are present + if contains_required_options: + getattr(self.on, "credentials_changed").emit( + event.relation, app=event.app, unit=event.unit + ) + else: + logger.warning( + f"Some mandatory fields: {missing_options} are not present, do not emit credential change event!" 
+ ) + + def get_s3_connection_info(self) -> Dict[str, str]: + """Return the s3 credentials as a dictionary.""" + for relation in self.relations: + if relation and relation.app: + return self._load_relation_data(relation.data[relation.app]) + + return {} + + def _on_relation_broken(self, event: RelationBrokenEvent) -> None: + """Notify the charm about a broken S3 credential store relation.""" + getattr(self.on, "credentials_gone").emit(event.relation, app=event.app, unit=event.unit) + + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return list(self.charm.model.relations[self.relation_name]) diff --git a/single_kernel_mongo/lib/charms/operator_libs_linux/v1/snap.py b/single_kernel_mongo/lib/charms/operator_libs_linux/v1/snap.py new file mode 100644 index 00000000..71cdee39 --- /dev/null +++ b/single_kernel_mongo/lib/charms/operator_libs_linux/v1/snap.py @@ -0,0 +1,1065 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Representations of the system's Snaps, and abstractions around managing them. + +The `snap` module provides convenience methods for listing, installing, refreshing, and removing +Snap packages, in addition to setting and getting configuration options for them. + +In the `snap` module, `SnapCache` creates a dict-like mapping of `Snap` objects at when +instantiated. Installed snaps are fully populated, and available snaps are lazily-loaded upon +request. This module relies on an installed and running `snapd` daemon to perform operations over +the `snapd` HTTP API. + +`SnapCache` objects can be used to install or modify Snap packages by name in a manner similar to +using the `snap` command from the commandline. + +An example of adding Juju to the system with `SnapCache` and setting a config value: + +```python +try: + cache = snap.SnapCache() + juju = cache["juju"] + + if not juju.present: + juju.ensure(snap.SnapState.Latest, channel="beta") + juju.set({"some.key": "value", "some.key2": "value2"}) +except snap.SnapError as e: + logger.error("An exception occurred when installing charmcraft. Reason: %s", e.message) +``` + +In addition, the `snap` module provides "bare" methods which can act on Snap packages as +simple function calls. :meth:`add`, :meth:`remove`, and :meth:`ensure` are provided, as +well as :meth:`add_local` for installing directly from a local `.snap` file. These return +`Snap` objects. + +As an example of installing several Snaps and checking details: + +```python +try: + nextcloud, charmcraft = snap.add(["nextcloud", "charmcraft"]) + if nextcloud.get("mode") != "production": + nextcloud.set({"mode": "production"}) +except snap.SnapError as e: + logger.error("An exception occurred when installing snaps. 
Reason: %s" % e.message) +``` +""" + +import http.client +import json +import logging +import os +import re +import socket +import subprocess +import sys +import urllib.error +import urllib.parse +import urllib.request +from collections.abc import Mapping +from datetime import datetime, timedelta, timezone +from enum import Enum +from subprocess import CalledProcessError, CompletedProcess +from typing import Any, Dict, Iterable, List, Optional, Union + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "05394e5893f94f2d90feb7cbe6b633cd" + +# Increment this major API version when introducing breaking changes +LIBAPI = 1 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 12 + + +# Regex to locate 7-bit C1 ANSI sequences +ansi_filter = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") + + +def _cache_init(func): + def inner(*args, **kwargs): + if _Cache.cache is None: + _Cache.cache = SnapCache() + return func(*args, **kwargs) + + return inner + + +# recursive hints seems to error out pytest +JSONType = Union[Dict[str, Any], List[Any], str, int, float] + + +class SnapService: + """Data wrapper for snap services.""" + + def __init__( + self, + daemon: Optional[str] = None, + daemon_scope: Optional[str] = None, + enabled: bool = False, + active: bool = False, + activators: List[str] = [], + **kwargs, + ): + self.daemon = daemon + self.daemon_scope = kwargs.get("daemon-scope", None) or daemon_scope + self.enabled = enabled + self.active = active + self.activators = activators + + def as_dict(self) -> Dict: + """Return instance representation as dict.""" + return { + "daemon": self.daemon, + "daemon_scope": self.daemon_scope, + "enabled": self.enabled, + "active": self.active, + "activators": self.activators, + } + + +class MetaCache(type): + """MetaCache class used for initialising the snap cache.""" + + @property + def cache(cls) -> "SnapCache": + """Property for returning the snap cache.""" + return cls._cache + + @cache.setter + def cache(cls, cache: "SnapCache") -> None: + """Setter for the snap cache.""" + cls._cache = cache + + def __getitem__(cls, name) -> "Snap": + """Snap cache getter.""" + return cls._cache[name] + + +class _Cache(object, metaclass=MetaCache): + _cache = None + + +class Error(Exception): + """Base class of most errors raised by this library.""" + + def __repr__(self): + """Represent the Error class.""" + return "<{}.{} {}>".format(type(self).__module__, type(self).__name__, self.args) + + @property + def name(self): + """Return a string representation of the model plus class.""" + return "<{}.{}>".format(type(self).__module__, type(self).__name__) + + @property + def message(self): + """Return the message passed as an argument.""" + return self.args[0] + + +class SnapAPIError(Error): + """Raised when an HTTP API error occurs talking to the Snapd server.""" + + def __init__(self, body: Dict, code: int, status: str, message: str): + super().__init__(message) # Makes str(e) return message + self.body = body + self.code = code + self.status = status + self._message = message + + def __repr__(self): + """Represent the SnapAPIError class.""" + return "APIError({!r}, {!r}, {!r}, {!r})".format( + self.body, self.code, self.status, self._message + ) + + +class SnapState(Enum): + """The state of a snap on the system or in the cache.""" + + Present = "present" + Absent = "absent" + Latest = "latest" + Available = "available" + + 
+class SnapError(Error): + """Raised when there's an error running snap control commands.""" + + +class SnapNotFoundError(Error): + """Raised when a requested snap is not known to the system.""" + + +class Snap(object): + """Represents a snap package and its properties. + + `Snap` exposes the following properties about a snap: + - name: the name of the snap + - state: a `SnapState` representation of its install status + - channel: "stable", "candidate", "beta", and "edge" are common + - revision: a string representing the snap's revision + - confinement: "classic" or "strict" + """ + + def __init__( + self, + name, + state: SnapState, + channel: str, + revision: int, + confinement: str, + apps: Optional[List[Dict[str, str]]] = None, + cohort: Optional[str] = "", + ) -> None: + self._name = name + self._state = state + self._channel = channel + self._revision = revision + self._confinement = confinement + self._cohort = cohort + self._apps = apps or [] + self._snap_client = SnapClient() + + def __eq__(self, other) -> bool: + """Equality for comparison.""" + return isinstance(other, self.__class__) and ( + self._name, + self._revision, + ) == (other._name, other._revision) + + def __hash__(self): + """Calculate a hash for this snap.""" + return hash((self._name, self._revision)) + + def __repr__(self): + """Represent the object such that it can be reconstructed.""" + return "<{}.{}: {}>".format(self.__module__, self.__class__.__name__, self.__dict__) + + def __str__(self): + """Represent the snap object as a string.""" + return "<{}: {}-{}.{} -- {}>".format( + self.__class__.__name__, + self._name, + self._revision, + self._channel, + str(self._state), + ) + + def _snap(self, command: str, optargs: Optional[Iterable[str]] = None) -> str: + """Perform a snap operation. + + Args: + command: the snap command to execute + optargs: an (optional) list of additional arguments to pass, + commonly confinement or channel + + Raises: + SnapError if there is a problem encountered + """ + optargs = optargs or [] + _cmd = ["snap", command, self._name, *optargs] + try: + return subprocess.check_output(_cmd, universal_newlines=True) + except CalledProcessError as e: + raise SnapError( + "Snap: {!r}; command {!r} failed with output = {!r}".format( + self._name, _cmd, e.output + ) + ) + + def _snap_daemons( + self, + command: List[str], + services: Optional[List[str]] = None, + ) -> CompletedProcess: + """Perform snap app commands. + + Args: + command: the snap command to execute + services: the snap service to execute command on + + Raises: + SnapError if there is a problem encountered + """ + if services: + # an attempt to keep the command constrained to the snap instance's services + services = ["{}.{}".format(self._name, service) for service in services] + else: + services = [self._name] + + _cmd = ["snap", *command, *services] + + try: + return subprocess.run(_cmd, universal_newlines=True, check=True, capture_output=True) + except CalledProcessError as e: + raise SnapError("Could not {} for snap [{}]: {}".format(_cmd, self._name, e.stderr)) + + def get(self, key) -> str: + """Fetch a snap configuration value. + + Args: + key: the key to retrieve + """ + return self._snap("get", [key]).strip() + + def set(self, config: Dict) -> str: + """Set a snap configuration value. + + Args: + config: a dictionary containing keys and values specifying the config to set. 
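+
+        Example (a sketch with an arbitrary key)::
+
+            snap.set({"some.key": "value"})  # runs: snap set <name> some.key="value"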
+ """ + args = ['{}="{}"'.format(key, val) for key, val in config.items()] + + return self._snap("set", [*args]) + + def unset(self, key) -> str: + """Unset a snap configuration value. + + Args: + key: the key to unset + """ + return self._snap("unset", [key]) + + def start(self, services: Optional[List[str]] = None, enable: Optional[bool] = False) -> None: + """Start a snap's services. + + Args: + services (list): (optional) list of individual snap services to start (otherwise all) + enable (bool): (optional) flag to enable snap services on start. Default `false` + """ + args = ["start", "--enable"] if enable else ["start"] + self._snap_daemons(args, services) + + def stop(self, services: Optional[List[str]] = None, disable: Optional[bool] = False) -> None: + """Stop a snap's services. + + Args: + services (list): (optional) list of individual snap services to stop (otherwise all) + disable (bool): (optional) flag to disable snap services on stop. Default `False` + """ + args = ["stop", "--disable"] if disable else ["stop"] + self._snap_daemons(args, services) + + def logs(self, services: Optional[List[str]] = None, num_lines: Optional[int] = 10) -> str: + """Fetch a snap services' logs. + + Args: + services (list): (optional) list of individual snap services to show logs from + (otherwise all) + num_lines (int): (optional) integer number of log lines to return. Default `10` + """ + args = ["logs", "-n={}".format(num_lines)] if num_lines else ["logs"] + return self._snap_daemons(args, services).stdout + + def connect( + self, plug: str, service: Optional[str] = None, slot: Optional[str] = None + ) -> None: + """Connect a plug to a slot. + + Args: + plug (str): the plug to connect + service (str): (optional) the snap service name to plug into + slot (str): (optional) the snap service slot to plug in to + + Raises: + SnapError if there is a problem encountered + """ + command = ["connect", "{}:{}".format(self._name, plug)] + + if service and slot: + command = command + ["{}:{}".format(service, slot)] + elif slot: + command = command + [slot] + + _cmd = ["snap", *command] + try: + subprocess.run(_cmd, universal_newlines=True, check=True, capture_output=True) + except CalledProcessError as e: + raise SnapError("Could not {} for snap [{}]: {}".format(_cmd, self._name, e.stderr)) + + def hold(self, duration: Optional[timedelta] = None) -> None: + """Add a refresh hold to a snap. + + Args: + duration: duration for the hold, or None (the default) to hold this snap indefinitely. + """ + hold_str = "forever" + if duration is not None: + seconds = round(duration.total_seconds()) + hold_str = f"{seconds}s" + self._snap("refresh", [f"--hold={hold_str}"]) + + def unhold(self) -> None: + """Remove the refresh hold of a snap.""" + self._snap("refresh", ["--unhold"]) + + def restart( + self, services: Optional[List[str]] = None, reload: Optional[bool] = False + ) -> None: + """Restarts a snap's services. + + Args: + services (list): (optional) list of individual snap services to show logs from. + (otherwise all) + reload (bool): (optional) flag to use the service reload command, if available. + Default `False` + """ + args = ["restart", "--reload"] if reload else ["restart"] + self._snap_daemons(args, services) + + def _install( + self, + channel: Optional[str] = "", + cohort: Optional[str] = "", + revision: Optional[int] = None, + ) -> None: + """Add a snap to the system. 
+ + Args: + channel: the channel to install from + cohort: optional, the key of a cohort that this snap belongs to + revision: optional, the revision of the snap to install + """ + cohort = cohort or self._cohort + + args = [] + if self.confinement == "classic": + args.append("--classic") + if channel: + args.append('--channel="{}"'.format(channel)) + if revision: + args.append('--revision="{}"'.format(revision)) + if cohort: + args.append('--cohort="{}"'.format(cohort)) + + self._snap("install", args) + + def _refresh( + self, + channel: Optional[str] = "", + cohort: Optional[str] = "", + revision: Optional[int] = None, + leave_cohort: Optional[bool] = False, + ) -> None: + """Refresh a snap. + + Args: + channel: the channel to install from + cohort: optionally, specify a cohort. + revision: optionally, specify the revision of the snap to refresh + leave_cohort: leave the current cohort. + """ + args = [] + if channel: + args.append('--channel="{}"'.format(channel)) + + if revision: + args.append('--revision="{}"'.format(revision)) + + if not cohort: + cohort = self._cohort + + if leave_cohort: + self._cohort = "" + args.append("--leave-cohort") + elif cohort: + args.append('--cohort="{}"'.format(cohort)) + + self._snap("refresh", args) + + def _remove(self) -> str: + """Remove a snap from the system.""" + return self._snap("remove") + + @property + def name(self) -> str: + """Returns the name of the snap.""" + return self._name + + def ensure( + self, + state: SnapState, + classic: Optional[bool] = False, + channel: Optional[str] = "", + cohort: Optional[str] = "", + revision: Optional[int] = None, + ): + """Ensure that a snap is in a given state. + + Args: + state: a `SnapState` to reconcile to. + classic: an (Optional) boolean indicating whether classic confinement should be used + channel: the channel to install from + cohort: optional. Specify the key of a snap cohort. + revision: optional. the revision of the snap to install/refresh + + While both channel and revision could be specified, the underlying snap install/refresh + command will determine which one takes precedence (revision at this time) + + Raises: + SnapError if an error is encountered + """ + self._confinement = "classic" if classic or self._confinement == "classic" else "" + + if state not in (SnapState.Present, SnapState.Latest): + # We are attempting to remove this snap. + if self._state in (SnapState.Present, SnapState.Latest): + # The snap is installed, so we run _remove. + self._remove() + else: + # The snap is not installed -- no need to do anything. + pass + else: + # We are installing or refreshing a snap. + if self._state not in (SnapState.Present, SnapState.Latest): + # The snap is not installed, so we install it. + self._install(channel, cohort, revision) + else: + # The snap is installed, but we are changing it (e.g., switching channels). 
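+                # When both channel and revision are given, the underlying
+                # `snap refresh` decides which takes precedence (currently the revision).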
+ self._refresh(channel, cohort, revision) + + self._update_snap_apps() + self._state = state + + def _update_snap_apps(self) -> None: + """Update a snap's apps after snap changes state.""" + try: + self._apps = self._snap_client.get_installed_snap_apps(self._name) + except SnapAPIError: + logger.debug("Unable to retrieve snap apps for {}".format(self._name)) + self._apps = [] + + @property + def present(self) -> bool: + """Report whether or not a snap is present.""" + return self._state in (SnapState.Present, SnapState.Latest) + + @property + def latest(self) -> bool: + """Report whether the snap is the most recent version.""" + return self._state is SnapState.Latest + + @property + def state(self) -> SnapState: + """Report the current snap state.""" + return self._state + + @state.setter + def state(self, state: SnapState) -> None: + """Set the snap state to a given value. + + Args: + state: a `SnapState` to reconcile the snap to. + + Raises: + SnapError if an error is encountered + """ + if self._state is not state: + self.ensure(state) + self._state = state + + @property + def revision(self) -> int: + """Returns the revision for a snap.""" + return self._revision + + @property + def channel(self) -> str: + """Returns the channel for a snap.""" + return self._channel + + @property + def confinement(self) -> str: + """Returns the confinement for a snap.""" + return self._confinement + + @property + def apps(self) -> List: + """Returns (if any) the installed apps of the snap.""" + self._update_snap_apps() + return self._apps + + @property + def services(self) -> Dict: + """Returns (if any) the installed services of the snap.""" + self._update_snap_apps() + services = {} + for app in self._apps: + if "daemon" in app: + services[app["name"]] = SnapService(**app).as_dict() + + return services + + @property + def held(self) -> bool: + """Report whether the snap has a hold.""" + info = self._snap("info") + return "hold:" in info + + +class _UnixSocketConnection(http.client.HTTPConnection): + """Implementation of HTTPConnection that connects to a named Unix socket.""" + + def __init__(self, host, timeout=None, socket_path=None): + if timeout is None: + super().__init__(host) + else: + super().__init__(host, timeout=timeout) + self.socket_path = socket_path + + def connect(self): + """Override connect to use Unix socket (instead of TCP socket).""" + if not hasattr(socket, "AF_UNIX"): + raise NotImplementedError("Unix sockets not supported on {}".format(sys.platform)) + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.sock.connect(self.socket_path) + if self.timeout is not None: + self.sock.settimeout(self.timeout) + + +class _UnixSocketHandler(urllib.request.AbstractHTTPHandler): + """Implementation of HTTPHandler that uses a named Unix socket.""" + + def __init__(self, socket_path: str): + super().__init__() + self.socket_path = socket_path + + def http_open(self, req) -> http.client.HTTPResponse: + """Override http_open to use a Unix socket connection (instead of TCP).""" + return self.do_open(_UnixSocketConnection, req, socket_path=self.socket_path) + + +class SnapClient: + """Snapd API client to talk to HTTP over UNIX sockets. + + In order to avoid shelling out and/or involving sudo in calling the snapd API, + use a wrapper based on the Pebble Client, trimmed down to only the utility methods + needed for talking to snapd. 
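+
+    A minimal usage sketch (assumes a running snapd exposing the default socket):
+
+    ```python
+    client = SnapClient()
+    installed = client.get_installed_snaps()  # metadata for installed snaps
+    ```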
+    """
+
+    def __init__(
+        self,
+        socket_path: str = "/run/snapd.socket",
+        opener: Optional[urllib.request.OpenerDirector] = None,
+        base_url: str = "http://localhost/v2/",
+        timeout: float = 5.0,
+    ):
+        """Initialize a client instance.
+
+        Args:
+            socket_path: a path to the socket on the filesystem. Defaults to /run/snapd.socket
+            opener: specifies an opener for unix socket, if unspecified a default is used
+            base_url: base url for making requests to the snap client. Defaults to
+                http://localhost/v2/
+            timeout: timeout in seconds to use when making requests to the API. Default is 5.0s.
+        """
+        if opener is None:
+            opener = self._get_default_opener(socket_path)
+        self.opener = opener
+        self.base_url = base_url
+        self.timeout = timeout
+
+    @classmethod
+    def _get_default_opener(cls, socket_path):
+        """Build the default opener to use for requests (HTTP over Unix socket)."""
+        opener = urllib.request.OpenerDirector()
+        opener.add_handler(_UnixSocketHandler(socket_path))
+        opener.add_handler(urllib.request.HTTPDefaultErrorHandler())
+        opener.add_handler(urllib.request.HTTPRedirectHandler())
+        opener.add_handler(urllib.request.HTTPErrorProcessor())
+        return opener
+
+    def _request(
+        self,
+        method: str,
+        path: str,
+        query: Optional[Dict] = None,
+        body: Optional[Dict] = None,
+    ) -> JSONType:
+        """Make a JSON request to the Snapd server with the given HTTP method and path.
+
+        If query dict is provided, it is encoded and appended as a query string
+        to the URL. If body dict is provided, it is serialized as JSON and used
+        as the HTTP body (with Content-Type: "application/json"). The resulting
+        body is decoded from JSON.
+        """
+        headers = {"Accept": "application/json"}
+        data = None
+        if body is not None:
+            data = json.dumps(body).encode("utf-8")
+            headers["Content-Type"] = "application/json"
+
+        response = self._request_raw(method, path, query, headers, data)
+        return json.loads(response.read().decode())["result"]
+
+    def _request_raw(
+        self,
+        method: str,
+        path: str,
+        query: Optional[Dict] = None,
+        headers: Optional[Dict] = None,
+        data: Optional[bytes] = None,
+    ) -> http.client.HTTPResponse:
+        """Make a request to the Snapd server; return the raw HTTPResponse object."""
+        url = self.base_url + path
+        if query:
+            url = url + "?" + urllib.parse.urlencode(query)
+
+        if headers is None:
+            headers = {}
+        request = urllib.request.Request(url, method=method, data=data, headers=headers)
+
+        try:
+            response = self.opener.open(request, timeout=self.timeout)
+        except urllib.error.HTTPError as e:
+            code = e.code
+            status = e.reason
+            message = ""
+            try:
+                body = json.loads(e.read().decode())["result"]
+            except (IOError, ValueError, KeyError) as e2:
+                # Will only happen on read error or if snapd sends invalid JSON.
+                body = {}
+                message = "{} - {}".format(type(e2).__name__, e2)
+            raise SnapAPIError(body, code, status, message)
+        except urllib.error.URLError as e:
+            raise SnapAPIError({}, 500, "Not found", e.reason)
+        return response
+
+    def get_installed_snaps(self) -> Dict:
+        """Get information about currently installed snaps."""
+        return self._request("GET", "snaps")
+
+    def get_snap_information(self, name: str) -> Dict:
+        """Query the snap server for information about a single snap."""
+        return self._request("GET", "find", {"name": name})[0]
+
+    def get_installed_snap_apps(self, name: str) -> List:
+        """Query the snap server for apps belonging to a named, currently installed snap."""
+        return self._request("GET", "apps", {"names": name, "select": "service"})
+
+
+class SnapCache(Mapping):
+    """An abstraction to represent installed/available packages.
+
+    When instantiated, `SnapCache` iterates through the list of installed
+    snaps using the `snapd` HTTP API, and a list of available snaps by reading
+    the filesystem to populate the cache. Information about available snaps is lazily-loaded
+    from the `snapd` API when requested.
+    """
+
+    def __init__(self):
+        if not self.snapd_installed:
+            raise SnapError("snapd is not installed or not in /usr/bin") from None
+        self._snap_client = SnapClient()
+        self._snap_map = {}
+        # snapd is guaranteed to be present at this point, so load unconditionally.
+        self._load_available_snaps()
+        self._load_installed_snaps()
+
+    def __contains__(self, key: str) -> bool:
+        """Check if a given snap is in the cache."""
+        return key in self._snap_map
+
+    def __len__(self) -> int:
+        """Report number of items in the snap cache."""
+        return len(self._snap_map)
+
+    def __iter__(self) -> Iterable["Snap"]:
+        """Provide iterator for the snap cache."""
+        return iter(self._snap_map.values())
+
+    def __getitem__(self, snap_name: str) -> Snap:
+        """Return either the installed version or latest version for a given snap."""
+        snap = self._snap_map.get(snap_name, None)
+        if snap is None:
+            # The snapd cache file may not have existed when _snap_map was
+            # populated. This is normal.
+            try:
+                self._snap_map[snap_name] = self._load_info(snap_name)
+            except SnapAPIError:
+                raise SnapNotFoundError("Snap '{}' not found!".format(snap_name))
+
+        return self._snap_map[snap_name]
+
+    @property
+    def snapd_installed(self) -> bool:
+        """Check whether snapd has been installed on the system."""
+        return os.path.isfile("/usr/bin/snap")
+
+    def _load_available_snaps(self) -> None:
+        """Load the list of available snaps from disk.
+
+        Leave them empty and lazily load later if asked for.
+        """
+        if not os.path.isfile("/var/cache/snapd/names"):
+            # The snap catalog may not be populated yet; this is normal.
+            # snapd updates the cache infrequently and the cache file may not
+            # currently exist.
+            return
+
+        with open("/var/cache/snapd/names", "r") as f:
+            for line in f:
+                if line.strip():
+                    self._snap_map[line.strip()] = None
+
+    def _load_installed_snaps(self) -> None:
+        """Load the installed snaps into the dict."""
+        installed = self._snap_client.get_installed_snaps()
+
+        for i in installed:
+            snap = Snap(
+                name=i["name"],
+                state=SnapState.Latest,
+                channel=i["channel"],
+                revision=int(i["revision"]),
+                confinement=i["confinement"],
+                apps=i.get("apps", None),
+            )
+            self._snap_map[snap.name] = snap
+
+    def _load_info(self, name) -> Snap:
+        """Load info for snaps which are not installed if requested.
+
+        Args:
+            name: a string representing the name of the snap
+        """
+        info = self._snap_client.get_snap_information(name)
+
+        return Snap(
+            name=info["name"],
+            state=SnapState.Available,
+            channel=info["channel"],
+            revision=int(info["revision"]),
+            confinement=info["confinement"],
+            apps=None,
+        )
+
+
+@_cache_init
+def add(
+    snap_names: Union[str, List[str]],
+    state: Union[str, SnapState] = SnapState.Latest,
+    channel: Optional[str] = "",
+    classic: Optional[bool] = False,
+    cohort: Optional[str] = "",
+    revision: Optional[int] = None,
+) -> Union[Snap, List[Snap]]:
+    """Add a snap to the system.
+
+    Args:
+        snap_names: the name or names of the snaps to install
+        state: a string or `SnapState` representation of the desired state, one of
+            [`Present` or `Latest`]
+        channel: an (Optional) channel as a string. Defaults to 'latest'
+        classic: an (Optional) boolean specifying whether it should be added with classic
+            confinement. Default `False`
+        cohort: an (Optional) string specifying the snap cohort to use
+        revision: an (Optional) integer specifying the snap revision to use
+
+    Raises:
+        SnapError if some snaps failed to install or were not found.
+    """
+    if not channel and not revision:
+        channel = "latest"
+
+    snap_names = [snap_names] if type(snap_names) is str else snap_names
+    if not snap_names:
+        raise TypeError("Expected at least one snap to add, received zero!")
+
+    if type(state) is str:
+        state = SnapState(state)
+
+    return _wrap_snap_operations(snap_names, state, channel, classic, cohort, revision)
+
+
+@_cache_init
+def remove(snap_names: Union[str, List[str]]) -> Union[Snap, List[Snap]]:
+    """Remove specified snap(s) from the system.
+
+    Args:
+        snap_names: the name or names of the snaps to remove
+
+    Raises:
+        SnapError if some snaps failed to be removed.
+    """
+    snap_names = [snap_names] if type(snap_names) is str else snap_names
+    if not snap_names:
+        raise TypeError("Expected at least one snap to remove, received zero!")
+
+    return _wrap_snap_operations(snap_names, SnapState.Absent, "", False)
+
+
+@_cache_init
+def ensure(
+    snap_names: Union[str, List[str]],
+    state: str,
+    channel: Optional[str] = "",
+    classic: Optional[bool] = False,
+    cohort: Optional[str] = "",
+    revision: Optional[int] = None,
+) -> Union[Snap, List[Snap]]:
+    """Ensure specified snaps are in a given state on the system.
+
+    Args:
+        snap_names: the name(s) of the snaps to operate on
+        state: a string representation of the desired state, from `SnapState`
+        channel: an (Optional) channel as a string. Defaults to 'latest'
+        classic: an (Optional) boolean specifying whether it should be added with classic
+            confinement. Default `False`
+        cohort: an (Optional) string specifying the snap cohort to use
+        revision: an (Optional) integer specifying the snap revision to use
+
+    When both channel and revision are specified, the underlying snap install/refresh
+    command will determine the precedence (revision at the time of adding this)
+
+    Raises:
+        SnapError if the snap is not in the cache.
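+
+    Example (a sketch with an arbitrary snap name)::
+
+        ensure("some-snap", "latest", channel="stable")  # install or refresh from stable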
+    """
+    if not revision and not channel:
+        channel = "latest"
+
+    if state in ("present", "latest") or revision:
+        return add(snap_names, SnapState(state), channel, classic, cohort, revision)
+    else:
+        return remove(snap_names)
+
+
+def _wrap_snap_operations(
+    snap_names: List[str],
+    state: SnapState,
+    channel: str,
+    classic: bool,
+    cohort: Optional[str] = "",
+    revision: Optional[int] = None,
+) -> Union[Snap, List[Snap]]:
+    """Wrap common operations for bare commands."""
+    snaps = {"success": [], "failed": []}
+
+    op = "remove" if state is SnapState.Absent else "install or refresh"
+
+    for s in snap_names:
+        try:
+            snap = _Cache[s]
+            if state is SnapState.Absent:
+                snap.ensure(state=SnapState.Absent)
+            else:
+                snap.ensure(
+                    state=state, classic=classic, channel=channel, cohort=cohort, revision=revision
+                )
+            snaps["success"].append(snap)
+        except SnapError as e:
+            logger.warning("Failed to {} snap {}: {}!".format(op, s, e.message))
+            snaps["failed"].append(s)
+        except SnapNotFoundError:
+            logger.warning("Snap '{}' not found in cache!".format(s))
+            snaps["failed"].append(s)
+
+    if len(snaps["failed"]):
+        raise SnapError(
+            "Failed to install or refresh snap(s): {}".format(", ".join(list(snaps["failed"])))
+        )
+
+    return snaps["success"] if len(snaps["success"]) > 1 else snaps["success"][0]
+
+
+def install_local(
+    filename: str, classic: Optional[bool] = False, dangerous: Optional[bool] = False
+) -> Snap:
+    """Install a snap from a local `.snap` file.
+
+    Args:
+        filename: the path to a local .snap file to install
+        classic: whether to use classic confinement
+        dangerous: whether --dangerous should be passed to install snaps without a signature
+
+    Raises:
+        SnapError if there is a problem encountered
+    """
+    _cmd = [
+        "snap",
+        "install",
+        filename,
+    ]
+    if classic:
+        _cmd.append("--classic")
+    if dangerous:
+        _cmd.append("--dangerous")
+    try:
+        result = subprocess.check_output(_cmd, universal_newlines=True).splitlines()[-1]
+        snap_name, _ = result.split(" ", 1)
+        snap_name = ansi_filter.sub("", snap_name)
+
+        c = SnapCache()
+
+        try:
+            return c[snap_name]
+        except SnapAPIError as e:
+            logger.error(
+                "Could not find snap {} when querying Snapd socket: {}".format(snap_name, e.body)
+            )
+            raise SnapError("Failed to find snap {} in Snap cache".format(snap_name))
+    except CalledProcessError as e:
+        raise SnapError("Could not install snap {}: {}".format(filename, e.output))
+
+
+def _system_set(config_item: str, value: str) -> None:
+    """Set system snapd config values.
+
+    Args:
+        config_item: name of snap system setting. E.g. 'refresh.hold'
+        value: value to assign
+    """
+    _cmd = ["snap", "set", "system", "{}={}".format(config_item, value)]
+    try:
+        subprocess.check_call(_cmd, universal_newlines=True)
+    except CalledProcessError:
+        raise SnapError("Failed setting system config '{}' to '{}'".format(config_item, value))
+
+
+def hold_refresh(days: int = 90, forever: bool = False) -> bool:
+    """Set the system-wide snap refresh hold.
+
+    Args:
+        days: number of days to hold system refreshes for. Maximum 90. Set to zero to remove hold.
+        forever: if True, will set a hold forever.
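+
+    Example (a sketch)::
+
+        hold_refresh(days=30)       # hold refreshes for the next 30 days
+        hold_refresh(forever=True)  # hold refreshes indefinitely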
+ """ + if not isinstance(forever, bool): + raise TypeError("forever must be a bool") + if not isinstance(days, int): + raise TypeError("days must be an int") + if forever: + _system_set("refresh.hold", "forever") + logger.info("Set system-wide snap refresh hold to: forever") + elif days == 0: + _system_set("refresh.hold", "") + logger.info("Removed system-wide snap refresh hold") + else: + # Currently the snap daemon can only hold for a maximum of 90 days + if not 1 <= days <= 90: + raise ValueError("days must be between 1 and 90") + # Add the number of days to current time + target_date = datetime.now(timezone.utc).astimezone() + timedelta(days=days) + # Format for the correct datetime format + hold_date = target_date.strftime("%Y-%m-%dT%H:%M:%S%z") + # Python dumps the offset in format '+0100', we need '+01:00' + hold_date = "{0}:{1}".format(hold_date[:-2], hold_date[-2:]) + # Actually set the hold date + _system_set("refresh.hold", hold_date) + logger.info("Set system-wide snap refresh hold to: %s", hold_date) diff --git a/single_kernel_mongo/lib/charms/tls_certificates_interface/v3/tls_certificates.py b/single_kernel_mongo/lib/charms/tls_certificates_interface/v3/tls_certificates.py new file mode 100644 index 00000000..141412b0 --- /dev/null +++ b/single_kernel_mongo/lib/charms/tls_certificates_interface/v3/tls_certificates.py @@ -0,0 +1,2061 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + + +"""Library for the tls-certificates relation. + +This library contains the Requires and Provides classes for handling the tls-certificates +interface. + +Pre-requisites: + - Juju >= 3.0 + +## Getting Started +From a charm directory, fetch the library using `charmcraft`: + +```shell +charmcraft fetch-lib charms.tls_certificates_interface.v3.tls_certificates +``` + +Add the following libraries to the charm's `requirements.txt` file: +- jsonschema +- cryptography >= 42.0.0 + +Add the following section to the charm's `charmcraft.yaml` file: +```yaml +parts: + charm: + build-packages: + - libffi-dev + - libssl-dev + - rustc + - cargo +``` + +### Provider charm +The provider charm is the charm providing certificates to another charm that requires them. In +this example, the provider charm is storing its private key using a peer relation interface called +`replicas`. 
+
+Example:
+```python
+from charms.tls_certificates_interface.v3.tls_certificates import (
+    CertificateCreationRequestEvent,
+    CertificateRevocationRequestEvent,
+    TLSCertificatesProvidesV3,
+    generate_private_key,
+)
+from ops.charm import CharmBase, InstallEvent
+from ops.main import main
+from ops.model import ActiveStatus, WaitingStatus
+
+
+def generate_ca(private_key: bytes, subject: str) -> str:
+    return "whatever ca content"
+
+
+def generate_certificate(ca: str, private_key: str, csr: str) -> str:
+    return "Whatever certificate"
+
+
+class ExampleProviderCharm(CharmBase):
+
+    def __init__(self, *args):
+        super().__init__(*args)
+        self.certificates = TLSCertificatesProvidesV3(self, "certificates")
+        self.framework.observe(
+            self.certificates.on.certificate_request,
+            self._on_certificate_request
+        )
+        self.framework.observe(
+            self.certificates.on.certificate_revocation_request,
+            self._on_certificate_revocation_request
+        )
+        self.framework.observe(self.on.install, self._on_install)
+
+    def _on_install(self, event: InstallEvent) -> None:
+        private_key_password = b"banana"
+        private_key = generate_private_key(password=private_key_password)
+        ca_certificate = generate_ca(private_key=private_key, subject="whatever")
+        replicas_relation = self.model.get_relation("replicas")
+        if not replicas_relation:
+            self.unit.status = WaitingStatus("Waiting for peer relation to be created")
+            event.defer()
+            return
+        replicas_relation.data[self.app].update(
+            {
+                "private_key_password": "banana",
+                "private_key": private_key.decode(),
+                "ca_certificate": ca_certificate,
+            }
+        )
+        self.unit.status = ActiveStatus()
+
+    def _on_certificate_request(self, event: CertificateCreationRequestEvent) -> None:
+        replicas_relation = self.model.get_relation("replicas")
+        if not replicas_relation:
+            self.unit.status = WaitingStatus("Waiting for peer relation to be created")
+            event.defer()
+            return
+        ca_certificate = replicas_relation.data[self.app].get("ca_certificate")
+        private_key = replicas_relation.data[self.app].get("private_key")
+        certificate = generate_certificate(
+            ca=ca_certificate,
+            private_key=private_key,
+            csr=event.certificate_signing_request,
+        )
+
+        self.certificates.set_relation_certificate(
+            certificate=certificate,
+            certificate_signing_request=event.certificate_signing_request,
+            ca=ca_certificate,
+            chain=[ca_certificate, certificate],
+            relation_id=event.relation_id,
+            recommended_expiry_notification_time=720,
+        )
+
+    def _on_certificate_revocation_request(self, event: CertificateRevocationRequestEvent) -> None:
+        # Do what you want to do with this information
+        pass
+
+
+if __name__ == "__main__":
+    main(ExampleProviderCharm)
+```
+
+### Requirer charm
+The requirer charm is the charm requiring certificates from another charm that provides them. In
+this example, the requirer charm is storing its certificates using a peer relation interface called
+`replicas`.
+
+Example:
+```python
+from charms.tls_certificates_interface.v3.tls_certificates import (
+    AllCertificatesInvalidatedEvent,
+    CertificateAvailableEvent,
+    CertificateExpiringEvent,
+    CertificateInvalidatedEvent,
+    TLSCertificatesRequiresV3,
+    generate_csr,
+    generate_private_key,
+)
+from ops.charm import CharmBase, RelationCreatedEvent
+from ops.main import main
+from ops.model import ActiveStatus, WaitingStatus
+from typing import Union
+
+
+class ExampleRequirerCharm(CharmBase):
+
+    def __init__(self, *args):
+        super().__init__(*args)
+        self.cert_subject = "whatever"
+        self.certificates = TLSCertificatesRequiresV3(self, "certificates")
+        self.framework.observe(self.on.install, self._on_install)
+        self.framework.observe(
+            self.on.certificates_relation_created, self._on_certificates_relation_created
+        )
+        self.framework.observe(
+            self.certificates.on.certificate_available, self._on_certificate_available
+        )
+        self.framework.observe(
+            self.certificates.on.certificate_expiring, self._on_certificate_expiring
+        )
+        self.framework.observe(
+            self.certificates.on.certificate_invalidated, self._on_certificate_invalidated
+        )
+        self.framework.observe(
+            self.certificates.on.all_certificates_invalidated,
+            self._on_all_certificates_invalidated
+        )
+
+    def _on_install(self, event) -> None:
+        private_key_password = b"banana"
+        private_key = generate_private_key(password=private_key_password)
+        replicas_relation = self.model.get_relation("replicas")
+        if not replicas_relation:
+            self.unit.status = WaitingStatus("Waiting for peer relation to be created")
+            event.defer()
+            return
+        replicas_relation.data[self.app].update(
+            {"private_key_password": "banana", "private_key": private_key.decode()}
+        )
+
+    def _on_certificates_relation_created(self, event: RelationCreatedEvent) -> None:
+        replicas_relation = self.model.get_relation("replicas")
+        if not replicas_relation:
+            self.unit.status = WaitingStatus("Waiting for peer relation to be created")
+            event.defer()
+            return
+        private_key_password = replicas_relation.data[self.app].get("private_key_password")
+        private_key = replicas_relation.data[self.app].get("private_key")
+        csr = generate_csr(
+            private_key=private_key.encode(),
+            private_key_password=private_key_password.encode(),
+            subject=self.cert_subject,
+        )
+        replicas_relation.data[self.app].update({"csr": csr.decode()})
+        self.certificates.request_certificate_creation(certificate_signing_request=csr)
+
+    def _on_certificate_available(self, event: CertificateAvailableEvent) -> None:
+        replicas_relation = self.model.get_relation("replicas")
+        if not replicas_relation:
+            self.unit.status = WaitingStatus("Waiting for peer relation to be created")
+            event.defer()
+            return
+        replicas_relation.data[self.app].update({"certificate": event.certificate})
+        replicas_relation.data[self.app].update({"ca": event.ca})
+        replicas_relation.data[self.app].update({"chain": event.chain})
+        self.unit.status = ActiveStatus()
+
+    def _on_certificate_expiring(
+        self, event: Union[CertificateExpiringEvent, CertificateInvalidatedEvent]
+    ) -> None:
+        replicas_relation = self.model.get_relation("replicas")
+        if not replicas_relation:
+            self.unit.status = WaitingStatus("Waiting for peer relation to be created")
+            event.defer()
+            return
+        old_csr = replicas_relation.data[self.app].get("csr")
+        private_key_password = replicas_relation.data[self.app].get("private_key_password")
+        private_key = replicas_relation.data[self.app].get("private_key")
+        new_csr = generate_csr(
+            private_key=private_key.encode(),
+            private_key_password=private_key_password.encode(),
+            subject=self.cert_subject,
+        )
+        self.certificates.request_certificate_renewal(
+            old_certificate_signing_request=old_csr,
+            new_certificate_signing_request=new_csr,
+        )
+        replicas_relation.data[self.app].update({"csr": new_csr.decode()})
+
+    def _certificate_revoked(self) -> None:
+        replicas_relation = self.model.get_relation("replicas")
+        if not replicas_relation:
+            self.unit.status = WaitingStatus("Waiting for peer relation to be created")
+            return
+        old_csr = replicas_relation.data[self.app].get("csr")
+        private_key_password = replicas_relation.data[self.app].get("private_key_password")
+        private_key = replicas_relation.data[self.app].get("private_key")
+        new_csr = generate_csr(
+            private_key=private_key.encode(),
+            private_key_password=private_key_password.encode(),
+            subject=self.cert_subject,
+        )
+        self.certificates.request_certificate_renewal(
+            old_certificate_signing_request=old_csr,
+            new_certificate_signing_request=new_csr,
+        )
+        replicas_relation.data[self.app].update({"csr": new_csr.decode()})
+        replicas_relation.data[self.app].pop("certificate")
+        replicas_relation.data[self.app].pop("ca")
+        replicas_relation.data[self.app].pop("chain")
+        self.unit.status = WaitingStatus("Waiting for new certificate")
+
+    def _on_certificate_invalidated(self, event: CertificateInvalidatedEvent) -> None:
+        replicas_relation = self.model.get_relation("replicas")
+        if not replicas_relation:
+            self.unit.status = WaitingStatus("Waiting for peer relation to be created")
+            event.defer()
+            return
+        if event.reason == "revoked":
+            self._certificate_revoked()
+        if event.reason == "expired":
+            self._on_certificate_expiring(event)
+
+    def _on_all_certificates_invalidated(self, event: AllCertificatesInvalidatedEvent) -> None:
+        # Do what you want with this information, probably remove all certificates.
+        pass
+
+
+if __name__ == "__main__":
+    main(ExampleRequirerCharm)
+```
+
+You can relate both charms by running:
+
+```bash
+juju relate <tls-certificates provider charm> <tls-certificates requirer charm>
+```
+
+"""  # noqa: D405, D410, D411, D214, D416
+
+import copy
+import ipaddress
+import json
+import logging
+import uuid
+from contextlib import suppress
+from dataclasses import dataclass
+from datetime import datetime, timedelta, timezone
+from typing import List, Literal, Optional, Union
+
+from cryptography import x509
+from cryptography.hazmat._oid import ExtensionOID
+from cryptography.hazmat.primitives import hashes, serialization
+from cryptography.hazmat.primitives.asymmetric import rsa
+from jsonschema import exceptions, validate
+from ops.charm import (
+    CharmBase,
+    CharmEvents,
+    RelationBrokenEvent,
+    RelationChangedEvent,
+    SecretExpiredEvent,
+)
+from ops.framework import EventBase, EventSource, Handle, Object
+from ops.jujuversion import JujuVersion
+from ops.model import (
+    Application,
+    ModelError,
+    Relation,
+    RelationDataContent,
+    Secret,
+    SecretNotFoundError,
+    Unit,
+)
+
+# The unique Charmhub library identifier, never change it
+LIBID = "afd8c2bccf834997afce12c2706d2ede"
+
+# Increment this major API version when introducing breaking changes
+LIBAPI = 3
+
+# Increment this PATCH version before using `charmcraft publish-lib` or reset
+# to 0 if you are raising the major API version
+LIBPATCH = 23
+
+PYDEPS = ["cryptography", "jsonschema"]
+
+REQUIRER_JSON_SCHEMA = {
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "$id": "https://canonical.github.io/charm-relation-interfaces/interfaces/tls_certificates/v1/schemas/requirer.json",
+    "type": "object",
+    "title": "`tls_certificates` requirer root schema",
+    "description": "The `tls_certificates` root schema comprises the entire requirer databag for this interface.",  # noqa: E501
+    "examples": [
+        {
+            "certificate_signing_requests": [
+                {
"certificate_signing_request": "-----BEGIN CERTIFICATE REQUEST-----\\nMIICWjCCAUICAQAwFTETMBEGA1UEAwwKYmFuYW5hLmNvbTCCASIwDQYJKoZIhvcN\\nAQEBBQADggEPADCCAQoCggEBANWlx9wE6cW7Jkb4DZZDOZoEjk1eDBMJ+8R4pyKp\\nFBeHMl1SQSDt6rAWsrfL3KOGiIHqrRY0B5H6c51L8LDuVrJG0bPmyQ6rsBo3gVke\\nDSivfSLtGvHtp8lwYnIunF8r858uYmblAR0tdXQNmnQvm+6GERvURQ6sxpgZ7iLC\\npPKDoPt+4GKWL10FWf0i82FgxWC2KqRZUtNbgKETQuARLig7etBmCnh20zmynorA\\ncY7vrpTPAaeQpGLNqqYvKV9W6yWVY08V+nqARrFrjk3vSioZSu8ZJUdZ4d9++SGl\\nbH7A6e77YDkX9i/dQ3Pa/iDtWO3tXS2MvgoxX1iSWlGNOHcCAwEAAaAAMA0GCSqG\\nSIb3DQEBCwUAA4IBAQCW1fKcHessy/ZhnIwAtSLznZeZNH8LTVOzkhVd4HA7EJW+\\nKVLBx8DnN7L3V2/uPJfHiOg4Rx7fi7LkJPegl3SCqJZ0N5bQS/KvDTCyLG+9E8Y+\\n7wqCmWiXaH1devimXZvazilu4IC2dSks2D8DPWHgsOdVks9bme8J3KjdNMQudegc\\newWZZ1Dtbd+Rn7cpKU3jURMwm4fRwGxbJ7iT5fkLlPBlyM/yFEik4SmQxFYrZCQg\\n0f3v4kBefTh5yclPy5tEH+8G0LMsbbo3dJ5mPKpAShi0QEKDLd7eR1R/712lYTK4\\ndi4XaEfqERgy68O4rvb4PGlJeRGS7AmL7Ss8wfAq\\n-----END CERTIFICATE REQUEST-----\\n" # noqa: E501 + }, + { + "certificate_signing_request": "-----BEGIN CERTIFICATE REQUEST-----\\nMIICWjCCAUICAQAwFTETMBEGA1UEAwwKYmFuYW5hLmNvbTCCASIwDQYJKoZIhvcN\\nAQEBBQADggEPADCCAQoCggEBAMk3raaX803cHvzlBF9LC7KORT46z4VjyU5PIaMb\\nQLIDgYKFYI0n5hf2Ra4FAHvOvEmW7bjNlHORFEmvnpcU5kPMNUyKFMTaC8LGmN8z\\nUBH3aK+0+FRvY4afn9tgj5435WqOG9QdoDJ0TJkjJbJI9M70UOgL711oU7ql6HxU\\n4d2ydFK9xAHrBwziNHgNZ72L95s4gLTXf0fAHYf15mDA9U5yc+YDubCKgTXzVySQ\\nUx73VCJLfC/XkZIh559IrnRv5G9fu6BMLEuBwAz6QAO4+/XidbKWN4r2XSq5qX4n\\n6EPQQWP8/nd4myq1kbg6Q8w68L/0YdfjCmbyf2TuoWeImdUCAwEAAaAAMA0GCSqG\\nSIb3DQEBCwUAA4IBAQBIdwraBvpYo/rl5MH1+1Um6HRg4gOdQPY5WcJy9B9tgzJz\\nittRSlRGTnhyIo6fHgq9KHrmUthNe8mMTDailKFeaqkVNVvk7l0d1/B90Kz6OfmD\\nxN0qjW53oP7y3QB5FFBM8DjqjmUnz5UePKoX4AKkDyrKWxMwGX5RoET8c/y0y9jp\\nvSq3Wh5UpaZdWbe1oVY8CqMVUEVQL2DPjtopxXFz2qACwsXkQZxWmjvZnRiP8nP8\\nbdFaEuh9Q6rZ2QdZDEtrU4AodPU3NaukFr5KlTUQt3w/cl+5//zils6G5zUWJ2pN\\ng7+t9PTvXHRkH+LnwaVnmsBFU2e05qADQbfIn7JA\\n-----END CERTIFICATE REQUEST-----\\n" # noqa: E501 + }, + ] + } + ], + "properties": { + "certificate_signing_requests": { + "type": "array", + "items": { + "type": "object", + "properties": { + "certificate_signing_request": {"type": "string"}, + "ca": {"type": "boolean"}, + }, + "required": ["certificate_signing_request"], + }, + } + }, + "required": ["certificate_signing_requests"], + "additionalProperties": True, +} + +PROVIDER_JSON_SCHEMA = { + "$schema": "http://json-schema.org/draft-04/schema#", + "$id": "https://canonical.github.io/charm-relation-interfaces/interfaces/tls_certificates/v1/schemas/provider.json", + "type": "object", + "title": "`tls_certificates` provider root schema", + "description": "The `tls_certificates` root schema comprises the entire provider databag for this interface.", # noqa: E501 + "examples": [ + { + "certificates": [ + { + "ca": "-----BEGIN 
CERTIFICATE-----\\nMIIDJTCCAg2gAwIBAgIUMsSK+4FGCjW6sL/EXMSxColmKw8wDQYJKoZIhvcNAQEL\\nBQAwIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdoYXRldmVyMB4XDTIyMDcyOTIx\\nMTgyN1oXDTIzMDcyOTIxMTgyN1owIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdo\\nYXRldmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA55N9DkgFWbJ/\\naqcdQhso7n1kFvt6j/fL1tJBvRubkiFMQJnZFtekfalN6FfRtA3jq+nx8o49e+7t\\nLCKT0xQ+wufXfOnxv6/if6HMhHTiCNPOCeztUgQ2+dfNwRhYYgB1P93wkUVjwudK\\n13qHTTZ6NtEF6EzOqhOCe6zxq6wrr422+ZqCvcggeQ5tW9xSd/8O1vNID/0MTKpy\\nET3drDtBfHmiUEIBR3T3tcy6QsIe4Rz/2sDinAcM3j7sG8uY6drh8jY3PWar9til\\nv2l4qDYSU8Qm5856AB1FVZRLRJkLxZYZNgreShAIYgEd0mcyI2EO/UvKxsIcxsXc\\nd45GhGpKkwIDAQABo1cwVTAfBgNVHQ4EGAQWBBRXBrXKh3p/aFdQjUcT/UcvICBL\\nODAhBgNVHSMEGjAYgBYEFFcGtcqHen9oV1CNRxP9Ry8gIEs4MA8GA1UdEwEB/wQF\\nMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGmCEvcoFUrT9e133SHkgF/ZAgzeIziO\\nBjfAdU4fvAVTVfzaPm0yBnGqzcHyacCzbZjKQpaKVgc5e6IaqAQtf6cZJSCiJGhS\\nJYeosWrj3dahLOUAMrXRr8G/Ybcacoqc+osKaRa2p71cC3V6u2VvcHRV7HDFGJU7\\noijbdB+WhqET6Txe67rxZCJG9Ez3EOejBJBl2PJPpy7m1Ml4RR+E8YHNzB0lcBzc\\nEoiJKlDfKSO14E2CPDonnUoWBJWjEvJys3tbvKzsRj2fnLilytPFU0gH3cEjCopi\\nzFoWRdaRuNHYCqlBmso1JFDl8h4fMmglxGNKnKRar0WeGyxb4xXBGpI=\\n-----END CERTIFICATE-----\\n", # noqa: E501 + "chain": [ + "-----BEGIN CERTIFICATE-----\\nMIIDJTCCAg2gAwIBAgIUMsSK+4FGCjW6sL/EXMSxColmKw8wDQYJKoZIhvcNAQEL\\nBQAwIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdoYXRldmVyMB4XDTIyMDcyOTIx\\nMTgyN1oXDTIzMDcyOTIxMTgyN1owIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdo\\nYXRldmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA55N9DkgFWbJ/\\naqcdQhso7n1kFvt6j/fL1tJBvRubkiFMQJnZFtekfalN6FfRtA3jq+nx8o49e+7t\\nLCKT0xQ+wufXfOnxv6/if6HMhHTiCNPOCeztUgQ2+dfNwRhYYgB1P93wkUVjwudK\\n13qHTTZ6NtEF6EzOqhOCe6zxq6wrr422+ZqCvcggeQ5tW9xSd/8O1vNID/0MTKpy\\nET3drDtBfHmiUEIBR3T3tcy6QsIe4Rz/2sDinAcM3j7sG8uY6drh8jY3PWar9til\\nv2l4qDYSU8Qm5856AB1FVZRLRJkLxZYZNgreShAIYgEd0mcyI2EO/UvKxsIcxsXc\\nd45GhGpKkwIDAQABo1cwVTAfBgNVHQ4EGAQWBBRXBrXKh3p/aFdQjUcT/UcvICBL\\nODAhBgNVHSMEGjAYgBYEFFcGtcqHen9oV1CNRxP9Ry8gIEs4MA8GA1UdEwEB/wQF\\nMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGmCEvcoFUrT9e133SHkgF/ZAgzeIziO\\nBjfAdU4fvAVTVfzaPm0yBnGqzcHyacCzbZjKQpaKVgc5e6IaqAQtf6cZJSCiJGhS\\nJYeosWrj3dahLOUAMrXRr8G/Ybcacoqc+osKaRa2p71cC3V6u2VvcHRV7HDFGJU7\\noijbdB+WhqET6Txe67rxZCJG9Ez3EOejBJBl2PJPpy7m1Ml4RR+E8YHNzB0lcBzc\\nEoiJKlDfKSO14E2CPDonnUoWBJWjEvJys3tbvKzsRj2fnLilytPFU0gH3cEjCopi\\nzFoWRdaRuNHYCqlBmso1JFDl8h4fMmglxGNKnKRar0WeGyxb4xXBGpI=\\n-----END CERTIFICATE-----\\n" # noqa: E501, W505 + ], + "certificate_signing_request": "-----BEGIN CERTIFICATE REQUEST-----\nMIICWjCCAUICAQAwFTETMBEGA1UEAwwKYmFuYW5hLmNvbTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBANWlx9wE6cW7Jkb4DZZDOZoEjk1eDBMJ+8R4pyKp\nFBeHMl1SQSDt6rAWsrfL3KOGiIHqrRY0B5H6c51L8LDuVrJG0bPmyQ6rsBo3gVke\nDSivfSLtGvHtp8lwYnIunF8r858uYmblAR0tdXQNmnQvm+6GERvURQ6sxpgZ7iLC\npPKDoPt+4GKWL10FWf0i82FgxWC2KqRZUtNbgKETQuARLig7etBmCnh20zmynorA\ncY7vrpTPAaeQpGLNqqYvKV9W6yWVY08V+nqARrFrjk3vSioZSu8ZJUdZ4d9++SGl\nbH7A6e77YDkX9i/dQ3Pa/iDtWO3tXS2MvgoxX1iSWlGNOHcCAwEAAaAAMA0GCSqG\nSIb3DQEBCwUAA4IBAQCW1fKcHessy/ZhnIwAtSLznZeZNH8LTVOzkhVd4HA7EJW+\nKVLBx8DnN7L3V2/uPJfHiOg4Rx7fi7LkJPegl3SCqJZ0N5bQS/KvDTCyLG+9E8Y+\n7wqCmWiXaH1devimXZvazilu4IC2dSks2D8DPWHgsOdVks9bme8J3KjdNMQudegc\newWZZ1Dtbd+Rn7cpKU3jURMwm4fRwGxbJ7iT5fkLlPBlyM/yFEik4SmQxFYrZCQg\n0f3v4kBefTh5yclPy5tEH+8G0LMsbbo3dJ5mPKpAShi0QEKDLd7eR1R/712lYTK4\ndi4XaEfqERgy68O4rvb4PGlJeRGS7AmL7Ss8wfAq\n-----END CERTIFICATE REQUEST-----\n", # noqa: E501 + "certificate": "-----BEGIN 
CERTIFICATE-----\nMIICvDCCAaQCFFPAOD7utDTsgFrm0vS4We18OcnKMA0GCSqGSIb3DQEBCwUAMCAx\nCzAJBgNVBAYTAlVTMREwDwYDVQQDDAh3aGF0ZXZlcjAeFw0yMjA3MjkyMTE5Mzha\nFw0yMzA3MjkyMTE5MzhaMBUxEzARBgNVBAMMCmJhbmFuYS5jb20wggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVpcfcBOnFuyZG+A2WQzmaBI5NXgwTCfvE\neKciqRQXhzJdUkEg7eqwFrK3y9yjhoiB6q0WNAeR+nOdS/Cw7layRtGz5skOq7Aa\nN4FZHg0or30i7Rrx7afJcGJyLpxfK/OfLmJm5QEdLXV0DZp0L5vuhhEb1EUOrMaY\nGe4iwqTyg6D7fuBili9dBVn9IvNhYMVgtiqkWVLTW4ChE0LgES4oO3rQZgp4dtM5\nsp6KwHGO766UzwGnkKRizaqmLylfVusllWNPFfp6gEaxa45N70oqGUrvGSVHWeHf\nfvkhpWx+wOnu+2A5F/Yv3UNz2v4g7Vjt7V0tjL4KMV9YklpRjTh3AgMBAAEwDQYJ\nKoZIhvcNAQELBQADggEBAChjRzuba8zjQ7NYBVas89Oy7u++MlS8xWxh++yiUsV6\nWMk3ZemsPtXc1YmXorIQohtxLxzUPm2JhyzFzU/sOLmJQ1E/l+gtZHyRCwsb20fX\nmphuJsMVd7qv/GwEk9PBsk2uDqg4/Wix0Rx5lf95juJP7CPXQJl5FQauf3+LSz0y\nwF/j+4GqvrwsWr9hKOLmPdkyKkR6bHKtzzsxL9PM8GnElk2OpaPMMnzbL/vt2IAt\nxK01ZzPxCQCzVwHo5IJO5NR/fIyFbEPhxzG17QsRDOBR9fl9cOIvDeSO04vyZ+nz\n+kA2c3fNrZFAtpIlOOmFh8Q12rVL4sAjI5mVWnNEgvI=\n-----END CERTIFICATE-----\n", # noqa: E501 + } + ] + }, + { + "certificates": [ + { + "ca": "-----BEGIN CERTIFICATE-----\\nMIIDJTCCAg2gAwIBAgIUMsSK+4FGCjW6sL/EXMSxColmKw8wDQYJKoZIhvcNAQEL\\nBQAwIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdoYXRldmVyMB4XDTIyMDcyOTIx\\nMTgyN1oXDTIzMDcyOTIxMTgyN1owIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdo\\nYXRldmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA55N9DkgFWbJ/\\naqcdQhso7n1kFvt6j/fL1tJBvRubkiFMQJnZFtekfalN6FfRtA3jq+nx8o49e+7t\\nLCKT0xQ+wufXfOnxv6/if6HMhHTiCNPOCeztUgQ2+dfNwRhYYgB1P93wkUVjwudK\\n13qHTTZ6NtEF6EzOqhOCe6zxq6wrr422+ZqCvcggeQ5tW9xSd/8O1vNID/0MTKpy\\nET3drDtBfHmiUEIBR3T3tcy6QsIe4Rz/2sDinAcM3j7sG8uY6drh8jY3PWar9til\\nv2l4qDYSU8Qm5856AB1FVZRLRJkLxZYZNgreShAIYgEd0mcyI2EO/UvKxsIcxsXc\\nd45GhGpKkwIDAQABo1cwVTAfBgNVHQ4EGAQWBBRXBrXKh3p/aFdQjUcT/UcvICBL\\nODAhBgNVHSMEGjAYgBYEFFcGtcqHen9oV1CNRxP9Ry8gIEs4MA8GA1UdEwEB/wQF\\nMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGmCEvcoFUrT9e133SHkgF/ZAgzeIziO\\nBjfAdU4fvAVTVfzaPm0yBnGqzcHyacCzbZjKQpaKVgc5e6IaqAQtf6cZJSCiJGhS\\nJYeosWrj3dahLOUAMrXRr8G/Ybcacoqc+osKaRa2p71cC3V6u2VvcHRV7HDFGJU7\\noijbdB+WhqET6Txe67rxZCJG9Ez3EOejBJBl2PJPpy7m1Ml4RR+E8YHNzB0lcBzc\\nEoiJKlDfKSO14E2CPDonnUoWBJWjEvJys3tbvKzsRj2fnLilytPFU0gH3cEjCopi\\nzFoWRdaRuNHYCqlBmso1JFDl8h4fMmglxGNKnKRar0WeGyxb4xXBGpI=\\n-----END CERTIFICATE-----\\n", # noqa: E501 + "chain": [ + "-----BEGIN CERTIFICATE-----\\nMIIDJTCCAg2gAwIBAgIUMsSK+4FGCjW6sL/EXMSxColmKw8wDQYJKoZIhvcNAQEL\\nBQAwIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdoYXRldmVyMB4XDTIyMDcyOTIx\\nMTgyN1oXDTIzMDcyOTIxMTgyN1owIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdo\\nYXRldmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA55N9DkgFWbJ/\\naqcdQhso7n1kFvt6j/fL1tJBvRubkiFMQJnZFtekfalN6FfRtA3jq+nx8o49e+7t\\nLCKT0xQ+wufXfOnxv6/if6HMhHTiCNPOCeztUgQ2+dfNwRhYYgB1P93wkUVjwudK\\n13qHTTZ6NtEF6EzOqhOCe6zxq6wrr422+ZqCvcggeQ5tW9xSd/8O1vNID/0MTKpy\\nET3drDtBfHmiUEIBR3T3tcy6QsIe4Rz/2sDinAcM3j7sG8uY6drh8jY3PWar9til\\nv2l4qDYSU8Qm5856AB1FVZRLRJkLxZYZNgreShAIYgEd0mcyI2EO/UvKxsIcxsXc\\nd45GhGpKkwIDAQABo1cwVTAfBgNVHQ4EGAQWBBRXBrXKh3p/aFdQjUcT/UcvICBL\\nODAhBgNVHSMEGjAYgBYEFFcGtcqHen9oV1CNRxP9Ry8gIEs4MA8GA1UdEwEB/wQF\\nMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGmCEvcoFUrT9e133SHkgF/ZAgzeIziO\\nBjfAdU4fvAVTVfzaPm0yBnGqzcHyacCzbZjKQpaKVgc5e6IaqAQtf6cZJSCiJGhS\\nJYeosWrj3dahLOUAMrXRr8G/Ybcacoqc+osKaRa2p71cC3V6u2VvcHRV7HDFGJU7\\noijbdB+WhqET6Txe67rxZCJG9Ez3EOejBJBl2PJPpy7m1Ml4RR+E8YHNzB0lcBzc\\nEoiJKlDfKSO14E2CPDonnUoWBJWjEvJys3tbvKzsRj2fnLilytPFU0gH3cEjCopi\\nzFoWRdaRuNHYCqlBmso1JFDl8h4fMmglxGNKnKRar0WeGyxb4xXBGpI=\\n-----END CERTIFICATE-----\\n" # noqa: E501, W505 + ], + "certificate_signing_request": 
"-----BEGIN CERTIFICATE REQUEST-----\nMIICWjCCAUICAQAwFTETMBEGA1UEAwwKYmFuYW5hLmNvbTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBANWlx9wE6cW7Jkb4DZZDOZoEjk1eDBMJ+8R4pyKp\nFBeHMl1SQSDt6rAWsrfL3KOGiIHqrRY0B5H6c51L8LDuVrJG0bPmyQ6rsBo3gVke\nDSivfSLtGvHtp8lwYnIunF8r858uYmblAR0tdXQNmnQvm+6GERvURQ6sxpgZ7iLC\npPKDoPt+4GKWL10FWf0i82FgxWC2KqRZUtNbgKETQuARLig7etBmCnh20zmynorA\ncY7vrpTPAaeQpGLNqqYvKV9W6yWVY08V+nqARrFrjk3vSioZSu8ZJUdZ4d9++SGl\nbH7A6e77YDkX9i/dQ3Pa/iDtWO3tXS2MvgoxX1iSWlGNOHcCAwEAAaAAMA0GCSqG\nSIb3DQEBCwUAA4IBAQCW1fKcHessy/ZhnIwAtSLznZeZNH8LTVOzkhVd4HA7EJW+\nKVLBx8DnN7L3V2/uPJfHiOg4Rx7fi7LkJPegl3SCqJZ0N5bQS/KvDTCyLG+9E8Y+\n7wqCmWiXaH1devimXZvazilu4IC2dSks2D8DPWHgsOdVks9bme8J3KjdNMQudegc\newWZZ1Dtbd+Rn7cpKU3jURMwm4fRwGxbJ7iT5fkLlPBlyM/yFEik4SmQxFYrZCQg\n0f3v4kBefTh5yclPy5tEH+8G0LMsbbo3dJ5mPKpAShi0QEKDLd7eR1R/712lYTK4\ndi4XaEfqERgy68O4rvb4PGlJeRGS7AmL7Ss8wfAq\n-----END CERTIFICATE REQUEST-----\n", # noqa: E501 + "certificate": "-----BEGIN CERTIFICATE-----\nMIICvDCCAaQCFFPAOD7utDTsgFrm0vS4We18OcnKMA0GCSqGSIb3DQEBCwUAMCAx\nCzAJBgNVBAYTAlVTMREwDwYDVQQDDAh3aGF0ZXZlcjAeFw0yMjA3MjkyMTE5Mzha\nFw0yMzA3MjkyMTE5MzhaMBUxEzARBgNVBAMMCmJhbmFuYS5jb20wggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVpcfcBOnFuyZG+A2WQzmaBI5NXgwTCfvE\neKciqRQXhzJdUkEg7eqwFrK3y9yjhoiB6q0WNAeR+nOdS/Cw7layRtGz5skOq7Aa\nN4FZHg0or30i7Rrx7afJcGJyLpxfK/OfLmJm5QEdLXV0DZp0L5vuhhEb1EUOrMaY\nGe4iwqTyg6D7fuBili9dBVn9IvNhYMVgtiqkWVLTW4ChE0LgES4oO3rQZgp4dtM5\nsp6KwHGO766UzwGnkKRizaqmLylfVusllWNPFfp6gEaxa45N70oqGUrvGSVHWeHf\nfvkhpWx+wOnu+2A5F/Yv3UNz2v4g7Vjt7V0tjL4KMV9YklpRjTh3AgMBAAEwDQYJ\nKoZIhvcNAQELBQADggEBAChjRzuba8zjQ7NYBVas89Oy7u++MlS8xWxh++yiUsV6\nWMk3ZemsPtXc1YmXorIQohtxLxzUPm2JhyzFzU/sOLmJQ1E/l+gtZHyRCwsb20fX\nmphuJsMVd7qv/GwEk9PBsk2uDqg4/Wix0Rx5lf95juJP7CPXQJl5FQauf3+LSz0y\nwF/j+4GqvrwsWr9hKOLmPdkyKkR6bHKtzzsxL9PM8GnElk2OpaPMMnzbL/vt2IAt\nxK01ZzPxCQCzVwHo5IJO5NR/fIyFbEPhxzG17QsRDOBR9fl9cOIvDeSO04vyZ+nz\n+kA2c3fNrZFAtpIlOOmFh8Q12rVL4sAjI5mVWnNEgvI=\n-----END CERTIFICATE-----\n", # noqa: E501 + "revoked": True, + } + ] + }, + ], + "properties": { + "certificates": { + "$id": "#/properties/certificates", + "type": "array", + "items": { + "$id": "#/properties/certificates/items", + "type": "object", + "required": ["certificate_signing_request", "certificate", "ca", "chain"], + "properties": { + "certificate_signing_request": { + "$id": "#/properties/certificates/items/certificate_signing_request", + "type": "string", + }, + "certificate": { + "$id": "#/properties/certificates/items/certificate", + "type": "string", + }, + "ca": {"$id": "#/properties/certificates/items/ca", "type": "string"}, + "chain": { + "$id": "#/properties/certificates/items/chain", + "type": "array", + "items": { + "type": "string", + "$id": "#/properties/certificates/items/chain/items", + }, + }, + "revoked": { + "$id": "#/properties/certificates/items/revoked", + "type": "boolean", + }, + }, + "additionalProperties": True, + }, + } + }, + "required": ["certificates"], + "additionalProperties": True, +} + + +logger = logging.getLogger(__name__) + + +@dataclass +class RequirerCSR: + """This class represents a certificate signing request from an interface Requirer.""" + + relation_id: int + application_name: str + unit_name: str + csr: str + is_ca: bool + + +@dataclass +class ProviderCertificate: + """This class represents a certificate from an interface Provider.""" + + relation_id: int + application_name: str + csr: str + certificate: str + ca: str + chain: List[str] + revoked: bool + expiry_time: datetime + expiry_notification_time: Optional[datetime] = None + + 
def chain_as_pem(self) -> str: + """Return full certificate chain as a PEM string.""" + return "\n\n".join(reversed(self.chain)) + + def to_json(self) -> str: + """Return the object as a JSON string. + + Returns: + str: JSON representation of the object + """ + return json.dumps( + { + "relation_id": self.relation_id, + "application_name": self.application_name, + "csr": self.csr, + "certificate": self.certificate, + "ca": self.ca, + "chain": self.chain, + "revoked": self.revoked, + "expiry_time": self.expiry_time.isoformat(), + "expiry_notification_time": self.expiry_notification_time.isoformat() + if self.expiry_notification_time + else None, + } + ) + + +class CertificateAvailableEvent(EventBase): + """Charm Event triggered when a TLS certificate is available.""" + + def __init__( + self, + handle: Handle, + certificate: str, + certificate_signing_request: str, + ca: str, + chain: List[str], + ): + super().__init__(handle) + self.certificate = certificate + self.certificate_signing_request = certificate_signing_request + self.ca = ca + self.chain = chain + + def snapshot(self) -> dict: + """Return snapshot.""" + return { + "certificate": self.certificate, + "certificate_signing_request": self.certificate_signing_request, + "ca": self.ca, + "chain": self.chain, + } + + def restore(self, snapshot: dict): + """Restore snapshot.""" + self.certificate = snapshot["certificate"] + self.certificate_signing_request = snapshot["certificate_signing_request"] + self.ca = snapshot["ca"] + self.chain = snapshot["chain"] + + def chain_as_pem(self) -> str: + """Return full certificate chain as a PEM string.""" + return "\n\n".join(reversed(self.chain)) + + +class CertificateExpiringEvent(EventBase): + """Charm Event triggered when a TLS certificate is almost expired.""" + + def __init__(self, handle, certificate: str, expiry: str): + """CertificateExpiringEvent. + + Args: + handle (Handle): Juju framework handle + certificate (str): TLS Certificate + expiry (str): Datetime string representing the time at which the certificate + won't be valid anymore. 
+ """ + super().__init__(handle) + self.certificate = certificate + self.expiry = expiry + + def snapshot(self) -> dict: + """Return snapshot.""" + return {"certificate": self.certificate, "expiry": self.expiry} + + def restore(self, snapshot: dict): + """Restore snapshot.""" + self.certificate = snapshot["certificate"] + self.expiry = snapshot["expiry"] + + +class CertificateInvalidatedEvent(EventBase): + """Charm Event triggered when a TLS certificate is invalidated.""" + + def __init__( + self, + handle: Handle, + reason: Literal["expired", "revoked"], + certificate: str, + certificate_signing_request: str, + ca: str, + chain: List[str], + ): + super().__init__(handle) + self.reason = reason + self.certificate_signing_request = certificate_signing_request + self.certificate = certificate + self.ca = ca + self.chain = chain + + def snapshot(self) -> dict: + """Return snapshot.""" + return { + "reason": self.reason, + "certificate_signing_request": self.certificate_signing_request, + "certificate": self.certificate, + "ca": self.ca, + "chain": self.chain, + } + + def restore(self, snapshot: dict): + """Restore snapshot.""" + self.reason = snapshot["reason"] + self.certificate_signing_request = snapshot["certificate_signing_request"] + self.certificate = snapshot["certificate"] + self.ca = snapshot["ca"] + self.chain = snapshot["chain"] + + +class AllCertificatesInvalidatedEvent(EventBase): + """Charm Event triggered when all TLS certificates are invalidated.""" + + def __init__(self, handle: Handle): + super().__init__(handle) + + def snapshot(self) -> dict: + """Return snapshot.""" + return {} + + def restore(self, snapshot: dict): + """Restore snapshot.""" + pass + + +class CertificateCreationRequestEvent(EventBase): + """Charm Event triggered when a TLS certificate is required.""" + + def __init__( + self, + handle: Handle, + certificate_signing_request: str, + relation_id: int, + is_ca: bool = False, + ): + super().__init__(handle) + self.certificate_signing_request = certificate_signing_request + self.relation_id = relation_id + self.is_ca = is_ca + + def snapshot(self) -> dict: + """Return snapshot.""" + return { + "certificate_signing_request": self.certificate_signing_request, + "relation_id": self.relation_id, + "is_ca": self.is_ca, + } + + def restore(self, snapshot: dict): + """Restore snapshot.""" + self.certificate_signing_request = snapshot["certificate_signing_request"] + self.relation_id = snapshot["relation_id"] + self.is_ca = snapshot["is_ca"] + + +class CertificateRevocationRequestEvent(EventBase): + """Charm Event triggered when a TLS certificate needs to be revoked.""" + + def __init__( + self, + handle: Handle, + certificate: str, + certificate_signing_request: str, + ca: str, + chain: str, + ): + super().__init__(handle) + self.certificate = certificate + self.certificate_signing_request = certificate_signing_request + self.ca = ca + self.chain = chain + + def snapshot(self) -> dict: + """Return snapshot.""" + return { + "certificate": self.certificate, + "certificate_signing_request": self.certificate_signing_request, + "ca": self.ca, + "chain": self.chain, + } + + def restore(self, snapshot: dict): + """Restore snapshot.""" + self.certificate = snapshot["certificate"] + self.certificate_signing_request = snapshot["certificate_signing_request"] + self.ca = snapshot["ca"] + self.chain = snapshot["chain"] + + +def _load_relation_data(relation_data_content: RelationDataContent) -> dict: + """Load relation data from the relation data bag. + + Json loads all data. 
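+
+    For example (a sketch), ``{"chain": '["a", "b"]', "kind": "x"}`` becomes
+    ``{"chain": ["a", "b"], "kind": "x"}``: values that parse as JSON are decoded,
+    everything else is kept as the raw string.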
+
+    Args:
+        relation_data_content: Relation data from the databag
+
+    Returns:
+        dict: Relation data in dict format.
+    """
+    certificate_data = {}
+    try:
+        for key in relation_data_content:
+            try:
+                certificate_data[key] = json.loads(relation_data_content[key])
+            except (json.decoder.JSONDecodeError, TypeError):
+                certificate_data[key] = relation_data_content[key]
+    except ModelError:
+        pass
+    return certificate_data
+
+
+def _get_closest_future_time(
+    expiry_notification_time: datetime, expiry_time: datetime
+) -> datetime:
+    """Return expiry_notification_time if not in the past, otherwise return expiry_time.
+
+    Args:
+        expiry_notification_time (datetime): Notification time of impending expiration
+        expiry_time (datetime): Expiration time
+
+    Returns:
+        datetime: expiry_notification_time if not in the past, expiry_time otherwise
+    """
+    return (
+        expiry_notification_time
+        if datetime.now(timezone.utc) < expiry_notification_time
+        else expiry_time
+    )
+
+
+def calculate_expiry_notification_time(
+    validity_start_time: datetime,
+    expiry_time: datetime,
+    provider_recommended_notification_time: Optional[int],
+    requirer_recommended_notification_time: Optional[int],
+) -> datetime:
+    """Calculate a reasonable time to notify the user about the certificate expiry.
+
+    It takes into account the time recommended by the provider and by the requirer.
+    Time recommended by the provider is preferred,
+    then time recommended by the requirer,
+    then dynamically calculated time.
+
+    Args:
+        validity_start_time: Certificate validity time
+        expiry_time: Certificate expiry time
+        provider_recommended_notification_time:
+            Time in hours prior to expiry to notify the user.
+            Recommended by the provider.
+        requirer_recommended_notification_time:
+            Time in hours prior to expiry to notify the user.
+            Recommended by the requirer.
+
+    Returns:
+        datetime: Time to notify the user about the certificate expiry.
+    """
+    if provider_recommended_notification_time is not None:
+        provider_recommended_notification_time = abs(provider_recommended_notification_time)
+        provider_recommendation_time_delta = expiry_time - timedelta(
+            hours=provider_recommended_notification_time
+        )
+        if validity_start_time < provider_recommendation_time_delta:
+            return provider_recommendation_time_delta
+
+    if requirer_recommended_notification_time is not None:
+        requirer_recommended_notification_time = abs(requirer_recommended_notification_time)
+        requirer_recommendation_time_delta = expiry_time - timedelta(
+            hours=requirer_recommended_notification_time
+        )
+        if validity_start_time < requirer_recommendation_time_delta:
+            return requirer_recommendation_time_delta
+    # Fall back to notifying one third of the validity period before expiry.
+    calculated_hours = (expiry_time - validity_start_time).total_seconds() / (3600 * 3)
+    return expiry_time - timedelta(hours=calculated_hours)
+
+
+def generate_ca(
+    private_key: bytes,
+    subject: str,
+    private_key_password: Optional[bytes] = None,
+    validity: int = 365,
+    country: str = "US",
+) -> bytes:
+    """Generate a CA Certificate.
+
+    Args:
+        private_key (bytes): Private key
+        subject (str): Common Name that can be an IP or a Fully Qualified Domain Name (FQDN).
+        private_key_password (bytes): Private key password
+        validity (int): Certificate validity time (in days)
+        country (str): Certificate Issuing country
+
+    Returns:
+        bytes: CA Certificate.
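+
+    Example (a sketch with an arbitrary subject):
+
+    ```python
+    ca_key = generate_private_key()  # helper defined elsewhere in this library
+    ca_pem = generate_ca(private_key=ca_key, subject="example.internal")
+    ```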
+ """ + private_key_object = serialization.load_pem_private_key( + private_key, password=private_key_password + ) + subject_name = x509.Name( + [ + x509.NameAttribute(x509.NameOID.COUNTRY_NAME, country), + x509.NameAttribute(x509.NameOID.COMMON_NAME, subject), + ] + ) + subject_identifier_object = x509.SubjectKeyIdentifier.from_public_key( + private_key_object.public_key() # type: ignore[arg-type] + ) + subject_identifier = key_identifier = subject_identifier_object.public_bytes() + key_usage = x509.KeyUsage( + digital_signature=True, + key_encipherment=True, + key_cert_sign=True, + key_agreement=False, + content_commitment=False, + data_encipherment=False, + crl_sign=False, + encipher_only=False, + decipher_only=False, + ) + cert = ( + x509.CertificateBuilder() + .subject_name(subject_name) + .issuer_name(subject_name) + .public_key(private_key_object.public_key()) # type: ignore[arg-type] + .serial_number(x509.random_serial_number()) + .not_valid_before(datetime.now(timezone.utc)) + .not_valid_after(datetime.now(timezone.utc) + timedelta(days=validity)) + .add_extension(x509.SubjectKeyIdentifier(digest=subject_identifier), critical=False) + .add_extension( + x509.AuthorityKeyIdentifier( + key_identifier=key_identifier, + authority_cert_issuer=None, + authority_cert_serial_number=None, + ), + critical=False, + ) + .add_extension(key_usage, critical=True) + .add_extension( + x509.BasicConstraints(ca=True, path_length=None), + critical=True, + ) + .sign(private_key_object, hashes.SHA256()) # type: ignore[arg-type] + ) + return cert.public_bytes(serialization.Encoding.PEM) + + +def get_certificate_extensions( + authority_key_identifier: bytes, + csr: x509.CertificateSigningRequest, + alt_names: Optional[List[str]], + is_ca: bool, +) -> List[x509.Extension]: + """Generate a list of certificate extensions from a CSR and other known information. 
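+
+    SANs passed via alt_names are merged with any SANs already present in the
+    CSR, and CSR extensions whose OID is already managed by the provider are
+    skipped with a warning.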
+ + Args: + authority_key_identifier (bytes): Authority key identifier + csr (x509.CertificateSigningRequest): CSR + alt_names (list): List of alt names to put on cert - prefer putting SANs in CSR + is_ca (bool): Whether the certificate is a CA certificate + + Returns: + List[x509.Extension]: List of extensions + """ + cert_extensions_list: List[x509.Extension] = [ + x509.Extension( + oid=ExtensionOID.AUTHORITY_KEY_IDENTIFIER, + value=x509.AuthorityKeyIdentifier( + key_identifier=authority_key_identifier, + authority_cert_issuer=None, + authority_cert_serial_number=None, + ), + critical=False, + ), + x509.Extension( + oid=ExtensionOID.SUBJECT_KEY_IDENTIFIER, + value=x509.SubjectKeyIdentifier.from_public_key(csr.public_key()), + critical=False, + ), + x509.Extension( + oid=ExtensionOID.BASIC_CONSTRAINTS, + critical=True, + value=x509.BasicConstraints(ca=is_ca, path_length=None), + ), + ] + + sans: List[x509.GeneralName] = [] + san_alt_names = [x509.DNSName(name) for name in alt_names] if alt_names else [] + sans.extend(san_alt_names) + try: + loaded_san_ext = csr.extensions.get_extension_for_class(x509.SubjectAlternativeName) + sans.extend( + [x509.DNSName(name) for name in loaded_san_ext.value.get_values_for_type(x509.DNSName)] + ) + sans.extend( + [x509.IPAddress(ip) for ip in loaded_san_ext.value.get_values_for_type(x509.IPAddress)] + ) + sans.extend( + [ + x509.RegisteredID(oid) + for oid in loaded_san_ext.value.get_values_for_type(x509.RegisteredID) + ] + ) + except x509.ExtensionNotFound: + pass + + if sans: + cert_extensions_list.append( + x509.Extension( + oid=ExtensionOID.SUBJECT_ALTERNATIVE_NAME, + critical=False, + value=x509.SubjectAlternativeName(sans), + ) + ) + + if is_ca: + cert_extensions_list.append( + x509.Extension( + ExtensionOID.KEY_USAGE, + critical=True, + value=x509.KeyUsage( + digital_signature=False, + content_commitment=False, + key_encipherment=False, + data_encipherment=False, + key_agreement=False, + key_cert_sign=True, + crl_sign=True, + encipher_only=False, + decipher_only=False, + ), + ) + ) + + existing_oids = {ext.oid for ext in cert_extensions_list} + for extension in csr.extensions: + if extension.oid == ExtensionOID.SUBJECT_ALTERNATIVE_NAME: + continue + if extension.oid in existing_oids: + logger.warning("Extension %s is managed by the TLS provider, ignoring.", extension.oid) + continue + cert_extensions_list.append(extension) + + return cert_extensions_list + + +def generate_certificate( + csr: bytes, + ca: bytes, + ca_key: bytes, + ca_key_password: Optional[bytes] = None, + validity: int = 365, + alt_names: Optional[List[str]] = None, + is_ca: bool = False, +) -> bytes: + """Generate a TLS certificate based on a CSR. 
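+
+    Example (an illustrative sketch; ca_cert and ca_key are assumed to come
+    from generate_ca and generate_private_key above)::
+
+        csr = generate_csr(private_key=generate_private_key(), subject="unit-0")
+        cert = generate_certificate(csr=csr, ca=ca_cert, ca_key=ca_key)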
+
+    Args:
+        csr (bytes): CSR
+        ca (bytes): CA Certificate
+        ca_key (bytes): CA private key
+        ca_key_password: CA private key password
+        validity (int): Certificate validity (in days)
+        alt_names (list): List of alt names to put on cert - prefer putting SANs in CSR
+        is_ca (bool): Whether the certificate is a CA certificate
+
+    Returns:
+        bytes: Certificate
+    """
+    csr_object = x509.load_pem_x509_csr(csr)
+    subject = csr_object.subject
+    ca_pem = x509.load_pem_x509_certificate(ca)
+    issuer = ca_pem.issuer
+    private_key = serialization.load_pem_private_key(ca_key, password=ca_key_password)
+
+    certificate_builder = (
+        x509.CertificateBuilder()
+        .subject_name(subject)
+        .issuer_name(issuer)
+        .public_key(csr_object.public_key())
+        .serial_number(x509.random_serial_number())
+        .not_valid_before(datetime.now(timezone.utc))
+        .not_valid_after(datetime.now(timezone.utc) + timedelta(days=validity))
+    )
+    extensions = get_certificate_extensions(
+        authority_key_identifier=ca_pem.extensions.get_extension_for_class(
+            x509.SubjectKeyIdentifier
+        ).value.key_identifier,
+        csr=csr_object,
+        alt_names=alt_names,
+        is_ca=is_ca,
+    )
+    for extension in extensions:
+        try:
+            certificate_builder = certificate_builder.add_extension(
+                extval=extension.value,
+                critical=extension.critical,
+            )
+        except ValueError as e:
+            logger.warning("Failed to add extension %s: %s", extension.oid, e)
+
+    cert = certificate_builder.sign(private_key, hashes.SHA256())  # type: ignore[arg-type]
+    return cert.public_bytes(serialization.Encoding.PEM)
+
+
+def generate_private_key(
+    password: Optional[bytes] = None,
+    key_size: int = 2048,
+    public_exponent: int = 65537,
+) -> bytes:
+    """Generate a private key.
+
+    Args:
+        password (bytes): Password used to encrypt the generated private key
+        key_size (int): Key size in bits
+        public_exponent: Public exponent.
+
+    Returns:
+        bytes: Private Key
+    """
+    private_key = rsa.generate_private_key(
+        public_exponent=public_exponent,
+        key_size=key_size,
+    )
+    key_bytes = private_key.private_bytes(
+        encoding=serialization.Encoding.PEM,
+        format=serialization.PrivateFormat.TraditionalOpenSSL,
+        encryption_algorithm=(
+            serialization.BestAvailableEncryption(password)
+            if password
+            else serialization.NoEncryption()
+        ),
+    )
+    return key_bytes
+
+
+def generate_csr(  # noqa: C901
+    private_key: bytes,
+    subject: str,
+    add_unique_id_to_subject_name: bool = True,
+    organization: Optional[str] = None,
+    email_address: Optional[str] = None,
+    country_name: Optional[str] = None,
+    state_or_province_name: Optional[str] = None,
+    locality_name: Optional[str] = None,
+    private_key_password: Optional[bytes] = None,
+    sans: Optional[List[str]] = None,
+    sans_oid: Optional[List[str]] = None,
+    sans_ip: Optional[List[str]] = None,
+    sans_dns: Optional[List[str]] = None,
+    additional_critical_extensions: Optional[List] = None,
+) -> bytes:
+    """Generate a CSR using private key and subject.
+
+    Args:
+        private_key (bytes): Private key
+        subject (str): CSR Common Name that can be an IP or a Fully Qualified Domain Name (FQDN).
+        add_unique_id_to_subject_name (bool): Whether a unique ID must be added to the CSR's
+            subject name. Always leave to "True" when the CSR is used to request certificates
+            using the tls-certificates relation.
+        organization (str): Name of organization.
+        email_address (str): Email address.
+        country_name (str): Country Name.
+        state_or_province_name (str): State or Province Name.
+        locality_name (str): Locality Name.
+        private_key_password (bytes): Private key password
+        sans (list): List of DNS subject alternative names. Deprecated: use sans_dns
+            instead (kept for now for backward compatibility).
+        sans_oid (list): List of registered ID SANs
+        sans_dns (list): List of DNS subject alternative names (similar to the arg: sans)
+        sans_ip (list): List of IP subject alternative names
+        additional_critical_extensions (list): List of critical additional extension objects.
+            Object must be a x509 ExtensionType.
+
+    Returns:
+        bytes: CSR
+    """
+    signing_key = serialization.load_pem_private_key(private_key, password=private_key_password)
+    subject_name = [x509.NameAttribute(x509.NameOID.COMMON_NAME, subject)]
+    if add_unique_id_to_subject_name:
+        unique_identifier = uuid.uuid4()
+        subject_name.append(
+            x509.NameAttribute(x509.NameOID.X500_UNIQUE_IDENTIFIER, str(unique_identifier))
+        )
+    if organization:
+        subject_name.append(x509.NameAttribute(x509.NameOID.ORGANIZATION_NAME, organization))
+    if email_address:
+        subject_name.append(x509.NameAttribute(x509.NameOID.EMAIL_ADDRESS, email_address))
+    if country_name:
+        subject_name.append(x509.NameAttribute(x509.NameOID.COUNTRY_NAME, country_name))
+    if state_or_province_name:
+        subject_name.append(
+            x509.NameAttribute(x509.NameOID.STATE_OR_PROVINCE_NAME, state_or_province_name)
+        )
+    if locality_name:
+        subject_name.append(x509.NameAttribute(x509.NameOID.LOCALITY_NAME, locality_name))
+    csr = x509.CertificateSigningRequestBuilder(subject_name=x509.Name(subject_name))
+
+    _sans: List[x509.GeneralName] = []
+    if sans_oid:
+        _sans.extend([x509.RegisteredID(x509.ObjectIdentifier(san)) for san in sans_oid])
+    if sans_ip:
+        _sans.extend([x509.IPAddress(ipaddress.ip_address(san)) for san in sans_ip])
+    if sans:
+        _sans.extend([x509.DNSName(san) for san in sans])
+    if sans_dns:
+        _sans.extend([x509.DNSName(san) for san in sans_dns])
+    if _sans:
+        csr = csr.add_extension(x509.SubjectAlternativeName(set(_sans)), critical=False)
+
+    if additional_critical_extensions:
+        for extension in additional_critical_extensions:
+            csr = csr.add_extension(extension, critical=True)
+
+    signed_csr = csr.sign(signing_key, hashes.SHA256())  # type: ignore[arg-type]
+    return signed_csr.public_bytes(serialization.Encoding.PEM)
+
+
+def get_sha256_hex(data: str) -> str:
+    """Calculate the hash of the provided data and return the hexadecimal representation."""
+    digest = hashes.Hash(hashes.SHA256())
+    digest.update(data.encode())
+    return digest.finalize().hex()
+
+
+def csr_matches_certificate(csr: str, cert: str) -> bool:
+    """Check if a CSR matches a certificate.
+
+    Args:
+        csr (str): Certificate Signing Request as a string
+        cert (str): Certificate as a string
+
+    Returns:
+        bool: True/False depending on whether the CSR matches the certificate.
+    """
+    csr_object = x509.load_pem_x509_csr(csr.encode("utf-8"))
+    cert_object = x509.load_pem_x509_certificate(cert.encode("utf-8"))
+
+    if csr_object.public_key().public_bytes(
+        encoding=serialization.Encoding.PEM,
+        format=serialization.PublicFormat.SubjectPublicKeyInfo,
+    ) != cert_object.public_key().public_bytes(
+        encoding=serialization.Encoding.PEM,
+        format=serialization.PublicFormat.SubjectPublicKeyInfo,
+    ):
+        return False
+    return True
+
+
+def _relation_data_is_valid(
+    relation: Relation, app_or_unit: Union[Application, Unit], json_schema: dict
+) -> bool:
+    """Check whether relation data is valid based on json schema.
+ + Args: + relation (Relation): Relation object + app_or_unit (Union[Application, Unit]): Application or unit object + json_schema (dict): Json schema + + Returns: + bool: Whether relation data is valid. + """ + relation_data = _load_relation_data(relation.data[app_or_unit]) + try: + validate(instance=relation_data, schema=json_schema) + return True + except exceptions.ValidationError: + return False + + +class CertificatesProviderCharmEvents(CharmEvents): + """List of events that the TLS Certificates provider charm can leverage.""" + + certificate_creation_request = EventSource(CertificateCreationRequestEvent) + certificate_revocation_request = EventSource(CertificateRevocationRequestEvent) + + +class CertificatesRequirerCharmEvents(CharmEvents): + """List of events that the TLS Certificates requirer charm can leverage.""" + + certificate_available = EventSource(CertificateAvailableEvent) + certificate_expiring = EventSource(CertificateExpiringEvent) + certificate_invalidated = EventSource(CertificateInvalidatedEvent) + all_certificates_invalidated = EventSource(AllCertificatesInvalidatedEvent) + + +class TLSCertificatesProvidesV3(Object): + """TLS certificates provider class to be instantiated by TLS certificates providers.""" + + on = CertificatesProviderCharmEvents() # type: ignore[reportAssignmentType] + + def __init__(self, charm: CharmBase, relationship_name: str): + super().__init__(charm, relationship_name) + self.framework.observe( + charm.on[relationship_name].relation_changed, self._on_relation_changed + ) + self.charm = charm + self.relationship_name = relationship_name + + def _load_app_relation_data(self, relation: Relation) -> dict: + """Load relation data from the application relation data bag. + + Json loads all data. + + Args: + relation: Relation data from the application databag + + Returns: + dict: Relation data in dict format. + """ + # If unit is not leader, it does not try to reach relation data. + if not self.model.unit.is_leader(): + return {} + return _load_relation_data(relation.data[self.charm.app]) + + def _add_certificate( + self, + relation_id: int, + certificate: str, + certificate_signing_request: str, + ca: str, + chain: List[str], + recommended_expiry_notification_time: Optional[int] = None, + ) -> None: + """Add certificate to relation data. + + Args: + relation_id (int): Relation id + certificate (str): Certificate + certificate_signing_request (str): Certificate Signing Request + ca (str): CA Certificate + chain (list): CA Chain + recommended_expiry_notification_time (int): + Time in hours before the certificate expires to notify the user. 
+ + Returns: + None + """ + relation = self.model.get_relation( + relation_name=self.relationship_name, relation_id=relation_id + ) + if not relation: + raise RuntimeError( + f"Relation {self.relationship_name} does not exist - " + f"The certificate request can't be completed" + ) + new_certificate = { + "certificate": certificate, + "certificate_signing_request": certificate_signing_request, + "ca": ca, + "chain": chain, + "recommended_expiry_notification_time": recommended_expiry_notification_time, + } + provider_relation_data = self._load_app_relation_data(relation) + provider_certificates = provider_relation_data.get("certificates", []) + certificates = copy.deepcopy(provider_certificates) + if new_certificate in certificates: + logger.info("Certificate already in relation data - Doing nothing") + return + certificates.append(new_certificate) + relation.data[self.model.app]["certificates"] = json.dumps(certificates) + + def _remove_certificate( + self, + relation_id: int, + certificate: Optional[str] = None, + certificate_signing_request: Optional[str] = None, + ) -> None: + """Remove certificate from a given relation based on user provided certificate or csr. + + Args: + relation_id (int): Relation id + certificate (str): Certificate (optional) + certificate_signing_request: Certificate signing request (optional) + + Returns: + None + """ + relation = self.model.get_relation( + relation_name=self.relationship_name, + relation_id=relation_id, + ) + if not relation: + raise RuntimeError( + f"Relation {self.relationship_name} with relation id {relation_id} does not exist" + ) + provider_relation_data = self._load_app_relation_data(relation) + provider_certificates = provider_relation_data.get("certificates", []) + certificates = copy.deepcopy(provider_certificates) + for certificate_dict in certificates: + if certificate and certificate_dict["certificate"] == certificate: + certificates.remove(certificate_dict) + if ( + certificate_signing_request + and certificate_dict["certificate_signing_request"] == certificate_signing_request + ): + certificates.remove(certificate_dict) + relation.data[self.model.app]["certificates"] = json.dumps(certificates) + + def revoke_all_certificates(self) -> None: + """Revoke all certificates of this provider. + + This method is meant to be used when the Root CA has changed. + """ + for relation in self.model.relations[self.relationship_name]: + provider_relation_data = self._load_app_relation_data(relation) + provider_certificates = copy.deepcopy(provider_relation_data.get("certificates", [])) + for certificate in provider_certificates: + certificate["revoked"] = True + relation.data[self.model.app]["certificates"] = json.dumps(provider_certificates) + + def set_relation_certificate( + self, + certificate: str, + certificate_signing_request: str, + ca: str, + chain: List[str], + relation_id: int, + recommended_expiry_notification_time: Optional[int] = None, + ) -> None: + """Add certificates to relation data. + + Args: + certificate (str): Certificate + certificate_signing_request (str): Certificate signing request + ca (str): CA Certificate + chain (list): CA Chain + relation_id (int): Juju relation ID + recommended_expiry_notification_time (int): + Recommended time in hours before the certificate expires to notify the user. 
+ + Returns: + None + """ + if not self.model.unit.is_leader(): + return + certificates_relation = self.model.get_relation( + relation_name=self.relationship_name, relation_id=relation_id + ) + if not certificates_relation: + raise RuntimeError(f"Relation {self.relationship_name} does not exist") + self._remove_certificate( + certificate_signing_request=certificate_signing_request.strip(), + relation_id=relation_id, + ) + self._add_certificate( + relation_id=relation_id, + certificate=certificate.strip(), + certificate_signing_request=certificate_signing_request.strip(), + ca=ca.strip(), + chain=[cert.strip() for cert in chain], + recommended_expiry_notification_time=recommended_expiry_notification_time, + ) + + def remove_certificate(self, certificate: str) -> None: + """Remove a given certificate from relation data. + + Args: + certificate (str): TLS Certificate + + Returns: + None + """ + certificates_relation = self.model.relations[self.relationship_name] + if not certificates_relation: + raise RuntimeError(f"Relation {self.relationship_name} does not exist") + for certificate_relation in certificates_relation: + self._remove_certificate(certificate=certificate, relation_id=certificate_relation.id) + + def get_issued_certificates( + self, relation_id: Optional[int] = None + ) -> List[ProviderCertificate]: + """Return a List of issued (non revoked) certificates. + + Returns: + List: List of ProviderCertificate objects + """ + provider_certificates = self.get_provider_certificates(relation_id=relation_id) + return [certificate for certificate in provider_certificates if not certificate.revoked] + + def get_provider_certificates( + self, relation_id: Optional[int] = None + ) -> List[ProviderCertificate]: + """Return a List of issued certificates. + + Returns: + List: List of ProviderCertificate objects + """ + certificates: List[ProviderCertificate] = [] + relations = ( + [ + relation + for relation in self.model.relations[self.relationship_name] + if relation.id == relation_id + ] + if relation_id is not None + else self.model.relations.get(self.relationship_name, []) + ) + for relation in relations: + if not relation.app: + logger.warning("Relation %s does not have an application", relation.id) + continue + provider_relation_data = self._load_app_relation_data(relation) + provider_certificates = provider_relation_data.get("certificates", []) + for certificate in provider_certificates: + try: + certificate_object = x509.load_pem_x509_certificate( + data=certificate["certificate"].encode() + ) + except ValueError as e: + logger.error("Could not load certificate - Skipping: %s", e) + continue + provider_certificate = ProviderCertificate( + relation_id=relation.id, + application_name=relation.app.name, + csr=certificate["certificate_signing_request"], + certificate=certificate["certificate"], + ca=certificate["ca"], + chain=certificate["chain"], + revoked=certificate.get("revoked", False), + expiry_time=certificate_object.not_valid_after_utc, + expiry_notification_time=certificate.get( + "recommended_expiry_notification_time" + ), + ) + certificates.append(provider_certificate) + return certificates + + def _on_relation_changed(self, event: RelationChangedEvent) -> None: + """Handle relation changed event. + + Looks at the relation data and either emits: + - certificate request event: If the unit relation data contains a CSR for which + a certificate does not exist in the provider relation data. 
+        - certificate revocation event: If the provider relation data contains a certificate
+          for which the matching CSR no longer exists in the requirer relation data.
+
+        Args:
+            event: Juju event
+
+        Returns:
+            None
+        """
+        if event.unit is None:
+            logger.error("Relation_changed event does not have a unit.")
+            return
+        if not self.model.unit.is_leader():
+            return
+        if not _relation_data_is_valid(event.relation, event.unit, REQUIRER_JSON_SCHEMA):
+            logger.debug("Relation data did not pass JSON Schema validation")
+            return
+        provider_certificates = self.get_provider_certificates(relation_id=event.relation.id)
+        requirer_csrs = self.get_requirer_csrs(relation_id=event.relation.id)
+        provider_csrs = [
+            certificate_creation_request.csr
+            for certificate_creation_request in provider_certificates
+        ]
+        for certificate_request in requirer_csrs:
+            if certificate_request.csr not in provider_csrs:
+                self.on.certificate_creation_request.emit(
+                    certificate_signing_request=certificate_request.csr,
+                    relation_id=certificate_request.relation_id,
+                    is_ca=certificate_request.is_ca,
+                )
+        self._revoke_certificates_for_which_no_csr_exists(relation_id=event.relation.id)
+
+    def _revoke_certificates_for_which_no_csr_exists(self, relation_id: int) -> None:
+        """Revoke certificates for which no unit has a CSR.
+
+        Goes through all generated certificates and compares them against the list of CSRs
+        for all units.
+
+        Returns:
+            None
+        """
+        provider_certificates = self.get_unsolicited_certificates(relation_id=relation_id)
+        for provider_certificate in provider_certificates:
+            self.on.certificate_revocation_request.emit(
+                certificate=provider_certificate.certificate,
+                certificate_signing_request=provider_certificate.csr,
+                ca=provider_certificate.ca,
+                chain=provider_certificate.chain,
+            )
+            self.remove_certificate(certificate=provider_certificate.certificate)
+
+    def get_unsolicited_certificates(
+        self, relation_id: Optional[int] = None
+    ) -> List[ProviderCertificate]:
+        """Return provider certificates for which no certificate request exists.
+
+        Those certificates should be revoked.
+        """
+        unsolicited_certificates: List[ProviderCertificate] = []
+        provider_certificates = self.get_provider_certificates(relation_id=relation_id)
+        requirer_csrs = self.get_requirer_csrs(relation_id=relation_id)
+        list_of_csrs = [csr.csr for csr in requirer_csrs]
+        for certificate in provider_certificates:
+            if certificate.csr not in list_of_csrs:
+                unsolicited_certificates.append(certificate)
+        return unsolicited_certificates
+
+    def get_outstanding_certificate_requests(
+        self, relation_id: Optional[int] = None
+    ) -> List[RequirerCSR]:
+        """Return CSRs for which no certificate has been issued.
+
+        Args:
+            relation_id (int): Relation id
+
+        Returns:
+            list: List of RequirerCSR objects.
+        """
+        requirer_csrs = self.get_requirer_csrs(relation_id=relation_id)
+        outstanding_csrs: List[RequirerCSR] = []
+        for relation_csr in requirer_csrs:
+            if not self.certificate_issued_for_csr(
+                app_name=relation_csr.application_name,
+                csr=relation_csr.csr,
+                relation_id=relation_id,
+            ):
+                outstanding_csrs.append(relation_csr)
+        return outstanding_csrs
+
+    def get_requirer_csrs(self, relation_id: Optional[int] = None) -> List[RequirerCSR]:
+        """Return a list of requirers' CSRs.
+
+        It returns CSRs from all relations if relation_id is not specified.
+        CSRs are returned per relation id, application name and unit name.
+
+        Returns:
+            list: List[RequirerCSR]
+        """
+        relation_csrs: List[RequirerCSR] = []
+        relations = (
+            [
+                relation
+                for relation in self.model.relations[self.relationship_name]
+                if relation.id == relation_id
+            ]
+            if relation_id is not None
+            else self.model.relations.get(self.relationship_name, [])
+        )
+
+        for relation in relations:
+            for unit in relation.units:
+                requirer_relation_data = _load_relation_data(relation.data[unit])
+                unit_csrs_list = requirer_relation_data.get("certificate_signing_requests", [])
+                for unit_csr in unit_csrs_list:
+                    csr = unit_csr.get("certificate_signing_request")
+                    if not csr:
+                        logger.warning("No CSR found in relation data - Skipping")
+                        continue
+                    ca = unit_csr.get("ca", False)
+                    if not relation.app:
+                        logger.warning("No remote app in relation - Skipping")
+                        continue
+                    relation_csr = RequirerCSR(
+                        relation_id=relation.id,
+                        application_name=relation.app.name,
+                        unit_name=unit.name,
+                        csr=csr,
+                        is_ca=ca,
+                    )
+                    relation_csrs.append(relation_csr)
+        return relation_csrs
+
+    def certificate_issued_for_csr(
+        self, app_name: str, csr: str, relation_id: Optional[int]
+    ) -> bool:
+        """Check whether a certificate has been issued for a given CSR.
+
+        Args:
+            app_name (str): Application name that the CSR belongs to.
+            csr (str): Certificate Signing Request.
+            relation_id (Optional[int]): Relation ID
+
+        Returns:
+            bool: True/False depending on whether a certificate has been issued for the given CSR.
+        """
+        issued_certificates_per_csr = self.get_issued_certificates(relation_id=relation_id)
+        for issued_certificate in issued_certificates_per_csr:
+            if issued_certificate.csr == csr and issued_certificate.application_name == app_name:
+                return csr_matches_certificate(csr, issued_certificate.certificate)
+        return False
+
+
+class TLSCertificatesRequiresV3(Object):
+    """TLS certificates requirer class to be instantiated by TLS certificates requirers."""
+
+    on = CertificatesRequirerCharmEvents()  # type: ignore[reportAssignmentType]
+
+    def __init__(
+        self,
+        charm: CharmBase,
+        relationship_name: str,
+        expiry_notification_time: Optional[int] = None,
+    ):
+        """Generate/use private key and observe relation changed events.
+
+        Args:
+            charm: Charm object
+            relationship_name: Juju relation name
+            expiry_notification_time (int): Number of hours prior to certificate expiry.
+                Used to trigger the CertificateExpiring event.
+                This value is used as a recommendation only; the actual value is calculated
+                taking into account the provider's recommendation.
+        """
+        super().__init__(charm, relationship_name)
+        if not JujuVersion.from_environ().has_secrets:
+            logger.warning("This version of the TLS library requires Juju secrets (Juju >= 3.0)")
+        self.relationship_name = relationship_name
+        self.charm = charm
+        self.expiry_notification_time = expiry_notification_time
+        self.framework.observe(
+            charm.on[relationship_name].relation_changed, self._on_relation_changed
+        )
+        self.framework.observe(
+            charm.on[relationship_name].relation_broken, self._on_relation_broken
+        )
+        self.framework.observe(charm.on.secret_expired, self._on_secret_expired)
+
+    def get_requirer_csrs(self) -> List[RequirerCSR]:
+        """Return list of requirer's CSRs from relation unit data.
+
+        Returns:
+            list: List of RequirerCSR objects.
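+
+        Example (illustrative)::
+
+            for requirer_csr in self.get_requirer_csrs():
+                logger.info("CSR sent by unit %s", requirer_csr.unit_name)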
+ """ + relation = self.model.get_relation(self.relationship_name) + if not relation: + return [] + requirer_csrs = [] + requirer_relation_data = _load_relation_data(relation.data[self.model.unit]) + requirer_csrs_dict = requirer_relation_data.get("certificate_signing_requests", []) + for requirer_csr_dict in requirer_csrs_dict: + csr = requirer_csr_dict.get("certificate_signing_request") + if not csr: + logger.warning("No CSR found in relation data - Skipping") + continue + ca = requirer_csr_dict.get("ca", False) + relation_csr = RequirerCSR( + relation_id=relation.id, + application_name=self.model.app.name, + unit_name=self.model.unit.name, + csr=csr, + is_ca=ca, + ) + requirer_csrs.append(relation_csr) + return requirer_csrs + + def get_provider_certificates(self) -> List[ProviderCertificate]: + """Return list of certificates from the provider's relation data.""" + provider_certificates: List[ProviderCertificate] = [] + relation = self.model.get_relation(self.relationship_name) + if not relation: + logger.debug("No relation: %s", self.relationship_name) + return [] + if not relation.app: + logger.debug("No remote app in relation: %s", self.relationship_name) + return [] + provider_relation_data = _load_relation_data(relation.data[relation.app]) + provider_certificate_dicts = provider_relation_data.get("certificates", []) + for provider_certificate_dict in provider_certificate_dicts: + certificate = provider_certificate_dict.get("certificate") + if not certificate: + logger.warning("No certificate found in relation data - Skipping") + continue + try: + certificate_object = x509.load_pem_x509_certificate(data=certificate.encode()) + except ValueError as e: + logger.error("Could not load certificate - Skipping: %s", e) + continue + ca = provider_certificate_dict.get("ca") + chain = provider_certificate_dict.get("chain", []) + csr = provider_certificate_dict.get("certificate_signing_request") + recommended_expiry_notification_time = provider_certificate_dict.get( + "recommended_expiry_notification_time" + ) + expiry_time = certificate_object.not_valid_after_utc + validity_start_time = certificate_object.not_valid_before_utc + expiry_notification_time = calculate_expiry_notification_time( + validity_start_time=validity_start_time, + expiry_time=expiry_time, + provider_recommended_notification_time=recommended_expiry_notification_time, + requirer_recommended_notification_time=self.expiry_notification_time, + ) + if not csr: + logger.warning("No CSR found in relation data - Skipping") + continue + revoked = provider_certificate_dict.get("revoked", False) + provider_certificate = ProviderCertificate( + relation_id=relation.id, + application_name=relation.app.name, + csr=csr, + certificate=certificate, + ca=ca, + chain=chain, + revoked=revoked, + expiry_time=expiry_time, + expiry_notification_time=expiry_notification_time, + ) + provider_certificates.append(provider_certificate) + return provider_certificates + + def _add_requirer_csr_to_relation_data(self, csr: str, is_ca: bool) -> None: + """Add CSR to relation data. 
+
+        Args:
+            csr (str): Certificate Signing Request
+            is_ca (bool): Whether the certificate is a CA certificate
+
+        Returns:
+            None
+        """
+        relation = self.model.get_relation(self.relationship_name)
+        if not relation:
+            raise RuntimeError(
+                f"Relation {self.relationship_name} does not exist - "
+                f"The certificate request can't be completed"
+            )
+        for requirer_csr in self.get_requirer_csrs():
+            if requirer_csr.csr == csr and requirer_csr.is_ca == is_ca:
+                logger.info("CSR already in relation data - Doing nothing")
+                return
+        new_csr_dict = {
+            "certificate_signing_request": csr,
+            "ca": is_ca,
+        }
+        requirer_relation_data = _load_relation_data(relation.data[self.model.unit])
+        existing_relation_data = requirer_relation_data.get("certificate_signing_requests", [])
+        new_relation_data = copy.deepcopy(existing_relation_data)
+        new_relation_data.append(new_csr_dict)
+        relation.data[self.model.unit]["certificate_signing_requests"] = json.dumps(
+            new_relation_data
+        )
+
+    def _remove_requirer_csr_from_relation_data(self, csr: str) -> None:
+        """Remove CSR from relation data.
+
+        Args:
+            csr (str): Certificate signing request
+
+        Returns:
+            None
+        """
+        relation = self.model.get_relation(self.relationship_name)
+        if not relation:
+            raise RuntimeError(
+                f"Relation {self.relationship_name} does not exist - "
+                f"The certificate request can't be completed"
+            )
+        if not self.get_requirer_csrs():
+            logger.info("No CSRs in relation data - Doing nothing")
+            return
+        requirer_relation_data = _load_relation_data(relation.data[self.model.unit])
+        existing_relation_data = requirer_relation_data.get("certificate_signing_requests", [])
+        new_relation_data = copy.deepcopy(existing_relation_data)
+        for requirer_csr in new_relation_data:
+            if requirer_csr["certificate_signing_request"] == csr:
+                new_relation_data.remove(requirer_csr)
+        relation.data[self.model.unit]["certificate_signing_requests"] = json.dumps(
+            new_relation_data
+        )
+
+    def request_certificate_creation(
+        self, certificate_signing_request: bytes, is_ca: bool = False
+    ) -> None:
+        """Request a TLS certificate from the provider charm.
+
+        Args:
+            certificate_signing_request (bytes): Certificate Signing Request
+            is_ca (bool): Whether the certificate is a CA certificate
+
+        Returns:
+            None
+        """
+        relation = self.model.get_relation(self.relationship_name)
+        if not relation:
+            raise RuntimeError(
+                f"Relation {self.relationship_name} does not exist - "
+                f"The certificate request can't be completed"
+            )
+        self._add_requirer_csr_to_relation_data(
+            certificate_signing_request.decode().strip(), is_ca=is_ca
+        )
+        logger.info("Certificate request sent to provider")
+
+    def request_certificate_revocation(self, certificate_signing_request: bytes) -> None:
+        """Remove CSR from relation data.
+
+        The provider of this relation is then expected to remove certificates associated with
+        this CSR from the relation data as well and emit a certificate_revocation_request event
+        for the provider charm to interpret.
+
+        Args:
+            certificate_signing_request (bytes): Certificate Signing Request
+
+        Returns:
+            None
+        """
+        self._remove_requirer_csr_from_relation_data(certificate_signing_request.decode().strip())
+        logger.info("Certificate revocation sent to provider")
+
+    def request_certificate_renewal(
+        self, old_certificate_signing_request: bytes, new_certificate_signing_request: bytes
+    ) -> None:
+        """Renew certificate.
+
+        Removes the old CSR from relation data and adds the new one.
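+
+        Example (an illustrative sketch; assumes both CSRs were generated with
+        generate_csr and that the old one was previously requested)::
+
+            self.request_certificate_renewal(
+                old_certificate_signing_request=old_csr,
+                new_certificate_signing_request=new_csr,
+            )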
+
+        Args:
+            old_certificate_signing_request: Old CSR
+            new_certificate_signing_request: New CSR
+
+        Returns:
+            None
+        """
+        try:
+            self.request_certificate_revocation(
+                certificate_signing_request=old_certificate_signing_request
+            )
+        except RuntimeError:
+            logger.warning("Certificate revocation failed.")
+        self.request_certificate_creation(
+            certificate_signing_request=new_certificate_signing_request
+        )
+        logger.info("Certificate renewal request completed.")
+
+    def get_assigned_certificates(self) -> List[ProviderCertificate]:
+        """Get a list of certificates that were assigned to this unit.
+
+        Returns:
+            List: List[ProviderCertificate]
+        """
+        assigned_certificates = []
+        for requirer_csr in self.get_certificate_signing_requests(fulfilled_only=True):
+            if cert := self._find_certificate_in_relation_data(requirer_csr.csr):
+                assigned_certificates.append(cert)
+        return assigned_certificates
+
+    def get_expiring_certificates(self) -> List[ProviderCertificate]:
+        """Get a list of certificates that were assigned to this unit that are expiring or expired.
+
+        Returns:
+            List: List[ProviderCertificate]
+        """
+        expiring_certificates: List[ProviderCertificate] = []
+        for requirer_csr in self.get_certificate_signing_requests(fulfilled_only=True):
+            if cert := self._find_certificate_in_relation_data(requirer_csr.csr):
+                if not cert.expiry_time or not cert.expiry_notification_time:
+                    continue
+                if datetime.now(timezone.utc) > cert.expiry_notification_time:
+                    expiring_certificates.append(cert)
+        return expiring_certificates
+
+    def get_certificate_signing_requests(
+        self,
+        fulfilled_only: bool = False,
+        unfulfilled_only: bool = False,
+    ) -> List[RequirerCSR]:
+        """Get the list of CSRs that were sent to the provider.
+
+        You can choose to get only the CSRs that have a certificate assigned or only the CSRs
+        that don't.
+
+        Args:
+            fulfilled_only (bool): This option will discard CSRs that don't have certificates yet.
+            unfulfilled_only (bool): This option will discard CSRs that have certificates signed.
+
+        Returns:
+            List of RequirerCSR objects.
+        """
+        csrs = []
+        for requirer_csr in self.get_requirer_csrs():
+            cert = self._find_certificate_in_relation_data(requirer_csr.csr)
+            if (unfulfilled_only and cert) or (fulfilled_only and not cert):
+                continue
+            csrs.append(requirer_csr)
+
+        return csrs
+
+    def _on_relation_changed(self, event: RelationChangedEvent) -> None:
+        """Handle relation changed event.
+
+        Goes through all provider certificates that match a requested CSR.
+
+        If the provider certificate is revoked, emit a CertificateInvalidatedEvent,
+        otherwise emit a CertificateAvailableEvent.
+
+        Remove the secret for revoked certificate, or add a secret with the correct expiry
+        time for new certificates.
+
+        Args:
+            event: Juju event
+
+        Returns:
+            None
+        """
+        if not event.app:
+            logger.warning("No remote app in relation - Skipping")
+            return
+        if not _relation_data_is_valid(event.relation, event.app, PROVIDER_JSON_SCHEMA):
+            logger.debug("Relation data did not pass JSON Schema validation")
+            return
+        provider_certificates = self.get_provider_certificates()
+        requirer_csrs = [
+            certificate_creation_request.csr
+            for certificate_creation_request in self.get_requirer_csrs()
+        ]
+        for certificate in provider_certificates:
+            if certificate.csr in requirer_csrs:
+                csr_in_sha256_hex = get_sha256_hex(certificate.csr)
+                if certificate.revoked:
+                    with suppress(SecretNotFoundError):
+                        logger.debug(
+                            "Removing secret with label %s",
+                            f"{LIBID}-{csr_in_sha256_hex}",
+                        )
+                        secret = self.model.get_secret(label=f"{LIBID}-{csr_in_sha256_hex}")
+                        secret.remove_all_revisions()
+                    self.on.certificate_invalidated.emit(
+                        reason="revoked",
+                        certificate=certificate.certificate,
+                        certificate_signing_request=certificate.csr,
+                        ca=certificate.ca,
+                        chain=certificate.chain,
+                    )
+                else:
+                    try:
+                        secret = self.model.get_secret(label=f"{LIBID}-{csr_in_sha256_hex}")
+                        logger.debug(
+                            "Setting secret with label %s", f"{LIBID}-{csr_in_sha256_hex}"
+                        )
+                        # Juju < 3.6 will create a new revision even if the content is the same
+                        if (
+                            secret.get_content(refresh=True).get("certificate", "")
+                            == certificate.certificate
+                        ):
+                            logger.debug(
+                                "Secret %s with correct certificate already exists",
+                                f"{LIBID}-{csr_in_sha256_hex}",
+                            )
+                            continue
+                        secret.set_content(
+                            {"certificate": certificate.certificate, "csr": certificate.csr}
+                        )
+                        secret.set_info(
+                            expire=self._get_next_secret_expiry_time(certificate),
+                        )
+                    except SecretNotFoundError:
+                        logger.debug(
+                            "Creating new secret with label %s", f"{LIBID}-{csr_in_sha256_hex}"
+                        )
+                        secret = self.charm.unit.add_secret(
+                            {"certificate": certificate.certificate, "csr": certificate.csr},
+                            label=f"{LIBID}-{csr_in_sha256_hex}",
+                            expire=self._get_next_secret_expiry_time(certificate),
+                        )
+                    self.on.certificate_available.emit(
+                        certificate_signing_request=certificate.csr,
+                        certificate=certificate.certificate,
+                        ca=certificate.ca,
+                        chain=certificate.chain,
+                    )
+
+    def _get_next_secret_expiry_time(self, certificate: ProviderCertificate) -> Optional[datetime]:
+        """Return the expiry time or expiry notification time.
+
+        Extracts the expiry time from the provided certificate, calculates the
+        expiry notification time, and returns the closest of the two that is in
+        the future.
+
+        Args:
+            certificate: ProviderCertificate object
+
+        Returns:
+            Optional[datetime]: None if the certificate expiry time cannot be read,
+                next expiry time otherwise.
+        """
+        if not certificate.expiry_time or not certificate.expiry_notification_time:
+            return None
+        return _get_closest_future_time(
+            certificate.expiry_notification_time,
+            certificate.expiry_time,
+        )
+
+    def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
+        """Handle Relation Broken Event.
+
+        Emitting `all_certificates_invalidated` from `relation-broken` rather
+        than `relation-departed` since certs are stored in app data.
+
+        Args:
+            event: Juju event
+
+        Returns:
+            None
+        """
+        self.on.all_certificates_invalidated.emit()
+
+    def _on_secret_expired(self, event: SecretExpiredEvent) -> None:
+        """Handle Secret Expired Event.
+
+        Loads the certificate from the secret and emits one of two events.
+
+        If the certificate is not yet expired, emits CertificateExpiringEvent
+        and updates the expiry time of the secret to the exact expiry time on
+        the certificate.
+
+        If the certificate is expired, emits CertificateInvalidatedEvent and
+        deletes the secret.
+
+        Args:
+            event (SecretExpiredEvent): Juju event
+        """
+        csr = self._get_csr_from_secret(event.secret)
+        if not csr:
+            logger.error("Failed to get CSR from secret %s", event.secret.label)
+            return
+        provider_certificate = self._find_certificate_in_relation_data(csr)
+        if not provider_certificate:
+            # A secret expired but we did not find a matching certificate. Cleaning up.
+            logger.warning(
+                "Failed to find matching certificate for csr, cleaning up secret %s",
+                event.secret.label,
+            )
+            event.secret.remove_all_revisions()
+            return
+
+        if not provider_certificate.expiry_time:
+            # A secret expired but the matching certificate is invalid. Cleaning up.
+            logger.warning(
+                "Certificate matching csr is invalid, cleaning up secret %s",
+                event.secret.label,
+            )
+            event.secret.remove_all_revisions()
+            return
+
+        if datetime.now(timezone.utc) < provider_certificate.expiry_time:
+            logger.warning("Certificate almost expired")
+            self.on.certificate_expiring.emit(
+                certificate=provider_certificate.certificate,
+                expiry=provider_certificate.expiry_time.isoformat(),
+            )
+            event.secret.set_info(
+                expire=provider_certificate.expiry_time,
+            )
+        else:
+            logger.warning("Certificate is expired")
+            self.on.certificate_invalidated.emit(
+                reason="expired",
+                certificate=provider_certificate.certificate,
+                certificate_signing_request=provider_certificate.csr,
+                ca=provider_certificate.ca,
+                chain=provider_certificate.chain,
+            )
+            self.request_certificate_revocation(provider_certificate.certificate.encode())
+            event.secret.remove_all_revisions()
+
+    def _find_certificate_in_relation_data(self, csr: str) -> Optional[ProviderCertificate]:
+        """Return the certificate that matches the given CSR."""
+        for provider_certificate in self.get_provider_certificates():
+            if provider_certificate.csr != csr:
+                continue
+            return provider_certificate
+        return None
+
+    def _get_csr_from_secret(self, secret: Secret) -> Union[str, None]:
+        """Extract the CSR from the secret label or content.
+
+        This function is a workaround to maintain backwards compatibility
+        and fix the issue reported in
+        https://github.com/canonical/tls-certificates-interface/issues/228
+        """
+        try:
+            content = secret.get_content(refresh=True)
+        except SecretNotFoundError:
+            return None
+        if not (csr := content.get("csr", None)):
+            # In versions <14 of the lib we were storing the CSR in the label of the secret.
+            # The CSR is now stored in the content of the secret, which was a breaking change.
+            # Here we get the CSR if the secret was created by an app using libpatch 14 or lower.
+            if secret.label and secret.label.startswith(f"{LIBID}-"):
+                csr = secret.label[len(f"{LIBID}-") :]
+        return csr
diff --git a/single_kernel_mongo/managers/__init__.py b/single_kernel_mongo/managers/__init__.py
new file mode 100644
index 00000000..429edd1d
--- /dev/null
+++ b/single_kernel_mongo/managers/__init__.py
@@ -0,0 +1,6 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""The event managers for mongo charms.
+
+Interacts with workloads.
+""" diff --git a/single_kernel_mongo/managers/backups.py b/single_kernel_mongo/managers/backups.py new file mode 100644 index 00000000..8c7ed4eb --- /dev/null +++ b/single_kernel_mongo/managers/backups.py @@ -0,0 +1,579 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""The backup manager. + +In this class, we manage backup configurations and actions. + +Specifically backups are handled with Percona Backup MongoDB (pbm). +A user for PBM is created when MongoDB is first started during the start phase. +This user is named "backup". +""" + +from __future__ import annotations + +import json +import logging +import re +import time +from functools import cached_property +from typing import TYPE_CHECKING + +from ops import Container +from ops.framework import Object +from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus, StatusBase, WaitingStatus +from tenacity import ( + Retrying, + before_log, + retry, + retry_if_exception_type, + retry_if_not_exception_type, + stop_after_attempt, + wait_fixed, +) + +from single_kernel_mongo.config.literals import MongoPorts, Substrates +from single_kernel_mongo.core.structured_config import MongoDBRoles +from single_kernel_mongo.exceptions import ( + BackupError, + ListBackupError, + PBMBusyError, + RestoreError, + ResyncError, + SetPBMConfigError, + WorkloadExecError, +) +from single_kernel_mongo.managers.config import BackupConfigManager +from single_kernel_mongo.state.charm_state import CharmState +from single_kernel_mongo.workload import get_pbm_workload_for_substrate +from single_kernel_mongo.workload.backup_workload import PBMWorkload + +if TYPE_CHECKING: + from single_kernel_mongo.abstract_charm import AbstractMongoCharm + +BACKUP_RESTORE_MAX_ATTEMPTS = 10 +BACKUP_RESTORE_ATTEMPT_COOLDOWN = 15 +REMAPPING_PATTERN = r"\ABackup doesn't match current cluster topology - it has different replica set names. Extra shards in the backup will cause this, for a simple example. The extra/unknown replica set names found in the backup are: ([\w\d\-,\s]+)([.] Backup has no data for the config server or sole replicaset)?\Z" + +S3_PBM_OPTION_MAP = { + "region": "storage.s3.region", + "bucket": "storage.s3.bucket", + "path": "storage.s3.prefix", + "access-key": "storage.s3.credentials.access-key-id", + "secret-key": "storage.s3.credentials.secret-access-key", + "endpoint": "storage.s3.endpointUrl", + "storage-class": "storage.s3.storageClass", +} +logger = logging.getLogger(__name__) + + +class BackupManager(Object, BackupConfigManager): + """Manager for the S3 integrator and backups.""" + + def __init__( + self, + charm: AbstractMongoCharm, + substrate: Substrates, + state: CharmState, + container: Container | None, + ) -> None: + super().__init__(parent=charm, key="backup") + super(Object, self).__init__( + substrate=substrate, config=charm.parsed_config, state=state, container=container + ) + self.charm = charm + self.workload: PBMWorkload = get_pbm_workload_for_substrate(substrate)(container=container) + self.state = state + + @cached_property + def environment(self) -> dict[str, str]: + """The environment used to run PBM commands.""" + return {self.workload.env_var: self.state.backup_config.uri} + + def is_valid_s3_integration(self) -> bool: + """Returns true if relation to s3-integrator is valid. + + Only replica sets and config_servers can integrate to s3-integrator. 
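+
+        In other words, the integration is only considered invalid when this
+        unit runs as a shard while an s3 relation is present.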
+        """
+        return (self.state.s3_relation is None) or (not self.state.is_role(MongoDBRoles.SHARD))
+
+    def create_backup_action(self) -> str:  # type: ignore[return]
+        """Try to create a backup and return the backup id.
+
+        If PBM is resyncing, the function will retry creating the backup
+        (up to BACKUP_RESTORE_MAX_ATTEMPTS times)
+        with BACKUP_RESTORE_ATTEMPT_COOLDOWN time between attempts.
+
+        If PBM returns any other error, the function will raise BackupError.
+        """
+        for attempt in Retrying(  # noqa: RET503
+            stop=stop_after_attempt(BACKUP_RESTORE_MAX_ATTEMPTS),
+            retry=retry_if_not_exception_type(BackupError),
+            wait=wait_fixed(BACKUP_RESTORE_ATTEMPT_COOLDOWN),
+            reraise=True,
+            before_sleep=_backup_restore_retry_before_sleep,
+        ):
+            with attempt:
+                try:
+                    output = self.workload.run_bin_command(
+                        "backup",
+                        environment=self.environment,
+                    )
+                    backup_id_match = re.search(
+                        r"Starting backup '(?P<backup_id>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)'",
+                        output,
+                    )
+                    return backup_id_match.group("backup_id") if backup_id_match else "N/A"
+                except WorkloadExecError as e:
+                    error_message = e.stdout
+                    if "Resync" in error_message:
+                        raise ResyncError from e
+
+                    fail_message = f"Backup failed: {str(e)}"
+
+                    raise BackupError(fail_message)
+
+    def list_backup_action(self) -> str:
+        """List the backup entries."""
+        backup_list: list[tuple[str, str, str]] = []
+        try:
+            pbm_status_output = self.pbm_status
+        except WorkloadExecError as e:
+            raise ListBackupError from e
+        pbm_status = json.loads(pbm_status_output)
+        backups = pbm_status.get("backups", {}).get("snapshot", [])
+        for backup in backups:
+            backup_status = "finished"
+            if backup["status"] == "error":
+                # backups from a different cluster have an error status, but they should show as
+                # finished
+                if self._is_backup_from_different_cluster(backup.get("error", "")):
+                    backup_status = "finished"
+                else:
+                    # display reason for failure if available
+                    backup_status = "failed: " + backup.get("error", "N/A")
+            if backup["status"] not in ["error", "done"]:
+                backup_status = "in progress"
+            backup_list.append((backup["name"], backup["type"], backup_status))
+
+        # process in progress backups
+        running_backup = pbm_status["running"]
+        if running_backup.get("type", None) == "backup":
+            # backups are sorted in reverse order
+            last_reported_backup = backup_list[0]
+            # pbm will occasionally report backups that are currently running as failed, so it is
+            # necessary to correct the backup list in this case.
+            if last_reported_backup[0] == running_backup["name"]:
+                backup_list[0] = (
+                    last_reported_backup[0],
+                    last_reported_backup[1],
+                    "in progress",
+                )
+            else:
+                backup_list.append((running_backup["name"], "logical", "in progress"))
+
+        return self._format_backup_list(sorted(backup_list, key=lambda pair: pair[0]))
+
+    def restore_backup(self, backup_id: str, remapping_pattern: str | None = None) -> None:
+        """Try to restore the cluster from a backup specified by backup id.
+
+        If PBM is resyncing, the function will retry the restore
+        (up to BACKUP_RESTORE_MAX_ATTEMPTS times) with BACKUP_RESTORE_ATTEMPT_COOLDOWN
+        time between attempts.
+
+        If PBM returns any other error, the function will raise RestoreError.
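+
+        Example (illustrative; the backup id is a placeholder in the format
+        returned by create_backup_action)::
+
+            self.restore_backup("2024-01-01T09:00:00Z")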
+        """
+        for attempt in Retrying(
+            stop=stop_after_attempt(BACKUP_RESTORE_MAX_ATTEMPTS),
+            retry=retry_if_not_exception_type(RestoreError),
+            wait=wait_fixed(BACKUP_RESTORE_ATTEMPT_COOLDOWN),
+            reraise=True,
+            before_sleep=_backup_restore_retry_before_sleep,
+        ):
+            with attempt:
+                try:
+                    remapping_pattern = remapping_pattern or self._remap_replicaset(backup_id)
+                    remapping_args = (
+                        ["--replset-remapping", remapping_pattern] if remapping_pattern else []
+                    )
+                    self.workload.run_bin_command(
+                        "restore",
+                        [backup_id] + remapping_args,
+                        environment=self.environment,
+                    )
+                except WorkloadExecError as e:
+                    error_message = e.stdout
+                    if "Resync" in e.stdout:
+                        raise ResyncError
+
+                    fail_message = f"Restore failed: {str(e)}"
+                    if f"backup '{backup_id}' not found" in error_message:
+                        fail_message = f"Restore failed: Backup id '{backup_id}' does not exist in list of backups, please check list-backups for the available backup_ids."
+
+                    raise RestoreError(fail_message)
+
+    def get_status(self) -> StatusBase | None:
+        """Gets the PBM status."""
+        if not self.workload.active():
+            return WaitingStatus("waiting for pbm to start")
+        if not self.state.s3_relation:
+            logger.info("No configuration for backups, no relation to s3-charm")
+            return None
+        try:
+            previous_status = self.charm.unit.status
+            pbm_status = self.pbm_status
+            pbm_error = self.process_pbm_error(pbm_status)
+            if pbm_error:
+                return BlockedStatus(pbm_error)
+
+            processed_status = self.process_pbm_status(pbm_status)
+            operation_result = self._get_backup_restore_operation_result(
+                processed_status, previous_status
+            )
+            logger.info(operation_result)
+            return processed_status
+        except Exception as e:
+            logger.error(f"Failed to get pbm status: {e}")
+            return BlockedStatus("PBM error")
+
+    def resync_config_options(self):
+        """Attempts to resync config options and sets status in case of failure."""
+        self.workload.start()
+
+        # pbm has a flaky resync and it is necessary to wait for no actions to be running before
+        # resync-ing. See: https://jira.percona.com/browse/PBM-1038
+        for attempt in Retrying(
+            stop=stop_after_attempt(20),
+            wait=wait_fixed(5),
+            reraise=True,
+        ):
+            with attempt:
+                pbm_status = self.get_status()
+                # wait for backup/restore to finish
+                if isinstance(pbm_status, (MaintenanceStatus)):
+                    raise PBMBusyError
+
+                # if a resync is running, restart the service
+                if isinstance(pbm_status, (WaitingStatus)):
+                    self.workload.restart()
+                    raise PBMBusyError
+
+        # wait for re-sync and update charm status based on pbm syncing status. Need to wait for
+        # 2 seconds for pbm_agent to receive the resync command before verifying.
+        self.workload.run_bin_command("config", ["--force-resync"], environment=self.environment)
+        time.sleep(2)
+        self._wait_pbm_status()
+
+    def set_config_options(self, credentials: dict[str, str]) -> None:
+        """Apply the configuration provided by S3 integrator.
+
+        Args:
+            credentials: A dictionary provided by backup event handler.
+        """
+        # Clear the current config file.
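+        # Resetting the file first ensures that options from a previous S3
+        # configuration do not linger; the new options are then applied one by
+        # one below.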
+        self.clear_pbm_config_file()
+
+        config = map_s3_config_to_pbm_config(credentials)
+
+        for pbm_key, pbm_value in config.items():
+            try:
+                self.workload.run_bin_command(
+                    "config", ["--set", f"{pbm_key}={pbm_value}"], environment=self.environment
+                )
+            except WorkloadExecError:
+                logger.error(f"Failed to configure PBM option: {pbm_key}")
+                raise SetPBMConfigError
+
+    def clear_pbm_config_file(self) -> None:
+        """Overwrites the PBM config file with the one provided by default."""
+        self.workload.write(
+            self.workload.paths.pbm_config,
+            "# this file is to be left empty. Changes in this file will be ignored.\n",
+        )
+        self.workload.run_bin_command(
+            "config", ["--file", str(self.workload.paths.pbm_config)], environment=self.environment
+        )
+
+    def retrieve_error_message(self, pbm_status: dict) -> str:
+        """Parses pbm status for an error message from the current unit.
+
+        If pbm_agent is in the error state, the command `pbm status` does not raise an error.
+        Instead, the error is reported in the log messages. pbm_agent also shows all the error
+        messages for other replicas in the set. This method tries to handle both cases at once.
+        """
+        try:
+            clusters = pbm_status["cluster"]
+            for cluster in clusters:
+                if cluster["rs"] == self.charm.app.name:
+                    break
+
+            for host_info in cluster["nodes"]:
+                replica_info = (
+                    f"mongodb/{self.state.unit_peer_data.internal_address}:{MongoPorts.MONGOS_PORT}"
+                )
+                if host_info["host"] == replica_info:
+                    break
+
+            return str(host_info["errors"])
+        except KeyError:
+            return ""
+
+    def get_backup_error_status(self, backup_id: str) -> str:
+        """Get the error status for a provided backup."""
+        pbm_status = self.pbm_status
+        pbm_as_dict: dict = json.loads(pbm_status)
+        backups = pbm_as_dict.get("backups", {}).get("snapshot", [])
+        for backup in backups:
+            if backup_id == backup["name"]:
+                return backup.get("error", "")
+
+        return ""
+
+    def process_pbm_error(self, pbm_status: str) -> str:
+        """Look up PBM status for errors."""
+        error_message: str
+        message = ""
+        try:
+            pbm_as_dict = json.loads(pbm_status)
+            error_message = self.retrieve_error_message(pbm_as_dict)
+        except json.JSONDecodeError:
+            error_message = pbm_status
+
+        if "status code: 403" in error_message:
+            message = "s3 credentials are incorrect"
+        elif "status code: 404" in error_message:
+            message = "s3 configurations are incompatible."
+        elif "status code: 301" in error_message:
+            message = "s3 configurations are incompatible."
+        return message
+
+    def process_pbm_status(self, pbm_status: str) -> StatusBase:
+        """Processes the pbm status if there's no error."""
+        pbm_as_dict: dict[str, dict] = json.loads(pbm_status)
+        current_op = pbm_as_dict.get("running", {})
+        match current_op:
+            case {}:
+                return ActiveStatus("")
+            case {"type": "backup", "name": backup_id}:
+                return MaintenanceStatus(f"backup started/running, backup id: '{backup_id}'")
+            case {"type": "restore", "name": backup_id}:
+                return MaintenanceStatus(f"restore started/running, backup id: '{backup_id}'")
+            case {"type": "resync"}:
+                return WaitingStatus("waiting to sync s3 configurations.")
+            case _:
+                return ActiveStatus()
+
+    def can_restore(self, backup_id: str, remapping_pattern: str) -> tuple[bool, str]:
+        """Check whether the current status allows a restore.
+
+        Returns:
+            check: boolean telling whether the status allows a restore.
+            reason: The reason if it is not possible to restore yet.
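+
+        Example (an illustrative sketch, e.g. from an action handler where
+        `event` is the ops ActionEvent)::
+
+            ok, reason = self.can_restore(backup_id, remapping_pattern)
+            if not ok:
+                event.fail(reason)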
+        """
+        pbm_status = self.get_status()
+        match pbm_status:
+            case MaintenanceStatus():
+                return (False, "Please wait for current backup/restore to finish.")
+            case WaitingStatus():
+                return (
+                    False,
+                    "Sync-ing configurations needs more time, must wait before listing backups.",
+                )
+            case BlockedStatus():
+                return (False, pbm_status.message)
+            case _:
+                pass
+
+        if not backup_id:
+            return (False, "Missing backup-id to restore.")
+        if self._needs_provided_remap_arguments(backup_id) and remapping_pattern == "":
+            return (False, "Cannot restore backup, 'remap-pattern' must be set.")
+
+        return True, ""
+
+    def can_backup(self) -> tuple[bool, str]:
+        """Is PBM in a state where it can create a backup?"""
+        pbm_status = self.get_status()
+        match pbm_status:
+            case MaintenanceStatus():
+                return (
+                    False,
+                    "Can only create one backup at a time, please wait for current backup to finish.",
+                )
+            case WaitingStatus():
+                return (
+                    False,
+                    "Sync-ing configurations needs more time, must wait before creating backups.",
+                )
+            case BlockedStatus():
+                return False, pbm_status.message
+            case _:
+                return True, ""
+
+    def can_list_backup(self) -> tuple[bool, str]:
+        """Is PBM in a state where it can list backups?"""
+        pbm_status = self.get_status()
+        match pbm_status:
+            case WaitingStatus():
+                return (
+                    False,
+                    "Sync-ing configurations needs more time, must wait before listing backups.",
+                )
+            case BlockedStatus():
+                return False, pbm_status.message
+            case _:
+                return True, ""
+
+    @retry(
+        stop=stop_after_attempt(20),
+        reraise=True,
+        retry=retry_if_exception_type(ResyncError),
+        before=before_log(logger, logging.DEBUG),
+    )
+    def _wait_pbm_status(self) -> None:
+        """Wait for pbm_agent to resolve errors.
+
+        The pbm status is set by the pbm_agent daemon which needs time to both resync and resolve
+        errors in configurations. Resync-ing is a longer process and should take around 5 minutes.
+        Configuration errors generally occur when the configurations change and pbm_agent is
+        updating, this is generally quick and should take <15s. If errors are not resolved in 15s
+        it means there is an incorrect configuration which will require user intervention.
+
+        Retrying for resync is handled by the decorator; retrying for configuration errors is
+        handled within this function.
+        """
+        # on occasion it takes the pbm_agent daemon time to update its configs, meaning that it
+        # will error for incorrect configurations for <15s before resolving itself.
+
+        for attempt in Retrying(
+            stop=stop_after_attempt(3),
+            wait=wait_fixed(5),
+            reraise=True,
+        ):
+            with attempt:
+                try:
+                    pbm_status = self.pbm_status
+                    pbm_as_dict = json.loads(pbm_status)
+                    current_pbm_op: dict[str, str] = pbm_as_dict.get("running", {})
+
+                    if current_pbm_op.get("type", "") == "resync":
+                        # since this process takes several minutes we should let the user know
+                        # immediately.
+                        self.charm.status_manager.set_and_share_status(
+                            WaitingStatus("waiting to sync s3 configurations.")
+                        )
+                        raise ResyncError
+                except WorkloadExecError as e:
+                    self.charm.status_manager.set_and_share_status(
+                        BlockedStatus(self.process_pbm_error(e.stdout))
+                    )
+
+    def _get_backup_restore_operation_result(
+        self, current_pbm_status: StatusBase, previous_pbm_status: StatusBase
+    ) -> str:
+        """Returns a string with the result of the backup/restore operation.
+
+        The function call is expected to be only for not failed operations.
+        The operation is taken from the previous status of the unit and is expected
+        to contain the operation type (backup/restore) and the backup id.
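+
+        For example, a previous status of "backup started/running, backup id:
+        '2024-01-01T09:00:00Z'" that has since cleared resolves to "Backup
+        '2024-01-01T09:00:00Z' completed successfully".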
+ """ + if ( + current_pbm_status.name == previous_pbm_status.name + and current_pbm_status.message == previous_pbm_status.message + and not isinstance(current_pbm_status, ActiveStatus) + ): + return f"Operation is still in progress: '{current_pbm_status.message}'" + + if ( + isinstance(previous_pbm_status, MaintenanceStatus) + and "backup id:" in previous_pbm_status.message + ): + backup_id = previous_pbm_status.message.split("backup id:")[-1].strip() + if "restore" in previous_pbm_status.message: + return f"Restore from backup {backup_id} completed successfully" + if "backup" in previous_pbm_status.message: + return f"Backup {backup_id} completed successfully" + + return "Unknown operation result" + + def _is_backup_from_different_cluster(self, backup_status: str) -> bool: + """Returns if a given backup was made on a different cluster.""" + return re.search(REMAPPING_PATTERN, backup_status) is not None + + def _format_backup_list(self, backup_list: list[tuple[str, str, str]]) -> str: + """Formats provided list of backups as a table.""" + backups = ["{:<21s} | {:<12s} | {:s}".format("backup-id", "backup-type", "backup-status")] + + backups.append("-" * len(backups[0])) + for backup_id, backup_type, backup_status in backup_list: + backups.append(f"{backup_id:<21s} | {backup_type:<12s} | {backup_status:s}") + + return "\n".join(backups) + + @cached_property + def pbm_status(self) -> str: + """Runs the pbm status command.""" + return self.workload.run_bin_command( + "status", + ["-o", "json"], + environment=self.environment, + ).rstrip() + + def _needs_provided_remap_arguments(self, backup_id: str) -> bool: + """Returns true if remap arguments are needed to perform a restore command.""" + backup_error_status = self.get_backup_error_status(backup_id) + + # When a charm is running as a Replica set it can generate its own remapping arguments + return self._is_backup_from_different_cluster(backup_error_status) and self.state.is_role( + MongoDBRoles.CONFIG_SERVER + ) + + def _remap_replicaset(self, backup_id: str) -> str | None: + """Returns options for remapping a replica set during a cluster migration restore. + + Args: + backup_id: str of the backup to check for remapping + + Raises: CalledProcessError + """ + pbm_status = self.pbm_status + pbm_status = json.loads(pbm_status) + + # grab the error status from the backup if present + backup_error_status = self.get_backup_error_status(backup_id) + + if not self._is_backup_from_different_cluster(backup_error_status): + return None + + # TODO in the future when we support conf servers and shards this will need to be more + # comprehensive. + old_cluster_name_match = re.search(REMAPPING_PATTERN, backup_error_status) + if not old_cluster_name_match: + return None + old_cluster_name = old_cluster_name_match.group(1) + current_cluster_name = self.charm.app.name + logger.debug( + "Replica set remapping is necessary for restore, old cluster name: %s ; new cluster name: %s", + old_cluster_name, + current_cluster_name, + ) + return f"{current_cluster_name}={old_cluster_name}" + + +def map_s3_config_to_pbm_config(credentials: dict[str, str]): + """Simple mapping from s3 integration to current status.""" + pbm_configs = {"storage.type": "s3"} + for s3_option, s3_value in credentials.items(): + if s3_option not in S3_PBM_OPTION_MAP: + continue + + pbm_configs[S3_PBM_OPTION_MAP[s3_option]] = s3_value + return pbm_configs + + +def _backup_restore_retry_before_sleep(retry_state) -> None: + logger.error( + f"Attempt {retry_state.attempt_number} failed. 
{BACKUP_RESTORE_MAX_ATTEMPTS - retry_state.attempt_number} attempts left." + f"Retrying after {BACKUP_RESTORE_ATTEMPT_COOLDOWN} seconds." + ) diff --git a/single_kernel_mongo/managers/config.py b/single_kernel_mongo/managers/config.py new file mode 100644 index 00000000..920d4c98 --- /dev/null +++ b/single_kernel_mongo/managers/config.py @@ -0,0 +1,329 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Manager for handling Mongo configuration.""" + +import logging +import time +from abc import ABC, abstractmethod +from itertools import chain + +from ops import Container +from typing_extensions import override + +from single_kernel_mongo.config.audit_config import AuditLog +from single_kernel_mongo.config.literals import LOCALHOST, CharmRole, MongoPorts, Substrates +from single_kernel_mongo.config.logrotate_config import LogRotateConfig +from single_kernel_mongo.core.structured_config import MongoConfigModel, MongoDBRoles +from single_kernel_mongo.core.workload import WorkloadBase +from single_kernel_mongo.exceptions import WorkloadServiceError +from single_kernel_mongo.state.charm_state import CharmState +from single_kernel_mongo.utils.mongodb_users import BackupUser, MonitorUser +from single_kernel_mongo.workload import ( + get_logrotate_workload_for_substrate, + get_mongodb_exporter_workload_for_substrate, + get_pbm_workload_for_substrate, +) +from single_kernel_mongo.workload.log_rotate_workload import LogRotateWorkload + +logger = logging.getLogger(__name__) + + +class CommonConfigManager(ABC): + """A generic config manager for a workload.""" + + config: MongoConfigModel + workload: WorkloadBase + state: CharmState + + def set_environment(self): + """Write all parameters in the environment variable.""" + if self.workload.env_var != "": + parameters = chain.from_iterable(self.build_parameters()) + self.workload.update_env(parameters) + + def get_environment(self) -> str: + """Gets the environment for the defined service.""" + env = self.workload.get_env() + return env[self.workload.env_var] + + @abstractmethod + def build_parameters(self) -> list[list[str]]: + """Builds the parameters list.""" + ... + + +class BackupConfigManager(CommonConfigManager): + """Config manager for PBM.""" + + def __init__( + self, + substrate: Substrates, + config: MongoConfigModel, + state: CharmState, + container: Container | None, + ): + self.config = config + self.workload = get_pbm_workload_for_substrate(substrate)(container=container) + self.state = state + + @override + def build_parameters(self) -> list[list[str]]: + return [ + [ + self.state.backup_config.uri, + ] + ] + + def connect(self): + """Exposes the endpoint to PBM Agent.""" + if not self.workload.container_can_connect: + logger.info("Container cannot connect.") + return + if not self.state.db_initialised: + logger.info("DB is not initialised.") + return + + if not self.state.app_peer_data.get_user_password(BackupUser.username): + logger.info("No password found.") + return + + if not self.workload.active() or self.get_environment() != self.state.backup_config.uri: + logger.info("Restarting the PBM agent.") + try: + self.workload.stop() + self.set_environment() + # Avoid restart errors on PBM. 
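+                # (The pause below is an empirical buffer: starting the agent
+                # immediately after stopping it and rewriting its environment can
+                # race the service manager and fail spuriously.)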
+ time.sleep(2) + self.workload.start() + except WorkloadServiceError as e: + logger.error(f"Failed to restart {self.workload.service}: {e}") + raise + + +class LogRotateConfigManager(CommonConfigManager): + """Config manager for logrotate.""" + + def __init__( + self, + substrate: Substrates, + config: MongoConfigModel, + state: CharmState, + container: Container | None, + ): + self.config = config + self.workload: LogRotateWorkload = get_logrotate_workload_for_substrate(substrate)( + container=container + ) + self.state = state + self.substrate = substrate + + @override + def build_parameters(self) -> list[list[str]]: + return [[]] + + def connect(self) -> None: + """Setup logrotate and cron.""" + self.workload.build_template() + if self.substrate == "vm": + self.workload.setup_cron( + [ + f"* 1-23 * * * root logrotate {LogRotateConfig.rendered_template}\n", + f"1-59 0 * * * root logrotate {LogRotateConfig.rendered_template}\n", + ] + ) + else: + self.workload.start() + + +class MongoDBExporterConfigManager(CommonConfigManager): + """Config manager for mongodb-exporter.""" + + def __init__( + self, + substrate: Substrates, + config: MongoConfigModel, + state: CharmState, + container: Container | None, + ): + self.config = config + self.workload = get_mongodb_exporter_workload_for_substrate(substrate)(container=container) + self.state = state + + @override + def build_parameters(self) -> list[list[str]]: + return [[self.state.monitor_config.uri]] + + def connect(self): + """Exposes the endpoint to mongodb_exporter.""" + if not self.state.db_initialised: + return + + if not self.state.app_peer_data.get_user_password(MonitorUser.username): + return + + if not self.workload.active() or self.get_environment() != self.state.monitor_config.uri: + try: + self.set_environment() + self.workload.restart() + except WorkloadServiceError as e: + logger.error(f"Failed to restart {self.workload.service}: {e}") + raise + + +class MongoConfigManager(CommonConfigManager, ABC): + """The common configuration manager for both MongoDB and Mongos.""" + + @override + def build_parameters(self) -> list[list[str]]: + return [ + self.binding_ips, + self.port_parameter, + self.auth_parameter, + self.tls_parameters, + self.log_options, + self.audit_options, + ] + + @property + @abstractmethod + def port_parameter(self) -> list[str]: + """The port parameter.""" + ... 
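+
+    # Illustrative sketch only (not part of the library): a subclass supplies the
+    # port, and build_parameters() returns one list of CLI flags per option group,
+    # e.g.
+    #
+    #     class ExampleConfigManager(MongoConfigManager):
+    #         @property
+    #         def port_parameter(self) -> list[str]:
+    #             return ["--port 27017"]
+    #
+    # CommonConfigManager.set_environment() then flattens those groups into the
+    # single environment variable read by the service.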
+ + @property + def binding_ips(self) -> list[str]: + """The binding IP parameters.""" + if ( + self.state.charm_role == CharmRole.MONGOS + and not self.state.app_peer_data.external_connectivity + ): + return [ + f"--bind-ip {self.workload.paths.socket_path}", + "--filePermissions 0766", + ] + return ["--bind_ip_all"] + + @property + def log_options(self) -> list[str]: + """The arguments for the logging option.""" + return [ + "--setParameter processUmask=037", # Required for log files permissions + "--logRotate reopen", + "--logappend", + f"--logpath={self.workload.paths.log_file}", + ] + + @property + def audit_options(self) -> list[str]: + """The argument for the audit log options.""" + return [ + f"--auditDestination={AuditLog.destination}", + f"--auditFormat={AuditLog.format}", + f"--auditPath={self.workload.paths.audit_file}", + ] + + @property + def auth_parameter(self) -> list[str]: + """The auth mode.""" + if self.state.tls.internal_enabled and self.state.tls.external_enabled: + return [ + "--auth", + "--clusterAuthMode=x509", + "--tlsAllowInvalidCertificates", + f"--tlsClusterCAFile={self.workload.paths.int_ca_file}", + f"--tlsClusterFile={self.workload.paths.int_pem_file}", + ] + return [ + "--auth", + "--clusterAuthMode=keyFile", + f"--keyFile={self.workload.paths.keyfile}", + ] + + @property + def tls_parameters(self) -> list[str]: + """The TLS external parameters.""" + if self.state.tls.external_enabled: + return [ + f"--tlsCAFile={self.workload.paths.ext_ca_file}", + f"--tlsCertificateKeyFile={self.workload.paths.ext_pem_file}", + # allow non-TLS connections + "--tlsMode=preferTLS", + "--tlsDisabledProtocols=TLS1_0,TLS1_1", + ] + return [] + + +class MongoDBConfigManager(MongoConfigManager): + """MongoDB Specifics config manager.""" + + def __init__(self, config: MongoConfigModel, state: CharmState, workload: WorkloadBase): + self.state = state + self.workload = workload + self.config = config + + @property + def db_path_argument(self) -> list[str]: + """The full path of the data directory.""" + return [f"--dbpath={self.workload.paths.data_path}"] + + @property + def role_parameter(self) -> list[str]: + """The role parameter.""" + match self.state.app_peer_data.role: + case MongoDBRoles.CONFIG_SERVER: + return ["--configsvr"] + case MongoDBRoles.SHARD: + return ["--shardsvr"] + case _: + return [] + + @property + def replset_option(self) -> list[str]: + """The replSet configuration option.""" + return [f"--replSet={self.state.app_peer_data.replica_set}"] + + @property + @override + def port_parameter(self) -> list[str]: + return [f"--port {MongoPorts.MONGODB_PORT}"] + + @override + def build_parameters(self) -> list[list[str]]: + base = super().build_parameters() + return base + [ + self.replset_option, + self.role_parameter, + self.db_path_argument, + ] + + +class MongosConfigManager(MongoConfigManager): + """Mongos Specifics config manager.""" + + def __init__(self, config: MongoConfigModel, workload: WorkloadBase, state: CharmState): + self.state = state + self.workload = workload + self.config = config + + @property + def config_server_db_parameter(self) -> list[str]: + """The config server DB parameter.""" + if uri := self.state.cluster.config_server_uri: + return [f"--configdb {uri}"] + return [ + f"--configdb {self.state.app_peer_data.replica_set}/{LOCALHOST}:{MongoPorts.MONGODB_PORT}" + ] + + @property + @override + def port_parameter(self) -> list[str]: + return [f"--port {MongoPorts.MONGOS_PORT}"] + + @override + def build_parameters(self) -> list[list[str]]: + 
base = super().build_parameters() + return base + [ + self.config_server_db_parameter, + ] diff --git a/single_kernel_mongo/managers/k8s.py b/single_kernel_mongo/managers/k8s.py new file mode 100644 index 00000000..cc293eda --- /dev/null +++ b/single_kernel_mongo/managers/k8s.py @@ -0,0 +1,271 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Manager for handling k8s resources.""" + +import json +import logging +import math +import time +from functools import cache, cached_property + +from lightkube.core.client import Client +from lightkube.core.exceptions import ApiError +from lightkube.models.core_v1 import ServicePort, ServiceSpec +from lightkube.models.meta_v1 import ObjectMeta, OwnerReference +from lightkube.resources.apps_v1 import StatefulSet +from lightkube.resources.core_v1 import Node, Pod, Service + +from single_kernel_mongo.exceptions import FailedToFindNodePortError, FailedToFindServiceError + +# default logging from lightkube httpx requests is very noisy +logging.getLogger("lightkube").disabled = True +logging.getLogger("lightkube.core.client").disabled = True +logging.getLogger("httpx").disabled = True +logging.getLogger("httpcore").disabled = True + +logger = logging.getLogger() + + +class K8sManager: + """Manager for handling k8s resources for mongo charms.""" + + def __init__(self, pod_name: str, namespace: str): + self.pod_name: str = pod_name + self.app_name: str = "-".join(pod_name.split("-")[:-1]) + self.namespace: str = namespace + + def __eq__(self, other: object) -> bool: + """__eq__ dunder. + + Allows to get cache hit on calls on the same method from different instances of K8sManager + as `self` is passed to methods. + """ + return isinstance(other, K8sManager) and self.__dict__ == other.__dict__ + + def __hash__(self) -> int: + """__hash__ dunder. + + For dict like caching. + """ + return hash(json.dumps(self.__dict__, sort_keys=True)) + + @cached_property + def client(self) -> Client: + """The Lightkube client.""" + return Client( # pyright: ignore[reportArgumentType] + field_manager=self.pod_name, + namespace=self.namespace, + ) + + # BEGIN: getters + def get_ttl_hash(self, seconds=120) -> int: + """Gets a unique time hash for the cache, expiring after 2 minutes. + + We enforce a cache miss by using a ghost argument which changes every 2 + minutes to all getters. 
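+
+        Illustrative example (hypothetical usage):
+            manager.get_pod()  # first call in a 120s window hits the K8s API
+            manager.get_pod()  # later calls in the same window are served from the cache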
+ """ + return math.floor(time.time() / seconds) + + def get_pod(self, pod_name: str = "") -> Pod: + """Gets the pod via k8s API.""" + return self._get_pod(pod_name, self.get_ttl_hash()) + + def get_node(self, pod_name: str) -> Node: + """Gets the node the port is running on.""" + return self._get_node(pod_name, self.get_ttl_hash()) + + def get_node_ip(self, pod_name: str) -> str: + """Gets the IP Address of the Node via the K8s API.""" + return self._get_node_ip(pod_name, self.get_ttl_hash()) + + def get_service(self, service_name: str) -> Service | None: + """Gets the Service via the K8s API.""" + return self._get_service(service_name, self.get_ttl_hash()) + + def get_partition(self) -> int: + """Gets the stateful set rolling partition.""" + return self._get_partition(self.get_ttl_hash()) + + def get_unit_service_name(self, pod_name: str = "") -> str: + """Returns the service name for the current unit.""" + pod_name = pod_name or self.pod_name + return f"{pod_name}-external" + + # END: getters + + # BEGIN: helpers + + def on_deployed_without_trust(self) -> None: + """Blocks the application and returns a specific error message.""" + logger.error("Kubernetes application needs `juju trust`") + + def build_node_port_services(self, port: str) -> Service: + """Builds a ClusterIP service for initial client connection.""" + pod = self.get_pod(pod_name=self.pod_name) + if not pod.metadata: + raise Exception(f"Could not find metadata for {pod}") + + return Service( + metadata=ObjectMeta( + name=self.get_unit_service_name(self.pod_name), + namespace=self.namespace, + # When we scale-down K8s will keep the Services for the deleted units around, + # unless the Services' owner is also deleted. + ownerReferences=[ + OwnerReference( + apiVersion=pod.apiVersion, + kind=pod.kind, + name=self.pod_name, + uid=pod.metadata.uid, + blockOwnerDeletion=False, + ) + ], + ), + spec=ServiceSpec( + externalTrafficPolicy="Local", + type="NodePort", + selector={ + "statefulset.kubernetes.io/pod-name": self.pod_name, + }, + ports=[ + ServicePort( + protocol="TCP", + port=port, + targetPort=port, + name=f"{self.pod_name}-port", + ) + ], + ), + ) + + def apply_service(self, service: Service) -> None: + """Applies the given service.""" + try: + self.client.apply(service) + except ApiError as e: + if e.status.code == 403: + self.on_deployed_without_trust() + return + if ( + e.status.code == 422 + and isinstance(e.status.message, str) + and "port is already allocated" in e.status.message + ): + logger.error(e.status.message) + return + raise + + def delete_service(self) -> None: + """Deletes the service if it exists.""" + try: + service_name = self.get_unit_service_name(self.pod_name) + service = self.get_service(service_name=service_name) + except ApiError as e: + if e.status.code == 404: + logger.debug(f"Could not find {service_name} to delete.") + return + raise + + if not service: + raise Exception(f"No service {service_name}.") + if not service.metadata: + raise Exception(f"No metadata for {service_name}") + if not service.metadata.name: + raise Exception(f"No name in service metadata for {service_name}.") + + try: + self.client.delete(Service, name=service.metadata.name) + except ApiError as e: + if e.status.code == 403: + self.on_deployed_without_trust() + return + raise + + def set_partition(self, value: int) -> None: + """Sets the partition value.""" + try: + self.client.patch( + res=StatefulSet, + name=self.app_name, + obj={"spec": {"updateStrategy": {"rollingUpdate": {"partition": value}}}}, + ) + 
self._get_partition.cache_clear() # Clean the cache. + except ApiError as e: + if e.status.code == 403: + self.on_deployed_without_trust() + return + raise + + def get_node_port(self, port_to_match: int) -> int: + """Return node port for the provided port to match.""" + service_name = self.get_unit_service_name(self.pod_name) + service = self.get_service(service_name=service_name) + + if not service or not service.spec or not service.spec.type == "NodePort": + raise FailedToFindServiceError(f"No service found for port on {self.pod_name}") + + for svc_port in service.spec.ports or []: + if svc_port.port == port_to_match: + return svc_port.nodePort # type: ignore[return-value] + + raise FailedToFindNodePortError( + f"Unable to find NodePort for {port_to_match} for the {service} service" + ) + + # END: helpers + + # BEGIN: Private methods + @cache + def _get_pod(self, pod_name: str = "", *_) -> Pod: + # Allows us to get pods from other peer units + pod_name = pod_name or self.pod_name + + return self.client.get( + res=Pod, + name=pod_name, + ) + + @cache + def _get_node(self, pod_name: str, *_) -> Node: + pod = self.get_pod(pod_name) + if not pod.spec or not pod.spec.nodeName: + raise Exception("Could not find podSpec or nodeName") + + return self.client.get( + Node, + name=pod.spec.nodeName, + ) + + @cache + def _get_node_ip(self, pod_name: str, *_) -> str: + # all these redundant checks are because Lightkube's typing is awful + node = self.get_node(pod_name) + if not node.status or not node.status.addresses: + raise Exception(f"No status found for {node}") + + for addresses in node.status.addresses: + if addresses.type in ["ExternalIP", "InternalIP", "Hostname"]: + return addresses.address + + return "" + + @cache + def _get_service(self, service_name: str, *_) -> Service | None: + return self.client.get( + res=Service, + name=service_name, + ) + + @cache + def _get_partition(self, *_) -> int: + partition = self.client.get(res=StatefulSet, name=self.app_name) + if ( + not partition.spec + or not partition.spec.updateStrategy + or not partition.spec.updateStrategy.rollingUpdate + or not partition.spec.updateStrategy.rollingUpdate.partition + ): + raise Exception("Incomplete stateful set.") + return partition.spec.updateStrategy.rollingUpdate.partition diff --git a/single_kernel_mongo/managers/mongo.py b/single_kernel_mongo/managers/mongo.py new file mode 100644 index 00000000..37c29e40 --- /dev/null +++ b/single_kernel_mongo/managers/mongo.py @@ -0,0 +1,465 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""The Mongo manager. + +In this class, we manage the mongo database internals. + +This class is in charge of creating users, databases, initialising replicat sets, etc. 
+""" + +from __future__ import annotations + +import json +import logging +import re +from typing import TYPE_CHECKING + +from dacite import from_dict +from ops import EventBase, Object +from ops.charm import RelationChangedEvent +from ops.model import Relation +from pymongo.errors import PyMongoError + +from single_kernel_mongo.config.literals import Scope, Substrates +from single_kernel_mongo.core.structured_config import MongoDBRoles +from single_kernel_mongo.exceptions import SetPasswordError +from single_kernel_mongo.lib.charms.data_platform_libs.v0.data_interfaces import ( + DatabaseProviderData, +) +from single_kernel_mongo.managers.k8s import K8sManager +from single_kernel_mongo.state.charm_state import CharmState +from single_kernel_mongo.utils.mongo_config import ( + EMPTY_CONFIGURATION, + MongoConfiguration, +) +from single_kernel_mongo.utils.mongo_connection import MongoConnection, NotReadyError +from single_kernel_mongo.utils.mongodb_users import ( + OPERATOR_ROLE, + BackupUser, + MongoDBUser, + MonitorUser, + OperatorUser, +) +from single_kernel_mongo.workload.mongodb_workload import MongoDBWorkload + +if TYPE_CHECKING: + from single_kernel_mongo.abstract_charm import AbstractMongoCharm + +logger = logging.getLogger(__name__) + + +class MongoManager(Object): + """Manager for Mongo related operations.""" + + def __init__( + self, + charm: AbstractMongoCharm, + workload: MongoDBWorkload, + state: CharmState, + substrate: Substrates, + ) -> None: + super().__init__(parent=charm, key="managers") + self.charm = charm + self.workload = workload + self.state = state + self.substrate = substrate + pod_name = self.model.unit.name.replace("/", "-") + self.k8s = K8sManager(pod_name, self.model.name) + + def mongod_ready(self, uri: str | None = None) -> bool: + """Is MongoDB ready and running?""" + config = EMPTY_CONFIGURATION + actual_uri = uri or "localhost" + with MongoConnection(config, actual_uri, direct=True) as direct_mongo: + return direct_mongo.is_ready + + def set_user_password(self, user: MongoDBUser, password: str) -> str: + """Sets the password for a given username and return the secret id. + + Raises: + SetPasswordError + """ + with MongoConnection(self.state.mongo_config) as mongo: + try: + mongo.set_user_password(user.username, password) + except NotReadyError: + raise SetPasswordError( + "Failed changing the password: Not all members healthy or finished initial sync." + ) + except PyMongoError as e: + raise SetPasswordError(f"Failed changing the password: {e}") + + return self.state.secrets.set( + user.password_key_name, + password, + Scope.UNIT, + ).label + + def initialise_replica_set(self) -> None: + """Initialises the replica set.""" + with MongoConnection(self.state.mongo_config, "localhost", direct=True) as direct_mongo: + direct_mongo.init_replset() + self.state.app_peer_data.replica_set_hosts = [self.state.unit_peer_data.host] + + def initialise_users(self) -> None: + """First initialisation of each user.""" + self.initialise_operator_user() + self.initialise_monitor_user() + self.initialise_backup_user() + + def initialise_operator_user(self): + """Creates initial admin user for MongoDB. + + Initial admin user can be created only through localhost connection. + see https://www.mongodb.com/docs/manual/core/localhost-exception/ + unfortunately, pymongo unable to create connection that considered + as local connection by MongoDB, even if socket connection used. + As a result, where are only hackish ways to create initial user. 
+        The mongodb-clients package must be installed inside the charm
+        container for this function to work correctly.
+        """
+        if self.state.app_peer_data.is_user_created(OperatorUser.username):
+            return
+        config = self.state.mongo_config
+        with MongoConnection(config, "localhost", direct=True) as direct_mongo:
+            direct_mongo.create_user(config=config, roles=OPERATOR_ROLE)
+        self.state.app_peer_data.set_user_created(OperatorUser.username)
+
+    def initialise_monitor_user(self):
+        """Creates the monitor user on the MongoDB database."""
+        if self.state.app_peer_data.is_user_created(MonitorUser.username):
+            return
+        with MongoConnection(self.state.mongo_config) as mongo:
+            logger.debug("Creating the monitor user roles...")
+            mongo.create_role(
+                role_name=MonitorUser.mongodb_role,
+                privileges=MonitorUser.privileges,
+            )
+            logger.debug("Creating the monitor user...")
+            mongo.create_user(self.state.monitor_config)
+        self.state.app_peer_data.set_user_created(MonitorUser.username)
+
+    def initialise_backup_user(self):
+        """Creates the backup user on the MongoDB database."""
+        if self.state.app_peer_data.is_user_created(BackupUser.username):
+            return
+        with MongoConnection(self.state.mongo_config) as mongo:
+            logger.debug("Creating the backup user roles...")
+            mongo.create_role(
+                role_name=BackupUser.mongodb_role,
+                privileges=BackupUser.privileges,
+            )
+            logger.debug("Creating the backup user...")
+            mongo.create_user(self.state.backup_config)
+        self.state.app_peer_data.set_user_created(BackupUser.username)
+
+    def oversee_users(
+        self, relation_name: str, relation_id: int | None = None, event: EventBase | None = None
+    ):
+        """Oversees the users of the application.
+
+        This function manages user relations by removing, updating, and creating
+        users, and by dropping databases when necessary.
+
+        Args:
+            relation_name: The name of the relation we are working with.
+            relation_id: When specified, the users and databases belonging to
+                this relation are excluded and removed if necessary.
+            event: The relation event.
+
+        When the function is executed in a relation-departed event, the departed
+        relation is still in the list of all relations. Therefore, for the
+        function to work properly, we need to exclude the departed relation
+        from the list.
+
+        Raises:
+            PyMongoError
+        """
+        with MongoConnection(self.state.mongo_config) as mongo:
+            database_users = mongo.get_users()
+
+        users_being_managed = database_users.intersection(self.state.app_peer_data.managed_users)
+        relations = self.model.relations[relation_name]
+        expected_current_users = {
+            f"relation-{relation.id}" for relation in relations if relation.id != relation_id
+        }
+        self.remove_users(users_being_managed, expected_current_users)
+        self.add_users(relation_name, users_being_managed, expected_current_users)
+        self.update_users(relation_name, users_being_managed, expected_current_users)
+        self.update_diff(event)
+        self.auto_delete_dbs(relation_name, relation_id)
+
+    def update_diff(self, event: EventBase | None = None) -> None:
+        """Update the relation databag with the diff of data.
+
+        Args:
+            event: An event. This method does nothing if the event is not a
+                RelationChangedEvent.
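+
+        Illustrative example: if the remote application databag holds
+        {"database": "mydb", "extra-user-roles": "admin"}, this stores
+        '{"database": "mydb", "extra-user-roles": "admin"}' under the local
+        "data" key as the snapshot to diff against on the next change event.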
+ """ + if not isinstance(event, RelationChangedEvent): + logger.info("Cannot compute diff of event type: %s", type(event)) + return + + new_data = { + key: value for key, value in event.relation.data[event.app].items() if key != "data" + } + event.relation.data[self.charm.model.app].update({"data": json.dumps(new_data)}) + + def add_users( + self, relation_name: str, users_being_managed: set[str], expected_current_users: set[str] + ) -> None: + """Adds users to Charmed MongoDB. + + Args: + users_being_managed: The users managed by the Charm. + expected_current_users: The expected users after this call. + + Raises: + PyMongoError + """ + managed_users = self.state.app_peer_data.managed_users + with MongoConnection(self.state.mongo_config) as mongo: + for username in expected_current_users - users_being_managed: + relation = self._get_relation_from_username(username, relation_name) + data_interface = DatabaseProviderData( + self.model, + relation.name, + ) + config = self.get_config( + username, + None, + data_interface, + relation.id, + ) + if config.database is None: + # We need to wait for the moment when the provider library + # set the database name into the relation. + continue + logger.info("Create relation user: %s on %s", config.username, config.database) + + mongo.create_user(config) + managed_users.add(username) + data_interface.set_database(relation.id, config.database) + + self.state.app_peer_data.managed_users = managed_users + + def update_users( + self, relation_name: str, users_being_managed: set[str], expected_current_users: set[str] + ) -> None: + """Updates existing users in Charmed MongoDB. + + Raises: + PyMongoError + """ + with MongoConnection(self.state.mongo_config) as mongo: + for username in expected_current_users.intersection(users_being_managed): + relation = self._get_relation_from_username(username, relation_name) + data_interface = DatabaseProviderData( + self.model, + relation.name, + ) + config = self.get_config( + username, + None, + data_interface, + relation.id, + ) + logger.info("Update relation user: %s on %s", config.username, config.database) + mongo.update_user(config) + logger.info("Updating relation data according to diff") + + def update_app_relation_data(self, relation_name: str) -> None: + """Helper function to update application relation data.""" + # TODO: Add sanity checks. 
+ # if not self.pass_sanity_hook_checks(): + # return + + # relations with the mongos server should not connect through the config-server directly + if self.state.is_role(MongoDBRoles.CONFIG_SERVER): + return + database_users = set() + + with MongoConnection(self.state.mongo_config) as mongo: + database_users = mongo.get_users() + + relations = self.model.relations[relation_name] + + for relation in relations: + data_interface = DatabaseProviderData(self.model, relation.name) + if not data_interface.fetch_relation_field(relation.id, "database"): + continue + username = data_interface.fetch_relation_field(relation.id, "username") + password = data_interface.fetch_relation_field(relation.id, "password") + + if not username or not password: + username = f"relation-{relation.id}" + password = self.workload.generate_password() + + config = self.get_config( + username, + password, + data_interface, + relation.id, + ) + + data_interface.set_credentials( + relation.id, + username, + password, + ) + if username in database_users: + data_interface.set_endpoints( + relation.id, + ",".join(config.hosts), + ) + data_interface.set_uris( + relation.id, + config.uri, + ) + + def remove_users(self, users_being_managed: set[str], expected_current_users: set[str]) -> None: + """Removes users from Charmed MongoDB. + + Note this only removes users that this application of Charmed MongoDB is responsible for + managing. It won't remove: + 1. users created from other applications + 2. users created from other mongos routers. + + Raises: + PyMongoError + """ + mongo_config = self.state.mongo_config + managed_users = self.state.app_peer_data.managed_users + with MongoConnection(mongo_config) as mongo: + for username in users_being_managed - expected_current_users: + logger.info("Remove relation user: %s", username) + # Skip our user. 
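+                # (When this charm runs as a mongos router, its own database user
+                # can appear in this set; dropping it would sever the router's own
+                # connection, so it is skipped.)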
+ if self.state.is_role(MongoDBRoles.MONGOS) and username == mongo_config.username: + continue + + # for user removal of mongos-k8s router, we let the router remove itself + if self.state.is_role(MongoDBRoles.CONFIG_SERVER) and self.substrate == "k8s": + logger.info("K8s routers will remove themselves.") + managed_users.remove(username) + continue + + mongo.drop_user(username) + managed_users.remove(username) + self.state.app_peer_data.managed_users = managed_users + + def auto_delete_dbs(self, relation_name: str, relation_id: int | None) -> None: + """Delete unused DBs if configured to do so.""" + with MongoConnection(self.state.mongo_config) as mongo: + if not self.state.config.auto_delete: + return + + relations = self.model.relations[relation_name] + database_dbs = mongo.get_databases() + relation_dbs = set() + for relation in relations: + if relation.id == relation_id: + continue + data_interface = DatabaseProviderData(self.model, relation.name) + database = data_interface.fetch_relation_field(relation.id, "database") + if database is not None: + relation_dbs.add(database) + for database in database_dbs - relation_dbs: + logger.info("Drop database: %s", database) + mongo.drop_database(database) + + def get_config( + self, + username: str, + password: str | None, + data_inteface: DatabaseProviderData, + relation_id: int, + ) -> MongoConfiguration: + """.""" + password = password or data_inteface.fetch_my_relation_field(relation_id, "password") + if not password: + password = self.workload.generate_password() + data_inteface.set_credentials(relation_id, username, password) + database_name = data_inteface.fetch_relation_field(relation_id, "database") + roles = data_inteface.fetch_relation_field(relation_id, "extra-user-roles") + if not database_name or not roles: + raise Exception("Missing database name or roles.") + mongo_args = { + "database": database_name, + "username": username, + "password": password, + "hosts": self.state.app_hosts, + "roles": roles, + "tls_external": False, + "tls_internal": False, + "port": self.state.host_port, + } + if not self.state.is_role(MongoDBRoles.MONGOS): + mongo_args["replset"] = self.state.app_peer_data.replica_set + return from_dict(data_class=MongoConfiguration, data=mongo_args) + + def set_election_priority(self, priority: int): + """Sets the election priority.""" + with MongoConnection(self.state.mongo_config) as mongo: + mongo.set_replicaset_election_priority(priority=priority) + + def process_unremoved_units(self) -> None: + """Remove units from replica set.""" + with MongoConnection(self.state.mongo_config) as mongo: + try: + replset_members = mongo.get_replset_members() + for member in replset_members - mongo.config.hosts: + logger.debug("Removing %s from replica set", member) + mongo.remove_replset_member(member) + except NotReadyError: + logger.info("Deferring process_unremoved_units: another member is syncing") + raise + except PyMongoError as e: + logger.error("Deferring process_unremoved_units: error=%r", e) + raise + + def remove_replset_member(self) -> None: + """Remove a unit from the replicaset.""" + with MongoConnection(self.state.mongo_config) as mongo: + mongo.remove_replset_member(self.state.unit_peer_data.host) + + def process_added_units(self) -> None: + """Adds units to replica set.""" + with MongoConnection(self.state.mongo_config) as mongo: + replset_members = mongo.get_replset_members() + config_hosts = mongo.config.hosts + # compare set of mongod replica set members and juju hosts to avoid the unnecessary + # 
reconfiguration. + if replset_members == config_hosts: + return + + for member in config_hosts - replset_members: + logger.debug("Adding %s to replica set", member) + if not self.mongod_ready(uri=member): + logger.debug("not reconfiguring: %s is not ready yet.", member) + raise NotReadyError + mongo.add_replset_member(member) + + def _get_relation_from_username(self, username: str, relation_name: str) -> Relation: + """Parse relation ID from a username and return Relation object.""" + match = re.match(r"^relation-(\d+)$", username) + # We generated username in `_get_users_from_relations` + # func and passed it into this function later. + # It means the username here MUST match regex. + if not match: + raise Exception("No relation match") + relation_id = int(match.group(1)) + logger.debug("Relation ID: %s", relation_id) + relation = self.model.get_relation(relation_name, relation_id) + if not relation: + raise Exception("No relation match") + return relation + + # Keep for memory for now. + # def get_relation_name(self): + # """Returns the name of the relation to use.""" + # if self.state.is_role(MongoDBRoles.CONFIG_SERVER): + # return RelationNames.CLUSTER + # if self.state.is_role(MongoDBRoles.MONGOS): + # return RelationNames.MONGOS_PROXY + # return RelationNames.DATABASE diff --git a/single_kernel_mongo/managers/mongodb_operator.py b/single_kernel_mongo/managers/mongodb_operator.py new file mode 100644 index 00000000..7b22389b --- /dev/null +++ b/single_kernel_mongo/managers/mongodb_operator.py @@ -0,0 +1,564 @@ +#!/usr/bin/python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Operator for MongoDB Related Charms.""" + +from __future__ import annotations + +import logging +from collections.abc import Iterable +from pathlib import Path +from typing import TYPE_CHECKING + +from ops.model import Container, Unit +from pymongo.errors import PyMongoError +from tenacity import Retrying, stop_after_attempt, wait_fixed +from typing_extensions import override + +from single_kernel_mongo.config.literals import ( + CONTAINER, + MAX_PASSWORD_LENGTH, + CharmRole, + MongoPorts, + Scope, + Substrates, +) +from single_kernel_mongo.config.logrotate_config import LogRotateConfig +from single_kernel_mongo.config.relations import RelationNames +from single_kernel_mongo.config.roles import K8S_MONGO, VM_MONGO +from single_kernel_mongo.core.operator import OperatorProtocol +from single_kernel_mongo.core.secrets import generate_secret_label +from single_kernel_mongo.core.structured_config import MongoDBRoles +from single_kernel_mongo.events.backups import INVALID_S3_INTEGRATION_STATUS, BackupEventsHandler +from single_kernel_mongo.exceptions import ( + ContainerNotReadyError, + SetPasswordError, + ShardingMigrationError, + UpgradeInProgressError, + WorkloadExecError, + WorkloadNotReadyError, + WorkloadServiceError, +) +from single_kernel_mongo.managers.backups import BackupManager +from single_kernel_mongo.managers.config import ( + CommonConfigManager, + LogRotateConfigManager, + MongoDBConfigManager, + MongoDBExporterConfigManager, + MongosConfigManager, +) +from single_kernel_mongo.managers.mongo import MongoManager +from single_kernel_mongo.managers.tls import TLSManager +from single_kernel_mongo.state.charm_state import CharmState +from single_kernel_mongo.utils.mongo_connection import NotReadyError +from single_kernel_mongo.utils.mongodb_users import ( + BackupUser, + MonitorUser, + OperatorUser, + get_user_from_username, +) +from single_kernel_mongo.workload import ( + 
get_mongodb_workload_for_substrate, + get_mongos_workload_for_substrate, +) + +if TYPE_CHECKING: + from single_kernel_mongo.abstract_charm import AbstractMongoCharm + + +logger = logging.getLogger(__name__) + + +class MongoDBOperator(OperatorProtocol): + """Operator for MongoDB Related Charms.""" + + name = CharmRole.MONGODB + + def __init__(self, charm: AbstractMongoCharm): + super().__init__(charm, self.name.value) + self.charm = charm + self.config = charm.parsed_config + self.substrate: Substrates = self.charm.substrate + self.role = VM_MONGO if self.substrate == "vm" else K8S_MONGO + self.state = CharmState(self.charm, self.role, self.name) + container = self.charm.unit.get_container(CONTAINER) if self.substrate == "k8s" else None + + # Defined workloads and configs + self.define_workloads_and_config_managers(container) + + self.backup_manager = BackupManager(self.charm, self.substrate, self.state, container) + self.tls_manager = TLSManager(self.charm, self.workload, self.state, self.substrate) + self.mongo_manager = MongoManager(self.charm, self.workload, self.state, self.substrate) + + self.backup_events = BackupEventsHandler(self) + + def define_workloads_and_config_managers(self, container: Container | None) -> None: + """Export all workload and config definition for readability.""" + # BEGIN: Define workloads. + self.workload = get_mongodb_workload_for_substrate(self.substrate)(container=container) + self.mongos_workload = get_mongos_workload_for_substrate(self.substrate)( + container=container + ) + # END: Define workloads + + # BEGIN Define config managers + self.config_manager = MongoDBConfigManager( + self.config, + self.state, + self.workload, + ) + self.mongos_config_manager = MongosConfigManager( + self.config, + self.mongos_workload, + self.state, + ) + self.logrotate_config_manager = LogRotateConfigManager( + self.substrate, + self.config, + self.state, + container, + ) + self.mongodb_exporter_config_manager = MongoDBExporterConfigManager( + self.substrate, + self.config, + self.state, + container, + ) + # END: Define config managers + + @property + def config_managers(self) -> Iterable[CommonConfigManager]: + """All config managers for iteration.""" + return ( + self.config_manager, + self.mongos_config_manager, + self.backup_manager, + self.logrotate_config_manager, + self.mongodb_exporter_config_manager, + ) + + # BEGIN: Handlers. + + @override + def on_install(self) -> None: + """Handler on install.""" + if not self.workload.snap_present: + return + if not self.workload.container_can_connect: + raise ContainerNotReadyError + self.charm.unit.set_workload_version(self.workload.get_version()) + + # Truncate the file. 
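+        # (Presumably safe to empty: runtime options are injected through the
+        # service environment by the config managers below, not via this file.)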
+ self.workload.write(self.workload.paths.config_file, "") + + for config_manager in self.config_managers: + config_manager.set_environment() + + self.logrotate_config_manager.connect() + + @override + def on_start(self) -> None: + """Handler on start.""" + if not self.workload.container_can_connect: + logger.debug("mongod container is not ready yet.") + raise ContainerNotReadyError + + if any(not storage for storage in self.model.storages.values()): + logger.debug("Storages not attached yet.") + raise ContainerNotReadyError + + self.instantiate_keyfile() + self.tls_manager.push_tls_files_to_workload() + self.handle_licenses() + self.set_permissions() + + try: + logger.info("Starting MongoDB.") + self.charm.status_manager.to_maintenance("starting MongoDB") + self.start_charm_services() + self.charm.status_manager.to_active(None) + except WorkloadServiceError as e: + logger.error(f"An exception occurred when starting mongod agent, error: {e}.") + self.charm.status_manager.to_blocked("couldn't start MongoDB") + return + + # Open ports: + try: + self.open_ports() + except WorkloadExecError: + self.charm.status_manager.to_blocked("failed to open TCP port for MongoDB") + raise + + # FIXME: Do we need to check for socket path here? + + if not self.mongo_manager.mongod_ready(): + self.charm.status_manager.to_waiting("waiting for MongoDB to start") + raise WorkloadNotReadyError + + self.charm.status_manager.to_active(None) + + try: + self.mongodb_exporter_config_manager.connect() + except WorkloadServiceError: + self.charm.status_manager.to_blocked("couldn't start mongodb exporter") + return + + try: + self.backup_manager.connect() + except WorkloadServiceError: + self.charm.status_manager.to_blocked("couldn't start pbm-agent") + return + + self._initialise_replica_set() + + @override + def on_stop(self) -> None: + """Handler for the stop event.""" + ... + + def on_config_changed(self) -> None: + """Listen to changes in application configuration. + + To prevent a user from migrating a cluster, and causing the component to become + unresponsive therefore causing a cluster failure, error the component. This prevents it + from executing other hooks with a new role. + """ + if self.state.is_role(self.config.role): + return + if self.state.upgrade_in_progress: + logger.warning( + "Changing config options is not permitted during an upgrade. The charm may be in a broken, unrecoverable state." + ) + raise UpgradeInProgressError + + logger.error( + f"cluster migration currently not supported, cannot change from {self.config.role} to {self.state.role}" + ) + raise ShardingMigrationError( + f"Migration of sharding components not permitted, revert config role to {self.role}" + ) + + @override + def on_leader_elected(self) -> None: + """Handles the leader elected event. + + Generates the keyfile and users credentials. + """ + if not self.state.app_peer_data.keyfile: + self.state.app_peer_data.keyfile = self.workload.generate_keyfile() + + # Set the password for the Operator User. + if not self.state.app_peer_data.get_user_password(OperatorUser.username): + self.state.app_peer_data.set_user_password( + OperatorUser.username, self.workload.generate_password() + ) + + # Set the password for the Monitor User. + if not self.state.app_peer_data.get_user_password(MonitorUser.username): + self.state.app_peer_data.set_user_password( + MonitorUser.username, self.workload.generate_password() + ) + + # Set the password for the Backup User. 
+        if not self.state.app_peer_data.get_user_password(BackupUser.username):
+            self.state.app_peer_data.set_user_password(
+                BackupUser.username, self.workload.generate_password()
+            )
+
+    @override
+    def on_relation_joined(self) -> None:
+        """Handle relation joined events."""
+        if not self.charm.unit.is_leader():
+            return
+        if self.state.upgrade_in_progress:
+            logger.warning(
+                "Adding replicas during an upgrade is not supported. The charm may be in a broken, unrecoverable state"
+            )
+            raise UpgradeInProgressError
+
+        self.on_relation_changed()
+        self.update_related_hosts()
+
+    def on_relation_changed(self) -> None:
+        """Handle relation changed events."""
+        self.mongodb_exporter_config_manager.connect()
+        self.backup_manager.connect()
+
+        if not self.charm.unit.is_leader() or not self.state.db_initialised:
+            return
+
+        try:
+            self.mongo_manager.process_added_units()
+        except (NotReadyError, PyMongoError) as e:
+            logger.error(f"Not reconfiguring: error={e}")
+            self.charm.status_manager.to_waiting("waiting to reconfigure replica set")
+            raise
+        self.charm.status_manager.to_active(None)
+
+    @override
+    def on_secret_changed(self, secret_label: str, secret_id: str) -> None:
+        """Handles secret changed events.
+
+        When a user runs the set-password action, the Juju leader changes the password inside
+        the database and inside the secret object. This handler restarts the monitoring tool
+        and the backup tool on non-leader units to keep them working with MongoDB. The same
+        workflow occurs on TLS certificate changes.
+        """
+        if generate_secret_label(self.charm.app.name, Scope.APP) == secret_label:
+            scope = Scope.APP
+        elif generate_secret_label(self.charm.app.name, Scope.UNIT) == secret_label:
+            scope = Scope.UNIT
+        else:
+            logger.debug("Secret %s changed, but it's unknown", secret_id)
+            return
+        logger.debug("Secret %s for scope %s changed, refreshing", secret_id, scope)
+        self.state.secrets.get(scope)
+
+        self.mongodb_exporter_config_manager.connect()
+        self.backup_manager.connect()
+
+    @override
+    def on_relation_departed(self, departing_unit: Unit | None) -> None:
+        """Handles the relation departed events."""
+        if not self.charm.unit.is_leader() or departing_unit == self.charm.unit:
+            return
+        if self.state.upgrade_in_progress:
+            # do not defer or return here, if a user removes a unit, the config will be incorrect
+            # and lead to MongoDB reporting that the replica set is unhealthy, we should make an
+            # attempt to fix the replica set configuration even if an upgrade is occurring.
+            logger.warning(
+                "Removing replicas during an upgrade is not supported. The charm may be in a broken, unrecoverable state"
+            )
+        self.update_hosts()
+
+    @override
+    def on_storage_attached(self) -> None:
+        """Handler for `storage_attached` event.
+
+        This should handle fixing the permissions for the data dir.
+        """
+        if self.substrate == "vm":
+            self.workload.exec(["chmod", "-R", "770", f"{self.workload.paths.common_path}"])
+            self.workload.exec(
+                [
+                    "chown",
+                    "-R",
+                    f"{self.workload.users.user}:{self.workload.users.group}",
+                    f"{self.workload.paths.common_path}",
+                ]
+            )
+
+    @override
+    def on_update_status(self) -> None:
+        """Status update handler."""
+        if not self.backup_manager.is_valid_s3_integration():
+            self.charm.status_manager.to_blocked(INVALID_S3_INTEGRATION_STATUS)
+            return
+        # TODO: Cluster integration status + Cluster Mismatch revision.
+        if not self.state.db_initialised:
+            return
+
+        # TODO: TLS + Shard check.
+ + if not self.mongo_manager.mongod_ready(): + self.charm.status_manager.to_waiting("Waiting for MongoDB to start") + + self.perform_self_healing() + + self.charm.status_manager.to_active(None) + # TODO: Process statuses. + + @override + def on_storage_detaching(self) -> None: + """Before storage detaches, allow removing unit to remove itself from the set. + + If the removing unit is primary also allow it to step down and elect another unit as + primary while it still has access to its storage. + """ + if self.state.upgrade_in_progress: + # We cannot defer and prevent a user from removing a unit, log a warning instead. + logger.warning( + "Removing replicas during an upgrade is not supported. The charm may be in a broken, unrecoverable state" + ) + # A single replica cannot step down as primary and we cannot reconfigure the replica set to + # have 0 members. + # TODO: + # if self._is_removing_last_replica: + # pass + + try: + # retries over a period of 10 minutes in an attempt to resolve race conditions it is + # not possible to defer in storage detached. + logger.debug("Removing %s from replica set", self.state.unit_peer_data.host) + for attempt in Retrying( + stop=stop_after_attempt(600), + wait=wait_fixed(1), + reraise=True, + ): + with attempt: + # remove_replset_member retries for 60 seconds + self.mongo_manager.remove_replset_member() + except NotReadyError: + logger.info( + "Failed to remove %s from replica set, another member is syncing", + self.charm.unit.name, + ) + except PyMongoError as e: + logger.error("Failed to remove %s from replica set, error=%r", self.charm.unit.name, e) + + def on_set_password_action(self, username: str, password: str | None = None) -> tuple[str, str]: + """Handler for the set password action.""" + user = get_user_from_username(username) + new_password = password or self.workload.generate_password() + if len(new_password) > MAX_PASSWORD_LENGTH: + raise SetPasswordError( + f"Password cannot be longer than {MAX_PASSWORD_LENGTH} characters." + ) + + secret_id = self.mongo_manager.set_user_password(user, new_password) + if user == BackupUser: + # Update and restart PBM Agent. + self.backup_manager.connect() + if user == MonitorUser: + # Update and restart mongodb exporter. + self.mongodb_exporter_config_manager.connect() + # Rotate password. + if user in (OperatorUser, BackupUser): + pass + + return new_password, secret_id + + def on_get_password_action(self, username: str) -> str: + """Handler for the get password action.""" + return self.get_password(username) + + # END: Handlers. + + def get_password(self, username: str) -> str: + """Gets the password for the relevant username.""" + user = get_user_from_username(username) + return self.state.secrets.get_for_key(Scope.APP, user.password_key_name) or "" + + def perform_self_healing(self) -> None: + """Reconfigures the replica set if necessary. + + Incidents such as network cuts can lead to new IP addresses and therefore will require a + reconfigure. Especially in the case that the leader's IP address changed, it will not + receive a relation event. + """ + if not self.charm.unit.is_leader(): + logger.debug("Only the leader can perform reconfigurations to the replica set.") + return + + self.update_hosts() + self.on_relation_changed() + # make sure all nodes in the replica set have the same priority for re-election. This is + # necessary in the case that pre-upgrade hook fails to reset the priority of election for + # cluster nodes. 
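+        # (Priority 1 is the MongoDB default member priority, so equal values make
+        # every healthy member equally eligible to be elected primary.)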
+        self.mongo_manager.set_election_priority(priority=1)
+
+    def update_hosts(self):
+        """Update the replica set hosts and remove any unremoved replica from the config."""
+        if not self.state.db_initialised:
+            return
+        self.mongo_manager.process_unremoved_units()
+        self.state.app_peer_data.replica_set_hosts = list(self.state.app_hosts)
+        self.update_related_hosts()
+
+    def update_related_hosts(self):
+        """Update the app relations that need to be made aware of the new set of hosts."""
+        if self.state.is_role(MongoDBRoles.REPLICATION):
+            self.mongo_manager.update_app_relation_data(RelationNames.DATABASE)
+        # TODO: Update related hosts for config server, cluster.
+
+    def open_ports(self) -> None:
+        """Open ports on the workload.
+
+        VM-only.
+        """
+        if self.substrate != "vm":
+            return
+        ports = [MongoPorts.MONGODB_PORT]
+        if self.state.is_role(MongoDBRoles.CONFIG_SERVER):
+            ports.append(MongoPorts.MONGOS_PORT)
+
+        try:
+            for port in ports:
+                self.workload.exec(["open-port", f"{port}/TCP"])
+        except WorkloadExecError as e:
+            logger.exception(f"Failed to open port: {e}")
+            raise
+
+    def start_charm_services(self):
+        """Start the relevant services."""
+        self.workload.start()
+        if self.state.is_role(MongoDBRoles.CONFIG_SERVER):
+            self.mongos_workload.start()
+
+    def stop_charm_services(self):
+        """Stop the relevant services."""
+        self.workload.stop()
+        if self.state.is_role(MongoDBRoles.CONFIG_SERVER):
+            self.mongos_workload.stop()
+
+    def instantiate_keyfile(self):
+        """Instantiate the keyfile."""
+        if not (keyfile := self.state.app_peer_data.keyfile):
+            raise Exception("Waiting for leader unit to generate keyfile contents")
+
+        self.workload.write(self.workload.paths.keyfile, keyfile)
+
+    def handle_licenses(self) -> None:
+        """Pull / push the license files."""
+        licenses = [
+            "snap",
+            "mongodb-exporter",
+            "percona-backup-mongodb",
+            "percona-server",
+        ]
+        prefix = Path("./src/licenses") if self.substrate == "vm" else Path("./")
+        # Create the directory if needed.
+        if self.substrate == "vm":
+            prefix.mkdir(exist_ok=True)
+            file = Path("./LICENSE")
+            dst = prefix / "LICENSE-charm"
+            self.workload.copy_to_unit(file, dst)
+        else:
+            name = "LICENSE-rock"
+            file = Path(f"{self.workload.paths.licenses_path}/{name}")
+            dst = prefix / name
+            if not dst.is_file():
+                self.workload.copy_to_unit(file, dst)
+
+        for license in licenses:
+            name = f"LICENSE-{license}"
+            file = Path(f"{self.workload.paths.licenses_path}/{name}")
+            dst = prefix / name
+            if not dst.is_file():
+                self.workload.copy_to_unit(file, dst)
+
+    def set_permissions(self) -> None:
+        """Ensure directories exist and set their permissions."""
+        self.workload.mkdir(LogRotateConfig.log_status_dir, make_parents=True)
+
+        for path in (
+            self.workload.paths.data_path,
+            self.workload.paths.logs_path,
+            LogRotateConfig.log_status_dir,
+        ):
+            self.workload.exec(
+                [
+                    "chown",
+                    "-R",
+                    f"{self.workload.users.user}:{self.workload.users.group}",
+                    f"{path}",
+                ]
+            )
+
+    def _initialise_replica_set(self):
+        if not self.model.unit.is_leader():
+            return
+        self.mongo_manager.initialise_replica_set()
+        self.mongo_manager.initialise_users()
+        self.state.app_peer_data.db_initialised = True
diff --git a/single_kernel_mongo/managers/tls.py b/single_kernel_mongo/managers/tls.py
new file mode 100644
index 00000000..7879885a
--- /dev/null
+++ b/single_kernel_mongo/managers/tls.py
@@ -0,0 +1,309 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""The TLS Manager.
+
+Handles MongoDB TLS files.
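+
+It generates private keys and certificate signing requests, stores the results in
+the charm's TLS secrets, and renders the CA and PEM files consumed by the workload.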
+""" + +from __future__ import annotations + +import json +import logging +import socket +from typing import TYPE_CHECKING, TypedDict + +from cryptography import x509 +from cryptography.hazmat.backends import default_backend + +from single_kernel_mongo.config.literals import Scope, Substrates +from single_kernel_mongo.core.structured_config import MongoDBRoles +from single_kernel_mongo.lib.charms.tls_certificates_interface.v3.tls_certificates import ( + generate_csr, + generate_private_key, +) +from single_kernel_mongo.state.charm_state import CharmState +from single_kernel_mongo.state.tls_state import ( + SECRET_CA_LABEL, + SECRET_CERT_LABEL, + SECRET_CHAIN_LABEL, + SECRET_CSR_LABEL, + SECRET_KEY_LABEL, + WAIT_CERT_UPDATE, +) +from single_kernel_mongo.utils.helpers import parse_tls_file +from single_kernel_mongo.workload.mongodb_workload import MongoDBWorkload + +if TYPE_CHECKING: + from single_kernel_mongo.abstract_charm import AbstractMongoCharm + + +class Sans(TypedDict): + """A Typed Dict for a Sans.""" + + sans_ip: list[str] + sans_dns: list[str] + + +logger = logging.getLogger(__name__) + + +class TLSManager: + """Manager for building necessary files for mongodb.""" + + def __init__( + self, + charm: AbstractMongoCharm, + workload: MongoDBWorkload, + state: CharmState, + substrate: Substrates, + ) -> None: + self.charm = charm + self.workload = workload + self.state = state + self.substrate = substrate + + def generate_certificate_request(self, param: str | None, internal: bool): + """Generate a TLS Certificate request.""" + key: bytes + if param is None: + key = generate_private_key() + else: + key = parse_tls_file(param) + + sans = self.get_new_sans() + csr = generate_csr( + private_key=key, + subject=self._get_subject_name(), + organization=self._get_subject_name(), + sans=sans["sans_dns"], + sans_ip=sans["sans_ip"], + ) + self.set_tls_secret(internal, SECRET_KEY_LABEL, key.decode("utf-8")) + self.set_tls_secret(internal, SECRET_CSR_LABEL, csr.decode("utf-8")) + self.set_tls_secret(internal, SECRET_CERT_LABEL, None) + + label = "int" if internal else "ext" + + self.state.unit_peer_data.update({f"{label}_certs_subject": self._get_subject_name()}) + + def generate_new_csr(self, internal: bool) -> tuple[bytes, bytes]: + """Requests the renewal of a certificate. + + Returns: + old_csr: The old certificate signing request. + new_csr: the new_certificate signing request. + """ + key_str = self.get_tls_secret(internal, SECRET_KEY_LABEL) + old_csr_str = self.get_tls_secret(internal, SECRET_CSR_LABEL) + if not key_str or not old_csr_str: + raise Exception("Trying to renew a non existent certificate. Please fix.") + + key = key_str.encode("utf-8") + old_csr = old_csr_str.encode("utf-8") + sans = self.get_new_sans() + new_csr = generate_csr( + private_key=key, + subject=self._get_subject_name(), + organization=self._get_subject_name(), + sans=sans["sans_dns"], + sans_ip=sans["sans_ip"], + ) + logger.debug("Requesting a certificate renewal.") + + self.set_tls_secret(internal, SECRET_CSR_LABEL, new_csr.decode("utf-8")) + self.set_waiting_for_cert_to_update(waiting=True, internal=internal) + return old_csr, new_csr + + def get_new_sans(self) -> Sans: + """Create a list of DNS names for a MongoDB unit. + + Returns: + A list representing the hostnames of the MongoDB unit. 
+        """
+        unit_id = self.charm.unit.name.split("/")[1]
+
+        sans = Sans(
+            sans_dns=[
+                f"{self.charm.app.name}-{unit_id}",
+                socket.getfqdn(),
+                "localhost",
+                f"{self.charm.app.name}-{unit_id}.{self.charm.app.name}-endpoints",
+            ],
+            sans_ip=[str(self.state.bind_address)],
+        )
+
+        if (
+            self.state.is_role(MongoDBRoles.MONGOS)
+            and self.state.app_peer_data.external_connectivity
+        ):
+            sans["sans_ip"].append(self.state.unit_peer_data.host)
+
+        return sans
+
+    def get_current_sans(self, internal: bool) -> Sans | None:
+        """Gets the current SANs for the unit cert."""
+        # if the unit has no certificates, do not proceed.
+        if not self.state.tls.is_tls_enabled(internal=internal):
+            return None
+
+        pem_file = self.get_tls_secret(internal, SECRET_CERT_LABEL)
+        if not pem_file:
+            logger.info("No PEM file but TLS enabled.")
+            raise Exception("No PEM file but TLS enabled. Please fix.")
+        try:
+            cert = x509.load_pem_x509_certificate(pem_file.encode(), default_backend())
+            sans = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
+            sans_ip = [str(san) for san in sans.get_values_for_type(x509.IPAddress)]
+            sans_dns = [str(san) for san in sans.get_values_for_type(x509.DNSName)]
+        except x509.ExtensionNotFound:
+            sans_ip = []
+            sans_dns = []
+
+        return Sans(sans_ip=sorted(sans_ip), sans_dns=sorted(sans_dns))
+
+    def get_tls_files(self, internal: bool) -> tuple[str | None, str | None]:
+        """Prepare the TLS files in the way MongoDB expects them.
+
+        MongoDB needs two files:
+        - The CA file should have the full chain.
+        - The PEM file should have the private key and the certificate, without the
+          certificate chain.
+        """
+        scope = "internal" if internal else "external"
+        if not self.state.tls.is_tls_enabled(internal):
+            logger.debug(f"TLS disabled for {scope}")
+            return None, None
+        logger.debug(f"TLS *enabled* for {scope}, fetching data for CA and PEM files")
+
+        ca = self.get_tls_secret(internal, SECRET_CA_LABEL)
+        chain = self.get_tls_secret(internal, SECRET_CHAIN_LABEL)
+        ca_file = chain if chain else ca
+
+        key = self.get_tls_secret(internal, SECRET_KEY_LABEL)
+        cert = self.get_tls_secret(internal, SECRET_CERT_LABEL)
+        pem_file = key
+        if cert:
+            pem_file = key + "\n" + cert if key else cert
+
+        return ca_file, pem_file
+
+    def disable_certificates_for_unit(self):
+        """Disables the certificates on relation-broken."""
+        for internal in [True, False]:
+            self.set_tls_secret(internal, SECRET_CA_LABEL, None)
+            self.set_tls_secret(internal, SECRET_CERT_LABEL, None)
+            self.set_tls_secret(internal, SECRET_CHAIN_LABEL, None)
+
+        if self.state.is_role(MongoDBRoles.CONFIG_SERVER):
+            # self.state.cluster.update_ca_secret(new_ca=None)
+            # self.state.config_server.update_ca_secret(new_ca=None)
+            pass
+
+        self.charm.status_manager.to_maintenance("Disabling TLS")
+        self.delete_certificates_from_workload()
+        self.workload.restart()
+        self.charm.status_manager.to_active(None)
+
+    def delete_certificates_from_workload(self):
+        """Deletes the certificates from the workload."""
+        logger.info("Deleting TLS certificates from the workload")
+
+        for file in self.workload.paths.tls_files:
+            self.workload.delete(file)
+
+    def push_tls_files_to_workload(self) -> None:
+        """Pushes the TLS files to the workload."""
+        external_ca, external_pem = self.get_tls_files(internal=False)
+        internal_ca, internal_pem = self.get_tls_files(internal=True)
+        if external_ca is not None:
+            self.workload.write(self.workload.paths.ext_ca_file, external_ca)
+        if external_pem is not None:
+            self.workload.write(self.workload.paths.ext_pem_file, external_pem)
+        if internal_ca is not None:
+            self.workload.write(self.workload.paths.int_ca_file, internal_ca)
+        if internal_pem is not None:
+            self.workload.write(self.workload.paths.int_pem_file, internal_pem)
+
+    def set_certificates(
+        self,
+        certificate_signing_request: str,
+        secret_chain: list[str] | None,
+        certificate: str | None,
+        ca: str | None,
+    ):
+        """Sets the certificates."""
+        int_csr = self.get_tls_secret(internal=True, label_name=SECRET_CSR_LABEL)
+        ext_csr = self.get_tls_secret(internal=False, label_name=SECRET_CSR_LABEL)
+        if ext_csr and certificate_signing_request.rstrip() == ext_csr.rstrip():
+            logger.debug("The external TLS certificate is available.")
+            internal = False
+        elif int_csr and certificate_signing_request.rstrip() == int_csr.rstrip():
+            logger.debug("The internal TLS certificate is available.")
+            internal = True
+        else:
+            logger.error("An unknown certificate is available -- ignoring.")
+            return
+
+        self.set_tls_secret(
+            internal,
+            SECRET_CHAIN_LABEL,
+            "\n".join(secret_chain) if secret_chain is not None else None,
+        )
+        self.set_tls_secret(internal, SECRET_CERT_LABEL, certificate)
+        self.set_tls_secret(internal, SECRET_CA_LABEL, ca)
+        self.set_waiting_for_cert_to_update(internal=internal, waiting=False)
+
+    def set_waiting_for_cert_to_update(self, internal: bool, waiting: bool) -> None:
+        """Stores the waiting-for-certificate flag in the unit databag."""
+        scope = "int" if internal else "ext"
+        label_name = f"{scope}-{WAIT_CERT_UPDATE}"
+        self.state.unit_peer_data.update({label_name: json.dumps(waiting)})
+
+    def is_set_waiting_for_cert_to_update(
+        self,
+        internal: bool = False,
+    ) -> bool:
+        """Returns True if we are waiting for a cert to update."""
+        scope = "int" if internal else "ext"
+        label_name = f"{scope}-{WAIT_CERT_UPDATE}"
+
+        return json.loads(self.state.unit_peer_data.get(label_name) or "false")
+
+    def is_waiting_for_both_certs(self) -> bool:
+        """Returns a boolean indicating whether additional certs are needed."""
+        if not self.get_tls_secret(internal=True, label_name=SECRET_CERT_LABEL):
+            logger.debug("Waiting for internal certificate.")
+            return True
+        if not self.get_tls_secret(internal=False, label_name=SECRET_CERT_LABEL):
+            logger.debug("Waiting for external certificate.")
+            return True
+
+        return False
+
+    def _get_subject_name(self) -> str:
+        """Generate the subject name for the CSR."""
+        # In sharded MongoDB deployments it is a requirement that all subject names match
+        # across all cluster components. The config-server name is the source of truth
+        # across mongos and shard deployments.
+        if not self.state.is_role(MongoDBRoles.CONFIG_SERVER):
+            # Until integrated with a config-server, use the current app name as
+            # the subject name.
+            return self.state.config_server_name or self.charm.app.name
+
+        return self.charm.app.name
+
+    def set_tls_secret(self, internal: bool, label_name: str, contents: str | None) -> None:
+        """Sets a TLS secret, based on whether or not it is related to internal connections."""
+        scope = "int" if internal else "ext"
+        label_name = f"{scope}-{label_name}"
+        if not contents:
+            self.state.secrets.remove(Scope.UNIT, label_name)
+            return
+        self.state.secrets.set(label_name, contents, Scope.UNIT)
+
+    def get_tls_secret(self, internal: bool, label_name: str) -> str | None:
+        """Gets a TLS secret, based on whether or not it is related to internal connections."""
+        scope = "int" if internal else "ext"
+        label_name = f"{scope}-{label_name}"
+        return self.state.secrets.get_for_key(Scope.UNIT, label_name)
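+
+# A minimal, illustrative sketch of the CSR flow implemented above (run as a
+# script, not part of the charm runtime). The subject and SANs below are
+# placeholder values, not ones the manager would compute.
+if __name__ == "__main__":
+    example_key = generate_private_key()
+    example_csr = generate_csr(
+        private_key=example_key,
+        subject="mongodb",
+        organization="mongodb",
+        sans=["mongodb-0", "localhost"],
+        sans_ip=["127.0.0.1"],
+    )
+    print(example_csr.decode("utf-8"))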
diff --git a/single_kernel_mongo/state/__init__.py b/single_kernel_mongo/state/__init__.py
new file mode 100644
index 00000000..95235c00
--- /dev/null
+++ b/single_kernel_mongo/state/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""The charm state for mongo charms (databags + model information)."""
diff --git a/single_kernel_mongo/state/abstract_state.py b/single_kernel_mongo/state/abstract_state.py
new file mode 100644
index 00000000..82a31428
--- /dev/null
+++ b/single_kernel_mongo/state/abstract_state.py
@@ -0,0 +1,56 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""The abstract relation state for mongo charms."""
+
+from typing import Generic, TypeVar
+
+from ops.model import Application, Relation, Unit
+
+from single_kernel_mongo.config.literals import Substrates
+from single_kernel_mongo.lib.charms.data_platform_libs.v0.data_interfaces import (  # type: ignore
+    Data,
+)
+
+PData = TypeVar("PData", bound=Data, covariant=True)
+
+
+class AbstractRelationState(Generic[PData]):
+    """Relation state object."""
+
+    def __init__(
+        self,
+        relation: Relation | None,
+        data_interface: PData,
+        component: Unit | Application | None,
+        substrate: Substrates | None = None,
+    ):
+        self.relation = relation
+        self.data_interface = data_interface
+        self.component = component
+        self.substrate = substrate
+        self.relation_data = self.data_interface.as_dict(self.relation.id) if self.relation else {}
+
+    def __bool__(self) -> bool:
+        """Boolean evaluation based on the existence of self.relation."""
+        try:
+            return bool(self.relation)
+        except AttributeError:
+            return False
+
+    def update(self, items: dict[str, str]) -> None:
+        """Writes to relation_data, deleting any key whose value is empty."""
+        delete_fields = [key for key in items if not items[key]]
+        update_content = {k: items[k] for k in items if k not in delete_fields}
+
+        self.relation_data.update(update_content)
+
+        for field in delete_fields:
+            del self.relation_data[field]
+
+    def get(self, key: str) -> str:
+        """Gets the value for a key from the relation data, or "" when unset."""
+        if not self.relation:
+            return ""
+        return (
+            self.data_interface.fetch_relation_field(relation_id=self.relation.id, field=key) or ""
+        )
diff --git a/single_kernel_mongo/state/app_peer_state.py b/single_kernel_mongo/state/app_peer_state.py
new file mode 100644
index 00000000..67b5f22e
--- /dev/null
+++ b/single_kernel_mongo/state/app_peer_state.py
@@ -0,0 +1,172 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""The peer relation databag.""" + +import json +from enum import Enum + +from ops.model import Application, Relation +from typing_extensions import override + +from single_kernel_mongo.config.literals import SECRETS_APP +from single_kernel_mongo.core.structured_config import MongoDBRoles +from single_kernel_mongo.lib.charms.data_platform_libs.v0.data_interfaces import ( # type: ignore + DataPeerData, +) +from single_kernel_mongo.state.abstract_state import AbstractRelationState + + +class AppPeerDataKeys(str, Enum): + """Enum to access the app peer data keys.""" + + managed_users_key = "managed-users-key" + db_initialised = "db_initialised" + role = "role" + replica_set_hosts = "replica_set_hosts" + keyfile = "keyfile" + external_connectivity = "external-connectivity" + + +class AppPeerReplicaSet(AbstractRelationState[DataPeerData]): + """State collection for replicaset relation.""" + + component: Application + + def __init__( + self, + relation: Relation | None, + data_interface: DataPeerData, + component: Application, + role: MongoDBRoles, + ): + super().__init__(relation, data_interface, component) + self.data_interface = data_interface + self._role = role + + @override + def update(self, items: dict[str, str]) -> None: + """Overridden update to allow for same interface, but writing to local app bag.""" + if not self.relation: + return + + for key, value in items.items(): + # note: relation- check accounts for dynamically created secrets + if key in SECRETS_APP or key.startswith("relation-"): + if value: + self.data_interface.set_secret(self.relation.id, key, value) + else: + self.data_interface.delete_secret(self.relation.id, key) + else: + self.data_interface.update_relation_data(self.relation.id, {key: value}) + + @property + def role(self) -> MongoDBRoles: + """The role. + + Either from the app databag or from the default from config. + """ + databag_role: str = str(self.relation_data.get(AppPeerDataKeys.role.value)) + if not self.relation or not databag_role: + return self._role + return MongoDBRoles(databag_role) + + @role.setter + def role(self, value: str) -> None: + self.update({"role": value}) + + def is_role(self, role_name: str) -> bool: + """Checks if the application is running in the provided role.""" + return self.role == role_name + + @property + def db_initialised(self) -> bool: + """Whether the db is initialised or not yet.""" + if not self.relation: + return False + return json.loads(self.relation_data.get(AppPeerDataKeys.db_initialised.value, "false")) + + @db_initialised.setter + def db_initialised(self, value: bool): + if isinstance(value, bool): + self.update({AppPeerDataKeys.db_initialised.value: json.dumps(value)}) + else: + raise ValueError( + f"'db_initialised' must be a boolean value. 
Provided: {value} is of type {type(value)}" + ) + + @property + def replica_set_hosts(self) -> list[str]: + """Returns the stored list of replica set hosts.""" + if not self.relation: + return [] + + return json.loads(self.relation_data.get(AppPeerDataKeys.replica_set_hosts.value, "[]")) + + @replica_set_hosts.setter + def replica_set_hosts(self, value: list[str]) -> None: + self.update({AppPeerDataKeys.replica_set_hosts.value: json.dumps(value)}) + + @property + def managed_users(self) -> set[str]: + """Returns the stored set of managed-users.""" + if not self.relation: + return set() + + return set( + json.loads(self.relation_data.get(AppPeerDataKeys.managed_users_key.value, "[]")) + ) + + @managed_users.setter + def managed_users(self, value: set[str]) -> None: + """Stores the managed users set.""" + self.update({AppPeerDataKeys.managed_users_key.value: json.dumps(value)}) + + @property + def keyfile(self) -> str | None: + """Gets the keyfile from the app databag.""" + if not self.relation: + return None + + return self.relation_data.get(AppPeerDataKeys.keyfile.value, "") + + @keyfile.setter + def keyfile(self, keyfile: str): + """Stores the keyfile in the app databag.""" + self.update({AppPeerDataKeys.keyfile.value: keyfile}) + + def set_user_created(self, user: str): + """Stores the flag stating if user was created.""" + self.update({f"{user}-user-created": json.dumps(True)}) + + def is_user_created(self, user: str) -> bool: + """Has the user already been created?""" + return json.loads(self.relation_data.get(f"{user}-user-created", "false")) + + def set_user_password(self, user: str, password: str): + """Stores a user password in the app databag.""" + self.update({f"{user}-password": password}) + + def get_user_password(self, user: str) -> str: + """Returns the user password.""" + return self.relation_data.get(f"{user}-password", "") + + @property + def replica_set(self) -> str: + """The replica set name.""" + return self.component.name + + @property + def external_connectivity(self) -> bool: + """Is the external connectivity tag in the databag?""" + return json.loads( + self.relation_data.get(AppPeerDataKeys.external_connectivity.value, "false") + ) + + @external_connectivity.setter + def external_connectivity(self, value: bool) -> None: + if isinstance(value, bool): + self.update({AppPeerDataKeys.external_connectivity.value: json.dumps(value)}) + else: + raise ValueError( + f"'external-connectivity' must be a boolean value. Provided: {value} is of type {type(value)}" + ) diff --git a/single_kernel_mongo/state/charm_state.py b/single_kernel_mongo/state/charm_state.py new file mode 100644 index 00000000..ae73d7ec --- /dev/null +++ b/single_kernel_mongo/state/charm_state.py @@ -0,0 +1,363 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
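+
+# Illustration only (runnable standalone): peer databag values are stored as
+# JSON strings, so a round-trip for ``replica_set_hosts`` looks like this.
+if __name__ == "__main__":
+    hosts = ["10.0.0.1", "10.0.0.2"]
+    encoded = json.dumps(hosts)  # what the setter writes to the databag
+    assert json.loads(encoded) == hosts  # what the property reads back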
diff --git a/single_kernel_mongo/state/charm_state.py b/single_kernel_mongo/state/charm_state.py
new file mode 100644
index 00000000..ae73d7ec
--- /dev/null
+++ b/single_kernel_mongo/state/charm_state.py
@@ -0,0 +1,363 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""The general charm state."""
+
+from __future__ import annotations
+
+import logging
+from functools import cached_property
+from ipaddress import IPv4Address, IPv6Address
+from typing import TYPE_CHECKING, TypeVar
+
+from ops import Object, Relation, Unit
+
+from single_kernel_mongo.config.literals import (
+    SECRETS_UNIT,
+    CharmRole,
+    MongoPorts,
+    Scope,
+    Substrates,
+)
+from single_kernel_mongo.config.relations import (
+    ExternalRequirerRelations,
+    RelationNames,
+)
+from single_kernel_mongo.config.roles import Role
+from single_kernel_mongo.core.secrets import SecretCache
+from single_kernel_mongo.core.structured_config import MongoConfigModel, MongoDBRoles
+from single_kernel_mongo.lib.charms.data_platform_libs.v0.data_interfaces import (
+    DataPeerData,
+    DataPeerOtherUnitData,
+    DataPeerUnitData,
+)
+from single_kernel_mongo.state.app_peer_state import (
+    AppPeerReplicaSet,
+)
+from single_kernel_mongo.state.cluster_state import ClusterState
+from single_kernel_mongo.state.models import ClusterData
+from single_kernel_mongo.state.tls_state import TLSState
+from single_kernel_mongo.state.unit_peer_state import (
+    UnitPeerReplicaSet,
+)
+from single_kernel_mongo.utils.mongo_config import MongoConfiguration
+from single_kernel_mongo.utils.mongodb_users import (
+    BackupUser,
+    MongoDBUser,
+    MonitorUser,
+    OperatorUser,
+    RoleNames,
+)
+
+if TYPE_CHECKING:
+    from single_kernel_mongo.abstract_charm import AbstractMongoCharm
+
+logger = logging.getLogger()
+
+T = TypeVar("T", bound=MongoConfigModel)
+
+
+class CharmState(Object):
+    """All the charm states."""
+
+    def __init__(self, charm: AbstractMongoCharm[T], role: Role, charm_role: CharmRole):
+        super().__init__(parent=charm, key="charm_state")
+        self.role = role
+        self.charm_role = charm_role
+        self.config = charm.parsed_config
+        self.substrate: Substrates = self.role.substrate
+        self.secrets = SecretCache(charm)
+
+        self.peer_app_interface = DataPeerData(
+            self.model,
+            relation_name=RelationNames.PEERS.value,
+        )
+        self.peer_unit_interface = DataPeerUnitData(
+            self.model,
+            relation_name=RelationNames.PEERS.value,
+            additional_secret_fields=SECRETS_UNIT,
+        )
+
+    # BEGIN: Relations
+
+    @property
+    def peer_relation(self) -> Relation | None:
+        """The replica set peer relation."""
+        return self.model.get_relation(RelationNames.PEERS.value)
+
+    @property
+    def peers_units(self) -> set[Unit]:
+        """Get the peer units in a safe way."""
+        if not self.peer_relation:
+            return set()
+        return self.peer_relation.units
+
+    @property
+    def client_relations(self) -> set[Relation]:
+        """The set of client relations."""
+        return set(self.model.relations[RelationNames.DATABASE])
+
+    @property
+    def cluster_relation(self) -> Relation | None:
+        """The cluster relation."""
+        return self.model.get_relation(RelationNames.CLUSTER)
+
+    @property
+    def shard_relations(self) -> list[Relation]:
+        """The list of shard relations."""
+        return self.model.relations[RelationNames.SHARDING]
+
+    @property
+    def config_server_relation(self) -> Relation | None:
+        """The config-server relation if it exists."""
+        return self.model.get_relation(RelationNames.CONFIG_SERVER)
+
+    @property
+    def s3_relation(self) -> Relation | None:
+        """The S3 relation if it exists."""
+        return self.model.get_relation(ExternalRequirerRelations.S3_CREDENTIALS)
+
+    # END: Relations
+
+    # BEGIN: State Accessors
+
+    @property
+    def app_peer_data(self) -> AppPeerReplicaSet:
+        """The app peer relation data."""
+        return AppPeerReplicaSet(
+            relation=self.peer_relation,
+            data_interface=self.peer_app_interface,
+            component=self.model.app,
+            role=self.config.role,
+        )
+
+    @property
+    def unit_peer_data(self) -> UnitPeerReplicaSet:
+        """This unit's peer relation data."""
+        return UnitPeerReplicaSet(
+            relation=self.peer_relation,
+            data_interface=self.peer_unit_interface,
+            component=self.model.unit,
+            substrate=self.substrate,
+        )
+
+    @property
+    def units(self) -> set[UnitPeerReplicaSet]:
+        """Grabs all units in the current peer relation, including this unit.
+
+        Returns:
+            Set of UnitPeerReplicaSet in the current peer relation, including this unit.
+        """
+        _units = set()
+        for unit, data_interface in self.peer_units_data_interfaces.items():
+            _units.add(
+                UnitPeerReplicaSet(
+                    relation=self.peer_relation,
+                    data_interface=data_interface,
+                    component=unit,
+                    substrate=self.substrate,
+                )
+            )
+        _units.add(self.unit_peer_data)
+
+        return _units
+
+    @property
+    def cluster(self) -> ClusterState:
+        """The cluster state of the currently running app."""
+        return ClusterState(
+            relation=self.cluster_relation,
+            data_interface=ClusterData(self.model, RelationNames.CLUSTER),
+            component=self.model.app,
+        )
+
+    @property
+    def tls(self) -> TLSState:
+        """A view of the TLS status from the local unit databag."""
+        return TLSState(relation=self.peer_relation, secrets=self.secrets)
+
+    # END: State Accessors
+
+    # BEGIN: Helpers
+    def is_role(self, role: MongoDBRoles) -> bool:
+        """Is the charm in the correct role?"""
+        return self.app_peer_data.role == role
+
+    @property
+    def db_initialised(self) -> bool:
+        """Is the DB initialised?"""
+        return self.app_peer_data.db_initialised
+
+    @property
+    def upgrade_in_progress(self) -> bool:
+        """Is the charm in upgrade?"""
+        return False
+
+    @property
+    def bind_address(self) -> IPv4Address | IPv6Address | str:
+        """The network binding address from the peer relation."""
+        bind_address = None
+        if self.peer_relation:
+            if binding := self.model.get_binding(self.peer_relation):
+                bind_address = binding.network.bind_address
+
+        return bind_address or ""
+
+    @property
+    def planned_units(self) -> int:
+        """Return the planned units for the charm."""
+        return self.model.app.planned_units()
+
+    @cached_property
+    def peer_units_data_interfaces(self) -> dict[Unit, DataPeerOtherUnitData]:
+        """The data interfaces for each unit of the peer relation."""
+        return {
+            unit: DataPeerOtherUnitData(
+                model=self.model, unit=unit, relation_name=RelationNames.PEERS.value
+            )
+            for unit in self.peers_units
+        }
+
+    @property
+    def app_hosts(self) -> set[str]:
+        """Retrieve the hosts associated with the MongoDB application."""
+        return {unit.host for unit in self.units}
+
+    @property
+    def internal_hosts(self) -> set[str]:
+        """Internal hosts for internal access."""
+        return {unit.internal_address for unit in self.units}
+
+    @property
+    def host_port(self) -> int:
+        """Retrieve the port associated with the MongoDB application."""
+        if self.is_role(MongoDBRoles.MONGOS):
+            if self.config["expose_external"]:
+                return self.unit_peer_data.node_port
+            return MongoPorts.MONGOS_PORT
+        return MongoPorts.MONGODB_PORT
+
+    @property
+    def config_server_name(self) -> str | None:
+        """Gets the config server name."""
+        if self.charm_role == CharmRole.MONGOS:
+            if self.cluster_relation:
+                return self.cluster_relation.app.name
+            return None
+        if self.is_role(MongoDBRoles.SHARD):
+            if self.shard_relations:
+                return self.shard_relations[0].app.name
+            return None
+        logger.info(
+            "Component %s is not a shard, cannot be integrated to a config-server.", self.role
+        )
+        return None
+
+    # END: Helpers
+
+    # BEGIN: Configuration accessors
+
+    def mongodb_config_for_user(
+        self,
+        user: MongoDBUser,
+        hosts: set[str] = set(),
+        replset: str | None = None,
+        standalone: bool = False,
+    ) -> MongoConfiguration:
+        """Returns a mongodb-specific MongoConfiguration object for the provided user.
+
+        Either user.hosts or hosts should be a non-empty set.
+
+        Returns:
+            A MongoDB configuration object.
+
+        Raises:
+            Exception: if neither user.hosts nor hosts is non-empty.
+        """
+        if not user.hosts and not hosts:
+            raise Exception("Invalid call: no host in user nor as a parameter.")
+        return MongoConfiguration(
+            replset=replset or self.app_peer_data.replica_set,
+            database=user.database_name,
+            username=user.username,
+            password=self.app_peer_data.get_user_password(user.username),
+            hosts=hosts or user.hosts,
+            port=MongoPorts.MONGODB_PORT,
+            roles=user.roles,
+            tls_external=self.tls.external_enabled,
+            tls_internal=self.tls.internal_enabled,
+            standalone=standalone,
+        )
+
+    def mongos_config_for_user(
+        self,
+        user: MongoDBUser,
+        hosts: set[str] = set(),
+    ) -> MongoConfiguration:
+        """Returns a mongos-specific MongoConfiguration object for the provided user.
+
+        Either user.hosts or hosts should be a non-empty set.
+
+        Returns:
+            A MongoDB configuration object.
+
+        Raises:
+            Exception: if neither user.hosts nor hosts is non-empty.
+        """
+        if not user.hosts and not hosts:
+            raise Exception("Invalid call: no host in user nor as a parameter.")
+        return MongoConfiguration(
+            database=user.database_name,
+            username=user.username,
+            password=self.app_peer_data.get_user_password(user.username),
+            hosts=hosts or user.hosts,
+            port=MongoPorts.MONGOS_PORT,
+            roles=user.roles,
+            tls_external=self.tls.external_enabled,
+            tls_internal=self.tls.internal_enabled,
+        )
+
+    @property
+    def backup_config(self) -> MongoConfiguration:
+        """Mongo configuration for the backup user."""
+        return self.mongodb_config_for_user(BackupUser, standalone=True)
+
+    @property
+    def monitor_config(self) -> MongoConfiguration:
+        """Mongo configuration for the monitoring user."""
+        return self.mongodb_config_for_user(MonitorUser)
+
+    @property
+    def operator_config(self) -> MongoConfiguration:
+        """Mongo configuration for the operator user."""
+        return self.mongodb_config_for_user(OperatorUser, hosts=self.app_hosts)
+
+    @property
+    def mongos_config(self) -> MongoConfiguration:
+        """Mongos configuration for the mongos user."""
+        username = self.secrets.get_for_key(Scope.APP, key="username")
+        password = self.secrets.get_for_key(Scope.APP, key="password")
+        if not username or not password:
+            raise Exception("Missing credentials.")
+
+        return MongoConfiguration(
+            database=f"{self.model.app.name}_{self.model.name}",
+            username=username,
+            password=password,
+            hosts=self.internal_hosts,
+            # unlike the vm mongos charm, the K8s charm does not communicate over the unix socket
+            port=MongoPorts.MONGOS_PORT,
+            roles={RoleNames.ADMIN},
+            tls_external=self.tls.external_enabled,
+            tls_internal=self.tls.internal_enabled,
+        )
+
+    @property
+    def mongo_config(self) -> MongoConfiguration:
+        """The mongo configuration used by default for charm interactions."""
+        if self.charm_role == CharmRole.MONGODB:
+            return self.operator_config
+        return self.mongos_config
+
+    # END: Configuration accessors
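+
+# Illustrative sketch of what ``mongodb_config_for_user`` assembles, with
+# hard-coded placeholder credentials instead of databag and secret lookups
+# (runnable with the package installed).
+if __name__ == "__main__":
+    example = MongoConfiguration(
+        replset="mongodb",
+        database="admin",
+        username="operator",
+        password="example-password",
+        hosts={"10.0.0.1", "10.0.0.2"},
+        port=MongoPorts.MONGODB_PORT,
+        roles={RoleNames.ADMIN},
+        tls_external=False,
+        tls_internal=False,
+    )
+    print(example.uri)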
diff --git a/single_kernel_mongo/state/cluster_state.py b/single_kernel_mongo/state/cluster_state.py
new file mode 100644
index 00000000..e341c2a8
--- /dev/null
+++ b/single_kernel_mongo/state/cluster_state.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""The Cluster state."""
+
+from enum import Enum
+
+from ops import Application
+from ops.model import Relation
+
+from single_kernel_mongo.lib.charms.data_platform_libs.v0.data_interfaces import Data
+from single_kernel_mongo.state.abstract_state import AbstractRelationState
+
+
+class ClusterStateKeys(str, Enum):
+    """Enum to access the cluster relation data keys."""
+
+    database = "database"
+    extra_user_roles = "extra-user-roles"
+    alias = "alias"
+    external_node_connectivity = "external-node-connectivity"
+    config_server_db = "config-server-db"
+
+
+class ClusterState(AbstractRelationState[Data]):
+    """The stored state for the Cluster relation."""
+
+    component: Application
+
+    def __init__(self, relation: Relation | None, data_interface: Data, component: Application):
+        super().__init__(relation, data_interface=data_interface, component=component)
+        self.data_interface = data_interface
+
+    @property
+    def config_server_uri(self) -> str:
+        """The config-server DB URI, or "" when unset."""
+        return self.relation_data.get(ClusterStateKeys.config_server_db.value, "")
diff --git a/single_kernel_mongo/state/config_server_state.py b/single_kernel_mongo/state/config_server_state.py
new file mode 100644
index 00000000..24290097
--- /dev/null
+++ b/single_kernel_mongo/state/config_server_state.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""The Config Server state."""
+
+from enum import Enum
+
+from ops import Application
+from ops.model import Relation
+
+from single_kernel_mongo.lib.charms.data_platform_libs.v0.data_interfaces import Data
+from single_kernel_mongo.state.abstract_state import AbstractRelationState
+
+
+class ConfigServerKeys(str, Enum):
+    """Enum to access the config-server relation data keys."""
+
+    database = "database"
+
+
+class ConfigServerState(AbstractRelationState[Data]):
+    """The stored state for the ConfigServer relation."""
+
+    component: Application
+
+    def __init__(self, relation: Relation | None, data_interface: Data, component: Application):
+        super().__init__(relation, data_interface=data_interface, component=component)
+        self.data_interface = data_interface
diff --git a/single_kernel_mongo/state/models.py b/single_kernel_mongo/state/models.py
new file mode 100644
index 00000000..862a03a8
--- /dev/null
+++ b/single_kernel_mongo/state/models.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Some useful relational models."""
+
+from single_kernel_mongo.lib.charms.data_platform_libs.v0.data_interfaces import (
+    ProviderData,
+    RequirerData,
+)
+
+
+class ClusterData(ProviderData, RequirerData):  # type: ignore[misc]
+    """Cluster provider data model."""
+
+    SECRET_FIELDS = [
+        "username",
+        "password",
+        "tls",
+        "tls-ca",
+        "uris",
+        "key-file",
+        "int-ca-secret",
+    ]
+
+
+class ConfigServerData(ProviderData, RequirerData):  # type: ignore[misc]
+    """Config Server data interface."""
+
+    SECRET_FIELDS = [
+        "username",
+        "password",
+        "tls",
+        "tls-ca",
+        "uris",
+        "key-file",
+        "operator-password",
+        "backup-password",
+        "int-ca-secret",
+    ]
diff --git a/single_kernel_mongo/state/tls_state.py b/single_kernel_mongo/state/tls_state.py
new file mode 100644
index 00000000..49e486a4
--- /dev/null
+++ b/single_kernel_mongo/state/tls_state.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""The TLS state."""
+
+from ops import Relation
+from ops.model import Unit
+
+from single_kernel_mongo.config.literals import Scope
+from single_kernel_mongo.core.secrets import SecretCache
+
+SECRET_KEY_LABEL = "key-secret"
+SECRET_CA_LABEL = "ca-secret"
+SECRET_CERT_LABEL = "cert-secret"
+SECRET_CSR_LABEL = "csr-secret"
+SECRET_CHAIN_LABEL = "chain-secret"
+WAIT_CERT_UPDATE = "wait-cert-updated"
+INT_CERT_SECRET_KEY = "int-cert-secret"
+EXT_CERT_SECRET_KEY = "ext-cert-secret"
+
+
+class TLSState:
+    """The stored state for the TLS relation."""
+
+    component: Unit
+
+    def __init__(self, relation: Relation | None, secrets: SecretCache):
+        self.relation = relation
+        self.secrets = secrets
+
+    @property
+    def internal_enabled(self) -> bool:
+        """Is internal TLS enabled?"""
+        return (
+            self.relation is not None
+            and self.secrets.get_for_key(Scope.UNIT, INT_CERT_SECRET_KEY) is not None
+        )
+
+    @property
+    def external_enabled(self) -> bool:
+        """Is external TLS enabled?"""
+        return (
+            self.relation is not None
+            and self.secrets.get_for_key(Scope.UNIT, EXT_CERT_SECRET_KEY) is not None
+        )
+
+    def is_tls_enabled(self, internal: bool) -> bool:
+        """Is TLS enabled for the requested scope (internal or external)?"""
+        match internal:
+            case True:
+                return self.internal_enabled
+            case False:
+                return self.external_enabled
diff --git a/single_kernel_mongo/state/unit_peer_state.py b/single_kernel_mongo/state/unit_peer_state.py
new file mode 100644
index 00000000..09a9f692
--- /dev/null
+++ b/single_kernel_mongo/state/unit_peer_state.py
@@ -0,0 +1,92 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""The peer unit relation databag."""
+
+from enum import Enum
+from functools import cached_property
+
+from ops.model import Relation, Unit
+
+from single_kernel_mongo.config.literals import MongoPorts, Substrates
+from single_kernel_mongo.lib.charms.data_platform_libs.v0.data_interfaces import (  # type: ignore
+    DataPeerUnitData,
+)
+from single_kernel_mongo.managers.k8s import K8sManager
+from single_kernel_mongo.state.abstract_state import AbstractRelationState
+
+
+class UnitPeerRelationKeys(str, Enum):
+    """The peer relation model."""
+
+    private_address = "private-address"
+    ingress_address = "ingress-address"
+    egress_subnets = "egress-subnets"
+
+
+class UnitPeerReplicaSet(AbstractRelationState[DataPeerUnitData]):
+    """State collection for unit data."""
+
+    component: Unit
+
+    def __init__(
+        self,
+        relation: Relation | None,
+        data_interface: DataPeerUnitData,
+        component: Unit,
+        substrate: Substrates,
+    ):
+        super().__init__(relation, data_interface, component, None)
+        self.data_interface = data_interface
+        self.substrate = substrate
+        self.unit = component
+        self.k8s = K8sManager(
+            pod_name=self.pod_name,
+            namespace=self.unit._backend.model_name,
+        )
+
+    @property
+    def pod_name(self) -> str:
+        """K8s only: the pod name."""
+        return self.unit.name.replace("/", "-")
+
+    @property
+    def unit_id(self) -> int:
+        """The id of the unit from the unit name.
+
+        e.g. mongodb/2 -> 2
+        """
+        return int(self.unit.name.split("/")[1])
+
+    @property
+    def internal_address(self) -> str:
+        """The address for internal communication between units."""
+        if self.substrate == "vm":
+            return str(self.relation_data.get(UnitPeerRelationKeys.private_address.value))
+
+        if self.substrate == "k8s":
+            return f"{self.unit.name.split('/')[0]}-{self.unit_id}.{self.unit.name.split('/')[0]}-endpoints"
+
+        return ""
+
+    @property
+    def host(self) -> str:
+        """Return the hostname of a unit."""
+        if self.substrate == "vm":
+            return self.internal_address
+        return self.node_ip or self.internal_address
+
+    @cached_property
+    def node_ip(self) -> str:
+        """The IPv4/IPv6 address of the node the unit is on.
+
+        K8s-only.
+        """
+        return self.k8s.get_node_ip(self.pod_name)
+
+    @cached_property
+    def node_port(self) -> int:
+        """The port for this unit.
+
+        K8s-only.
+        """
+        return self.k8s.get_node_port(MongoPorts.MONGOS_PORT)
diff --git a/single_kernel_mongo/status.py b/single_kernel_mongo/status.py
new file mode 100644
index 00000000..2e0203ef
--- /dev/null
+++ b/single_kernel_mongo/status.py
@@ -0,0 +1,55 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""Placeholder for status handling."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from ops.framework import Object
+from ops.model import (
+    ActiveStatus,
+    BlockedStatus,
+    ErrorStatus,
+    MaintenanceStatus,
+    StatusBase,
+    WaitingStatus,
+)
+
+if TYPE_CHECKING:
+    from single_kernel_mongo.abstract_charm import AbstractMongoCharm
+
+
+class StatusManager(Object):
+    """Status Manager."""
+
+    def __init__(self, charm: AbstractMongoCharm):
+        super().__init__(parent=charm, key="status")
+        self.charm = charm
+
+    def set_and_share_status(self, status: StatusBase | None):
+        """Sets the unit status, defaulting to active when None is given."""
+        self.charm.unit.status = status or ActiveStatus()
+
+    def to_blocked(self, message: str):
+        """Sets status to blocked."""
+        self.set_and_share_status(BlockedStatus(message))
+
+    def to_waiting(self, message: str):
+        """Sets status to waiting."""
+        self.set_and_share_status(WaitingStatus(message))
+
+    def to_active(self, message: str | None):
+        """Sets status to active."""
+        if message is None:
+            self.set_and_share_status(ActiveStatus())
+            return
+        self.set_and_share_status(ActiveStatus(message))
+
+    def to_maintenance(self, message: str):
+        """Sets status to maintenance."""
+        self.set_and_share_status(MaintenanceStatus(message))
+
+    def to_error(self, message: str):
+        """Sets status to error."""
+        self.set_and_share_status(ErrorStatus(message))
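+
+# Typical use from manager code (sketch; ``charm`` is the AbstractMongoCharm
+# this manager was constructed with, as in the TLS manager above):
+#
+#     charm.status_manager.to_maintenance("Disabling TLS")
+#     ...  # do the work
+#     charm.status_manager.to_active(None)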
diff --git a/single_kernel_mongo/templates/__init__.py b/single_kernel_mongo/templates/__init__.py
new file mode 100644
index 00000000..6631d2ee
--- /dev/null
+++ b/single_kernel_mongo/templates/__init__.py
@@ -0,0 +1,4 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""This module exists only to enable template file discovery."""
diff --git a/single_kernel_mongo/templates/logrotate.j2 b/single_kernel_mongo/templates/logrotate.j2
new file mode 100644
index 00000000..a9a3dd23
--- /dev/null
+++ b/single_kernel_mongo/templates/logrotate.j2
@@ -0,0 +1,17 @@
+{{logs_directory}}/*.log
+{
+    rotate {{max_rotations}}
+    size {{max_log_size}}
+    missingok
+    notifempty
+    create 0600 {{mongo_user}} {{mongo_user}}
+    compress
+    delaycompress
+    nomail
+    nocopytruncate
+    sharedscripts
+    postrotate
+        PID=$(pgrep -f "mongod.*--logpath={{logs_directory}}/mongodb.log")
+        /bin/kill -SIGUSR1 $PID
+    endscript
+}
diff --git a/single_kernel_mongo/utils/__init__.py b/single_kernel_mongo/utils/__init__.py
new file mode 100644
index 00000000..401c9393
--- /dev/null
+++ b/single_kernel_mongo/utils/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""Utils and helpers for mongo charms."""
diff --git a/single_kernel_mongo/utils/event_helpers.py b/single_kernel_mongo/utils/event_helpers.py
new file mode 100644
index 00000000..b30dbd79
--- /dev/null
+++ b/single_kernel_mongo/utils/event_helpers.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""Event helpers to fail, defer or succeed events with consistent logging."""
+
+from logging import Logger
+
+from ops.charm import ActionEvent
+from ops.framework import EventBase
+
+
+def fail_action_with_error_log(
+    logger: Logger, event: ActionEvent, action: str, message: str
+) -> None:
+    """Fails an action with the provided error log."""
+    logger.error("%s failed: %s", action.capitalize(), message)
+    event.fail(message)
+
+
+def defer_event_with_info_log(logger: Logger, event: EventBase, action: str, message: str) -> None:
+    """Defers an event with the provided info log."""
+    logger.info("Deferring %s: %s", action, message)
+    event.defer()
+
+
+def success_action_with_info_log(
+    logger: Logger, event: ActionEvent, action: str, results: dict[str, str]
+) -> None:
+    """Succeeds an action with an info log."""
+    logger.info("%s completed successfully", action.capitalize())
+    event.set_results(results)
diff --git a/single_kernel_mongo/utils/helpers.py b/single_kernel_mongo/utils/helpers.py
new file mode 100644
index 00000000..bed7a22b
--- /dev/null
+++ b/single_kernel_mongo/utils/helpers.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Some helper functions that don't belong anywhere else."""
+
+import base64
+import re
+
+
+def parse_tls_file(raw_content: str) -> bytes:
+    """Parse TLS files from either plain text or base64 format."""
+    if re.match(r"(-+(BEGIN|END) [A-Z ]+-+)", raw_content):
+        return (
+            re.sub(
+                r"(-+(BEGIN|END) [A-Z ]+-+)",
+                "\\1",
+                raw_content,
+            )
+            .rstrip()
+            .encode("utf-8")
+        )
+    return base64.b64decode(raw_content)
+
+
+def hostname_from_hostport(host: str) -> str:
+    """Takes hostname:port and returns hostname."""
+    return host.split(":")[0]
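+
+# Quick illustration of the two accepted input formats (runnable standalone;
+# the PEM body is a dummy placeholder).
+if __name__ == "__main__":
+    pem = "-----BEGIN CERTIFICATE-----\nMIIB...\n-----END CERTIFICATE-----"
+    assert parse_tls_file(pem) == pem.encode("utf-8")
+    encoded = base64.b64encode(pem.encode("utf-8")).decode("utf-8")
+    assert parse_tls_file(encoded) == pem.encode("utf-8")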
+"""Code for interactions with MongoDB.""" + +from dataclasses import dataclass +from itertools import chain +from urllib.parse import quote_plus, urlencode + +from single_kernel_mongo.config.literals import MongoPorts +from single_kernel_mongo.core.exceptions import AmbiguousConfigError +from single_kernel_mongo.utils.mongodb_users import ( + REGULAR_ROLES, + DBPrivilege, + UserRole, +) + +ADMIN_AUTH_SOURCE = {"authSource": "admin"} + + +@dataclass +class MongoConfiguration: + """Class for Mongo configurations usable my mongos and mongodb. + + โ€” replset: name of replica set + โ€” database: database name. + โ€” username: username. + โ€” password: password. + โ€” hosts: full list of hosts to connect to, needed for the URI. + โ€” tls_external: indicator for use of internal TLS connection. + โ€” tls_internal: indicator for use of external TLS connection. + """ + + database: str + username: str + password: str + hosts: set[str] + roles: set[str] + tls_external: bool + tls_internal: bool + port: MongoPorts | None = None + replset: str | None = None + standalone: bool = False + + @property + def formatted_hosts(self) -> set[str]: + """The formatted list of hosts.""" + if self.port: + return {f"{host}:{self.port}" for host in self.hosts} + return self.hosts + + @property + def formatted_replset(self) -> dict: + """Formatted replicaSet parameter.""" + if self.replset: + return {"replicaSet": quote_plus(self.replset)} + return {} + + @property + def formatted_auth_source(self) -> dict: + """Formatted auth source.""" + if self.database != "admin": + return ADMIN_AUTH_SOURCE + return {} + + @property + def uri(self) -> str: + """Return URI concatenated from fields.""" + if self.port == MongoPorts.MONGOS_PORT and self.replset: + raise AmbiguousConfigError("Mongos cannot support replica set") + + if self.standalone and not self.port: + raise AmbiguousConfigError("Standalone connection needs a port") + + if self.standalone: + return ( + f"mongodb://{quote_plus(self.username)}:" + f"{quote_plus(self.password)}@" + f"localhost:{self.port}/?authSource=admin" + ) + + complete_hosts = ",".join(self.formatted_hosts) + replset = self.formatted_replset + auth_source = self.formatted_auth_source + + # Dict of all parameters. + parameters = replset | auth_source + + return ( + f"mongodb://{quote_plus(self.username)}:" + f"{quote_plus(self.password)}@" + f"{complete_hosts}/{quote_plus(self.database)}?" + f"{urlencode(parameters)}" + ) + + @property + def supported_roles(self) -> list[DBPrivilege]: + """The supported roles for this configuration.""" + default_role = UserRole( + [ + DBPrivilege(role="readWrite", db=self.database), + DBPrivilege(role="enableSharding", db=self.database), + ] + ) + all_roles = REGULAR_ROLES | {"default": default_role} + return list(chain.from_iterable(all_roles[role] for role in self.roles)) + + +EMPTY_CONFIGURATION = MongoConfiguration( + "", + "", + "", + set(), + set(), + False, + False, +) diff --git a/single_kernel_mongo/utils/mongo_connection.py b/single_kernel_mongo/utils/mongo_connection.py new file mode 100644 index 00000000..60149506 --- /dev/null +++ b/single_kernel_mongo/utils/mongo_connection.py @@ -0,0 +1,330 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+"""Definition of MongoDB Connections.""" + +import logging +import re +from typing import Any + +from bson import json_util +from pymongo import MongoClient +from pymongo.errors import OperationFailure, PyMongoError +from tenacity import ( + RetryError, + Retrying, + before_log, + retry, + stop_after_attempt, + stop_after_delay, + wait_fixed, +) + +from single_kernel_mongo.utils.helpers import hostname_from_hostport +from single_kernel_mongo.utils.mongo_config import MongoConfiguration +from single_kernel_mongo.utils.mongodb_users import DBPrivilege, SystemDBS + +logger = logging.getLogger(__name__) + + +class NotReadyError(PyMongoError): + """Raised when mongo is not ready.""" + + ... + + +class MongoConnection: + """In this class we create connection object to Mongo[s/db]. + + This class is meant for agnositc functions in mongos and mongodb. + + Real connection is created on the first call to Mongo[s/db]. + Delayed connectivity allows to firstly check database readiness + and reuse the same connection for an actual query later in the code. + + Connection is automatically closed when object destroyed. + Automatic close allows to have more clean code. + + Note that connection when used may lead to the following pymongo errors: ConfigurationError, + ConfigurationError, OperationFailure. It is suggested that the following pattern be adopted + when using MongoDBConnection: + + with MongoMongos(MongoConfig) as mongo: + try: + mongo. + except ConfigurationError, OperationFailure: + + """ + + def __init__(self, config: MongoConfiguration, uri: str | None = None, direct: bool = False): + """A MongoDB client interface. + + Args: + config: MongoDB Configuration object. + uri: allow using custom MongoDB URI, needed for replSet init. + direct: force a direct connection to a specific host, avoiding + reading replica set configuration and reconnection. + """ + self.config = config + + if uri is None: + uri = config.uri + + self.client: MongoClient = MongoClient( + uri, + directConnection=direct, + connect=False, + serverSelectionTimeoutMS=1000, + connectTimeoutMS=2000, + ) + + def __enter__(self): + """Return a reference to the new connection.""" + return self + + def __exit__(self, *args, **kwargs): + """Disconnect from MongoDB client.""" + self.client.close() + + @property + def is_ready(self) -> bool: + """Is the MongoDB server ready for services requests. + + Returns: + True if services is ready False otherwise. Retries over a period of 60 seconds times to + allow server time to start up. + """ + try: + for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(3)): + with attempt: + # The ping command is cheap and does not require auth. + self.client.admin.command("ping") + except RetryError: + return False + + return True + + @retry( + stop=stop_after_attempt(3), + wait=wait_fixed(5), + reraise=True, + before=before_log(logger, logging.DEBUG), + ) + def init_replset(self) -> None: + """Create replica set config the first time. + + Raises: + ConfigurationError, ConfigurationError, OperationFailure + """ + config = { + "_id": self.config.replset, + "members": [{"_id": i, "host": h} for i, h in enumerate(self.config.hosts)], + } + try: + self.client.admin.command("replSetInitiate", config) + except OperationFailure as e: + if e.code not in (13, 23): # Unauthorized, AlreadyInitialized + # Unauthorized error can be raised only if initial user were + # created the step after this. + # AlreadyInitialized error can be raised only if this step + # finished. 
+
+    @retry(
+        stop=stop_after_attempt(3),
+        wait=wait_fixed(5),
+        reraise=True,
+        before=before_log(logger, logging.DEBUG),
+    )
+    def init_replset(self) -> None:
+        """Create the replica set config the first time.
+
+        Raises:
+            ConfigurationError, OperationFailure
+        """
+        config = {
+            "_id": self.config.replset,
+            "members": [{"_id": i, "host": h} for i, h in enumerate(self.config.hosts)],
+        }
+        try:
+            self.client.admin.command("replSetInitiate", config)
+        except OperationFailure as e:
+            if e.code not in (13, 23):  # Unauthorized, AlreadyInitialized
+                # An Unauthorized error can only be raised if the initial user was
+                # created in the step after this one.
+                # An AlreadyInitialized error can only be raised if this step
+                # already finished.
+                logger.error("Cannot initialize replica set. error=%r", e)
+                raise e
+
+    def create_user(self, config: MongoConfiguration, roles: list[DBPrivilege] | None = None):
+        """Create a user.
+
+        Grants read and write privileges for the specified database.
+        """
+        self.client.admin.command(
+            "createUser",
+            value=config.username,
+            pwd=config.password,
+            roles=roles or config.supported_roles,
+            mechanisms=["SCRAM-SHA-256"],
+        )
+
+    def update_user(self, config: MongoConfiguration):
+        """Update the grants on a database."""
+        self.client.admin.command(
+            "updateUser",
+            value=config.username,
+            roles=config.supported_roles,
+        )
+
+    def set_user_password(self, username: str, password: str):
+        """Update the password."""
+        self.client.admin.command(
+            "updateUser",
+            value=username,
+            pwd=password,
+        )
+
+    def drop_user(self, username: str):
+        """Drop a user."""
+        self.client.admin.command("dropUser", username)
+
+    def create_role(self, role_name: str, privileges: dict, roles: list = []):
+        """Creates a new role.
+
+        Args:
+            role_name: name of the role to be added.
+            privileges: privileges to be associated with the role.
+            roles: list of roles from which this role inherits privileges.
+        """
+        try:
+            self.client.admin.command("createRole", role_name, privileges=[privileges], roles=roles)
+        except OperationFailure as e:
+            if e.code == 51002:
+                logger.info("Role already exists")
+                return
+            logger.error("Cannot add role. error=%r", e)
+            raise
+
+    def set_replicaset_election_priority(
+        self, priority: int, ignore_member: str | None = None
+    ) -> None:
+        """Set the election priority for the entire replica set."""
+        rs_config = self.client.admin.command("replSetGetConfig")
+        rs_config = rs_config["config"]
+        rs_config["version"] += 1
+
+        # Keep a deep copy of the original configuration before setting the
+        # priority (an alias would make the comparison below always true);
+        # reconfiguring the replica set can result in a primary re-election,
+        # which we would like to avoid when possible.
+        original_rs_config = deepcopy(rs_config)
+
+        for member in rs_config["members"]:
+            if member["host"] == ignore_member:
+                continue
+
+            member["priority"] = priority
+
+        if original_rs_config == rs_config:
+            return
+
+        logger.debug("rs_config: %r", rs_config)
+        self.client.admin.command("replSetReconfig", rs_config)
+
+    def get_replset_members(self) -> set[str]:
+        """Get the replica set members.
+
+        Returns:
+            A set of the replica set members as reported by mongod.
+
+        Raises:
+            ConfigurationError, OperationFailure
+        """
+        rs_status = self.client.admin.command("replSetGetStatus")
+        curr_members = [hostname_from_hostport(member["name"]) for member in rs_status["members"]]
+        return set(curr_members)
+
+    @retry(
+        stop=stop_after_attempt(20),
+        wait=wait_fixed(3),
+        reraise=True,
+        before=before_log(logger, logging.DEBUG),
+    )
+    def remove_replset_member(self, hostname: str) -> None:
+        """Remove a member from the replica set config inside MongoDB.
+
+        Raises:
+            ConfigurationError, OperationFailure, NotReadyError
+        """
+        rs_config = self.client.admin.command("replSetGetConfig")
+        rs_status = self.client.admin.command("replSetGetStatus")
+
+        # When we remove a member, to avoid issues when a majority of members is
+        # removed, we only remove the next member once MongoDB has forgotten the
+        # previously removed member.
+        if any(member.get("stateStr", "") == "REMOVED" for member in rs_status.get("members", [])):
+            # Removing from the replica set is a fast operation, so let the
+            # @retry decorator above try again before giving up.
+            raise NotReadyError
+
+        # To avoid downtime, we need to re-elect a new primary if the member being
+        # removed is the current primary.
+        if self.primary(rs_status) == hostname:
+            logger.debug("Stepping down from primary.")
+            self.client.admin.command("replSetStepDown", {"stepDownSecs": "60"})
+
+        rs_config["config"]["version"] += 1
+        rs_config["config"]["members"] = [
+            member
+            for member in rs_config["config"]["members"]
+            if hostname != hostname_from_hostport(member["host"])
+        ]
+        logger.debug("rs_config: %r", json_util.dumps(rs_config["config"]))
+        self.client.admin.command("replSetReconfig", rs_config["config"])
+
+    def add_replset_member(self, hostname: str) -> None:
+        """Adds a member to the replica set config inside MongoDB.
+
+        Raises:
+            ConfigurationError, OperationFailure, NotReadyError
+        """
+        rs_config = self.client.admin.command("replSetGetConfig")
+        rs_status = self.client.admin.command("replSetGetStatus")
+
+        # When we add a new member, MongoDB transfers data from an existing member
+        # to the new one, which reduces the performance of the cluster. To avoid a
+        # large performance degradation, before adding new members we check that
+        # all other members have finished their initial sync.
+        if self.is_any_sync(rs_status):
+            raise NotReadyError
+
+        # Avoid reusing IDs, according to the doc
+        # https://www.mongodb.com/docs/manual/reference/replica-configuration/
+        max_id = max([int(member["_id"]) for member in rs_config["config"]["members"]])
+
+        new_member = {"_id": max_id + 1, "host": hostname}
+
+        rs_config["config"]["version"] += 1
+        rs_config["config"]["members"].append(new_member)
+        logger.debug("rs_config: %r", rs_config["config"])
+        self.client.admin.command("replSetReconfig", rs_config["config"])
+
+    def get_databases(self) -> set[str]:
+        """Return the set of all non-default databases."""
+        databases: list[str] = self.client.list_database_names()
+        return {db for db in databases if db not in SystemDBS}
+
+    def drop_database(self, database: str):
+        """Drop a non-default database."""
+        if database in SystemDBS:
+            logger.info(f"Not dropping system DB {database}.")
+            return
+        self.client.drop_database(database)
+
+    def get_users(self) -> set[str]:
+        """Returns the names of all relation-managed users (``relation-<id>``)."""
+        users_info = self.client.admin.command("usersInfo")
+        return {
+            user_obj["user"]
+            for user_obj in users_info["users"]
+            if re.match(r"^relation-\d+$", user_obj["user"])
+        }
+
+    def primary(self, status: dict[str, Any] | None = None) -> str:
+        """Returns the primary replica host."""
+        status = status or self.client.admin.command("replSetGetStatus")
+
+        for member in status["members"]:
+            # check the replica's current state
+            if member["stateStr"] == "PRIMARY":
+                return hostname_from_hostport(member["name"])
+
+        raise Exception("No primary found.")
+
+    @staticmethod
+    def is_any_sync(rs_status: dict[str, Any]) -> bool:
+        """Returns True if any replica set members are syncing data.
+
+        Note that it is recommended to run only one sync at a time in the cluster,
+        to avoid a large performance degradation.
+
+        Args:
+            rs_status: current state of the replica set as reported by mongod.
+        """
+        return any(
+            member["stateStr"] in ["STARTUP", "STARTUP2", "ROLLBACK", "RECOVERING"]
+            for member in rs_status["members"]
+        )
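+
+# Constructing a connection is side-effect free (``connect=False``), so this
+# sketch runs without a server; actual commands require a reachable mongod.
+# All values below are placeholders.
+if __name__ == "__main__":
+    from single_kernel_mongo.config.literals import MongoPorts
+
+    demo_config = MongoConfiguration(
+        database="admin",
+        username="operator",
+        password="example-password",
+        hosts={"127.0.0.1"},
+        roles={"default"},
+        tls_external=False,
+        tls_internal=False,
+        port=MongoPorts.MONGODB_PORT,
+        replset="mongodb",
+    )
+    with MongoConnection(demo_config) as mongo:
+        print(mongo.client.topology_description)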
diff --git a/single_kernel_mongo/utils/mongodb_users.py b/single_kernel_mongo/utils/mongodb_users.py
new file mode 100644
index 00000000..bc19b09e
--- /dev/null
+++ b/single_kernel_mongo/utils/mongodb_users.py
@@ -0,0 +1,170 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""Definition of MongoDB users and their configuration."""
+
+from enum import Enum
+from typing import Any, NewType, TypedDict
+
+from pydantic import BaseModel, Field, computed_field
+
+from single_kernel_mongo.config.literals import LOCALHOST, InternalUsers
+
+
+class DBPrivilege(TypedDict, total=False):
+    """A single role grant on a database (and optionally a collection)."""
+
+    role: str
+    db: str
+    collection: str
+
+
+UserRole = NewType("UserRole", list[DBPrivilege])
+
+
+class SystemDBS(str, Enum):
+    """MongoDB system databases."""
+
+    ADMIN = "admin"
+    LOCAL = "local"
+    CONFIG = "config"
+
+
+class RoleNames(str, Enum):
+    """Charm-defined roles."""
+
+    ADMIN = "admin"
+    MONITOR = "monitor"
+    BACKUP = "backup"
+    DEFAULT = "default"
+    OPERATOR = "operator"
+
+
+OPERATOR_ROLE = UserRole(
+    [
+        DBPrivilege(role="userAdminAnyDatabase", db="admin"),
+        DBPrivilege(role="readWriteAnyDatabase", db="admin"),
+        DBPrivilege(role="clusterAdmin", db="admin"),
+    ]
+)
+
+REGULAR_ROLES = {
+    RoleNames.ADMIN: UserRole(
+        [
+            DBPrivilege(role="userAdminAnyDatabase", db="admin"),
+            DBPrivilege(role="readWriteAnyDatabase", db="admin"),
+            DBPrivilege(role="userAdmin", db="admin"),
+            DBPrivilege(role="enableSharding", db="admin"),
+        ]
+    ),
+    RoleNames.MONITOR: UserRole(
+        [
+            DBPrivilege(role="explainRole", db="admin"),
+            DBPrivilege(role="clusterMonitor", db="admin"),
+            DBPrivilege(role="read", db="local"),
+        ]
+    ),
+    RoleNames.BACKUP: UserRole(
+        [
+            DBPrivilege(db="admin", role="readWrite", collection=""),
+            DBPrivilege(db="admin", role="backup"),
+            DBPrivilege(db="admin", role="clusterMonitor"),
+            DBPrivilege(db="admin", role="restore"),
+            DBPrivilege(db="admin", role="pbmAnyAction"),
+        ]
+    ),
+}
+
+
+class MongoDBUser(BaseModel):
+    """Base model for MongoDB users."""
+
+    username: str = Field(default="")
+    database_name: str = Field(default="")
+    roles: set[str] = Field(default=set())
+    privileges: dict[str, Any] = Field(default={})
+    mongodb_role: str = Field(default="")
+    hosts: set[str] = Field(default=set())
+
+    @computed_field  # type: ignore[misc]
+    @property
+    def password_key_name(self) -> str:
+        """Returns the key name for the password of the user."""
+        return f"{self.username}-password"
+
+    # DEPRECATE: all the following methods exist for backward compatibility and
+    # will be deprecated soon.
+    def get_username(self) -> str:
+        """Returns the username of the user."""
+        return self.username
+
+    def get_password_key_name(self) -> str:
+        """Returns the key name for the password of the user."""
+        return self.password_key_name
+
+    def get_database_name(self) -> str:
+        """Returns the database of the user."""
+        return self.database_name
+
+    def get_roles(self) -> set[str]:
+        """Returns the roles of the user."""
+        return self.roles
+
+    def get_mongodb_role(self) -> str:
+        """Returns the MongoDB role of the user."""
+        return self.mongodb_role
+
+    def get_privileges(self) -> dict:
+        """Returns the privileges of the user."""
+        return self.privileges
+
+    def get_hosts(self) -> set[str]:
+        """Returns the hosts of the user."""
+        return self.hosts
+
+
+OperatorUser = MongoDBUser(
+    username=InternalUsers.OPERATOR,
+    database_name=SystemDBS.ADMIN,
+    roles={RoleNames.DEFAULT},
+)
+
+MonitorUser = MongoDBUser(
+    username=InternalUsers.MONITOR,
+    database_name=SystemDBS.ADMIN,
+    roles={RoleNames.MONITOR},
+    privileges={
+        "resource": {"db": "", "collection": ""},
+        "actions": [
+            "listIndexes",
+            "listCollections",
+            "dbStats",
+            "dbHash",
+            "collStats",
+            "find",
+        ],
+    },
+    mongodb_role="explainRole",
+    hosts={LOCALHOST},  # MongoDB Exporter can only connect to one replica.
+)
+
+BackupUser = MongoDBUser(
+    username=InternalUsers.BACKUP,
+    roles={RoleNames.BACKUP},
+    privileges={"resource": {"anyResource": True}, "actions": ["anyAction"]},
+    mongodb_role="pbmAnyAction",
+    hosts={LOCALHOST},  # pbm cannot make a direct connection if multiple hosts are used
+)
+
+
+CharmUsers = (OperatorUser.username, BackupUser.username, MonitorUser.username)
+
+
+def get_user_from_username(username: str) -> MongoDBUser:
+    """Returns the user object matching the given username."""
+    if username == OperatorUser.username:
+        return OperatorUser
+    if username == MonitorUser.username:
+        return MonitorUser
+    if username == BackupUser.username:
+        return BackupUser
+    raise ValueError(f"Unknown user: {username}")
diff --git a/single_kernel_mongo/workload/__init__.py b/single_kernel_mongo/workload/__init__.py
new file mode 100644
index 00000000..0cc9eb4f
--- /dev/null
+++ b/single_kernel_mongo/workload/__init__.py
@@ -0,0 +1,109 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+"""The different workloads and their code for mongo charms."""
+
+from single_kernel_mongo.config.literals import Substrates
+from single_kernel_mongo.core.k8s_worload import KubernetesWorkload
+from single_kernel_mongo.core.vm_workload import VMWorkload
+from single_kernel_mongo.workload.backup_workload import PBMWorkload
+from single_kernel_mongo.workload.log_rotate_workload import LogRotateWorkload
+from single_kernel_mongo.workload.mongodb_workload import MongoDBWorkload
+from single_kernel_mongo.workload.mongos_workload import MongosWorkload
+from single_kernel_mongo.workload.monitor_workload import MongoDBExporterWorkload
+
+
+class VMMongoDBWorkload(MongoDBWorkload, VMWorkload):
+    """VM MongoDB Workload implementation."""
+
+    ...
+
+
+class VMMongosWorkload(MongosWorkload, VMWorkload):
+    """VM Mongos Workload implementation."""
+
+    ...
+
+
+class VMPBMWorkload(PBMWorkload, VMWorkload):
+    """VM PBM Workload implementation."""
+
+    ...
+
+
+class VMLogRotateDBWorkload(LogRotateWorkload, VMWorkload):
+    """VM logrotate Workload implementation."""
+
+    ...
+
+
+class VMMongoDBExporterWorkload(MongoDBExporterWorkload, VMWorkload):
+    """VM mongodb exporter Workload implementation."""
+
+    ...
+
+
+class KubernetesMongoDBWorkload(MongoDBWorkload, KubernetesWorkload):
+    """Kubernetes MongoDB Workload implementation."""
+
+    ...
+
+
+class KubernetesMongosWorkload(MongosWorkload, KubernetesWorkload):
+    """Kubernetes Mongos Workload implementation."""
+
+    ...
+
+
+class KubernetesPBMWorkload(PBMWorkload, KubernetesWorkload):
+    """Kubernetes PBM Workload implementation."""
+
+    ...
+
+
+class KubernetesLogRotateDBWorkload(LogRotateWorkload, KubernetesWorkload):
+    """Kubernetes logrotate Workload implementation."""
+
+    ...
+
+
+class KubernetesMongoDBExporterWorkload(MongoDBExporterWorkload, KubernetesWorkload):
+    """Kubernetes mongodb exporter Workload implementation."""
+
+    ...
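+
+
+# These classes are plain mixin compositions: the first base supplies the
+# service definition (layer, binary, env var) and the second supplies the
+# substrate-specific process management. A sketch of how a charm might pick
+# one, using the factory helpers defined below:
+#
+#     workload_cls = get_mongodb_workload_for_substrate("k8s")
+#     workload = workload_cls(container)  # container is None on VM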
+
+
+def get_mongodb_workload_for_substrate(substrate: Substrates) -> type[MongoDBWorkload]:
+    """Return substrate appropriate workload."""
+    if substrate == "k8s":
+        return KubernetesMongoDBWorkload
+    return VMMongoDBWorkload
+
+
+def get_mongos_workload_for_substrate(substrate: Substrates) -> type[MongosWorkload]:
+    """Return substrate appropriate workload."""
+    if substrate == "k8s":
+        return KubernetesMongosWorkload
+    return VMMongosWorkload
+
+
+def get_pbm_workload_for_substrate(substrate: Substrates) -> type[PBMWorkload]:
+    """Return substrate appropriate workload."""
+    if substrate == "k8s":
+        return KubernetesPBMWorkload
+    return VMPBMWorkload
+
+
+def get_logrotate_workload_for_substrate(substrate: Substrates) -> type[LogRotateWorkload]:
+    """Return substrate appropriate workload."""
+    if substrate == "k8s":
+        return KubernetesLogRotateDBWorkload
+    return VMLogRotateDBWorkload
+
+
+def get_mongodb_exporter_workload_for_substrate(
+    substrate: Substrates,
+) -> type[MongoDBExporterWorkload]:
+    """Return substrate appropriate workload."""
+    if substrate == "k8s":
+        return KubernetesMongoDBExporterWorkload
+    return VMMongoDBExporterWorkload
diff --git a/single_kernel_mongo/workload/backup_workload.py b/single_kernel_mongo/workload/backup_workload.py
new file mode 100644
index 00000000..717c5894
--- /dev/null
+++ b/single_kernel_mongo/workload/backup_workload.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""PBM service workloads definition."""
+
+from pathlib import Path
+
+from ops import Container
+from ops.pebble import Layer
+from typing_extensions import override
+
+from single_kernel_mongo.config.roles import ROLES
+from single_kernel_mongo.core.workload import MongoPaths, WorkloadBase
+
+
+class PBMPaths(MongoPaths):
+    """PBM specific paths."""
+
+    @property
+    def pbm_config(self) -> Path:
+        """PBM configuration file path."""
+        return Path(f"{self.etc_path}/pbm/pbm_config.yaml")
+
+
+class PBMWorkload(WorkloadBase):
+    """PBM workload definition."""
+
+    service = "pbm-agent"
+    layer_name = "pbm-agent"
+    bin_cmd = "pbm"
+    env_var = "PBM_MONGODB_URI"
+    snap_param = "pbm-uri"
+    paths: PBMPaths
+
+    def __init__(self, container: Container | None) -> None:
+        super().__init__(container)
+        self.role = ROLES[self.substrate]
+        self.paths = PBMPaths(self.role)
+
+    @property
+    @override
+    def layer(self) -> Layer:
+        """Returns the Pebble configuration layer for PBM."""
+        environment = self.get_env().get(self.env_var) or self._env
+
+        return Layer(
+            {
+                "summary": "pbm layer",
+                "description": "Pebble config layer for pbm",
+                "services": {
+                    self.service: {
+                        "override": "replace",
+                        "summary": "pbm",
+                        "command": "/usr/bin/pbm-agent",
+                        "startup": "enabled",
+                        "user": self.users.user,
+                        "group": self.users.group,
+                        "environment": {self.env_var: environment},
+                    }
+                },
+            }
+        )
diff --git a/single_kernel_mongo/workload/log_rotate_workload.py b/single_kernel_mongo/workload/log_rotate_workload.py
new file mode 100644
index 00000000..75ab514b
--- /dev/null
+++ b/single_kernel_mongo/workload/log_rotate_workload.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+ +"""Logrotate workload definition.""" + +import jinja2 +from ops import Container +from ops.pebble import Layer +from typing_extensions import override + +from single_kernel_mongo.config.logrotate_config import LogRotateConfig +from single_kernel_mongo.config.roles import ROLES +from single_kernel_mongo.core.workload import MongoPaths, WorkloadBase + + +class LogRotateWorkload(WorkloadBase): + """MongoDB Workload definition.""" + + service = "logrotate" + layer_name = "log_rotate" + bin_cmd = "logrotate" + env_var = "" + snap_param = "" + + def __init__(self, container: Container | None) -> None: + super().__init__(container) + self.role = ROLES[self.substrate] + self.paths = MongoPaths(self.role) + + def build_template(self) -> None: + """Builds and renders the template.""" + data = LogRotateConfig.log_rotate_template.read_text() + template = jinja2.Template(data) + + rendered_template = template.render( + logs_directory=self.paths.logs_path, + mongo_user=self.users.user, + max_log_size=LogRotateConfig.max_log_size, + max_rotations=LogRotateConfig.max_rotations_to_keep, + ) + + self.write(path=LogRotateConfig.rendered_template, content=rendered_template) + self.exec(["chmod", "644", f"{LogRotateConfig.rendered_template}"]) + + @property + @override + def layer(self) -> Layer: + """Returns the Pebble configuration layer for MongoDB.""" + return Layer( + { + "summary": "Log rotate layer", + "description": "Pebble config layer for rotating mongodb logs", + "services": { + self.service: { + "summary": "log rotate", + # Pebble errors out if the command exits too fast (1s). + "command": f"sh -c 'logrotate {LogRotateConfig.rendered_template}; sleep 1'", + "startup": "enabled", + "override": "replace", + "backoff-delay": "1m0s", + "backoff-factor": 1, + "user": self.users.user, + "group": self.users.group, + } + }, + } + ) diff --git a/single_kernel_mongo/workload/mongodb_workload.py b/single_kernel_mongo/workload/mongodb_workload.py new file mode 100644 index 00000000..bc9fd2f1 --- /dev/null +++ b/single_kernel_mongo/workload/mongodb_workload.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""MongoDB and Mongos workloads definition.""" + +from ops import Container +from ops.pebble import Layer +from typing_extensions import override + +from single_kernel_mongo.config.roles import ROLES +from single_kernel_mongo.core.workload import MongoPaths, WorkloadBase + + +class MongoDBWorkload(WorkloadBase): + """MongoDB Workload definition.""" + + service = "mongod" + layer_name = "mongod" + bin_cmd = "mongosh" + env_var = "MONGOD_ARGS" + snap_param = "mongod-args" + + def __init__(self, container: Container | None) -> None: + super().__init__(container) + self.role = ROLES[self.substrate] + self.paths = MongoPaths(self.role) + + @property + @override + def layer(self) -> Layer: + """Returns the Pebble configuration layer for MongoDB.""" + environment = self.get_env().get(self.env_var) or self._env + return Layer( + { + "summary": "mongod layer", + "description": "Pebble config layer for replicated mongod", + "services": { + self.service: { + "override": "replace", + "summary": "mongod", + "command": "/usr/bin/mongod ${MONGOD_ARGS}", + "startup": "enabled", + "user": self.users.user, + "group": self.users.group, + "environment": {self.env_var: environment}, + } + }, + } + ) diff --git a/single_kernel_mongo/workload/mongos_workload.py b/single_kernel_mongo/workload/mongos_workload.py new file mode 100644 index 00000000..10e84520 --- /dev/null +++ b/single_kernel_mongo/workload/mongos_workload.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""MongoDB and Mongos workloads definition.""" + +from ops import Container +from ops.pebble import Layer +from typing_extensions import override + +from single_kernel_mongo.config.roles import ROLES +from single_kernel_mongo.core.workload import MongoPaths, WorkloadBase + + +class MongosWorkload(WorkloadBase): + """MongoDB Workload definition.""" + + service = "mongos" + layer_name = "mongos" + bin_cmd = "mongosh" + env_var = "MONGOS_ARGS" + snap_param = "mongos-args" + + def __init__(self, container: Container | None) -> None: + super().__init__(container) + self.role = ROLES[self.substrate] + self.paths = MongoPaths(self.role) + + @property + @override + def layer(self) -> Layer: + """Returns a Pebble configuration layer for Mongos.""" + environment = self.get_env().get(self.env_var) or self._env + + layer_config = { + "summary": "mongos layer", + "description": "Pebble config layer for mongos router", + "services": { + self.service: { + "override": "replace", + "summary": "mongos", + "command": "/usr/bin/mongos ${MONGOS_ARGS}", + "startup": "enabled", + "user": self.users.user, + "group": self.users.group, + "environment": {self.env_var: environment}, + } + }, + } + return Layer(layer_config) # type: ignore diff --git a/single_kernel_mongo/workload/monitor_workload.py b/single_kernel_mongo/workload/monitor_workload.py new file mode 100644 index 00000000..9f4785f7 --- /dev/null +++ b/single_kernel_mongo/workload/monitor_workload.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""MongoDB exporter workloads definition.""" + +from ops import Container +from ops.pebble import Layer +from typing_extensions import override + +from single_kernel_mongo.config.roles import ROLES +from single_kernel_mongo.core.workload import MongoPaths, WorkloadBase + + +class MongoDBExporterWorkload(WorkloadBase): + """MongoDB Workload definition.""" + + service = "mongodb-exporter" + layer_name = "mongodb_exporter" + bin_cmd = "mongosh" + env_var = "MONGODB_URI" + snap_param = "monitor-uri" + + def __init__(self, container: Container | None) -> None: + super().__init__(container) + self.role = ROLES[self.substrate] + self.paths = MongoPaths(self.role) + + @property + @override + def layer(self) -> Layer: + """Returns the Pebble configuration layer for MongoDB Exporter.""" + environment = self.get_env().get(self.env_var) or self._env + + return Layer( + { + "summary": "mongodb_exporter layer", + "description": "Pebble config layer for mongodb_exporter", + "services": { + self.layer_name: { + "override": "replace", + "summary": "mongodb_exporter", + "command": "mongodb_exporter --collector.diagnosticdata --compatible-mode", + "startup": "enabled", + "user": self.users.user, + "group": self.users.group, + "environment": {self.env_var: environment}, + } + }, + } + ) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 00000000..e3979c0f --- /dev/null +++ b/tests/integration/__init__.py @@ -0,0 +1,2 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 00000000..e3979c0f --- /dev/null +++ b/tests/unit/__init__.py @@ -0,0 +1,2 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py new file mode 100644 index 00000000..78809027 --- /dev/null +++ b/tests/unit/conftest.py @@ -0,0 +1,6 @@ +import pytest + + +@pytest.fixture(autouse=True) +def tenacity_wait(mocker): + mocker.patch("tenacity.nap.time") diff --git a/tests/unit/data/key.pem b/tests/unit/data/key.pem new file mode 100644 index 00000000..0d74ddf3 --- /dev/null +++ b/tests/unit/data/key.pem @@ -0,0 +1,39 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIG5AIBAAKCAYEAzM8/tJXiKuINRzFEqOUTO7v8j9IH4piuZYm5oo9djujVeFRj +ASHdJf9mWza23yMZO4a+9Fdp3+Rx073EaKpouer8V2p/sYOA7/R8egEOUSOCG2sS +sE+uPU9i9M7uS4CCKUlYqSxIjAfuv+W1tAnNE1t4tQbupT1RElBIOnV70ZPrg6oF +hRc8AQj0D558zIFMsbNYLMwZks11ZHSwdrrO/J3mqFjtScnyDxw72V4i8UYyOe5H +mmJUfBS2+MrzsYFNulsTZOvyRFOvnMBdxQarONrYfe5GYvlBGApD7R1PM9DyY/Hy +imhOeTh+tDVkBD0dCFDtjpEs04OItn3+yYp8z1HwFimTKqkFPGDeyYHdalaBjRxB +yXSnUDfv9dbGTOR7wbiXOAeJ4P1bqNTCeBvh1LsqI3KmOiZnAl05XnN0g+IotYkE +5cBUl1V4IVq3NroiJW79zMQXhajBlxfxx39tSMjvQDHcZpQCrwVep1DuOV25svOw +36+MoZVtBZwtERUvAgMBAAECggGAKfOJqQPKf94tuhdtaOsDNYkcApmS4+dCUaoU +TCfxkgQ/NZaTcPfuxIm7/ZMHgk0+LeM72fDNzv6MEkMcqBO/v9VSOtWlskatcnkx +xgvRUu1jCRq1iUfI+OX57wsz5GM+RhawAnb/pS86yDPVNDuNthlWcD4rQ817fbTU +FqVZ52PVKfL5jnlj4RJka46bIYianQuPVB26X0eWdC0yuB8rprLxPCp68FJdMpsl +5zBRF/q47wTPxd++TmwvzmZD5GgHlat4rG0TSQn1iWN25YXGiAGwZQnLy1Pg0DPh +uTSZtUfAU03fwh8/r7uFay0iWFKKKQ0Pjrrmvyo4uMW3zshsXSYkMJnIpLflwPyJ +LdzzW9y74ZCCwYyHM38XGBOoMH1L07AO8mYcp3ABREVs2Inh4dx0YXE2BsNJRkFe +QH0K5mW24nteJQe8JV6KHDj0BtAVDmTPs5guK0KWrgV6Tt4sbZv9AXxhSrIeDFxB +oPKlM3mxHbYxZ/zdAXev2SE2A4oBAoHBAPnHhimR5ja4+qSusBHm9OkjtKE5OPYy +VDY3XnOeYcVqWvFojUeRse61MRJcJ0k0WYU+L+QGnsvyynJA54JkHy7PalKdQ6BR +UC1KIw6obLerJtZRKG0XNIdCyBdACm9xonnkzkjStks6qc5oAWQZUCZOaiuxwN+B +S1kQLZx/sxUZ2vAfyNnSmkq8rMTQnCI3V0/EhawrPTujhLQQb98loIgzSDjjvJ9v +Oc+PvBHnKgu98Tts5PBJyqzdHsnLOn4z/wKBwQDR6QSwtx2iQLjfLqjzkZld5kY2 +hlo3/yRWSsYaLRkuS/BVR24cGzrrolPpgRpz2M/NFLZsZsAu6HgH2oZB9VVIR+m3 +YbjWTvbL6ugUcHQw/UkMmjJXfyaA1AARCNh3vMJgnvKwjylfNPNfowuRScUGBwYv +yjZdhMOac7X7Z8qFWZhe2JfhtY/xKSDle3R8evAWxkWfEt5+yuwEXHqAckyuHddJ +Vp1awl4brYLnaQnvZlT9CC++6mRQeR6eWfcPXtECgcEAxtOUcDK7VoQYwE5tyVJX +Ud4iQtQSC6g0cJPSk6ZXnZXznobjLz1Crgl9bmf8vcD2IaJG7PR4I1C45gnUP28D +g3eH4wtccZlIgUSqc1iOcgXDfIZN78iw/5qhPCC2Lc/+zZjcvAs/INMpqCwSL2QW +8OcqjG2KYl/BYrwGErV0EC/sn4gtkXabazKVNHX6UyJKVizRWyNwMrEPSWZsJu0E +xGZN7zsZQrB/DEIWKwof5gjkpbEg7EVRGBlMEMW9YghxAoHBAJE9a/NiIGz932z9 +egVd/wJLrNVZTN9l+JyEjGHcRooUHSzEidmEtdYIsYrlu4ngwJej9z02M0/Bphu5 +BiBpQiMLLxml0iDm7jjWRtKtpJepCXxgzOhhoCCPj6Q5vJ3My6nTERU9WWD44FSS +GhTjOCIdhhhXEBrRHA6odYiInkZ6YcVulFimKfyxeoOOUmE3Jxx6//07LxwICBpU +aqapb4kiOK4T8ji0OC1qdyLrOOaepbuWiILO564OwDLNCPdnoQKBwGQ2NajA/s65 +7qn21p7cLviIG1eDf/CUQey70gTrJ/FHZn6OIDLu+6HX2IxCiWVtCNHzIxb4/ibd +y3Js9VXK8YOTwnD4ty6Ksjnx8cTjoC4PuzTBZs09FTp+jbKqgb8q2e38S2axszYo +IxMWVevwe4bRVKf2lyCkeQptMIh8lfGgCd470fcF0gfHpVcA7AGZpZOLfNmxRAls +3lNIXLWBn78lklMfiSNpl4xo4cluZzbSerBiXCChohLG8EmmIGIWTg== +-----END RSA PRIVATE KEY----- diff --git a/tests/unit/helpers.py b/tests/unit/helpers.py new file mode 100644 index 00000000..85faf56a --- /dev/null +++ b/tests/unit/helpers.py @@ -0,0 +1,20 @@ +import factory + +from single_kernel_mongo.config.literals import LOCALHOST, MongoPorts +from single_kernel_mongo.utils.mongo_config import MongoConfiguration + + +class MongoConfigurationFactory(factory.Factory): + class Meta: # noqa + model = MongoConfiguration + + hosts = {LOCALHOST} + database = "abadcafe" + username = "operator" + password = "deadbeef" + roles: set[str] = set() + tls_external = False + tls_internal = False + port = 
MongoPorts.MONGODB_PORT + replset = "cafebabe" + standalone = False diff --git a/tests/unit/mongodb_test_charm/LICENSE b/tests/unit/mongodb_test_charm/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/tests/unit/mongodb_test_charm/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
diff --git a/tests/unit/mongodb_test_charm/actions.yaml b/tests/unit/mongodb_test_charm/actions.yaml
new file mode 100644
index 00000000..dc966c90
--- /dev/null
+++ b/tests/unit/mongodb_test_charm/actions.yaml
@@ -0,0 +1,41 @@
+get-password:
+  description:
+    Fetch the password of the provided internal user of the charm, used for internal charm operations.
+    It is for internal charm users only, and SHOULD NOT be used by applications.
+  params:
+    username:
+      type: string
+      description: The username. Defaults to 'operator'.
+        Possible values - operator, backup, monitor.
+
+set-password:
+  description: Change the password of the provided internal user of the charm.
+    It is for internal charm users only and SHOULD NOT be used by applications.
+  params:
+    username:
+      type: string
+      description: The username. Defaults to 'operator'.
+        Possible values - operator, backup, monitor.
+    password:
+      type: string
+      description: The password will be auto-generated if this option is not specified.
+
+create-backup:
+  description: Create a database backup.
+    S3 credentials are retrieved from a relation with the S3 integrator charm.
+
+list-backups:
+  description: List available backup_ids in the S3 bucket and path provided by the S3 integrator charm.
+
+restore:
+  description: Restore a database backup.
+    S3 credentials are retrieved from a relation with the S3 integrator charm.
+  params:
+    backup-id:
+      type: string
+      description: A backup-id to identify the backup to restore. Format of <%Y-%m-%dT%H:%M:%SZ>
+    remap-pattern:
+      type: string
+      description:
+        Optional, a pattern used to remap cluster component names when performing a restore.
+        Format of old_config_server_name=new_config_server_name,old_shard_name=new_shard_name
diff --git a/tests/unit/mongodb_test_charm/charmcraft.yaml b/tests/unit/mongodb_test_charm/charmcraft.yaml
new file mode 100644
index 00000000..f88877c0
--- /dev/null
+++ b/tests/unit/mongodb_test_charm/charmcraft.yaml
@@ -0,0 +1,32 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+type: charm
+bases:
+  - build-on:
+      - name: "ubuntu"
+        channel: "22.04"
+    run-on:
+      - name: "ubuntu"
+        channel: "22.04"
+parts:
+  charm:
+    charm-strict-dependencies: true
+    override-build: |
+      rustup default stable
+      craftctl default
+    build-snaps:
+      - rustup
+    build-packages:
+      - git
+      - build-essential
+      - libffi-dev
+      - libssl-dev
+      - pkg-config
+      - rustc
+      - cargo
+  version_data:
+    plugin: dump
+    source: .
+    prime:
+      - workload_version
diff --git a/tests/unit/mongodb_test_charm/config.yaml b/tests/unit/mongodb_test_charm/config.yaml
new file mode 100644
index 00000000..110ff403
--- /dev/null
+++ b/tests/unit/mongodb_test_charm/config.yaml
@@ -0,0 +1,16 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+options:
+  auto-delete:
+    type: boolean
+    description: |
+      When a relation is removed, auto-delete ensures that any relevant databases
+      associated with the relation are also removed.
+    default: false
+  role:
+    description: |
+      The role config option deploys the charmed-mongodb application as a shard,
+      a config-server, or a replica set.
+ type: string + default: replication diff --git a/tests/unit/mongodb_test_charm/metadata.yaml b/tests/unit/mongodb_test_charm/metadata.yaml new file mode 100644 index 00000000..fbb44045 --- /dev/null +++ b/tests/unit/mongodb_test_charm/metadata.yaml @@ -0,0 +1,48 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. +name: test-mongodb +display-name: Test MongoDB +description: | + Test charm for MongoDB single kernel. +docs: https://discourse.charmhub.io/t/charmed-mongodb-6-docs/12461 +source: https://github.com/canonical/mongodb-operator +issues: https://github.com/canonical/mongodb-operator/issues +website: + - https://ubuntu.com/data/mongodb + - https://charmhub.io/mongodb + - https://github.com/canonical/mongodb-operator + - https://chat.charmhub.io/charmhub/channels/data-platform +summary: A Test MongoDB operator charm +series: + - jammy + +storage: + mongodb: + type: filesystem + location: /var/snap/charmed-mongodb/common + +provides: + database: + interface: mongodb_client + cos-agent: + interface: cos_agent + config-server: + interface: shards + cluster: + interface: config-server + +peers: + database-peers: + interface: mongodb-peers + +requires: + s3-credentials: + interface: s3 + limit: 1 + certificates: + interface: tls-certificates + limit: 1 + sharding: + interface: shards + # shards can only relate to one config-server + limit: 1 diff --git a/tests/unit/mongodb_test_charm/requirements.txt b/tests/unit/mongodb_test_charm/requirements.txt new file mode 100644 index 00000000..48654835 --- /dev/null +++ b/tests/unit/mongodb_test_charm/requirements.txt @@ -0,0 +1,2 @@ +ops~=2.15.0 +git+https://github.com/Canonical/mongo-single-kernel-library.git@main#egg=mongo-single-kernel-library diff --git a/tests/unit/mongodb_test_charm/src/charm.py b/tests/unit/mongodb_test_charm/src/charm.py new file mode 100755 index 00000000..64971fe0 --- /dev/null +++ b/tests/unit/mongodb_test_charm/src/charm.py @@ -0,0 +1,21 @@ +#!/usr/bin/python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. +"""Test MongoDB charm.""" + +from ops.main import main + +from single_kernel_mongo.abstract_charm import AbstractMongoCharm +from single_kernel_mongo.config.relations import RelationNames +from single_kernel_mongo.core.structured_config import MongoDBCharmConfig + + +class MongoTestCharm(AbstractMongoCharm[MongoDBCharmConfig]): + config_type = MongoDBCharmConfig + substrate = "vm" + peer_rel_name = RelationNames.PEERS.value + name = "mongodb-test" + + +if __name__ == "__main__": + main(MongoTestCharm) diff --git a/tests/unit/mongodb_test_charm/workload_version b/tests/unit/mongodb_test_charm/workload_version new file mode 100644 index 00000000..b7ff1516 --- /dev/null +++ b/tests/unit/mongodb_test_charm/workload_version @@ -0,0 +1 @@ +6.0.6 diff --git a/tests/unit/test_basic.py b/tests/unit/test_basic.py new file mode 100644 index 00000000..1b7be369 --- /dev/null +++ b/tests/unit/test_basic.py @@ -0,0 +1,6 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + + +def test_basic(): + assert 1 == 1 diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py new file mode 100644 index 00000000..1ff8116c --- /dev/null +++ b/tests/unit/test_charm.py @@ -0,0 +1,90 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
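+#
+# These tests drive the charm through ops' testing Harness. A minimal sketch of
+# the pattern used throughout this file (all names are defined below):
+#
+#     harness = Harness(MongoTestCharm, meta=METADATA, actions=ACTIONS, config=CONFIG)
+#     harness.begin()
+#     harness.charm.on.install.emit()
+#     assert harness.charm.unit.status == BlockedStatus("couldn't install MongoDB")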
+
+from pathlib import Path
+
+import pytest
+import yaml
+from ops.model import BlockedStatus, WaitingStatus
+from ops.testing import Harness
+from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure
+
+from single_kernel_mongo.exceptions import WorkloadExecError
+
+from .mongodb_test_charm.src.charm import MongoTestCharm
+
+PYMONGO_EXCEPTIONS = [
+    ConnectionFailure("error message"),
+    ConfigurationError("error message"),
+    OperationFailure("error message"),
+]
+
+CONFIG = str(yaml.safe_load(Path("./tests/unit/mongodb_test_charm/config.yaml").read_text()))
+ACTIONS = str(yaml.safe_load(Path("./tests/unit/mongodb_test_charm/actions.yaml").read_text()))
+METADATA = str(yaml.safe_load(Path("./tests/unit/mongodb_test_charm/metadata.yaml").read_text()))
+
+
+def setup_secrets(harness: Harness):
+    harness.set_leader(True)
+    harness.charm.operator.on_leader_elected()
+    harness.set_leader(False)
+
+
+@pytest.fixture
+def mock_fs_interactions(mocker):
+    mocker.patch("single_kernel_mongo.core.vm_workload.VMWorkload.write")
+    mocker.patch("single_kernel_mongo.core.vm_workload.VMWorkload.copy_to_unit")
+    mocker.patch("single_kernel_mongo.core.vm_workload.VMWorkload.exec")
+    mocker.patch("pathlib.Path.mkdir")
+
+
+@pytest.fixture
+def harness() -> Harness:
+    harness = Harness(MongoTestCharm, meta=METADATA, actions=ACTIONS, config=CONFIG)
+    harness.add_relation("database-peers", "database-peers")
+    harness.begin()
+    with harness.hooks_disabled():
+        harness.add_storage(storage_name="mongodb", count=1, attach=True)
+    return harness
+
+
+def test_install_blocks_snap_install_failure(harness, mocker):
+    mocker.patch("single_kernel_mongo.core.vm_workload.VMWorkload.install", return_value=False)
+    harness.charm.on.install.emit()
+    assert harness.charm.unit.status == BlockedStatus("couldn't install MongoDB")
+
+
+def test_snap_start_failure_leads_to_blocked_status(harness, mocker, mock_fs_interactions):
+    open_ports_mock = mocker.patch(
+        "single_kernel_mongo.managers.mongodb_operator.MongoDBOperator.open_ports"
+    )
+    harness.set_leader(True)
+    harness.charm.on.start.emit()
+    open_ports_mock.assert_not_called()
+    assert harness.charm.unit.status == BlockedStatus("couldn't start MongoDB")
+
+
+def test_on_start_mongod_not_ready_defer(harness, mocker, mock_fs_interactions):
+    mocker.patch("single_kernel_mongo.core.vm_workload.VMWorkload.start", return_value=True)
+    mocker.patch(
+        "single_kernel_mongo.utils.mongo_connection.MongoConnection.is_ready",
+        new_callable=mocker.PropertyMock(return_value=False),
+    )
+    patched_mongo_initialise = mocker.patch(
+        "single_kernel_mongo.managers.mongo.MongoManager.initialise_replica_set"
+    )
+    harness.set_leader(True)
+    harness.charm.on.start.emit()
+    assert harness.charm.unit.status == WaitingStatus("waiting for MongoDB to start")
+    patched_mongo_initialise.assert_not_called()
+
+
+def test_start_unable_to_open_tcp_moves_to_blocked(harness, mocker, mock_fs_interactions):
+    mocker.patch("single_kernel_mongo.core.vm_workload.VMWorkload.start", return_value=True)
+    mocker.patch(
+        "single_kernel_mongo.managers.mongodb_operator.MongoDBOperator.open_ports",
+        side_effect=WorkloadExecError("open-port", 1, None, None),
+    )
+    harness.set_leader(True)
+    harness.charm.on.start.emit()
+    assert harness.charm.unit.status == BlockedStatus("failed to open TCP port for MongoDB")
diff --git a/tests/unit/test_config_manager.py b/tests/unit/test_config_manager.py
new file mode 100644
index 00000000..f1be1f70
--- /dev/null
+++ b/tests/unit/test_config_manager.py
@@
-0,0 +1,221 @@ +import pytest + +from single_kernel_mongo.config.literals import CharmRole +from single_kernel_mongo.config.mongo_paths import VM_PATH +from single_kernel_mongo.core.structured_config import ( + MongoDBCharmConfig, + MongoDBRoles, + MongosCharmConfig, +) +from single_kernel_mongo.managers.config import ( + MongoDBConfigManager, + MongosConfigManager, +) +from single_kernel_mongo.state.app_peer_state import AppPeerReplicaSet +from single_kernel_mongo.state.charm_state import CharmState +from single_kernel_mongo.state.cluster_state import ClusterState +from single_kernel_mongo.state.tls_state import TLSState +from single_kernel_mongo.workload import VMMongoDBWorkload, VMMongosWorkload + + +@pytest.mark.parametrize( + "role,expected_parameter", + ( + (MongoDBRoles.CONFIG_SERVER, ["--configsvr"]), + (MongoDBRoles.SHARD, ["--shardsvr"]), + (MongoDBRoles.REPLICATION, []), + ), +) +def test_mongodb_config_manager(mocker, role: MongoDBRoles, expected_parameter: list): + mock = mocker.patch( + "single_kernel_mongo.lib.charms.operator_libs_linux.v1.snap.Snap.set", + ) + + mock_state = mocker.MagicMock(CharmState) + mock_app_state = mocker.MagicMock(AppPeerReplicaSet) + mock_state.app_peer_data = mock_app_state + mock_state.tls = mocker.MagicMock(TLSState) + mock_state.charm_role = CharmRole.MONGODB + mock_state.app_peer_data.replica_set = "deadbeef" + mock_state.app_peer_data.role = role + mock_state.tls.internal_enabled = False + mock_state.tls.external_enabled = False + workload = VMMongoDBWorkload(None) + config = MongoDBCharmConfig() + manager = MongoDBConfigManager( + config, + mock_state, + workload, + ) + + port_parameter = manager.port_parameter + replset_option = manager.replset_option + role_parameter = manager.role_parameter + db_path_argument = manager.db_path_argument + binding_ips = manager.binding_ips + log_options = manager.log_options + audit_options = manager.audit_options + auth_parameter = manager.auth_parameter + tls_parameters = manager.tls_parameters + + all_params = manager.build_parameters() + + assert port_parameter == ["--port 27017"] + assert replset_option == ["--replSet=deadbeef"] + assert role_parameter == expected_parameter + assert db_path_argument == [f"--dbpath={VM_PATH['mongod']['DATA']}"] + assert binding_ips == ["--bind_ip_all"] + assert log_options == [ + "--setParameter processUmask=037", + "--logRotate reopen", + "--logappend", + f"--logpath={VM_PATH['mongod']['LOGS']}/mongodb.log", + ] + assert audit_options == [ + "--auditDestination=file", + "--auditFormat=JSON", + f"--auditPath={VM_PATH['mongod']['LOGS']}/audit.log", + ] + assert auth_parameter == [ + "--auth", + "--clusterAuthMode=keyFile", + f"--keyFile={VM_PATH['mongod']['CONF']}/keyFile", + ] + assert tls_parameters == [] + + assert all_params == [ + binding_ips, + port_parameter, + auth_parameter, + tls_parameters, + log_options, + audit_options, + replset_option, + role_parameter, + db_path_argument, + ] + manager.set_environment() + + expected = " ".join([item for params in all_params for item in params]) + mock.assert_called_once_with({"mongod-args": expected}) + + +def test_mongos_config_manager(mocker): + mock = mocker.patch( + "single_kernel_mongo.lib.charms.operator_libs_linux.v1.snap.Snap.set", + ) + mock_state = mocker.MagicMock(CharmState) + mock_state.app_peer_data = mocker.MagicMock(AppPeerReplicaSet) + mock_state.charm_role = CharmRole.MONGOS + mock_state.cluster = mocker.MagicMock(ClusterState) + mock_state.cluster.config_server_uri = "mongodb://config-server-url" + 
+    mock_state.tls = mocker.MagicMock(TLSState)
+    mock_state.app_peer_data.external_connectivity = False
+    mock_state.tls.internal_enabled = False
+    mock_state.tls.external_enabled = False
+    workload = VMMongosWorkload(None)
+    config = MongosCharmConfig()
+    manager = MongosConfigManager(
+        config,
+        workload,
+        mock_state,
+    )
+
+    port_parameter = manager.port_parameter
+    binding_ips = manager.binding_ips
+    log_options = manager.log_options
+    audit_options = manager.audit_options
+    auth_parameter = manager.auth_parameter
+    tls_parameters = manager.tls_parameters
+    config_server_db_parameter = manager.config_server_db_parameter
+
+    all_params = manager.build_parameters()
+
+    assert port_parameter == ["--port 27018"]
+    assert binding_ips == [
+        f"--bind-ip {VM_PATH['mongod']['VAR']}/mongodb-27018.sock",
+        "--filePermissions 0766",
+    ]
+    assert log_options == [
+        "--setParameter processUmask=037",
+        "--logRotate reopen",
+        "--logappend",
+        f"--logpath={VM_PATH['mongod']['LOGS']}/mongodb.log",
+    ]
+    assert audit_options == [
+        "--auditDestination=file",
+        "--auditFormat=JSON",
+        f"--auditPath={VM_PATH['mongod']['LOGS']}/audit.log",
+    ]
+    assert auth_parameter == [
+        "--auth",
+        "--clusterAuthMode=keyFile",
+        f"--keyFile={VM_PATH['mongod']['CONF']}/keyFile",
+    ]
+    assert tls_parameters == []
+    assert config_server_db_parameter == ["--configdb mongodb://config-server-url"]
+
+    assert all_params == [
+        binding_ips,
+        port_parameter,
+        auth_parameter,
+        tls_parameters,
+        log_options,
+        audit_options,
+        config_server_db_parameter,
+    ]
+    manager.set_environment()
+    expected_params = " ".join(item for param in all_params for item in param)
+    mock.assert_called_once_with({"mongos-args": expected_params})
+
+
+def test_mongodb_config_manager_tls_enabled(mocker):
+    mock_state = mocker.MagicMock(CharmState)
+    mock_app_state = mocker.MagicMock(AppPeerReplicaSet)
+    mock_state.app_peer_data = mock_app_state
+    mock_state.tls = mocker.MagicMock(TLSState)
+    mock_state.app_peer_data.replica_set = "deadbeef"
+    mock_state.app_peer_data.role = MongoDBRoles.REPLICATION
+    mock_state.tls.internal_enabled = True
+    mock_state.tls.external_enabled = True
+    workload = VMMongoDBWorkload(None)
+    config = MongoDBCharmConfig()
+    manager = MongoDBConfigManager(
+        config,
+        mock_state,
+        workload,
+    )
+
+    assert manager.auth_parameter == [
+        "--auth",
+        "--clusterAuthMode=x509",
+        "--tlsAllowInvalidCertificates",
+        f"--tlsClusterCAFile={VM_PATH['mongod']['CONF']}/internal-ca.crt",
+        f"--tlsClusterFile={VM_PATH['mongod']['CONF']}/internal-cert.pem",
+    ]
+    assert manager.tls_parameters == [
+        f"--tlsCAFile={VM_PATH['mongod']['CONF']}/external-ca.crt",
+        f"--tlsCertificateKeyFile={VM_PATH['mongod']['CONF']}/external-cert.pem",
+        "--tlsMode=preferTLS",
+        "--tlsDisabledProtocols=TLS1_0,TLS1_1",
+    ]
+
+
+def test_mongos_default_config_server(mocker):
+    mock_state = mocker.MagicMock(CharmState)
+    mock_state.app_peer_data = mocker.MagicMock(AppPeerReplicaSet)
+    mock_state.app_peer_data.replica_set = "deadbeef"
+    mock_state.cluster = mocker.MagicMock(ClusterState)
+    mock_state.cluster.config_server_uri = ""
+    mock_state.tls = mocker.MagicMock(TLSState)
+    mock_state.app_peer_data.external_connectivity = False
+    mock_state.tls.internal_enabled = False
+    mock_state.tls.external_enabled = False
+    workload = VMMongosWorkload(None)
+    config = MongosCharmConfig()
+    manager = MongosConfigManager(
+        config,
+        workload,
+        mock_state,
+    )
+    assert manager.config_server_db_parameter == ["--configdb deadbeef/127.0.0.1:27017"]
diff --git
a/tests/unit/test_helpers.py b/tests/unit/test_helpers.py new file mode 100644 index 00000000..bcbebdd6 --- /dev/null +++ b/tests/unit/test_helpers.py @@ -0,0 +1,20 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +import base64 + +from single_kernel_mongo.utils.helpers import hostname_from_hostport, parse_tls_file + + +def test_hostname_from_hostport(): + hostname = "127.0.0.1:27017" + assert hostname_from_hostport(hostname) == "127.0.0.1" + + +def test_parse_tls_file_raw(): + with open("tests/unit/data/key.pem") as fd: + certificate = "".join(fd.readlines()).rstrip() + certificate_b64 = base64.b64encode(certificate.encode("utf-8")).decode("utf-8") + decoded = parse_tls_file(certificate) + assert decoded == certificate.encode("utf-8") + assert decoded == parse_tls_file(certificate_b64) diff --git a/tests/unit/test_mongodb_config.py b/tests/unit/test_mongodb_config.py new file mode 100644 index 00000000..6c91a4fc --- /dev/null +++ b/tests/unit/test_mongodb_config.py @@ -0,0 +1,64 @@ +import pytest +from parameterized import parameterized + +from single_kernel_mongo.config.literals import LOCALHOST, MongoPorts +from single_kernel_mongo.core.exceptions import AmbiguousConfigError +from single_kernel_mongo.utils.mongodb_users import ( + REGULAR_ROLES, + RoleNames, +) + +from .helpers import MongoConfigurationFactory + + +def test_configuration_ok(): + config = MongoConfigurationFactory.build() + assert config.formatted_hosts == {"127.0.0.1:27017"} + assert config.formatted_replset == {"replicaSet": "cafebabe"} + assert config.formatted_auth_source == {"authSource": "admin"} + + assert config.uri == ( + "mongodb://operator:deadbeef@127.0.0.1:27017/abadcafe?replicaSet=cafebabe&authSource=admin" + ) + + assert config.supported_roles == [] + + +@parameterized.expand([[RoleNames.ADMIN], [RoleNames.BACKUP], [RoleNames.MONITOR]]) +def test_configuration_with_roles(role: RoleNames): + config = MongoConfigurationFactory.build(roles={"default", role.value}) + + roles = config.supported_roles + expected_roles = [ + {"role": "readWrite", "db": config.database}, + {"role": "enableSharding", "db": config.database}, + ] + expected_system_roles = REGULAR_ROLES[role] + assert all(role in roles for role in expected_roles) + assert all(role in roles for role in expected_system_roles) + + +def test_invalid_configuration_port_replset(): + config = MongoConfigurationFactory.build(port=MongoPorts.MONGOS_PORT, replset="cafebabe") + + with pytest.raises(AmbiguousConfigError): + config.uri + + +def test_invalid_configuration_port_standalone(): + config = MongoConfigurationFactory.build(port=None, standalone=True) + with pytest.raises(AmbiguousConfigError): + config.uri + + +def test_valid_formatted(): + config = MongoConfigurationFactory.build(database="admin", replset=None, port=None) + + assert config.formatted_replset == {} + assert config.formatted_auth_source == {} + assert config.formatted_hosts == {LOCALHOST} + + +def test_standalone(): + config = MongoConfigurationFactory.build(standalone=True) + assert config.uri == "mongodb://operator:deadbeef@localhost:27017/?authSource=admin" diff --git a/tests/unit/test_mongodb_connection.py b/tests/unit/test_mongodb_connection.py new file mode 100644 index 00000000..97a7efe9 --- /dev/null +++ b/tests/unit/test_mongodb_connection.py @@ -0,0 +1,20 @@ +import mongomock +import pymongo +import pytest + +from single_kernel_mongo.utils.mongo_connection import MongoConnection + +from .helpers import MongoConfigurationFactory + + +@pytest.fixture 
+@mongomock.patch(servers=(("servers.example.org", 27017),))
+def mongo_connection():
+    config = MongoConfigurationFactory.build()
+    with MongoConnection(config) as mongo:
+        mongo.client = pymongo.MongoClient("servers.example.org")
+        return mongo
+
+
+def test_is_ready(mongo_connection):
+    assert mongo_connection.is_ready
diff --git a/tests/unit/test_mongodb_paths.py b/tests/unit/test_mongodb_paths.py
new file mode 100644
index 00000000..1a9adae4
--- /dev/null
+++ b/tests/unit/test_mongodb_paths.py
@@ -0,0 +1,27 @@
+from pathlib import Path
+
+from parameterized import parameterized
+
+from single_kernel_mongo.config.roles import (
+    K8S_MONGO,
+    VM_MONGO,
+    Role,
+)
+from single_kernel_mongo.core.workload import MongoPaths
+
+
+@parameterized.expand([[K8S_MONGO], [VM_MONGO]])
+def test_mongo_paths(role: Role):
+    paths = MongoPaths(role)
+
+    assert paths.config_file.parent == Path(role.paths["CONF"])
+    assert paths.keyfile.parent == Path(role.paths["CONF"])
+    assert paths.log_file.parent == Path(role.paths["LOGS"])
+    assert paths.audit_file.parent == Path(role.paths["LOGS"])
+    assert paths.ext_pem_file.parent == Path(role.paths["CONF"])
+    assert paths.ext_ca_file.parent == Path(role.paths["CONF"])
+    assert paths.int_pem_file.parent == Path(role.paths["CONF"])
+    assert paths.int_ca_file.parent == Path(role.paths["CONF"])
+    assert paths.socket_path.parent == Path(role.paths["VAR"])
+
+    assert all(path.parent == Path(role.paths["CONF"]) for path in paths.tls_files)
diff --git a/tests/unit/test_mongodb_users.py b/tests/unit/test_mongodb_users.py
new file mode 100644
index 00000000..e2f86b88
--- /dev/null
+++ b/tests/unit/test_mongodb_users.py
@@ -0,0 +1,40 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import pytest
+from parameterized import parameterized
+
+from single_kernel_mongo.utils.mongodb_users import (
+    BackupUser,
+    MongoDBUser,
+    MonitorUser,
+    OperatorUser,
+    get_user_from_username,
+)
+
+RANDOM_USER = MongoDBUser(
+    username="deadbeef",
+    database_name="abadcafe",
+    roles={"default"},
+    privileges={"resource": {"anyResource": True}, "actions": ["anyAction"]},
+    mongodb_role="",
+    hosts={"127.0.0.1"},
+)
+
+
+@parameterized.expand([[BackupUser], [MonitorUser], [OperatorUser]])
+def test_users_username(user: MongoDBUser):
+    assert user.username == user.get_username()
+    assert user.database_name == user.get_database_name()
+    assert user.roles == user.get_roles()
+    assert user.mongodb_role == user.get_mongodb_role()
+    assert user.privileges == user.get_privileges()
+    assert user.hosts == user.get_hosts()
+    assert user.password_key_name == user.get_password_key_name()
+
+    assert get_user_from_username(user.username) == user
+
+
+def test_get_user_invalid_username():
+    with pytest.raises(ValueError):
+        get_user_from_username("invalid")
diff --git a/tests/unit/test_mongodb_workload.py b/tests/unit/test_mongodb_workload.py
new file mode 100644
index 00000000..2697f16c
--- /dev/null
+++ b/tests/unit/test_mongodb_workload.py
@@ -0,0 +1,292 @@
+import getpass
+from pathlib import Path
+
+import pytest
+from ops.pebble import Layer
+
+from single_kernel_mongo.config.literals import VmUser
+from single_kernel_mongo.config.roles import ROLES
+from single_kernel_mongo.core.workload import MongoPaths
+from single_kernel_mongo.exceptions import WorkloadExecError, WorkloadServiceError
+from single_kernel_mongo.lib.charms.operator_libs_linux.v1.snap import SnapError
+from single_kernel_mongo.workload import (
+    VMLogRotateDBWorkload,
+    VMMongoDBExporterWorkload,
+    VMMongoDBWorkload,
+    VMMongosWorkload,
+    VMPBMWorkload,
+)
+
+
+def test_mongodb_workload_init(monkeypatch):
+    workload = VMMongoDBWorkload(container=None)
+
+    def mock_snap(*arg, **kwargs):
+        return ""
+
+    monkeypatch.setattr(workload.mongod, "get", mock_snap)
+    assert workload.paths == MongoPaths(ROLES["vm"])
+    assert workload.env_var == "MONGOD_ARGS"
+    assert workload.role == ROLES["vm"]
+    assert workload.container_can_connect
+
+    assert workload.layer == Layer(
+        {
+            "summary": "mongod layer",
+            "description": "Pebble config layer for replicated mongod",
+            "services": {
+                "mongod": {
+                    "override": "replace",
+                    "summary": "mongod",
+                    "command": "/usr/bin/mongod ${MONGOD_ARGS}",
+                    "startup": "enabled",
+                    "user": VmUser.user,  # type: ignore
+                    "group": VmUser.group,  # type: ignore
+                    "environment": {"MONGOD_ARGS": ""},
+                }
+            },
+        }
+    )
+
+
+def test_mongos_workload_init(monkeypatch):
+    workload = VMMongosWorkload(container=None)
+
+    assert workload.paths == MongoPaths(ROLES["vm"])
+    assert workload.env_var == "MONGOS_ARGS"
+    assert workload.role == ROLES["vm"]
+
+    def mock_snap(*arg, **kwargs):
+        return ""
+
+    monkeypatch.setattr(workload.mongod, "get", mock_snap)
+
+    assert workload.layer == Layer(
+        {
+            "summary": "mongos layer",
+            "description": "Pebble config layer for mongos router",
+            "services": {
+                "mongos": {
+                    "override": "replace",
+                    "summary": "mongos",
+                    "command": "/usr/bin/mongos ${MONGOS_ARGS}",
+                    "startup": "enabled",
+                    "user": VmUser.user,  # type: ignore
+                    "group": VmUser.group,  # type: ignore
+                    "environment": {"MONGOS_ARGS": ""},
+                }
+            },
+        }
+    )
+
+
+def test_mongodb_exporter_workload_init(monkeypatch):
+    workload = VMMongoDBExporterWorkload(container=None)
+
+    def mock_snap(*arg, **kwargs):
+        return ""
+
+    monkeypatch.setattr(workload.mongod, "get", mock_snap)
+
+    assert workload.paths == MongoPaths(ROLES["vm"])
+    assert workload.env_var == "MONGODB_URI"
+    assert workload.role == ROLES["vm"]
+
+    assert workload.layer == Layer(
+        {
+            "summary": "mongodb_exporter layer",
+            "description": "Pebble config layer for mongodb_exporter",
+            "services": {
+                "mongodb_exporter": {
+                    "override": "replace",
+                    "summary": "mongodb_exporter",
+                    "command": "mongodb_exporter --collector.diagnosticdata --compatible-mode",
+                    "startup": "enabled",
+                    "user": VmUser.user,  # type: ignore
+                    "group": VmUser.group,  # type: ignore
+                    "environment": {"MONGODB_URI": ""},
+                }
+            },
+        }
+    )
+
+
+def test_pbm_workload_init(monkeypatch):
+    workload = VMPBMWorkload(container=None)
+
+    def mock_snap(*arg, **kwargs):
+        return ""
+
+    monkeypatch.setattr(workload.mongod, "get", mock_snap)
+
+    assert workload.paths == MongoPaths(ROLES["vm"])
+    assert workload.paths.pbm_config == Path(
+        "/var/snap/charmed-mongodb/current/etc/pbm/pbm_config.yaml"
+    )
+    assert workload.env_var == "PBM_MONGODB_URI"
+    assert workload.role == ROLES["vm"]
+
+    assert workload.layer == Layer(
+        {
+            "summary": "pbm layer",
+            "description": "Pebble config layer for pbm",
+            "services": {
+                "pbm-agent": {
+                    "override": "replace",
+                    "summary": "pbm",
+                    "command": "/usr/bin/pbm-agent",
+                    "startup": "enabled",
+                    "user": VmUser.user,  # type: ignore
+                    "group": VmUser.group,  # type: ignore
+                    "environment": {"PBM_MONGODB_URI": ""},
+                }
+            },
+        }
+    )
+
+
+def test_logrotate_workload_init():
+    workload = VMLogRotateDBWorkload(container=None)
+
+    assert workload.paths == MongoPaths(ROLES["vm"])
+    assert workload.env_var == ""
+    assert workload.role == ROLES["vm"]
+
+    assert workload.layer == Layer(
+        {
+            "summary": "Log rotate layer",
+            "description": "Pebble config layer for rotating mongodb logs",
+            "services": {
+                "logrotate": {
+                    "summary": "log rotate",
+                    "command": "sh -c 'logrotate /etc/logrotate.d/mongodb; sleep 1'",
+                    "startup": "enabled",
+                    "override": "replace",
+                    "backoff-delay": "1m0s",
+                    "backoff-factor": 1,
+                    "user": VmUser.user,  # type: ignore
+                    "group": VmUser.group,  # type: ignore
+                }
+            },
+        }
+    )
+
+
+def test_snap_install_failure(monkeypatch):
+    def mock_snap_ensure(*args, **kwargs):
+        raise SnapError
+
+    workload = VMMongoDBWorkload(container=None)
+
+    monkeypatch.setattr(workload.mongod, "ensure", mock_snap_ensure)
+
+    assert not workload.install()
+
+
+def test_install_success(monkeypatch):
+    def mock_snap(*args, **kwargs):
+        return
+
+    workload = VMMongoDBWorkload(container=None)
+
+    monkeypatch.setattr(workload.mongod, "ensure", mock_snap)
+    monkeypatch.setattr(workload.mongod, "hold", mock_snap)
+
+    assert workload.install()
+
+
+def test_read_file():
+    workload = VMMongoDBWorkload(container=None)
+    assert workload.read(Path("/nonexistent")) == []
+
+
+@pytest.mark.parametrize("command", ["start", "stop", "restart"])
+def test_command_success(monkeypatch, command):
+    def mock_snap(*args, **kwargs):
+        return
+
+    workload = VMMongoDBWorkload(container=None)
+    monkeypatch.setattr(workload.mongod, command, mock_snap)
+
+    assert getattr(workload, command)() is None
+
+
+@pytest.mark.parametrize("command", ["start", "stop", "restart"])
+def test_command_failure(monkeypatch, caplog, command):
+    def mock_snap(*args, **kwargs):
+        raise SnapError
+
+    workload = VMMongoDBWorkload(container=None)
+    monkeypatch.setattr(workload.mongod, command, mock_snap)
+
+    caplog.clear()
+    with pytest.raises(WorkloadServiceError):
+        getattr(workload, command)()
+    # Check that we logged the SnapError
+    assert any(
+        record.levelname == "ERROR" and record.exc_info[0] == SnapError for record in caplog.records
+    )
+
+
+@pytest.mark.parametrize(
+    "value,expected",
+    [
+        ({"mongod": {"active": True}}, True),
+        ({"mongod": {"active": False}}, False),
+        ({"mongod": {}}, False),
+        ({}, False),
+    ],
+)
+def test_active(mocker, value: dict, expected: bool):
+    mocker.patch(
+        "single_kernel_mongo.lib.charms.operator_libs_linux.v1.snap.Snap.services",
+        return_value=value,
+        new_callable=mocker.PropertyMock,
+    )
+    workload = VMMongoDBWorkload(container=None)
+    assert workload.active() == expected
+
+
+def test_exec():
+    workload = VMMongoDBWorkload(container=None)
+    user = getpass.getuser()
+    user_exec = workload.exec(["whoami"]).strip()
+    assert user == user_exec
+
+
+def test_exec_fail(mocker, caplog):
+    workload = VMMongoDBWorkload(container=None)
+    caplog.clear()
+    with pytest.raises(WorkloadExecError) as err:
+        workload.exec("false")
+
+    assert err.value.return_code == 1
+    assert err.value.cmd == "false"
+    assert any(
+        record.levelname == "ERROR" and record.msg == "cmd failed - cmd=false, stdout=, stderr="
+        for record in caplog.records
+    )
+
+
+def test_run_bin_command(mocker):
+    mock = mocker.patch("single_kernel_mongo.workload.VMMongoDBWorkload.exec")
+    workload = VMMongoDBWorkload(container=None)
+    workload.run_bin_command("fail", [])
+
+    mock.assert_called_once_with(command=["/snap/bin/charmed-mongodb.mongosh", "fail"], env={})
+
+
+def test_logrotate_build_template(monkeypatch, tmp_path):
+    tmp_file = tmp_path / "template.txt"
+
+    def mock_write(path, content):
+        tmp_file.write_text(content)
+
+    def mock_exec(*args):
+        return
+
+    workload = VMLogRotateDBWorkload(container=None)
+    monkeypatch.setattr(workload, "write", mock_write)
+    monkeypatch.setattr(workload, "exec", mock_exec)
+    workload.build_template()
+    assert "mongodb/*.log" in tmp_file.read_text()
diff --git a/tests/unit/test_structured_config.py b/tests/unit/test_structured_config.py
new file mode 100644
index 00000000..087db557
--- /dev/null
+++ b/tests/unit/test_structured_config.py
@@ -0,0 +1,30 @@
+import pytest
+from pydantic import ValidationError
+
+from single_kernel_mongo.core.structured_config import (
+    MongoDBCharmConfig,
+    MongosCharmConfig,
+)
+
+
+def test_invalid_mongodb_config():
+    with pytest.raises(ValidationError):
+        MongoDBCharmConfig.model_validate({"role": "wrong", "auto_delete": True})  # type: ignore
+
+
+def test_invalid_mongos_config():
+    with pytest.raises(ValidationError):
+        MongosCharmConfig.model_validate({"expose-external": "invalid"})  # type: ignore
+
+
+def test_valid_mongodb_config():
+    MongoDBCharmConfig.model_validate({"role": "replication", "auto-delete": True})
+    MongoDBCharmConfig.model_validate({"role": "replication", "auto-delete": False})
+    MongoDBCharmConfig.model_validate({"role": "shard", "auto-delete": False})
+    MongoDBCharmConfig.model_validate({"role": "mongos", "auto-delete": False})
+    MongoDBCharmConfig.model_validate({"role": "config-server", "auto-delete": False})
+
+    MongosCharmConfig.model_validate({"expose-external": "none", "auto-delete": False})
+    MongosCharmConfig.model_validate({"expose-external": "nodeport", "auto-delete": False})
+    MongosCharmConfig.model_validate({"expose-external": "none", "auto-delete": True})
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..db14f095
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,60 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+[tox]
+env_list = lint, unit
+
+[vars]
+src_path = {tox_root}/single_kernel_mongo
+tests_path = {tox_root}/tests
+all_path = {[vars]src_path} {[vars]tests_path}
+
+[testenv]
+set_env =
+    PY_COLORS = 1
+allowlist_externals =
+    poetry
+
+[testenv:format]
+description = Apply coding style standards to code
+commands_pre =
+    poetry install --only format
+commands =
+    poetry lock --no-update
+    poetry run ruff check --config pyproject.toml --fix {[vars]all_path}
+    poetry run ruff format --config pyproject.toml {[vars]all_path}
+
+[testenv:build]
+description = Builds the package
+commands_pre =
+    poetry install
+commands =
+    poetry lock --no-update
+    poetry build
+
+
+[testenv:lint]
+description = Check code against coding style standards
+allowlist_externals =
+    {[testenv]allowlist_externals}
+    find
+commands_pre =
+    poetry install --only lint
+commands =
+    poetry check --lock
+    poetry run codespell {[vars]all_path}
+    poetry run ruff check --config pyproject.toml {[vars]all_path}
+    poetry run ruff format --check --config pyproject.toml --diff {[vars]all_path}
+    find {[vars]all_path} -type f \( -name "*.sh" -o -name "*.bash" \) -exec poetry run shellcheck --color=always \{\} +
+
+[testenv:unit]
+description = Run unit tests
+set_env =
+    {[testenv]set_env}
+commands_pre =
+    poetry install --only main,charm-libs,unit
+commands =
+    poetry run coverage run --source={[vars]src_path} \
+        -m pytest -v --tb native -s {posargs} {[vars]tests_path}/unit
+    poetry run coverage report
+    poetry run coverage xml
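+
+# Typical local usage (assumes poetry and tox are installed, mirroring the CI
+# workflow above):
+#
+#   tox run -e format   # apply ruff fixes and formatting
+#   tox run -e lint     # codespell, ruff, and shellcheck
+#   tox run -e unit     # unit tests with coverage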